input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
raise IOError('{}: No such directory'.format(dirname))
# Write the file with a header
head = '{}\nWavelength [{}], Flux Density [{}]'.format(name, self.wave_units, self.flux_units)
if isinstance(header, str):
head += '\n{}'.format(header)
t_data = np.asarray(self.spectrum).T
np.savetxt(filepath, t_data, header=head)
def fit(self, spec, weights=None, wave_units=None, scale=True, resample=True, plot=False):
    """Determine the goodness of fit between this and another spectrum

    Parameters
    ----------
    spec: sedkit.spectrum.Spectrum, np.ndarray
        The spectrum object or [W, F] array to fit
    weights: np.ndarray (optional)
        Per-point weights for the fit statistic (defaults to ones)
    wave_units: astropy.units.quantity.Quantity
        The wavelength units of the input spectrum if
        it is a numpy array (defaults to Angstroms)
    scale: bool
        Scale spec when measuring the goodness of fit
    resample: bool
        Resample an array-like ``spec`` onto this spectrum's wavelengths
    plot: bool
        Plot this spectrum with the scaled fit overlaid

    Returns
    -------
    tuple
        The fit statistic, the flux normalization, and the wavelength
        unit conversion factor used for the fit
    """
    # In case the wavelength units are different
    xnorm = 1
    wav = self.wave
    if hasattr(spec, 'spectrum'):
        # Spectrum object: resample spec onto self wavelength
        spec2 = spec.resamp(self.spectrum[0])
        flx2 = spec2.flux
        # Use unit errors when the other spectrum carries no uncertainty
        err2 = np.ones_like(spec2.flux) if spec2.unc is None else spec2.unc
    elif isinstance(spec, (list, tuple, np.ndarray)):
        spec2 = copy.copy(spec)
        # Convert wave units (Angstroms assumed when not given)
        wave_units = wave_units or q.AA
        xnorm = q.Unit(wave_units).to(self.wave_units)
        spec2[0] = spec2[0] * xnorm
        # Resample spec onto self wavelength
        if resample:
            spec2 = u.spectres(self.wave, *spec2)
        wav = spec2[0]
        flx2 = spec2[1]
        # Third row, if present, is the uncertainty; otherwise unit errors
        err2 = np.ones_like(flx2) if len(spec2) == 2 else spec2[2]
    else:
        raise TypeError("Only an sedkit.spectrum.Spectrum or numpy.ndarray can be fit.")
    # Get the self data
    flx1 = self.flux
    err1 = np.ones_like(flx1) if self.unc is None else self.unc
    # Make default weights the bin widths, excluding gaps in spectra
    if weights is None:
        weights = np.ones_like(wav)
        # weights = np.gradient(wav)
        # weights[weights > np.std(weights)] = 1
    # Run the fitting and get the normalization
    gstat, ynorm = u.goodness(flx1, flx2, err1, err2, weights)
    # Run it again with the scaling removed
    if scale:
        gstat, _ = u.goodness(flx1, flx2 * ynorm, err1, err2 * ynorm, weights)
    if plot:
        # NOTE(review): this branch assumes ``spec`` is a Spectrum object;
        # an array-like input would fail on ``spec.wave`` — confirm usage
        fig = self.plot(best_fit=False)
        fig.line(spec.wave, spec.flux * ynorm, legend_label='Fit')
        show(fig)
    return gstat, ynorm, xnorm
# def fit_blackbody(self, init=8000, epsilon=0.0001, acc=1, maxiter=500, **kwargs):
# """
# Fit a blackbody spectrum to the spectrum
#
# Returns
# -------
# int
# The best fit blackbody temperature
# """
# # Determine optimal parameters for data
# wav, flx, err = self.spectrum
#
# @models.custom_model
# def blackbody(wavelength, temperature=2000):
# wavelength *= q.um
# temperature *= q.K
#
# bb = models.BlackBody(temperature=temperature)
# flux = (bb(wavelength) * q.sr / bb.bolometric_flux.value).to(u.FLAM, q.spectral_density(wavelength)) * 1E-8
#
# max_val = blackbody_lambda((ac.b_wien / temperature).to(q.um), temperature).value
# return blackbody_lambda(wavelength, temperature).value / max_val
#
# bb = blackbody(temperature=init)
# fit = fitting.LevMarLSQFitter()
# bb_fit = fit(bb, wav.to(q.AA).value/10000, flx.to(u.FLAM).value)
# teff = int(bb_fit.temperature.value) * q.K
#
# self.message('{} blackbody fit to {}'.format(teff, self.name))
#
# fig = figure()
# fig.line(wav, flx)
# fig.line(wav, blackbody_lambda(wav.to(q.AA).value, teff), color='red')
# show(fig)
#
# return teff
@property
def flux(self):
    """Return the stored flux array scaled by the current normalization constant."""
    return self.const * self._flux
@copy_raw
def flux_calibrate(self, distance, target_distance=10 * q.pc, flux_units=None):
    """Flux calibrate the spectrum from the given distance to the target distance

    Parameters
    ----------
    distance: astropy.unit.quantity.Quantity, sequence
        The current distance or (distance, uncertainty) of the spectrum
    target_distance: astropy.unit.quantity.Quantity
        The distance to flux calibrate the spectrum to
    flux_units: astropy.unit.quantity.Quantity
        The desired flux units of the output (defaults to the current units)

    Returns
    -------
    sedkit.spectrum.Spectrum
        The flux calibrated spectrum object
    """
    # Set target flux units
    if flux_units is None:
        flux_units = self.flux_units
    # Inverse square law: F_new = F * (d / d_target)**2
    # NOTE(review): ``distance[0]`` assumes a (distance, unc) sequence; a bare
    # Quantity as allowed by the docstring would fail here — confirm callers
    flux = (self.spectrum[1] * (distance[0] / target_distance)**2).to(flux_units)
    # Calculate the scaled uncertainty
    if self.unc is None:
        unc = None
    else:
        # Contribution from the flux uncertainty
        # NOTE(review): term1 scales by (d/d_target) rather than (d/d_target)**2
        # as the flux itself does — confirm this is the intended propagation
        term1 = (self.spectrum[2] * distance[0] / target_distance).to(flux_units)
        # Contribution from the distance uncertainty (d(F)/d(d) * unc_d)
        term2 = (2 * self.spectrum[1] * (distance[1] * distance[0] / target_distance**2)).to(flux_units)
        unc = np.sqrt(term1**2 + term2**2)
    return Spectrum(self.spectrum[0], flux, unc, name=self.name)
@property
def flux_units(self):
    """astropy.units.quantity.Quantity: The units of the flux density values."""
    return self._flux_units
@flux_units.setter
def flux_units(self, flux_units):
    """Set the flux density units, rescaling the flux and uncertainty in place

    Parameters
    ----------
    flux_units: astropy.units.quantity.Quantity
        The desired astropy units of the spectrum flux density

    Raises
    ------
    TypeError
        If ``flux_units`` is not a flux density unit
    """
    # Check the units
    if not u.equivalent(flux_units, u.FLAM):
        raise TypeError("flux_units must be in flux density units, e.g. 'erg/s/cm2/A'")
    # Compute the conversion factor once (previously evaluated twice)
    # and rescale the flux and unc arrays in place
    factor = self.flux_units.to(flux_units)
    self._flux = self._flux * factor
    if self.unc is not None:
        self._unc = self._unc * factor
    # Record the new flux_units and refresh derived unit state
    self._flux_units = flux_units
    self._set_units()
def integrate(self, units=q.erg / q.s / q.cm**2):
    """Calculate the area under the spectrum

    Parameters
    ----------
    units: astropy.units.quantity.Quantity
        The target units for the integral

    Returns
    -------
    sequence
        The integrated flux and uncertainty

    Raises
    ------
    TypeError
        If ``units`` is not an integrated flux unit
    """
    # Make sure the target units are flux units
    if not u.equivalent(units, q.erg / q.s / q.cm**2):
        raise TypeError("units must be in flux units, e.g. 'erg/s/cm2'")
    # Unit of the raw integral: flux density times wavelength
    m = self.flux_units * self.wave_units
    # Scrub the spectrum (presumably removes bad/NaN points — defined in sedkit utils)
    spec = u.scrub(self.data)
    # Trapezoidal integration of flux over wavelength
    val = (np.trapz(spec[1], x=spec[0]) * m).to(units)
    if self.unc is None:
        unc = None
    else:
        # Quadrature sum of per-bin (uncertainty * bin width) contributions
        unc = np.sqrt(np.nansum((spec[2] * np.gradient(spec[0]) * m)**2)).to(units)
    return val, unc
@copy_raw
def interpolate(self, wave):
    """Interpolate the spectrum to another wavelength array

    Parameters
    ----------
    wave: astropy.units.quantity.Quantity, sedkit.spectrum.Spectrum
        The wavelength array to interpolate to

    Returns
    -------
    sedkit.spectrum.Spectrum
        The interpolated spectrum object

    Raises
    ------
    ValueError
        If ``wave`` is not in length units
    """
    # Pull out wave if its a Spectrum object
    if hasattr(wave, 'spectrum'):
        wave = wave.spectrum[0]
    # Test units
    if not u.equivalent(wave, q.um):
        raise ValueError("New wavelength array must be in units of length.")
    # Get the data and make into same wavelength units
    w0 = self.wave * self.wave_units.to(wave.unit)
    f0 = self.spectrum[1]
    if len(self.spectrum) > 2:
        e0 = self.spectrum[2]
    else:
        # No uncertainties: substitute zeros (assumes f0 is a Quantity so the
        # result keeps a .value attribute — TODO confirm)
        e0 = np.zeros_like(f0)
    # Interpolate self to the new wavelengths, NaN outside the original range
    f1 = np.interp(wave.value, w0, f0.value, left=np.nan, right=np.nan) * self.flux_units
    e1 = np.interp(wave.value, w0, e0.value, left=np.nan, right=np.nan) * self.flux_units
    return Spectrum(wave, f1, e1, name=self.name)
def message(self, msg, pre='[sedkit]'):
    """Print the given message, but only when ``self.verbose`` is True.

    Parameters
    ----------
    msg: str
        The message to print
    pre: str
        Prefix printed before the message, or None for no prefix
    """
    if not self.verbose:
        return
    text = msg if pre is None else "{} {}".format(pre, msg)
    print(text)
def mcmc_fit(self, model_grid, params=None, walkers=5, steps=20, name=None, report=None):
    """
    Produce a marginalized distribution of best fit parameters from the specified model_grid

    Parameters
    ----------
    model_grid: sedkit.modelgrid.ModelGrid
        The model grid to use
    params: list (optional)
        The list of model grid parameters to fit (defaults to ['teff'])
    walkers: int
        The number of walkers to deploy
    steps: int
        The number of steps for each walker to take
    name: str
        Name for the fit (defaults to the model grid name)
    report: object (optional)
        If not None, plot the walker chains

    Raises
    ------
    ValueError
        If an entry of ``params`` is not a parameter of ``model_grid``
    """
    # Avoid a shared mutable default argument
    if params is None:
        params = ['teff']
    # Specify the parameter space to be walked
    for param in params:
        if param not in model_grid.parameters:
            raise ValueError("'{}' not a parameter in this model grid, {}".format(param, model_grid.parameters))
    # A name for the fit
    name = name or model_grid.name
    # Ensure modelgrid and spectrum are in the same wave_units
    model_grid.wave_units = self.wave_units
    # Set up the sampler object
    self.sampler = mc.SpecSampler(self, model_grid, params)
    # Run the mcmc method
    self.sampler.mcmc_go(nwalk_mult=walkers, nstep_mult=steps)
    # Save the chi-sq best fit
    self.best_fit[name + ' (chi2)'] = self.sampler.spectrum.best_fit['best']
    # Make plots
    if report is not None:
        self.sampler.plot_chains()
    # Generate best fit spectrum from the 50th quantile value
    best_fit_params = {k: v for k, v in zip(self.sampler.all_params, self.sampler.all_quantiles.T[1])}
    params_with_unc = self.sampler.get_error_and_unc()
    for param, quant in zip(self.sampler.all_params, params_with_unc):
        # Mean of the lower and upper quantile offsets as the uncertainty
        best_fit_params['{}_unc'.format(param)] = np.mean([quant[0], quant[2]])
    # Add missing parameters using the first grid value as a default
    for param in model_grid.parameters:
        if param not in best_fit_params:
            best_fit_params[param] = getattr(model_grid, '{}_vals'.format(param))[0]
    # Get best fit model and scale to spectrum
    model = model_grid.get_spectrum(**{param: best_fit_params[param] for param in model_grid.parameters})
    model = model.norm_to_spec(self)
    model.phot = model_grid.phot
    # Make dict for best fit model
    best_fit_params['label'] = model.name
    best_fit_params['filepath'] = None
    best_fit_params['spectrum'] = np.array(model.spectrum)
    best_fit_params['full_model'] = model
    best_fit_params['const'] = 1.
    best_fit_params['fit_to'] = 'phot' if model_grid.phot else 'spec'
    self.best_fit[name] = best_fit_params
@copy_raw
def norm_to_mags(self, photometry, force=False, exclude=[], include=[]):
"""
Normalize the spectrum to the given bandpasses
Parameters
----------
photometry: astropy.table.QTable
A table of the photometry
force: bool
Force the normalization even if bandpass is not completely covered by spectrum
exclude: sequence (optional)
A list of bands to exclude from the normalization
include: sequence (optional)
A list of bands to include in the normalization
Returns
-------
sedkit.spectrum.Spectrum
The normalized spectrum object
"""
# Default norm
norm = 1
# Compile list of photometry to include
keep = []
for band in photometry['band']:
# Keep only explicitly included bands...
if include:
if band in include:
| |
<reponame>jordiyeh/safrs
#Failed to get col type for columns_priv.Column_priv
#Failed to get col type for event.sql_mode
#Failed to get col type for general_log.user_host
#Failed to get col type for proc.sql_mode
#Failed to get col type for procs_priv.Proc_priv
#Failed to get col type for slow_log.user_host
#Failed to get col type for tables_priv.Table_priv
#Failed to get col type for tables_priv.Column_priv
# coding: utf-8
from sqlalchemy import BIGINT, CHAR, Column, DateTime, Enum, Float, INTEGER, LargeBinary, SMALLINT, String, TEXT, TIME, TIMESTAMP, Table, Text, text
from sqlalchemy.dialects.mysql.enumerated import ENUM, SET
from sqlalchemy.dialects.mysql.types import LONGBLOB, MEDIUMBLOB, MEDIUMTEXT, TINYINT
from sqlalchemy.ext.declarative import declarative_base
########################################################################################################################
# Manually Added for safrs, TODO: improve this crap
#
# Map the generated sqlalchemy types onto flask-sqlalchemy (db) equivalents.
# The MySQL dialect types in this file carry display-width arguments
# (e.g. BIGINT(20)) that the generic types do not accept, so each wrapper
# swallows the argument and returns the argument-free generic type.
Base = db.Model
metadata = Base.metadata


def BIGINT(_):
    # FIX: previously returned db.SMALLINT, which would truncate 64-bit columns
    return db.BIGINT


def SMALLINT(_):
    return db.SMALLINT


def INTEGER(_):
    return db.INTEGER


def TIME(**kwargs):
    return db.TIME


TIMESTAMP = db.TIMESTAMP
NullType = db.String
########################################################################################################################
class ColumnsPriv(SAFRSBase, Base):
    """ORM mapping of the MySQL ``columns_priv`` grant table (per-column privileges)."""
    __tablename__ = 'columns_priv'
    Host = Column(CHAR(60, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
    Db = Column(CHAR(64, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
    User = Column(CHAR(32, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
    Table_name = Column(CHAR(64, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
    Column_name = Column(CHAR(64, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
    Timestamp = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
    Column_priv = Column(SET, nullable=False, server_default=text("''"))
class Db(SAFRSBase, Base):
    """ORM mapping of the MySQL ``db`` grant table (per-database privileges)."""
    __tablename__ = 'db'
    Host = Column(CHAR(60, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
    Db = Column(CHAR(64, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
    User = Column(CHAR(32, 'utf8_bin'), primary_key=True, nullable=False, index=True, server_default=text("''"))
    Select_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Insert_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Update_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Delete_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Create_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Drop_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Grant_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    References_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Index_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Alter_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Create_tmp_table_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Lock_tables_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Create_view_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Show_view_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Create_routine_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Alter_routine_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Execute_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Event_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
    Trigger_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
class EngineCost(SAFRSBase, Base):
    """ORM mapping of the MySQL ``engine_cost`` table (optimizer cost constants per engine)."""
    __tablename__ = 'engine_cost'
    engine_name = Column(String(64), primary_key=True, nullable=False)
    device_type = Column(INTEGER(11), primary_key=True, nullable=False)
    cost_name = Column(String(64), primary_key=True, nullable=False)
    cost_value = Column(Float)
    last_update = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
    comment = Column(String(1024))
class Event(SAFRSBase, Base):
    """ORM mapping of the MySQL ``event`` table (scheduled server events)."""
    __tablename__ = 'event'
    db = Column(CHAR(64), primary_key=True, nullable=False, server_default=text("''"))
    name = Column(CHAR(64), primary_key=True, nullable=False, server_default=text("''"))
    body = Column(LONGBLOB, nullable=False)
    definer = Column(CHAR(93), nullable=False, server_default=text("''"))
    execute_at = Column(DateTime)
    interval_value = Column(INTEGER(11))
    interval_field = Column(Enum('YEAR', 'QUARTER', 'MONTH', 'DAY', 'HOUR', 'MINUTE', 'WEEK', 'SECOND', 'MICROSECOND', 'YEAR_MONTH', 'DAY_HOUR', 'DAY_MINUTE', 'DAY_SECOND', 'HOUR_MINUTE', 'HOUR_SECOND', 'MINUTE_SECOND', 'DAY_MICROSECOND', 'HOUR_MICROSECOND', 'MINUTE_MICROSECOND', 'SECOND_MICROSECOND'))
    created = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
    modified = Column(TIMESTAMP, nullable=False, server_default=text("'0000-00-00 00:00:00'"))
    last_executed = Column(DateTime)
    starts = Column(DateTime)
    ends = Column(DateTime)
    status = Column(Enum('ENABLED', 'DISABLED', 'SLAVESIDE_DISABLED'), nullable=False, server_default=text("'ENABLED'"))
    on_completion = Column(Enum('DROP', 'PRESERVE'), nullable=False, server_default=text("'DROP'"))
    sql_mode = Column(SET, nullable=False, server_default=text("''"))
    comment = Column(CHAR(64), nullable=False, server_default=text("''"))
    originator = Column(INTEGER(10), nullable=False)
    time_zone = Column(CHAR(64), nullable=False, server_default=text("'SYSTEM'"))
    character_set_client = Column(CHAR(32))
    collation_connection = Column(CHAR(32))
    db_collation = Column(CHAR(32))
    body_utf8 = Column(LONGBLOB)
class Func(SAFRSBase, Base):
    """ORM mapping of the MySQL ``func`` table (loadable user-defined functions)."""
    __tablename__ = 'func'
    name = Column(CHAR(64, 'utf8_bin'), primary_key=True, server_default=text("''"))
    ret = Column(TINYINT(1), nullable=False, server_default=text("'0'"))
    dl = Column(CHAR(128, 'utf8_bin'), nullable=False, server_default=text("''"))
    type = Column(ENUM('function', 'aggregate'), nullable=False)
# Reflection of the MySQL ``general_log`` system table. It has no primary key,
# so it is declared as a plain Table rather than a mapped class.
t_general_log = Table(
    'general_log', metadata,
    #Column('event_time', TIMESTAMP(fsp=6), nullable=False, server_default=text("CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)")),
    #
    #MANUAL EDIT:
    # fsp (fractional-seconds) argument dropped because the TIMESTAMP shim
    # defined above does not accept it
    Column('event_time', TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)")),
    Column('user_host', MEDIUMTEXT, nullable=False),
    Column('thread_id', BIGINT(21), nullable=False),
    Column('server_id', INTEGER(10), nullable=False),
    Column('command_type', String(64), nullable=False),
    Column('argument', MEDIUMBLOB, nullable=False)
)
class GtidExecuted(SAFRSBase, Base):
    """ORM mapping of the MySQL ``gtid_executed`` table (executed GTID interval ranges)."""
    __tablename__ = 'gtid_executed'
    source_uuid = Column(CHAR(36), primary_key=True, nullable=False)
    interval_start = Column(BIGINT(20), primary_key=True, nullable=False)
    interval_end = Column(BIGINT(20), nullable=False)
class HelpCategory(SAFRSBase, Base):
    """ORM mapping of the MySQL ``help_category`` table (server-side help categories)."""
    __tablename__ = 'help_category'
    help_category_id = Column(SMALLINT(5), primary_key=True)
    name = Column(CHAR(64), nullable=False, unique=True)
    parent_category_id = Column(SMALLINT(5))
    url = Column(Text, nullable=False)
class HelpKeyword(SAFRSBase, Base):
    """ORM mapping of the MySQL ``help_keyword`` table (server-side help keywords)."""
    __tablename__ = 'help_keyword'
    help_keyword_id = Column(INTEGER(10), primary_key=True)
    name = Column(CHAR(64), nullable=False, unique=True)
class HelpRelation(SAFRSBase, Base):
    """ORM mapping of the MySQL ``help_relation`` table (topic-to-keyword join table)."""
    __tablename__ = 'help_relation'
    help_topic_id = Column(INTEGER(10), primary_key=True, nullable=False)
    help_keyword_id = Column(INTEGER(10), primary_key=True, nullable=False)
class HelpTopic(SAFRSBase, Base):
    """ORM mapping of the MySQL ``help_topic`` table (server-side help topic text)."""
    __tablename__ = 'help_topic'
    help_topic_id = Column(INTEGER(10), primary_key=True)
    name = Column(CHAR(64), nullable=False, unique=True)
    help_category_id = Column(SMALLINT(5), nullable=False)
    description = Column(Text, nullable=False)
    example = Column(Text, nullable=False)
    url = Column(Text, nullable=False)
class InnodbIndexStat(SAFRSBase, Base):
    """ORM mapping of the MySQL ``innodb_index_stats`` table (persistent InnoDB index statistics)."""
    __tablename__ = 'innodb_index_stats'
    database_name = Column(String(64, 'utf8_bin'), primary_key=True, nullable=False)
    table_name = Column(String(64, 'utf8_bin'), primary_key=True, nullable=False)
    index_name = Column(String(64, 'utf8_bin'), primary_key=True, nullable=False)
    last_update = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
    stat_name = Column(String(64, 'utf8_bin'), primary_key=True, nullable=False)
    stat_value = Column(BIGINT(20), nullable=False)
    sample_size = Column(BIGINT(20))
    stat_description = Column(String(1024, 'utf8_bin'), nullable=False)
class InnodbTableStat(SAFRSBase, Base):
    """ORM mapping of the MySQL ``innodb_table_stats`` table (persistent InnoDB table statistics)."""
    __tablename__ = 'innodb_table_stats'
    database_name = Column(String(64, 'utf8_bin'), primary_key=True, nullable=False)
    table_name = Column(String(64, 'utf8_bin'), primary_key=True, nullable=False)
    last_update = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
    n_rows = Column(BIGINT(20), nullable=False)
    clustered_index_size = Column(BIGINT(20), nullable=False)
    sum_of_other_index_sizes = Column(BIGINT(20), nullable=False)
class NdbBinlogIndex(SAFRSBase, Base):
    """ORM mapping of the MySQL ``ndb_binlog_index`` table (NDB Cluster binlog epoch index)."""
    __tablename__ = 'ndb_binlog_index'
    Position = Column(BIGINT(20), nullable=False)
    File = Column(String(255), nullable=False)
    epoch = Column(BIGINT(20), primary_key=True, nullable=False)
    inserts = Column(INTEGER(10), nullable=False)
    updates = Column(INTEGER(10), nullable=False)
    deletes = Column(INTEGER(10), nullable=False)
    schemaops = Column(INTEGER(10), nullable=False)
    orig_server_id = Column(INTEGER(10), primary_key=True, nullable=False)
    orig_epoch = Column(BIGINT(20), primary_key=True, nullable=False)
    gci = Column(INTEGER(10), nullable=False)
    next_position = Column(BIGINT(20), nullable=False)
    next_file = Column(String(255), nullable=False)
class Plugin(SAFRSBase, Base):
    """ORM mapping of the MySQL ``plugin`` table (installed server plugins)."""
    __tablename__ = 'plugin'
    name = Column(String(64), primary_key=True, server_default=text("''"))
    dl = Column(String(128), nullable=False, server_default=text("''"))
class Proc(SAFRSBase, Base):
    """ORM mapping of the MySQL ``proc`` table (stored procedures and functions)."""
    __tablename__ = 'proc'
    db = Column(CHAR(64), primary_key=True, nullable=False, server_default=text("''"))
    name = Column(CHAR(64), primary_key=True, nullable=False, server_default=text("''"))
    type = Column(Enum('FUNCTION', 'PROCEDURE'), primary_key=True, nullable=False)
    specific_name = Column(CHAR(64), nullable=False, server_default=text("''"))
    language = Column(Enum('SQL'), nullable=False, server_default=text("'SQL'"))
    sql_data_access = Column(Enum('CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA'), nullable=False, server_default=text("'CONTAINS_SQL'"))
    is_deterministic = Column(Enum('YES', 'NO'), nullable=False, server_default=text("'NO'"))
    security_type = Column(Enum('INVOKER', 'DEFINER'), nullable=False, server_default=text("'DEFINER'"))
    param_list = Column(LargeBinary, nullable=False)
    returns = Column(LONGBLOB, nullable=False)
    body = Column(LONGBLOB, nullable=False)
    definer = Column(CHAR(93), nullable=False, server_default=text("''"))
    created = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
    modified = Column(TIMESTAMP, nullable=False, server_default=text("'0000-00-00 00:00:00'"))
    sql_mode = Column(SET, nullable=False, server_default=text("''"))
    comment = Column(TEXT, nullable=False)
    character_set_client = Column(CHAR(32))
    collation_connection = Column(CHAR(32))
    db_collation = Column(CHAR(32))
    body_utf8 = Column(LONGBLOB)
class ProcsPriv(SAFRSBase, Base):
    """ORM mapping of the MySQL ``procs_priv`` grant table (stored-routine privileges)."""
    __tablename__ = 'procs_priv'
    Host = Column(CHAR(60, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
    Db = Column(CHAR(64, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
    User = Column(CHAR(32, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
    Routine_name = Column(CHAR(64), primary_key=True, nullable=False, server_default=text("''"))
    Routine_type = Column(ENUM('FUNCTION', 'PROCEDURE'), primary_key=True, nullable=False)
    Grantor = Column(CHAR(93, 'utf8_bin'), nullable=False, index=True, server_default=text("''"))
    Proc_priv = Column(SET, nullable=False, server_default=text("''"))
    Timestamp = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class ProxiesPriv(SAFRSBase, Base):
    """ORM mapping of the MySQL ``proxies_priv`` grant table (proxy-user privileges)."""
    __tablename__ = 'proxies_priv'
    Host = Column(CHAR(60, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
    User = Column(CHAR(32, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
    Proxied_host = Column(CHAR(60, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
    Proxied_user = Column(CHAR(32, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
    With_grant = Column(TINYINT(1), nullable=False, server_default=text("'0'"))
    Grantor = Column(CHAR(93, 'utf8_bin'), nullable=False, index=True, server_default=text("''"))
    Timestamp = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class ServerCost(SAFRSBase, Base):
    """ORM mapping of the MySQL ``server_cost`` table (optimizer server-level cost constants)."""
    __tablename__ = 'server_cost'
    cost_name = Column(String(64), primary_key=True)
    cost_value = Column(Float)
    last_update = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
    comment = Column(String(1024))
class Server(SAFRSBase, Base):
    """ORM mapping of the MySQL ``servers`` table (linked-server definitions, e.g. FEDERATED)."""
    __tablename__ = 'servers'
    Server_name = Column(CHAR(64), primary_key=True, server_default=text("''"))
    Host = Column(CHAR(64), nullable=False, server_default=text("''"))
    Db = Column(CHAR(64), nullable=False, server_default=text("''"))
    Username = Column(CHAR(64), nullable=False, server_default=text("''"))
    Password = Column(CHAR(64), nullable=False, server_default=text("''"))
    Port = Column(INTEGER(4), nullable=False, server_default=text("'0'"))
    Socket = Column(CHAR(64), nullable=False, server_default=text("''"))
    Wrapper = Column(CHAR(64), nullable=False, server_default=text("''"))
    Owner = Column(CHAR(64), nullable=False, server_default=text("''"))
class SlaveMasterInfo(SAFRSBase, Base):
    """ORM mapping of the MySQL ``slave_master_info`` table (replica's connection metadata)."""
    __tablename__ = 'slave_master_info'
    Number_of_lines = Column(INTEGER(10), nullable=False)
    Master_log_name = Column(TEXT, nullable=False)
    Master_log_pos = Column(BIGINT(20), nullable=False)
    Host = Column(CHAR(64))
    User_name = Column(TEXT)
    User_password = Column(TEXT)
    Port = Column(INTEGER(10), nullable=False)
    Connect_retry = Column(INTEGER(10), nullable=False)
    Enabled_ssl = Column(TINYINT(1), nullable=False)
    Ssl_ca = Column(TEXT)
    Ssl_capath = Column(TEXT)
    Ssl_cert = Column(TEXT)
    Ssl_cipher = Column(TEXT)
    Ssl_key = Column(TEXT)
    Ssl_verify_server_cert = Column(TINYINT(1), nullable=False)
    Heartbeat = Column(Float, nullable=False)
    Bind = Column(TEXT)
    Ignored_server_ids = Column(TEXT)
    Uuid = Column(TEXT)
    Retry_count = Column(BIGINT(20), nullable=False)
    Ssl_crl = Column(TEXT)
    Ssl_crlpath = Column(TEXT)
    Enabled_auto_position = Column(TINYINT(1), nullable=False)
    Channel_name = Column(CHAR(64), primary_key=True)
    Tls_version = Column(TEXT)
class SlaveRelayLogInfo(SAFRSBase, Base):
    """ORM mapping of the MySQL ``slave_relay_log_info`` table (replica SQL-thread position)."""
    __tablename__ = 'slave_relay_log_info'
    Number_of_lines = Column(INTEGER(10), nullable=False)
    Relay_log_name = Column(TEXT, nullable=False)
    Relay_log_pos = Column(BIGINT(20), nullable=False)
    Master_log_name = Column(TEXT, nullable=False)
    Master_log_pos = Column(BIGINT(20), nullable=False)
    Sql_delay = Column(INTEGER(11), nullable=False)
    Number_of_workers = Column(INTEGER(10), nullable=False)
    Id = Column(INTEGER(10), nullable=False)
    Channel_name = Column(CHAR(64), primary_key=True)
class SlaveWorkerInfo(SAFRSBase, Base):
    """ORM mapping of the MySQL ``slave_worker_info`` table (per-worker replication progress)."""
    __tablename__ = 'slave_worker_info'
    Id = Column(INTEGER(10), primary_key=True, nullable=False)
    Relay_log_name = Column(TEXT, nullable=False)
    Relay_log_pos = Column(BIGINT(20), nullable=False)
    Master_log_name = Column(TEXT, nullable=False)
    Master_log_pos = Column(BIGINT(20), nullable=False)
    Checkpoint_relay_log_name = Column(TEXT, nullable=False)
    Checkpoint_relay_log_pos = Column(BIGINT(20), nullable=False)
    Checkpoint_master_log_name = Column(TEXT, nullable=False)
    Checkpoint_master_log_pos = Column(BIGINT(20), nullable=False)
    Checkpoint_seqno = Column(INTEGER(10), nullable=False)
    Checkpoint_group_size = Column(INTEGER(10), nullable=False)
    Checkpoint_group_bitmap = Column(LargeBinary, nullable=False)
    Channel_name = Column(CHAR(64), primary_key=True, nullable=False)
t_slow_log = Table(
'slow_log', metadata,
#Column('start_time', TIMESTAMP(fsp=6), nullable=False, server_default=text("CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)")),
#
#Manual Edit:
Column('start_time', TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)")),
Column('user_host', MEDIUMTEXT, | |
import argparse
import os
import random
import shutil
import time
import warnings
import numpy as np
from progress.bar import (Bar, IncrementalBar)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import folder2lmdb
import CustomBatchSampler
import cv2
from models.voc.mbv2_yolo import yolo
from models.voc.yolo_loss import *
from utils import Bar, Logger, AverageMeter
from utils.eval_mAP import *
from pprint import PrettyPrinter
import yaml
import nni
from nni.utils import merge_parameter
from nni.trial import get_sequence_id
from nni.trial import get_trial_id
pp = PrettyPrinter()
from torch.utils.tensorboard import SummaryWriter
def seed_worker(worker_id):
    """Seed numpy and random for a DataLoader worker from torch's base seed."""
    seed = torch.initial_seed() % (2 ** 32)
    np.random.seed(seed)
    random.seed(seed)
def main(args):
    """Train and evaluate the YOLO model described by the config files.

    Parameters
    ----------
    args: argparse.Namespace
        Parsed command-line options (hyper-parameter overrides, checkpoint
        paths, resume/evaluate flags, schedule, warm-up epochs, etc.)
    """
    # Write tensorboard logs next to the NNI trial output when running under NNI
    if 'NNI_OUTPUT_DIR' not in os.environ:
        writer = SummaryWriter('tensorboard/')
    else:
        writer = SummaryWriter(os.environ["NNI_OUTPUT_DIR"] + '/tensorboard/')
    # FIX: yaml.load() without a Loader is unsafe and raises TypeError on
    # PyYAML >= 6; these are plain config files, so safe_load is sufficient
    with open('models/voc/config.yaml', 'r') as f:
        config = yaml.safe_load(f)
    with open('data/voc_data.yaml', 'r') as f:
        dataset_path = yaml.safe_load(f)
    # Command-line overrides for tunable hyper-parameters (NNI search space)
    if args.ignore_thresh_1 is not None:
        config["yolo"]["ignore_thresh"][0] = args.ignore_thresh_1
    if args.ignore_thresh_2 is not None:
        config["yolo"]["ignore_thresh"][1] = args.ignore_thresh_2
    if args.iou_thresh is not None:
        config["yolo"]["iou_thresh"] = args.iou_thresh
    if args.expand_scale is not None:
        config["expand_scale"] = args.expand_scale
    if args.mosaic_num is not None:
        config["mosaic_num"] = args.mosaic_num
    if args.iou_weighting is not None:
        config["iou_weighting"] = args.iou_weighting
    print(config)
    best_acc = 0  # best test accuracy
    start_epoch = 0
    # LMDB-backed datasets for the train and test splits
    image_folder = folder2lmdb.ImageFolderLMDB
    train_dataset = image_folder(
        db_path=dataset_path["trainval_dataset_path"]["lmdb"],
        transform_size=config["train_img_size"],
        phase='train', batch_size=config["batch_size"],
        expand_scale=config["expand_scale"]
    )
    test_dataset = image_folder(
        db_path=dataset_path["test_dataset_path"]["lmdb"],
        transform_size=[[config["img_w"], config["img_h"]]],
        phase='test', batch_size=config["batch_size"]
    )
    # Batch sampler that groups samples for mosaic augmentation
    BatchSampler = CustomBatchSampler.GreedyBatchSampler
    sampler = BatchSampler(
        torch.utils.data.sampler.RandomSampler(train_dataset),
        batch_size=config["batch_size"],
        drop_last=False, sample=config["mosaic_num"])
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_sampler=sampler,
        num_workers=4, pin_memory=True, collate_fn=train_dataset.collate_fn,
        worker_init_fn=seed_worker)
    test_loader = torch.utils.data.DataLoader(
        test_dataset, config["batch_size"], shuffle=False,
        num_workers=4, pin_memory=True, collate_fn=test_dataset.collate_fn)
    model = yolo(config=config)
    # NOTE(review): the model is moved with .cuda() unconditionally, so a CUDA
    # device is effectively required — confirm CPU-only runs are unsupported
    model = model.cuda()
    optimizer = optim.AdamW(params=model.parameters(), lr=args.learning_rate,
                            weight_decay=args.weight_decay)
    if not os.path.exists(args.checkpoint):
        os.makedirs(args.checkpoint)
    title = 'voc-training-process'
    if args.resume:
        # Load checkpoint and restore training state
        print('==> Resuming from checkpoint..')
        print(args.resume)
        assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        # Restore the calibrated validation confidence for both YOLO heads
        model.yolo_losses[0].val_conf = checkpoint['conf']
        model.yolo_losses[1].val_conf = checkpoint['conf']
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Epoch ', 'Loss ', 'Precision ', 'Time ', 'IOU ', 'Learning Rate'])
    test_acc = 0
    if args.evaluate:
        # Evaluation-only mode: run a single test pass and exit
        for epoch in range(1):
            test_acc = test(test_loader, model, optimizer, epoch, config)
        return
    # Pre-scale the learning rate down once per warm-up epoch; each warm-up
    # epoch reached in the training loop below scales it back up by 2
    for epoch in range(start_epoch, args.epochs):
        if epoch in args.warm_up:
            adjust_learning_rate(optimizer, 0.5)
    for epoch in range(start_epoch, args.epochs):
        # train for one epoch
        if epoch in args.warm_up:
            adjust_learning_rate(optimizer, 2)
        if epoch in args.schedule:
            # Snapshot the model before decaying the learning rate
            save_checkpoint({
                'epoch': epoch,
                'model': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
                'conf': model.yolo_losses[0].val_conf,
            }, False, model, config, checkpoint=args.checkpoint,
                filename='epoch%d_checkpoint.pth.tar' % epoch, export_path=args.export)
            adjust_learning_rate(optimizer, 0.5)
            print('adjusted to current lr: '
                  '{}'.format([param_group['lr'] for param_group in optimizer.param_groups]))
        # Evaluate only on odd epochs (log is True on even epochs)
        log = False
        if epoch % 2 == 0:
            log = True
        st = time.time()
        print('\nEpoch: [%3d | %3d] LR: %f | loss | cnt | iou | obj | no_obj | class | recall | cnt2 | iou2 | obj2 | no_obj2 | class2 | recall2 |' \
              % (epoch, args.epochs, optimizer.param_groups[0]['lr']))
        train_loss, iou = train(train_loader, model, optimizer, epoch, sampler)
        writer.add_scalar('Loss/train', train_loss, epoch)
        writer.add_scalar('iou/train', iou, epoch)
        if not log:
            test_acc = test(test_loader, model, optimizer, epoch, config)
            nni.report_intermediate_result(test_acc)
        logger.append([epoch + 1, train_loss, test_acc, time.time() - st, iou,
                       optimizer.param_groups[0]['lr']])
        # save model, tracking the best accuracy so far
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint({
            'epoch': epoch + 1,
            'model': model.state_dict(),
            'acc': test_acc,
            'best_acc': best_acc,
            'optimizer': optimizer.state_dict(),
            'conf': model.yolo_losses[0].val_conf,
        }, is_best, model, config, checkpoint=args.checkpoint, export_path=args.export)
        writer.add_scalar('Accuracy/test', test_acc, epoch + 1)
    nni.report_final_result(best_acc)
def train(train_loader, model, optimizer,epoch,sampler):
    """Run one training epoch over `train_loader`.

    Accumulates per-head statistics (the model is assumed to return two YOLO
    head outputs, indexed 0 and 1) and shows progress with an IncrementalBar
    sized by `sampler`.

    Returns:
        tuple: (average loss over the epoch, mean of the two heads' average IOU)

    NOTE(review): uses a name `device` that is not defined in this function;
    presumably set at module level by the training entry point -- confirm.
    """
    model.train()
    # The bar advances by images consumed (total_num), not by batches.
    bar = IncrementalBar('Training', max=len(sampler),width=12)
    #batch_time = AverageMeter()
    #data_time = AverageMeter()
    losses = AverageMeter()
    # One meter per detection head. conf_loss/cls_loss are currently unused
    # (their update calls are commented out below).
    recall = [AverageMeter(),AverageMeter()]
    iou = [AverageMeter(),AverageMeter()]
    obj = [AverageMeter(),AverageMeter()]
    no_obj = [AverageMeter(),AverageMeter()]
    conf_loss = [AverageMeter(),AverageMeter()]
    cls_loss = [AverageMeter(),AverageMeter()]
    cls_score = [AverageMeter(),AverageMeter()]
    count = [AverageMeter(),AverageMeter()]
    #end = time.time()
    for batch_idx, (images,targets,total_num) in enumerate(train_loader):
        #print('\n1-',sum(sampler.get_mosaic_array()),'\n')
        #print('1-',sampler.mosaic_array,'\n')
        #print(targets)
        #data_time.update(time.time() - end)
        bs = images.size(0)
        #print(images.shape)
        #print(i,targets[0])
        optimizer.zero_grad()
        images = images.to(device) # (batch_size (N), 3, H, W)
        # In training mode the model computes the losses internally:
        # each head returns (loss, recall, iou, obj, no_obj, cls_score, count).
        outputs = model(images,targets)
        #losses0 = yolo_losses[0](outputs[0],targets)
        #losses1 = yolo_losses[1](outputs[1],targets)
        t_loss = list()
        for i,l in enumerate(outputs):
            #print(l[0])
            t_loss.append(l[0])
            recall[i].update(l[1])
            iou[i].update(l[2])
            obj[i].update(l[3])
            no_obj[i].update(l[4])
            cls_score[i].update(l[5])
            count[i].update(l[6])
            #conf_loss.update(l[5])
            #cls_loss.update(l[6])
        # Total loss is the sum over heads; backprop once per batch.
        loss = sum(t_loss)
        losses.update(loss.item(),bs)
        loss.backward()
        optimizer.step()
        # measure elapsed time
        #batch_time.update(time.time() - end)
        #end = time.time()
        bar.suffix = \
        '%(percent)3d%% | {total:} | {loss:.4f} | {cnt1:2.1f} | {iou1:.3f} | {obj1:.3f} | {no_obj1:.4f} | {cls1:.3f} | {rec1:.3f} | {cnt2:2.1f} | {iou2:.3f} | {obj2:.3f} | {no_obj2:.4f} | {cls2:.3f} | {rec2:.3f} |'\
        .format(
            #batch=batch_idx + 1,
            #size=len(train_loader),
            #data=data_time.avg,
            #bt=batch_time.avg,
            total=bar.elapsed_td,
            loss=losses.avg,
            #loss1=losses[0].avg,
            #loss2=losses[1].avg,
            cnt1=(count[0].avg),
            cnt2=(count[1].avg),
            #recall=recall.avg,
            iou1=iou[0].avg,
            iou2=iou[1].avg,
            obj1=obj[0].avg,
            no_obj1=no_obj[0].avg,
            cls1=cls_score[0].avg,
            obj2=obj[1].avg,
            no_obj2=no_obj[1].avg,
            cls2=cls_score[1].avg,
            rec1=recall[0].avg,
            rec2=recall[1].avg,
            #cls=cls_loss.avg,
        )
        bar.next(total_num)
    bar.finish()
    return losses.avg,(iou[0].avg+iou[1].avg)/2
def test(test_loader, model, optimizer,epoch , config):
    """Run validation over `test_loader` and return the mAP.

    Collects detections and ground-truth boxes for the whole test set,
    auto-tunes the model's validation confidence threshold from the
    predicted/ground-truth box ratio, then computes mean average precision.

    Returns:
        float: mean average precision (mAP) over the test set.
    """
    # switch to evaluate mode
    model.eval()
    n_classes = config['yolo']['classes']
    bar = IncrementalBar('Validating', max=len(test_loader),width=32)
    det_boxes = list()
    det_labels = list()
    det_scores = list()
    true_boxes = list()
    true_labels = list()
    true_difficulties = list()
    n_images = 0
    gt_box = 0
    pred_box = 0
    for batch_idx, (images,targets) in enumerate(test_loader):
        images = images.to(device) # (batch_size (N), 3, H, W)
        labels = [torch.Tensor(l).to(device) for l in targets]
        bs = len(labels)
        # compute output
        with torch.no_grad():
            detections = model(images) # (N, num_defaultBoxes, 4), (N, num_defaultBoxes, n_classes)
        for sample_i in range(bs):
            target_sample = labels[sample_i]
            gt_box = gt_box + len(target_sample)
            # Convert ground truth from (class, cx, cy, w, h) to corner form.
            tx1, tx2 = torch.unsqueeze((target_sample[...,1] - target_sample[...,3] / 2),1), torch.unsqueeze((target_sample[...,1] + target_sample[...,3] / 2),1)
            ty1, ty2 = torch.unsqueeze((target_sample[...,2] - target_sample[...,4] / 2),1), torch.unsqueeze((target_sample[...,2] + target_sample[...,4] / 2),1)
            box = torch.cat((tx1,ty1,tx2,ty2),1)
            size = target_sample.size(0)
            true_boxes.append(box)
            true_labels.append(target_sample[...,0])
            true_difficulties.append(torch.zeros(size, requires_grad=False))
            preds = detections[sample_i]
            if preds is not None:
                # BUGFIX: len(preds) used to run BEFORE the None check and
                # raised TypeError whenever a sample had no detections.
                pred_box = pred_box + len(preds)
                det_boxes.append(preds[...,:4])
                det_labels.append((preds[...,6]+1).to(device))
                # Score = objectness * class confidence.
                conf = (preds[...,4] * preds[...,5]).to(device)
                det_scores.append(conf)
            else :
                empty = torch.empty(0).to(device)
                det_boxes.append(empty)
                det_labels.append(empty)
                det_scores.append(empty)
            n_images = n_images + 1
        # plot progress
        bar.suffix = '({batch}/{size}) | Total: {total:} | ETA: {eta:}| n_img: {n_img:} | gt_box: {gt_box:} | pred_box: {pred_box:}'.format(
            batch=batch_idx + 1,
            size=len(test_loader),
            total=bar.elapsed_td,
            eta=bar.eta_td,
            n_img=n_images,
            gt_box=gt_box,
            pred_box=pred_box
        )
        bar.next()
    bar.finish()
    print("\nVal conf. is %f\n" % (model.yolo_losses[0].val_conf))
    # Both heads share one threshold, nudged so predictions stay within
    # [2x, 3x] of the ground-truth box count.
    model.yolo_losses[0].val_conf = adjust_confidence(gt_box,pred_box,model.yolo_losses[0].val_conf)
    model.yolo_losses[1].val_conf = adjust_confidence(gt_box,pred_box,model.yolo_losses[1].val_conf)
    # Calculate mAP
    APs, mAP, TP, FP = calculate_mAP(det_boxes, det_labels, det_scores, true_boxes, true_labels, true_difficulties, n_classes=21)
    pp.pprint(APs)
    print('\nMean Average Precision (mAP): %.3f' % mAP)
    return mAP
def save_checkpoint(state, is_best,model,config, checkpoint='checkpoint', filename='checkpoint.pth.tar',export_path = 'checkpoint'):
    """Persist the training state dict; when this is the best model so far,
    additionally snapshot the whole model object as model_best.pth.tar.

    `config` and `export_path` are retained for interface compatibility
    (used only by the commented-out ONNX export path).
    """
    destination = os.path.join(checkpoint, filename)
    torch.save(state, destination)
    #save_onnx(filepath,model)
    if not is_best:
        return
    best_path = os.path.join(checkpoint, 'model_best.pth.tar')
    torch.save(model, best_path)
    #dummy_input = torch.randn(1, 3, config["img_w"], config["img_h"]) #
    #torch.onnx.export(model, dummy_input,os.path.join(export_path, 'model_best.onnx'))
def adjust_confidence(gt_box_num,pred_box_num,conf):
    """Nudge the validation confidence threshold by 0.01 so the number of
    predicted boxes stays roughly within [2x, 3x] of the ground-truth count.

    Raises the threshold when there are too many predictions; lowers it
    (never below 0.01 steps from ~0) when there are too few.
    """
    too_many = pred_box_num > gt_box_num * 3
    too_few = pred_box_num < gt_box_num * 2
    if too_many:
        return conf + 0.01
    if too_few and conf > 0.01:
        return conf - 0.01
    return conf
def adjust_learning_rate(optimizer, scale):
    """
    Multiply every parameter group's learning rate by `scale`, in place.
    :param optimizer: optimizer whose learning rate must be shrunk.
    :param scale: factor to multiply learning rate with.
    """
    for group in optimizer.param_groups:
        group['lr'] *= scale
    print("Change learning rate.\n The new LR is %f\n" % (optimizer.param_groups[0]['lr']))
def get_params():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=0.0004, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--learning_rate', default=0.0007, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--warm-up', '--warmup', default=[], type=float,
metavar='warmup', help='warm up learning rate')
parser.add_argument('--epochs', default=300, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--schedule', type=int, nargs='+', default=[100,170,240],
help='Decrease learning rate at these epochs.')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-c', '--checkpoint', default='checkpoint', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
#parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
# help='evaluate model on validation set')
| |
NotAVerb(Exception):
pass
    class NotAWord(Exception):
        """Raised for values expected to be words that are not.

        NOTE(review): not raised anywhere in this chunk; semantics inferred
        from the name -- confirm against the rest of the file.
        """
        pass
    def _from_idn(self, idn):
        """
        Construct a Word from its idn.

        Delegates the lookup to the lex, which fills this word in place.
        :type idn: Number
        """
        assert isinstance(idn, Number)
        self.lex.populate_word_from_idn(self, idn)
        # TODO: Do something with return value?
        # NOTE: If this returned True, it already called populate_from_word()
        #       and so the word now exists()
    def _from_definition(self, txt):
        """
        Construct a Word from its name, aka its definition txt.
        That is, look for a word with
            sbj=lex
            vrb=define
            obj=(can be anything)
            txt=whatever
        When no such word is found, only txt is recorded and the word
        remains nonexistent.
        """
        assert Text.is_valid(txt)
        assert isinstance(self.lex, LexSentence)
        if not self.lex.populate_word_from_definition(self, txt):
            self._fields = dict(
                txt=Text(txt)
            )
    def _from_word(self, other):
        """Copy-construct from another word of the same lex.

        An inchoate source stays inchoate (only its idn is remembered);
        a choate source is re-looked-up by idn.
        """
        assert isinstance(other, Word)   # Not necessarily type(self)
        assert self.lex == other.lex
        assert self.lex is other.lex
        assert isinstance(self, other.lex.word_class)
        assert isinstance(other, self.lex.word_class)
        # noinspection PyProtectedMember
        if other._is_inchoate:
            # Defer the database lookup; just remember the idn.
            self._inchoate(other.idn)
        else:
            assert other.exists()
            self.set_idn_if_you_really_have_to(other.idn)
            self._from_idn(other.idn)
            # TODO: Why not copy-construct a choate other into an inchoate self?
            #       (Find out whether this populated self is now choate.)
def _from_sbj_vrb_obj(self):
"""Construct a word by looking up its subject-verb-object."""
assert isinstance(self.sbj, Word)
assert isinstance(self.vrb, Word)
assert isinstance(self.obj, Word)
self.lex.populate_word_from_sbj_vrb_obj(
self,
self.sbj,
self.vrb,
self.obj
)
def _from_sbj_vrb_obj_num_txt(self):
"""Construct a word by looking up its subject-verb-object and its num and txt."""
assert isinstance(self.sbj, Word)
assert isinstance(self.vrb, Word)
assert isinstance(self.obj, Word)
assert isinstance(self.num, Number)
assert isinstance(self.txt, Text)
self.lex.populate_word_from_sbj_vrb_obj_num_txt(
self,
self.sbj,
self.vrb,
self.obj,
self.num,
self.txt
)
def inchoate_copy(self):
"""
Word clones itself but the copy is inchoate.
Useful for words as dictionary keys.
"""
return self.lex[self.idn]
def populate_from_word(self, word):
word_dict = dict(
idn=word.idn,
sbj=word.sbj.idn,
vrb=word.vrb.idn,
obj=word.obj.idn,
num=word.num,
txt=word.txt,
whn=word.whn,
)
self.populate_from_row(word_dict)
    def populate_from_row(self, row, prefix=''):
        """Fill this word's seven properties from a mapping of column name -> value.

        prefix -- optional column-name prefix (e.g. for values from a join).
        sbj/vrb/obj arrive as idn Numbers and are wrapped as lex words.
        """
        assert isinstance(row[prefix + 'idn'], Number)
        assert isinstance(row[prefix + 'sbj'], Number), type_name(row[prefix + 'sbj'])
        assert isinstance(row[prefix + 'vrb'], Number)
        assert isinstance(row[prefix + 'obj'], Number)
        assert isinstance(row[prefix + 'num'], Number)
        assert isinstance(row[prefix + 'txt'], Text)
        assert isinstance(row[prefix + 'whn'], Number)
        self.set_idn_if_you_really_have_to(row[prefix + 'idn'])
        self._now_it_exists()
        # NOTE: Is this comment on the _now_it_exists() call obsolete?
        #       Must come before spawn(sbj) for lex's sake.
        self._fields = dict(
            sbj=self.lex[row[prefix + 'sbj']],
            vrb=self.lex[row[prefix + 'vrb']],
            obj=self.lex[row[prefix + 'obj']],
            num=row[prefix + 'num'],
            txt=row[prefix + 'txt'],
            whn=row[prefix + 'whn'],
        )
    def populate_from_num_txt(self, num, txt):
        """Mark the word existent and set only its num and txt fields."""
        assert isinstance(txt, Text), "Need Text, not a {t}: `{r}'".format(
            t=type_name(txt),
            r=repr(txt)
        )
        assert isinstance(num, Number)
        self._now_it_exists()
        self._fields = dict(
            num=num,
            txt=txt,
        )
    def is_a(self, word, reflexive=True, recursion=10):
        """Walk the define-chain upward to decide whether self is (a kind of) `word`.

        reflexive -- a word counts as being itself.
        recursion -- maximum definition-chain depth left to follow.
        """
        assert recursion >= 0
        if reflexive and self.idn == word.idn:
            return True
        if recursion <= 0:
            return False
        if not self.exists():
            return False
        if not hasattr(self, 'vrb'):
            return False
        # Only definition sentences participate in the is-a hierarchy.
        if self.vrb.idn != self.lex.IDN_DEFINE:
            return False
        if self.obj == word:
            return True
        parent = self.lex[self.obj]
        # A self-defined word (its own parent) terminates the walk.
        if parent.idn == self.idn:
            return False
        return parent.is_a(word, reflexive=reflexive, recursion=recursion-1)
def is_a_noun(self, reflexive=True, **kwargs):
"""Noun is a noun. Really, everything is a noun."""
assert hasattr(self, 'lex')
return self.is_a(self.lex.noun(), reflexive=reflexive, **kwargs)
def is_a_verb(self, reflexive=False, **kwargs):
"""Verb is not a verb. But anything defined as a verb is a verb."""
assert hasattr(self, 'lex')
return self.is_a(self.lex.verb(), reflexive=reflexive, **kwargs)
def is_define(self):
"""Is this word the one and only verb (whose txt is) 'define'."""
return self.idn == self.lex.IDN_DEFINE
def is_defined(self):
"""
Test whether a word is the product of a definition.
That is, whether the sentence that creates it uses the verb 'define'.
"""
return self.vrb is not None and self.vrb.idn == self.lex.IDN_DEFINE
def is_noun(self):
return self.idn == self.lex.IDN_NOUN
def is_verb(self):
"""
Not to be confused with is_a_verb().
is_a_verb() -- is this word in a []-(define)-[verb] sentence, recursively.
is_verb() -- is this the one-and-only "verb" word,
i.e. [lex](define, "verb")[noun],
i.e. idn == IDN_VERB
"""
return self.idn == self.lex.IDN_VERB
def is_agent(self):
return self.idn == self.lex.IDN_AGENT
def is_lex(self):
# return isinstance(self, Lex) and self.exists() and self.idn == self.lex.IDN_LEX
return self.exists() and self.idn == self.lex.IDN_LEX
def description(self):
return u"[{sbj}]({vrb}{maybe_num}{maybe_txt})[{obj}]".format(
sbj=str(self.sbj),
vrb=str(self.vrb),
obj=str(self.obj),
# TODO: Would str(x) cause infinite recursion?
# Not if str() does not call description()
maybe_num=(", " + self.presentable(self.num)) if self.num != 1 else "",
maybe_txt=(", " + repr(self.txt)) if self.txt != '' else "",
)
def to_dict(self):
"""Expose all 7 properties of a word as a dict."""
d = dict(
idn=self.idn,
sbj=self.sbj.idn,
vrb=self.vrb.idn,
obj=self.obj.idn,
whn=float(self.whn),
)
if self.txt != "":
d['txt'] = self.txt
if self.num != 1:
d['num'] = self.num
# FALSE WARNING: Unresolved attribute reference 'jbo' for class 'Word'
# noinspection PyUnresolvedReferences
if hasattr(self, 'jbo') and len(self.jbo) > 0:
d['jbo'] = self.jbo
return d
def to_json(self):
"""
A little help converting words to JSON.
SEE: The test with the BetterJson class in test_word.py
"""
d = self.to_dict()
del d['whn'] # TODO: Do we want whn fields in JSON or not?!?
return d
@staticmethod
def presentable(num):
if num.is_suffixed() or not num.is_reasonable():
return num.qstring()
try:
is_it_whole = num.is_whole()
except TypeError: # Number.WholeError:
return num.qstring()
else:
if is_it_whole:
return str(int(num))
else:
return str(float(num))
def __format__(self, format_spec):
# THANKS: format > repr > str, https://stackoverflow.com/a/40600544/673991
if format_spec == '':
return repr(self)
else:
return "Word({})".format(",".join(self._word_attributes(format_spec)))
def _word_attributes(self, format_spec):
for c in format_spec:
if c == 'i': yield "idn={}".format(self.presentable(self.idn))
elif c == 's': yield "sbj={}".format(str(self.sbj))
elif c == 'v': yield "vrb={}".format(str(self.vrb))
elif c == 'o': yield "obj={}".format(str(self.obj))
elif c == 't': yield "txt='{}'".format(str(self.txt))
elif c == 'n': yield "num={}".format(self.presentable(self.num))
elif c == 'w': yield "whn={}".format(str(TimeLex()[self.whn].txt))
else:
raise ValueError("'{}' unknown in .format(word)".format(c))
    def __repr__(self):
        """Developer representation; the shape depends on how much of the word is known."""
        # THANKS: repr() conventions,
        # https://codingkilledthecat.wordpress.com/2012/06/03/please-dont-abuse-repr/
        if self.exists():
            if self.is_defined() and self.txt:
                # Named definition: Word('txt'[, num=...])
                # TODO: Undo comma_num (WTF is this?)
                if self.num == Number(1):
                    comma_num = ""
                else:
                    comma_num = ", num={num}".format(num=repr(self.num))
                # TODO: comma_idn -- Show idn if txt,num is not the latest
                return "Word('{txt}'{comma_num})".format(
                    comma_num=comma_num,
                    txt=self.txt
                )
            else:
                # Existent but not a named definition: show the idn.
                return "Word({})".format(self.presentable(self.idn))
        elif (
            isinstance(self.sbj, Word) and
            isinstance(self.vrb, Word) and
            isinstance(self.obj, Word) and
            isinstance(self.txt, Text) and
            isinstance(self.num, Number)
        ):
            # Fully specified but not (yet) saved: show the whole sentence.
            return("Word(sbj={sbj}, vrb={vrb}, obj={obj}, txt={txt}, num={num})".format(
                sbj=self.sbj.idn.qstring(),
                vrb=self.vrb.idn.qstring(),
                obj=self.obj.idn.qstring(),
                txt=repr(self.txt),
                num=self.num.qstring(),
            ))
        elif Text.is_valid(self.txt):
            return "Word(undefined {})".format(repr(self.txt))
        else:
            # Last resort: try to show the idn, falling back to txt or nothing.
            try:
                idn_repr = repr(self.idn)
            except ValueError:
                if self.txt:
                    return "Word(nonexistent {})".format(repr(self.txt))
                    # TODO: unit test
                else:
                    return "Word(in a corrupt state)" # can't show idn nor txt
                    # TODO: unit test this?
            else:
                return "Word(unidentified {})".format(idn_repr)
def __str__(self):
if hasattr(self, 'txt') and self.txt is not None:
return self.txt.native()
else:
return repr(self)
# TODO: Should this be encoded for PY2?
def __unicode__(self):
if hasattr(self, 'txt'):
assert isinstance(self.txt, Text)
return self.txt.unicode()
else:
return repr(self)
def __hash__(self):
return hash(self.idn)
def __eq__(self, other):
try:
return self.idn == other.idn
except AttributeError:
return False
def __ne__(self, other):
return not self.__eq__(other)
    @property
    def idn(self):
        """This word's identifier as a defensive copy (a fresh Number),
        or Number.NAN when no identifier has been assigned yet."""
        try:
            return Number(self._idn)
            # Copy constructor so e.g. w.idn.suffix(n) will not modify w.idn.
            # TODO: but then what about w.sbj.add_suffix(n), etc.?
            #       (But there's no more add_suffix, only new-number-generating plus_suffix)
            #       So this passing through Number() is a bad idea.
            #       Plus this makes x.idn a different object from x._idn, burdening debug.
        except AttributeError:
            # No _idn yet -- the word has no identity.
            return Number.NAN
def save(self, override_idn=None):
# TODO: Move to Lex? It's only ever called by create_word() anyway...
assert isinstance(self.idn, Number)
assert isinstance(self.sbj, Word)
assert isinstance(self.vrb, Word)
assert isinstance(self.obj, Word), "{obj} is not a Word".format(obj=repr(self.obj))
assert isinstance(self.num, Number)
assert isinstance(self.txt, Text)
if override_idn is None:
self.lex.insert_next_word(self)
else:
self.set_idn_if_you_really_have_to(override_idn)
self.lex.insert_word(self)
class SubjectedVerb(object):
# TODO: Move this to inside Word? Or LexSentence!??
"""
Intermediary in the bracket syntax lex[s](v)[o].
An instance of this class is the lex[s](v) part.
For example, the word getter expression w = lex[s](v)[o] breaks down into
x = lex[s](o); w = x[o].
x is a SubjectVerb instance that remembers subject and verb.
This instance is the Python-object that is "returned" when you "call" a subject
and pass it a verb. So that call is a factory for this class.
Besides tne verb, that call can pass txt and num in flexible order.
The methods of this class implement the remainder of the bracket syntax: the [o] part.
Getting or setting that part leads to Lex.read_word() or Lex.create_word().
There is one exception to that neat correspondence.
The lex[s](v,n,t)[o] variation on the bracket syntax.
That looks like a getter to Python, but performs like a setter.
w = lex[s](v)[o] SubjectVerb.__getitem__() Lex.read_word()
w = lex[s](v,n,t)[o] SubjectVerb.__getitem__() Lex.create_word()
lex[s](v)[o] = n,t SubjectVerb.__setitem__() Lex.create_word()
"""
def __init__(self, sbj, vrb, *args, | |
float(x), bbox.split(',')) if bbox!="" else mapExtent,
"mapSize": self.dame_mapserver_size,
"fileName": self.dame_filename,
"mapType": self.tipo_de_mapa,
"metadata": {
"ows_title": unicode(self.dame_titulo).encode('UTF-8'),
"ows_abstract": unicode(self.dame_descripcion.replace('\r\n', ' ')).encode('UTF-8'),
"ows_attribution_title": unicode(self.dame_fuente.replace('\r\n', ' ')).encode('UTF-8'),
"ows_contactorganization": unicode(self.dame_contacto.replace('\r\n', ' ')).encode('UTF-8'),
"wms_onlineresource": wxs_url,
"wfs_onlineresource": wxs_url,
"mg_onlineresource": unicode(self.dame_tilesurl).encode('UTF-8'),
"mg_siteurl": unicode(settings.SITE_URL).encode('UTF-8'),
"mg_baselayerurl": self.tms_base_layer.url if self.tms_base_layer else settings.MAPCACHE_URL+'tms/1.0.0/world_borders@GoogleMapsCompatible/{z}/{x}/{y}.png',
"mg_tmsbaselayer": str(self.tms_base_layer.tms) if self.tms_base_layer else str(True),
"mg_iswmslayer": str(self.showAsWMSLayer),
"mg_mapid": unicode(self.id_mapa),
"mg_layername": unicode(c.nombre) if c is not None else "",
"mg_enablecontextinfo": str(enableContextInfo),
"ows_srs": 'epsg:%s epsg:4326 epsg:3857'%(srid) if RepresentsPositiveInt(srid) else 'epsg:4326 epsg:3857', # dejamos proyecciones del mapa y 4326 fijas. esta logica la repetimos en las capas
"wfs_getfeature_formatlist": 'geojson,shapezip,csv',
"ows_encoding": 'UTF-8', # siempre
"ows_enable_request": '*',
"labelcache_map_edge_buffer": '10'
},
"layers": layers
}
return data
def create_mapfile(self, save=True):
return mapserver.create_mapfile(self.dame_mapserver_map_def(), save)
    def generar_thumbnail_y_legend(self):
        """Regenerate the derived images (thumbnail and/or legend) appropriate
        for this map's type. Always returns True."""
        print '...Grabando mapa e imagen de %s (tipo %s)'%(self.id_mapa, self.tipo_de_mapa)
        # mapa=self.dame_mapserver_mapObj()
        # mapa.save(os.path.join(settings.MAPAS_PATH, self.id_mapa+'.map'))
        # print "......mapa guardado %s"%(self.id_mapa+'.map')
        if self.tipo_de_mapa in ('layer_original_srs', 'general', 'layer_raster_band'):
            thumb = self.generar_thumbnail()
            print "......imagen creada: %s"%(thumb)
        if self.tipo_de_mapa in ('general', 'layer', 'layer_raster_band'):
            self.generar_legend()
        return True
    def agregar_a_mapcache(self):
        """(Re)register this map in mapcache: drop stale tilesets, then add one
        tileset per SLD for layer maps, or a single 'default' tileset for
        general maps."""
        print "agregar_a_mapcache %s"%(self.id_mapa)
        # rm_tileset(self.id_mapa)
        # In a distributed architecture the tiles are local
        mapcache.remove_tileset(self.id_mapa)
        sld_url = ''
        default_sld_url = ''
        srid = MAPA_DEFAULT_SRS
        if self.tipo_de_mapa in ('layer', 'layer_raster_band'):
            capa = self.mapserverlayer_set.first().capa
            # params = ':%s:%d'%(capa.nombre, MAPA_DEFAULT_SRS)
            layers = capa.nombre
            srid = MAPA_DEFAULT_SRS
            # One tileset per styled (SLD) variant of the layer.
            for sld in capa.archivosld_set.all():
                # mapcache.remove_map(self.id_mapa, sld.id)
                # rm_tileset(self.id_mapa, sld.id)
                print "sld #%d - %s"%(sld.id, sld.filename.url)
                # In a distributed architecture the tiles are local
                mapcache.remove_tileset(self.id_mapa, sld.id)
                sld_url = urlparse.urljoin(settings.SITE_URL, sld.filename.url)
                # mapcache.add_map(self.id_mapa, layers, srid, sld.id, sld_url)
                add_or_replace_tileset(self.id_mapa, layers, srid, sld.id, sld_url)
                if sld.default:
                    print "default sld: %s"%(sld.filename.url)
                    default_sld_url = urlparse.urljoin(settings.SITE_URL, sld.filename.url)
        elif self.tipo_de_mapa == 'general':
            layers = 'default'
            # mapcache.add_map(self.id_mapa, layers, srid, '', sld_url)
            add_or_replace_tileset(self.id_mapa, layers, srid, '', default_sld_url)
    def generar_thumbnail(self):
        """Render a 110x150 thumbnail PNG for this map into MEDIA_ROOT.

        Uses a WMS GetMap request where one can be built; otherwise falls
        back to drawing the mapfile directly.
        NOTE(review): wms_url is unbound for map types other than the two
        handled branches -- presumably only those types reach here; confirm.
        """
        mapfile=ManejadorDeMapas.commit_mapfile(self.id_mapa)
        if self.tipo_de_mapa in ('general', 'layer_raster_band'):
            for c in self.capas.all(): # every missing mapfile needs regenerating
                ManejadorDeMapas.commit_mapfile(c.id_capa)
            wms_url = mapserver.get_wms_request_url(self.id_mapa, 'default', self.srs, 110, 150, self.dame_extent(',','3857'))
        elif self.tipo_de_mapa=='layer_original_srs':
            c=self.capas.first()
            if (c.srid > 0):
                wms_url = mapserver.get_wms_request_url(self.id_mapa, c.nombre, str(c.srid), 110, 150, c.dame_extent(',', self.srs))
                # Prefer the layer's default SLD when one exists; otherwise
                # keep the plain URL built above.
                try:
                    sld=c.archivosld_set.filter(default=True)[0]
                    sld_url = getSldUrl(sld.filename.url)
                    wms_url = mapserver.get_wms_request_url(self.id_mapa, c.nombre, str(c.srid), 110, 150, c.dame_extent(',', self.srs), sld_url)
                except:
                    pass
            else:
                wms_url = ''
        print wms_url
        thumb=os.path.join(settings.MEDIA_ROOT, self.id_mapa+'.png')
        if wms_url != '':
            return urlToFile(wms_url, thumb)
        else:
            return mapserver.draw_map_to_file(self.id_mapa, thumb)
    def generar_legend(self):
        """Compose the map legend: one GetLegendGraphic image per layer,
        stacked vertically into <id_mapa>_legend.png via ImageMagick
        `convert`, then the per-layer temp files are removed.

        Returns True on success, False when any step fails.
        """
        # capa = self.capas.first()
        mapfile=ManejadorDeMapas.commit_mapfile(self.id_mapa)
        filelist = []
        for mslayer in self.mapserverlayer_set.all():
            # Raster-band maps only use an explicitly attached SLD; the rest
            # fall back to the layer's default SLD.
            if self.tipo_de_mapa == 'layer_raster_band':
                sld = urlparse.urljoin(settings.SITE_URL, mslayer.archivo_sld.filename.url) if mslayer.archivo_sld else None
            else:
                sld = urlparse.urljoin(settings.SITE_URL, mslayer.archivo_sld.filename.url) if mslayer.archivo_sld else mslayer.capa.dame_sld_default()
            url = mapserver.get_legend_graphic_url(self.id_mapa, mslayer.capa.nombre, sld)
            filename=os.path.join(settings.MEDIA_ROOT, self.id_mapa+('_legend_%i.png'%mslayer.orden_de_capa))
            filelist.append(filename)
            try:
                urlToFile(url, filename)
            except:
                print '\nFailed to create legend file %s\n'%filename
                return False
        try:
            call('convert %s -background "rgba(0,0,0,0)" -append %s'%(' '.join(filelist), os.path.join(settings.MEDIA_ROOT, self.id_mapa+'_legend.png')), shell=True)
        except:
            return False
        # Clean up the per-layer fragments once the composite exists.
        for filename in filelist:
            try:
                os.remove(filename)
            except:
                return False
        return True
objects = SearchManager(
fields = ('input_search_index',), # esa coma final debe ir si o si
config = 'pg_catalog.spanish', # this is default
search_field = 'search_index', # this is default
auto_update_search_field = True
)
class MapServerLayer(models.Model):
capa = models.ForeignKey(Capa,null=False,blank=False)
mapa = models.ForeignKey(Mapa)
orden_de_capa = models.IntegerField(null=False)
bandas = models.CharField(null=False, blank=True, max_length=100) # string que representa una tupla de tipo (<variable>, <bandas>), por ejemplo "('WIND', '3,4')"
feature_info = models.BooleanField(u'Feature Info', null=False, default=True)
archivo_sld = models.ForeignKey(ArchivoSLD, null=True, blank=True, on_delete=models.SET_NULL)
texto_input = models.TextField(u'Texto Input', null=False, blank=True, max_length=10000)
texto_output = models.TextField(u'Texto Output', null=False, blank=True, max_length=10000)
class Meta:
verbose_name = 'MapServer Layer'
verbose_name_plural = 'MapServer Layers'
def __unicode__(self):
return '%s.%s (%s)'%(unicode(self.mapa),unicode(self.capa),unicode(self.orden_de_capa))
def dame_layer_connection(self, connectiontype):
if connectiontype == 'WMS':
if self.bandas != "":
return mapserver.get_wms_url(self.bandas)
else:
return mapserver.get_wms_url(self.capa.id_capa)
else:
return self.capa.dame_connection_string
def dame_layer_connection_type(self):
return self.capa.dame_connection_type
def dame_data(self, srid=None):
return self.capa.dame_data(srid)
def save(self, srid=None, *args, **kwargs):
if self.archivo_sld is not None and self.archivo_sld.capa != self.capa:
self.archivo_sld = None
# innecesario por el momento
# try:
# mslo=self.dame_mapserver_layerObj()
# self.texto_output=mslo.convertToString()
# except:
# self.texto_output=''
super(MapServerLayer, self).save(*args, **kwargs)
ManejadorDeMapas.delete_mapfile(self.mapa.id_mapa)
return True
def dame_mapserver_layer_def(self, connectiontype='POSTGIS'):
include_items, items_aliases = self.capa.metadatos.dame_gml_atributos()
srid = 4326 if self.mapa.tipo_de_mapa in ('public_layers', 'user') and self.capa.srid != 4326 else int(self.capa.dame_projection)
if self.capa.tipo_de_capa == CONST_VECTOR:
data = {
"connectionType": connectiontype,
"layerName": self.capa.nombre,
"layerTitle": self.capa.dame_titulo.encode('utf-8'),
"layerConnection": self.dame_layer_connection(connectiontype),
"layerData": self.dame_data(srid),
"sldUrl": (urlparse.urljoin(settings.SITE_URL, self.archivo_sld.filename.url)) if self.archivo_sld is not None else "",
"layerType": 'RASTER' if connectiontype == 'WMS' else self.capa.tipo_de_geometria.mapserver_type,
"srid": srid,
"metadataIncludeItems": include_items,
"metadataAliases": items_aliases,
"layerDefinitionOverride": self.texto_input,
"metadata": {},
"driver": self.capa.gdal_driver_shortname,
"rasterBandInfo": "",
"proj4": '',
}
# print "Data sources #: %d"%len(VectorDataSource.objects.filter(capa=self.capa))
if len(VectorDataSource.objects.filter(capa=self.capa)) > 1 and connectiontype!='WMS':
ds = VectorDataSource.objects.filter(capa=self.capa).order_by('data_datetime')
data["timeItem"] = 'data_datetime'
data["timeExtent"] = ','.join([rec.data_datetime.replace(second=0,microsecond=0).isoformat() for rec in ds])
# Por ahora dejo el max...
data["timeDefault"] = ds.last().data_datetime.replace(second=0,microsecond=0).isoformat()
elif self.capa.tipo_de_capa == CONST_RASTER:
data = {
"connectionType": connectiontype,
"layerName": self.capa.nombre,
"layerTitle": self.capa.dame_titulo.encode('utf-8'),
"layerConnection": self.dame_layer_connection(connectiontype),
"layerData": self.dame_data(srid),
"sldUrl": (urlparse.urljoin(settings.SITE_URL, self.archivo_sld.filename.url)) if self.archivo_sld is not None else "",
"layerType": 'RASTER',
"srid": srid,
"metadataIncludeItems": include_items,
"metadataAliases": items_aliases,
"layerDefinitionOverride": self.texto_input,
"metadata": {},
"driver": self.capa.gdal_driver_shortname,
"rasterBandInfo": "",
"proj4": self.capa.proyeccion_proj4,
"layerExtent": self.capa.layer_srs_extent,
}
# print "Data sources #: %d"%RasterDataSource.objects.filter(capa=self.capa).count()
if RasterDataSource.objects.filter(capa=self.capa).count() > 0 and self.capa.gdal_driver_shortname not in ('netCDF', 'HDF5') and connectiontype!='WMS':
ds = RasterDataSource.objects.filter(capa=self.capa).order_by('data_datetime')
data["timeItem"] = 'data_datetime'
data["tileItem"] = 'location'
data["timeIndexData"] = self.capa.dame_datasource_data()
data["timeExtent"] = ','.join([rec.data_datetime.isoformat() for rec in ds])
# Por ahora dejo el max...
data["timeDefault"] = ds.last().data_datetime.isoformat()
# En el caso de GRIB, generamos info extra en rasterBandInfo para aplicar template especifico a posteriori
if self.capa.gdal_driver_shortname == 'GRIB' and connectiontype!='WMS':
if self.mapa.tipo_de_mapa == 'layer_raster_band': # es el caso de una banda específica, tenemos que ver metadatos
data['rasterBandInfo'] = (self.bandas, self.capa.gdal_metadata['variables_detectadas'][self.bandas])
else: # es el caso del mapa por defecto de GRIB, sin variables específicas
if len(self.capa.gdal_metadata['variables_detectadas']) > 0:
# buscamos la banda de temperatura, aunque podría ser cualquier otra definición, y armamos una tupla
# primero una default cualquiera
cualquier_banda = self.capa.gdal_metadata['variables_detectadas'].keys()[0]
data['rasterBandInfo'] = (cualquier_banda, self.capa.gdal_metadata['variables_detectadas'][cualquier_banda])
# luego overrideamos si existe alguna de TMP
for banda, variable in self.capa.gdal_metadata['variables_detectadas'].iteritems():
if variable['elemento'] == 'TMP':
data['rasterBandInfo'] = (banda, variable)
# En el caso de netCDF y HDF5, solo tenemos que overridear el DATA de la capa
elif self.capa.gdal_driver_shortname in ('netCDF', 'HDF5') and connectiontype!='WMS':
prefijo_data = self.capa.gdal_driver_shortname.upper() # NETCDF|HDF5
if self.mapa.tipo_de_mapa == 'layer_raster_band':
data["layerData"] = '{}:{}'.format(data["layerData"], self.bandas)
else:
# if len(self.capa.gdal_metadata['variables_detectadas']) == 0:
if len(self.capa.gdal_metadata['subdatasets']) != 0:
# hay subdatasets y es mapa de capa => estamos obligados a renderizar alguno pues mapserver no se banca el render directo en este caso (NETCDF|HDF5:/path/al/archivo:subdataset)
primer_subdataset_identificador = self.capa.gdal_metadata['subdatasets'][0]['identificador'] # Ejemplo: "formato:/path/al/archivo:subdataset"
data["layerData"] = '{}:{}'.format(data["layerData"], primer_subdataset_identificador)
return data
    def dame_metadatos_asociado_a_banda(self):
        """Return GDAL metadata for the band(s) selected by the current map.

        This version of the method takes the current map's raster_layer into
        account, i.e. it only returns metadata for that "sub-product"; it is
        intended to be called from the map detail view.

        Returns a list with one sorted (key, value) pair list per matching
        band, or [] when no band is selected or metadata cannot be read.
        """
        if self.bandas != '':
            if len(self.capa.gdal_metadata['subdatasets']) > 0:
                # Subdataset case: self.bandas holds a subdataset identifier.
                # NOTE(review): if no subdataset matches, this falls through and
                # implicitly returns None (not []) - confirm callers handle it.
                for b in self.capa.gdal_metadata['subdatasets']:
                    if b.get('identificador') == self.bandas:
                        return [sorted(b['gdalinfo']['metadata'][''].iteritems())]
            else:
                try:
                    res = []
                    bandas = str(self.bandas).split(',')  # list of band ids, e.g. ['4'], ['5', '6']
                    for b in self.capa.gdal_metadata['gdalinfo']['bands']:
                        if str(b['band']) in bandas:
                            metadatos = b['metadata']['']
                            # Inject the band number so the caller can tell bands apart.
                            metadatos['BAND'] = b['band']
                            res.append(sorted(metadatos.iteritems()))
                    return res
                except:
                    # Best-effort: missing/malformed gdalinfo structure yields no results.
                    return []
        else:
            return []
def inicializarMapasDeCapa(instance):
    """Create/refresh every Mapa derived from a Capa instance.

    Builds: the canonical map, a map in the layer's original SRS and - for
    raster layers - one map per detected band/variable (capped by
    settings.CANTIDAD_MAXIMA_DE_BANDAS_POR_RASTER). Finally invalidates the
    cached mapfiles for the owner and for the public-layers map.
    """
    # ------------ create/update maps
    # create the canonical map
    mapa = Mapa(owner=instance.owner, nombre=instance.nombre, id_mapa=instance.id_capa, tipo_de_mapa='layer')
    if instance.tipo_de_capa == CONST_RASTER:
        try:
            print "Intentando setear baselayer..."
            mapa.tms_base_layer = TMSBaseLayer.objects.get(pk=1)
        except:
            # Best-effort: the default TMS base layer (pk=1) may not exist.
            pass
    # First save without rendering, attach the layer, then save again to render.
    mapa.save(escribir_imagen_y_mapfile=False)
    MapServerLayer(mapa=mapa, capa=instance, orden_de_capa=0).save()
    mapa.save()
    # create the map in the layer's original projection
    extent_capa = instance.layer_srs_extent
    mapa_layer_srs = Mapa(owner=instance.owner, nombre=instance.nombre + '_layer_srs', id_mapa=instance.id_capa + '_layer_srs', tipo_de_mapa='layer_original_srs', srs=instance.srid, extent=extent_capa)
    # This handles the case of an unidentified (non-EPSG) projection
    if instance.proyeccion_proj4 is not None and instance.proyeccion_proj4 != '' and not RepresentsPositiveInt(instance.srid):
        print "Seteando proyeccion custom para el mapa {}".format(instance.proyeccion_proj4)
        mapa_layer_srs.srs = instance.proyeccion_proj4
    mapa_layer_srs.save(escribir_imagen_y_mapfile=False)
    MapServerLayer(mapa=mapa_layer_srs, capa=instance, orden_de_capa=0).save()
    mapa_layer_srs.save()
    if instance.tipo_de_capa == CONST_RASTER:
        # One extra map per detected band/variable, up to the configured cap.
        for bandas, variable in take(settings.CANTIDAD_MAXIMA_DE_BANDAS_POR_RASTER, sorted(instance.gdal_metadata['variables_detectadas'].iteritems())):
            # Sanitize the band id so it is safe inside map identifiers.
            id_banda = str(bandas).replace(',', '_').replace('/', '').replace('\\', '.').lower()
            sufijo_mapa = '_band_{}_{}'.format(id_banda, variable['elemento'].lower())
            mapa = Mapa(
                owner=instance.owner,
                nombre=instance.nombre + sufijo_mapa,
                id_mapa=instance.id_capa + sufijo_mapa,
                titulo='{} - {}{}'.format(bandas, variable['elemento'], ': {}'.format(variable['descripcion']) if variable['descripcion'] != '' else ''),
                tipo_de_mapa='layer_raster_band')
            try:
                print "Intentando setear baselayer..."
                mapa.tms_base_layer = TMSBaseLayer.objects.get(pk=1)
            except:
                pass
            mapa.save(escribir_imagen_y_mapfile=False)
            MapServerLayer.objects.create(
                mapa=mapa,
                capa=instance,
                bandas=bandas,
                orden_de_capa=0)
            mapa.save()
    # invalidate the user's cached mapfile
    ManejadorDeMapas.delete_mapfile(instance.owner.username)
    # invalidate the public-layers cached mapfile
    ManejadorDeMapas.delete_mapfile('mapground_public_layers')
@receiver(post_save, sender=Capa)
def onCapaPostSave(sender, instance, created, **kwargs):
print 'onCapaPostSave %s'%(str(instance))
if created:
print '...capa creada'
# ------------ creamos y completamos metadatos y atributos
metadatos | |
for project"""
_path = repo.get_project_path()
if not os.path.isdir(_path):
raise click.BadOptionUsage('name', f"Project '{repo.active_project}': Could not find directory '{_path}'")
_l = Task.get_list(_path, repo.active_task)
if not repo.active_project:
output.error(f"No active project selected")
else:
output.comment(f'Task list for project: {repo.active_project}')
output.comment(f'Active task: {repo.active_task}\n\n')
output.table(_l, headers=['#', '', 'name', 'path'], showindex="always")
# ######################### LOAD #############################################
def rsync_meta(options: List[str], remote_path: str, local_path: str):
    """Fetch remote data via the bundled rsync wrapper script.

    :param options: rsync option strings, passed to the script as a single argument
    :param remote_path: source path on the remote side
    :param local_path: destination directory (created if missing)
    :return: None
    """
    script = os.path.join(cmd_dir, 'rsync-metadata.sh')
    os.makedirs(local_path, exist_ok=True)
    joined_options = " ".join(options)
    subprocess.run([script, joined_options, remote_path, local_path])
# TODO invoke check that loaded data is consistent (some SAFE dirs in dias are empty or contains only preview folder)
@cli_task.command('get-data')
@option_locate_task
@click.option('-d', '--data', 'data', is_flag=True, default=False, help='load meta-data and data')
@click.option('-m', '--master', is_flag=True, default=False, help='load master')
@click.option('-s', '--slave', is_flag=True, default=False, help='load slave')
@click.option('--dry-run', is_flag=True, default=False, help='dry-run, do not perform actual download')
@pass_task
@ensure_task_resolved
def task_get(task: Task, data, master, slave, dry_run):
    """ load satellite data into task.eodata directory"""
    # Zsh and other shells expand patterns passed as arguments, so be sure rsync runs in BASH!!!
    if task.config.get('source') != 'Sentinel-1':
        # Use the raw config value in the message (get_valid_key returns an (error, value) tuple).
        raise click.BadParameter(f"Only Sentinel-1 supported for now, task source is {task.config.get('source')}")
    if not master and not slave:
        # BUGFIX: message read "at least on of --master or --salve".
        raise click.BadOptionUsage('master', "at least one of --master or --slave option is required")
    # Config keys that must validate before any download is attempted.
    ks = ['eodata', 'master']
    if task.kind == 'cluster':
        ks.append('slave')
    for k in ks:
        e = task.validate(k)
        if e is not None:
            raise click.BadArgumentUsage(f"'{k}' is invalid, reason: {','.join(e)}")

    def _rsync_meta(key, task: Task, options):
        """Download 'master' or 'slave' metadata into the local eodata tree."""
        try:
            _p = _local_eodata_relative_path(task.config['eodata'], task.config[key + '_path'])
            local_path = os.path.dirname(_p)
            output.info(f"loading {key} into '{local_path}'")
            rsync_meta(options, task.config[key + '_path'], local_path)
            output.success(f"{key} metadata loaded")
        except OSError as er:
            # BUGFIX: was log.exception(e) - 'e' is undefined here (NameError).
            log.exception(er)
            raise click.BadParameter(f"{er}")

    opts = []
    if dry_run:
        opts.append('--dry-run')
        opts.append('-vv')
    if data:
        e, iw = task.get_valid_key('swath')  # type: str
        if e:
            raise OCLIException(f"Swath is invalid: {e}")
        # Full download: only fetch .tiff rasters matching the selected sub-swath.
        opts.append('--include "*/"')
        opts.append(f'--include "*{iw.lower()}*.tiff"')
    else:
        # Metadata-only: exclude bulky payload directories and rasters.
        opts.append("--exclude 'support/*'")
        opts.append("--exclude 'preview/*'")
        opts.append("--exclude 'annotation/calibration/*'")
        opts.append("--exclude '*.tiff'")
    if master:
        _rsync_meta('master', task, opts)
    if slave:
        _rsync_meta('slave', task, opts)
# ######################### LS #############################################
@cli_task.command('ls')
@option_locate_task
@click.option('-m', '--master', is_flag=True, default=False, help='list master directory')
@click.option('-s', '--slave', is_flag=True, default=False, help='list slave directory')
@click.option('-a', '--list_all', is_flag=True, default=False, help='list all task directories')
@click.option('--ai', 'ai_results', is_flag=True, default=False, help='list slave directory')
@click.option('--stack', 'stack_results', is_flag=True, default=False, help='list slave directory')
@click.option('-t', '--terse', is_flag=True, default=False, help='terse output')
@pass_task
@ensure_task_resolved
def task_ls(task: Task, master, slave, ai_results, stack_results, terse, list_all):
    """ list content of task master or slave directory"""

    def comment(text):
        # Suppress commentary in terse mode (renamed param: was shadowing builtin 'str').
        if not terse:
            output.comment(text)

    e, eo_data = task.get_valid_key('eodata')
    if not any([master, slave, ai_results, stack_results, list_all]):
        # No explicit target: show a terse summary of everything.
        list_all = terse = True
    if terse:
        cmd = ['du', '-shc']  # disk-usage summary
    else:
        cmd = ['ls', '-lahR']  # full recursive listing
    if e:
        raise click.BadArgumentUsage(f"Task config key 'eodata' is invalid, reason: {','.join(e)}")
    paths = []
    _, kind = task.get_valid_key('kind')
    if list_all or master:
        e, _m = task.get_valid_key('master_path')
        if e:
            raise click.BadArgumentUsage(f"Task config key 'master_path' is invalid, reason: {','.join(e)}")
        _p = _local_eodata_relative_path(eo_data, _m)
        comment(f"master path: {_p}\n\n")
        paths += [_p]
    if kind in ['cluster'] and (list_all or slave):
        e, _s = task.get_valid_key('slave_path')
        if e:
            raise click.BadArgumentUsage(f"Task config key 'slave_path' is invalid, reason: {','.join(e)}")
        _p = _local_eodata_relative_path(eo_data, _s)
        # BUGFIX: the label said "master path" for the slave directory.
        comment(f"slave path: {_p}\n\n")
        paths += [_p]
    if list_all or ai_results:
        try:
            _p = task.get_ai_results_path(full=True)
            comment(f"ai results path: {_p}\n\n")
            paths += [_p]
        except AssertionError as e:
            raise click.UsageError(str(e))
    if list_all or stack_results:
        try:
            _p = task.get_stack_path(full=True)
            comment(f"Stack results path: {_p}\n\n")
            paths += [_p]
        except AssertionError as e:
            raise click.UsageError(str(e))
    if not len(paths):
        raise click.UsageError("No ls targets provided")
    try:
        subprocess.run(cmd + paths)
    except Exception as e:
        log.exception(e)
# ######################### CLEAR #############################################
@cli_task.command('clear')
@option_yes
@click.option('-d', '--data', 'data', is_flag=True, default=False, help='clear meta-data and data')
@click.option('-m', '--master', is_flag=True, default=False, help='clear master')
@click.option('-s', '--slave', is_flag=True, default=False, help='clear slave')
@pass_task
@ensure_task_resolved
def task_clear(task: Task, master, slave, data, yes):
    """ delete task data and results"""
    e, eo_data = task.get_valid_key('eodata')
    if e is not None:
        raise click.BadArgumentUsage(f"Task config key 'eodata' is invalid, reason: {','.join(e)}")

    def __clear_data(key):
        """Remove the local directory referenced by the given task config key."""
        e, _p = task.get_valid_key(key)
        if e is not None:
            raise click.BadArgumentUsage(f"Task config key '{key}' is invalid, reason: {','.join(e)}")
        try:
            shutil.rmtree(_local_eodata_relative_path(eo_data, task.config[key]))
        except OSError as er:
            raise click.UsageError(f"{er}")

    # BUGFIX: was 'data & yes_or_confirm(...)' - bitwise '&' does not
    # short-circuit, so the confirmation prompt appeared even without --data.
    if data and yes_or_confirm(yes, f'Remove all product data for task {task.name}?'):
        if master:
            __clear_data('master_path')
        if slave:
            __clear_data('slave_path')
    if master or slave:
        # TODO clean snap and AI out data
        output.comment("Not implemented - remove SNAP intermediate data")
        output.comment("Not implemented - remove AI out data")
# ######################### CLONE #############################################
# todo --activate option
@cli_task.command('clone')
@option_yes
# BUGFIX: was @click.option('--quiet' '-q', ...) - the missing comma made the
# two strings concatenate into a single bogus option name '--quiet-q'.
@click.option('--quiet', '-q', 'quiet', is_flag=True, default=False, help='Show cloned task')
@click.option('-a', '--activate', 'activate', is_flag=True, default=False, help="Activate cloned task")
@click.option('--target', 'project', help="target project name , default to active project")
@option_locate_task
@click.argument('new_name', metavar="<NEW NAME>", type=click.STRING, required=True)
@click.argument('args', nargs=-1)
@pass_task
@ensure_task_resolved
def task_clone(task: Task, project, new_name, yes, args, activate, quiet):
    """ clone existed task """
    # TODO - SHARED code with task_create
    try:
        if project:
            # Clone into a different project when --target is given.
            task.project = project
        old_name = task.name
        task.name = new_name
        task.path = task.get_path_by_name()
        if not is_path_exists_or_creatable(task.path):
            raise click.UsageError(f"Could not create {task.path}")
        _exists, _rc = task.get_task_rc()
        if _exists and not yes_or_confirm(yes,
                                          f"Target task directory '{task.path}' contains existed task, Overwrite?"):
            return
        task.create()
        output.success(f'task "{old_name}" cloned to "{task.name}" in project "{task.project}"')
        if len(args):
            # Apply extra 'key=value' settings to the freshly cloned task.
            click.get_current_context().invoke(task_set, args=args, yes=yes)
        if activate:
            click.get_current_context().invoke(task_activate, name=new_name, quiet=quiet)
    except OSError as e:
        # BUGFIX: message read "Could not not clone task".
        raise click.BadParameter(f"Could not clone task: {e}")
# ######################### MAKE #############################################
@cli_task.group('make')
# NOTE: task resolution is deferred to the leaf commands, so the group itself
# takes no task argument (hence the decorators below stay disabled).
# @pass_task
# @ensure_task_resolved
def task_run():
    """ Run data-processing """
    # output.comment(f"Start processing task '{task.name}'")
    pass
@task_run.group('stack')
def task_run_stack():
    """Make products stack"""
    # Sub-commands register below: 'snap' (ESA SNAP) and 'sarpy' stack builders.
    pass
@task_run_stack.command('snap')
@option_locate_task
@click.option('--dry-run', is_flag=True, default=False, help='dry-run, do not perform actual running')
@click.option('--gpt-cache', default='40G', help='ESA SNAP gpt RAM cache max size')
@option_yes
@pass_task
@pass_repo
@ensure_task_resolved
def task_run_stack_snap(repo: Repo, task: Task, yes, dry_run, gpt_cache):
    """Make stack with ESA SNAP pipeline"""
    # Only paired (master/slave) tasks can be stacked with SNAP.
    task_kind = task.config.get('kind')
    if task_kind not in ['cluster']:
        raise click.UsageError(f'Only task with kind "cluster" supported with snap')
    errors = task.validate_all(ignore=['predictor'])
    if errors:
        raise click.UsageError(f'Task config is invalid, reason: {" ".join(errors)} ')
    output.comment(f"Start stacking '{task.name}'")
    snap_path = task.get_stack_path(full=True)
    # Refuse to clobber a non-empty stack directory without confirmation.
    if os.path.isdir(snap_path) and len(os.listdir(snap_path)) != 0:
        if not yes_or_confirm(yes, f"Stack directory '{snap_path}' exists. Override?"):
            return
    task_stack_snap(task,
                    dry_run=dry_run,
                    gpt_cache=gpt_cache,
                    cmd_dir=cmd_dir,
                    log=click.echo
                    )
# ############################### STACK SARPY ################################
# TODO use original commant from sarpy-cli, mount it here (like pro)
@task_run_stack.command('sarpy')
@option_locate_task
@click.option('--skip-verified', is_flag=True, default=False, help='Skip stack creation if stack is valid', )
@click.option('--dry-run', is_flag=True, default=False, help='dry-run, do not perform actual running')
@click.option('--decimation', 'decimation',
              help='decimation vertical horizontal',
              type=(int, int), default=(1, 6),
              show_default=True,
              )
@click.option('--filter', 'decimation_filter',
              help='decimation filter',
              type=click.Choice(['gauss', 'median']), default='gauss',
              show_default=True,
              )
@click.option('--single', help='Single product stack',
              is_flag=True, default=False,
              )
@click.option('--no-clean', help='do not clean intermediate results',
              is_flag=True, default=False,
              )
@option_yes
@pass_task
@pass_repo
@ensure_task_resolved
def task_run_stack_sarpy(repo: Repo, task: Task, yes, dry_run, decimation, no_clean, single, skip_verified,
                         decimation_filter):
    """Make stack with SARPY pipeline"""
    # BUGFIX: the help text claimed "ESA SNAP pipeline"; this command runs sarpy.
    e = task.validate_all(ignore=['predictor'])
    if e:
        raise click.UsageError(f'Task config is invalid, reason: {" ".join(e)} ')
    output.comment(f"Start stacking '{task.name}'")
    try:
        _eodata = task.config['eodata']
        snap_path = task.get_stack_path(full=True)
        output.info(f"Creating products stack in {snap_path}")
        os.makedirs(snap_path, exist_ok=True)
        # Imported lazily so the sarpy dependency is not needed at CLI start-up.
        from ocli.sarpy.cli import full_stack, single_stack
        p0 = perf_counter()
        kw = dict(
            swath=[task.config['swath']],
            pols=['VV', 'VH'],
            decimation=decimation,
            verbose=repo.verbose,
            out=snap_path,
            yes=yes,
            no_clean=no_clean,
            dry_run=dry_run,
            skip_verified=skip_verified,
            decimation_filter=decimation_filter,
        )
        if single:
            click.get_current_context().invoke(
                single_stack,
                master=_local_eodata_relative_path(_eodata, task.config['master_path']),
                **kw
            )
        else:
            click.get_current_context().invoke(
                full_stack,
                master=_local_eodata_relative_path(_eodata, task.config['master_path']),
                slave=_local_eodata_relative_path(_eodata, task.config['slave_path']),
                **kw
            )
        p0 = perf_counter() - p0  # elapsed stacking time in seconds
        conf = task.config
        conf['stack_processor'] = 'sarpy'
        conf['stack_performance'] = p0
        conf['stack_created'] = datetime.now().strftime("%F %T")
        # NOTE(review): conf is task.config mutated in place; no explicit
        # task save happens here - confirm persistence occurs elsewhere.
    except Exception as e:
        raise OCLIException(f"{e}")
@task_run.command('recipe')
@option_locate_task
@option_yes
@option_roi
@click.option('--print', 'print_results', is_flag=True, default=False,
help="Print recipe, do not save file",
cls=MutuallyExclusiveOption, mutually_exclusive=["file"],
)
@click.option('--quiet', '-q', is_flag=True, default=False)
@click.option('--edit', 'edit', default=False, is_flag=True, help='Open generated recipe in editor')
@click.option('--override', 'override', is_flag=True, default=False,
help='Override default recipe file if exists')
@click.option('--force', 'force', is_flag=True, default=False,
help='dry-run, do not perform most of error checks, use to generate AI recipe for learning phase')
@click.option('-f', '--file', default=None, help='Override auto-generated AI recipe filename and path ',
cls=MutuallyExclusiveOption, mutually_exclusive=["print"],
)
@click.option('--zone-by-roi', is_flag=True, default=False,
cls=MutuallyExclusiveOption, mutually_exclusive=["zone"],
help='Define zone by ROI envelope (rectangular bounding box containing all ROI points)')
@click.option('-z', '--zone', type=click.INT, nargs=4, default=None,
cls=MutuallyExclusiveOption, mutually_exclusive=["zone-by-roi"],
help='Define zone as minY minX maxY maxX in Pixels coordinates')
@click.option('--from-template',
type=click.STRING,
cls=MutuallyExclusiveOption, mutually_exclusive=["clusters"],
default=None,
help='Add recipe values from template')
@click.option('-c', '--clusters', type=click.INT, default=None,
cls=MutuallyExclusiveOption, mutually_exclusive=["from-template"],
help='number of generated clusters in predictor. '
'NOTE: used only in fit (learn) phase, ignored in predict phase')
@pass_task
@pass_repo
@ensure_task_resolved
def task_recipe(repo: Repo, task: Task, force, override,
roi_id: int, file: str, edit: bool,
zone: tuple, zone_by_roi: bool,
from_template,
clusters: int,
quiet,
yes,
print_results):
""" Generate AI recipe file
\b
* use --force to generate recipe file with incomplete task settings (for ex. when creating predictors in learning phase)
* use --override to override existed task's default recipe | |
type stringMaxLength30
self.validate_stringMaxLength30(self.addressLine2)
elif nodeName_ == 'addressLine3':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'addressLine3')
value_ = self.gds_validate_string(value_, node, 'addressLine3')
self.addressLine3 = value_
self.addressLine3_nsprefix_ = child_.prefix
# validate type stringMaxLength30
self.validate_stringMaxLength30(self.addressLine3)
elif nodeName_ == 'town':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'town')
value_ = self.gds_validate_string(value_, node, 'town')
self.town = value_
self.town_nsprefix_ = child_.prefix
# validate type stringMaxLength40
self.validate_stringMaxLength40(self.town)
elif nodeName_ == 'exactMatch':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'exactMatch')
value_ = self.gds_validate_string(value_, node, 'exactMatch')
self.exactMatch = value_
self.exactMatch_nsprefix_ = child_.prefix
# validate type booleanEnum
self.validate_booleanEnum(self.exactMatch)
elif nodeName_ == 'province':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'province')
value_ = self.gds_validate_string(value_, node, 'province')
self.province = value_
self.province_nsprefix_ = child_.prefix
# validate type stringMaxLength30
self.validate_stringMaxLength30(self.province)
elif nodeName_ == 'postcode':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'postcode')
value_ = self.gds_validate_string(value_, node, 'postcode')
self.postcode = value_
self.postcode_nsprefix_ = child_.prefix
# validate type stringMaxLength9
self.validate_stringMaxLength9(self.postcode)
elif nodeName_ == 'country':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'country')
value_ = self.gds_validate_string(value_, node, 'country')
self.country = value_
self.country_nsprefix_ = child_.prefix
# validate type stringMinLength2MaxLength2
self.validate_stringMinLength2MaxLength2(self.country)
# end class nameAndAddressRequestType
class nameAndAddressResponseType(GeneratedsSuper):
    """Information relating to name and address for a participant
    in the consignment.
    Examples of a participant are:
    The Sender - the company sending the consignment
    The Receiver - the company receiving the consignment
    The Collection Address - the address from which the consignment is picked
    up
    The Delivery Address - the address to which the consignment should be
    delivered"""
    __hash__ = GeneratedsSuper.__hash__
    # generateDS subclass-substitution hooks (see factory()).
    subclass = None
    superclass = None
    def __init__(self, name=None, addressLine1=None, addressLine2=None, addressLine3=None, town=None, province=None, postcode=None, country=None, gds_collector_=None, **kwargs_):
        # Each field is validated on assignment; violations are reported via
        # gds_collector_ (when one is provided) instead of being raised.
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.name = name
        self.validate_stringMaxLength40(self.name)
        self.name_nsprefix_ = None
        self.addressLine1 = addressLine1
        self.validate_stringMaxLength30(self.addressLine1)
        self.addressLine1_nsprefix_ = None
        self.addressLine2 = addressLine2
        self.validate_stringMaxLength30(self.addressLine2)
        self.addressLine2_nsprefix_ = None
        self.addressLine3 = addressLine3
        self.validate_stringMaxLength30(self.addressLine3)
        self.addressLine3_nsprefix_ = None
        self.town = town
        self.validate_stringMaxLength40(self.town)
        self.town_nsprefix_ = None
        self.province = province
        self.validate_stringMaxLength30(self.province)
        self.province_nsprefix_ = None
        self.postcode = postcode
        self.validate_stringMaxLength9(self.postcode)
        self.postcode_nsprefix_ = None
        self.country = country
        self.validate_stringMinLength2MaxLength2(self.country)
        self.country_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # generateDS extension point: when a subclass module is registered,
        # instantiate the registered subclass instead of this class.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, nameAndAddressResponseType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if nameAndAddressResponseType.subclass:
            return nameAndAddressResponseType.subclass(*args_, **kwargs_)
        else:
            return nameAndAddressResponseType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- Generated accessors (generateDS style); plain attribute access
    # --- works equally well on these fields.
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_name(self):
        return self.name
    def set_name(self, name):
        self.name = name
    def get_addressLine1(self):
        return self.addressLine1
    def set_addressLine1(self, addressLine1):
        self.addressLine1 = addressLine1
    def get_addressLine2(self):
        return self.addressLine2
    def set_addressLine2(self, addressLine2):
        self.addressLine2 = addressLine2
    def get_addressLine3(self):
        return self.addressLine3
    def set_addressLine3(self, addressLine3):
        self.addressLine3 = addressLine3
    def get_town(self):
        return self.town
    def set_town(self, town):
        self.town = town
    def get_province(self):
        return self.province
    def set_province(self, province):
        self.province = province
    def get_postcode(self):
        return self.postcode
    def set_postcode(self, postcode):
        self.postcode = postcode
    def get_country(self):
        return self.country
    def set_country(self, country):
        self.country = country
def validate_stringMaxLength40(self, value):
result = True
# Validate type stringMaxLength40, a restriction on xsd:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if len(value) > 40:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on stringMaxLength40' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_stringMaxLength30(self, value):
result = True
# Validate type stringMaxLength30, a restriction on xsd:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if len(value) > 30:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on stringMaxLength30' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_stringMaxLength9(self, value):
result = True
# Validate type stringMaxLength9, a restriction on xsd:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if len(value) > 9:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on stringMaxLength9' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_stringMinLength2MaxLength2(self, value):
result = True
# Validate type stringMinLength2MaxLength2, a restriction on xsd:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
if len(value) > 2:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on stringMinLength2MaxLength2' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
if len(value) < 2:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd minLength restriction on stringMinLength2MaxLength2' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def hasContent_(self):
if (
self.name is not None or
self.addressLine1 is not None or
self.addressLine2 is not None or
self.addressLine3 is not None or
self.town is not None or
self.province is not None or
self.postcode is not None or
self.country is not None
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='nameAndAddressResponseType', pretty_print=True):
        """Serialize this object as an XML element to *outfile* at indent *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('nameAndAddressResponseType')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Prefer the tag name captured during parsing, when available.
        if self.original_tagname_ is not None and name_ == 'nameAndAddressResponseType':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='nameAndAddressResponseType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='nameAndAddressResponseType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing element.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='nameAndAddressResponseType'):
        # This type defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='nameAndAddressResponseType', fromsubclass_=False, pretty_print=True):
        """Write each non-None field as an XML-escaped child element."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # One identical stanza per optional field: skip when None, otherwise
        # emit <field>escaped-value</field> with the captured namespace prefix.
        if self.name is not None:
            namespaceprefix_ = self.name_nsprefix_ + ':' if (UseCapturedNS_ and self.name_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sname>%s</%sname>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.name), input_name='name')), namespaceprefix_ , eol_))
        if self.addressLine1 is not None:
            namespaceprefix_ = self.addressLine1_nsprefix_ + ':' if (UseCapturedNS_ and self.addressLine1_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%saddressLine1>%s</%saddressLine1>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.addressLine1), input_name='addressLine1')), namespaceprefix_ , eol_))
        if self.addressLine2 is not None:
            namespaceprefix_ = self.addressLine2_nsprefix_ + ':' if (UseCapturedNS_ and self.addressLine2_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%saddressLine2>%s</%saddressLine2>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.addressLine2), input_name='addressLine2')), namespaceprefix_ , eol_))
        if self.addressLine3 is not None:
            namespaceprefix_ = self.addressLine3_nsprefix_ + ':' if (UseCapturedNS_ and self.addressLine3_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%saddressLine3>%s</%saddressLine3>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.addressLine3), input_name='addressLine3')), namespaceprefix_ , eol_))
        if self.town is not None:
            namespaceprefix_ = self.town_nsprefix_ + ':' if (UseCapturedNS_ and self.town_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%stown>%s</%stown>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.town), input_name='town')), namespaceprefix_ , eol_))
        if self.province is not None:
            namespaceprefix_ = self.province_nsprefix_ + ':' if (UseCapturedNS_ and self.province_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sprovince>%s</%sprovince>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.province), input_name='province')), namespaceprefix_ , eol_))
        if self.postcode is not None:
            namespaceprefix_ = self.postcode_nsprefix_ + ':' if (UseCapturedNS_ and self.postcode_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%spostcode>%s</%spostcode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.postcode), input_name='postcode')), namespaceprefix_ , eol_))
        if self.country is not None:
            namespaceprefix_ = self.country_nsprefix_ + ':' if (UseCapturedNS_ and self.country_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%scountry>%s</%scountry>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.country), input_name='country')), namespaceprefix_ , eol_))
    def build(self, node, gds_collector_=None):
        """Populate this object from an ElementTree *node*; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip the namespace from the tag to get the local element name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # This type defines no XML attributes to parse.
        pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'name':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'name')
value_ = self.gds_validate_string(value_, node, 'name')
self.name = value_
self.name_nsprefix_ = child_.prefix
# validate type stringMaxLength40
self.validate_stringMaxLength40(self.name)
elif nodeName_ == 'addressLine1':
value_ = | |
import codecs
from collections import namedtuple
from datetime import datetime, timedelta, timezone
import errno
import fcntl
import html
import os
import pty
import re
from select import select
import shlex
import signal
import tempfile
import traceback
import sublime # type: ignore
import sublime_plugin # type: ignore
# Location of this package and its bundled per-application config files.
this_package = os.path.dirname(__file__)
config_dir = os.path.join(this_package, 'config')
# Fixed terminal geometry advertised to the shell (see _initial_profile below).
terminal_rows = 24
terminal_cols = 80
_initial_profile = r'''
# Read the standard profile, to give a familiar environment. The profile can
# detect that it is in GidTerm using the `TERM_PROGRAM` environment variable.
export TERM_PROGRAM=Sublime-GidTerm
if [ -r ~/.profile ]; then . ~/.profile; fi
# Replace the settings needed for GidTerm to work, notably the prompt formats.
PROMPT_DIRTRIM=
_gidterm_ps1 () {
status=$?
old_prompt_command=$1
PS1="\$ ";
eval "${old_prompt_command}";
PS1="\\[\\e[1p${status}@\\w\\e[~\\e[5p\\]${PS1}\\[\\e[~\\]";
tmpfile=${GIDTERM_CACHE}.$$;
{
shopt -p &&
declare -p | grep -v '^declare -[a-qs-z]*r' &&
declare -f &&
alias -p;
} > ${tmpfile} && mv ${tmpfile} ${GIDTERM_CACHE};
}
# The old `PROMPT_COMMAND` may be a function that, on reload, has not been
# declared when `_gidterm_ps1` is being declared. If `${GIDTERM_PC}` appears
# directly in the `_gidterm_ps1` declaration, the undefined function can cause
# an error. Instead we pass the old `PROMPT_COMMAND` as a parameter.
GIDTERM_PC=${PROMPT_COMMAND:-:}
PROMPT_COMMAND='_gidterm_ps1 "${GIDTERM_PC}"'
PS0='\e[0!p'
PS2='\e[2!p'
export TERM=ansi
# Set LINES and COLUMNS to a standard size for commands run by the shell to
# avoid tools creating wonky output, e.g. many tools display a completion
# percentage on the right side of the screen. man pages are formatted to fit
# the width COLUMNS. Prevent bash from resetting these variables.
#
shopt -u checkwinsize
export COLUMNS=%d
export LINES=%d
# Avoid paging by using `cat` as the default pager. This is generally nicer
# because you can scroll and search using Sublime Text. For situations where
# the pager is typically used to see the first entries, use command options
# like `git log -n 5` or pipe to `head`.
export PAGER=cat
# Don't add control commands to the history
export HISTIGNORE=${HISTIGNORE:+${HISTIGNORE}:}'*# [@gidterm@]'
# Specific configuration to make applications work well with GidTerm
GIDTERM_CONFIG="%s"
export RIPGREP_CONFIG_PATH=${GIDTERM_CONFIG}/ripgrep
''' % (terminal_cols, terminal_rows, config_dir)
# Map shell exit statuses that indicate death-by-signal (128 + signum) to a
# marker plus the signal name, e.g. '137' -> '\U0001f5f2SIGKILL'.
_exit_status_info = {}  # type: dict[str, str]
for name in dir(signal):
    if name.startswith('SIG') and not name.startswith('SIG_'):
        if name in ('SIGRTMIN', 'SIGRTMAX'):
            # Real-time signal bounds are platform-dependent; skip them.
            continue
        try:
            signum = int(getattr(signal, name))
        except Exception:
            # Not a numeric signal constant: skip this name.  The original
            # `pass` fell through and recorded a stale value of `signum`
            # from the previous iteration (or raised NameError on the first
            # failure); `continue` is the correct behaviour.
            continue
        _exit_status_info[str(signum + 128)] = '\U0001f5f2' + name
def warn(message):
    # type: (str) -> None
    """Write a warning to the Sublime console, tagged as coming from GidTerm."""
    print('GidTerm: [WARN] ' + message)
def timedelta_seconds(seconds):
    # type: (float) -> timedelta
    """Return *seconds* rounded to the nearest whole second as a timedelta."""
    return timedelta(seconds=int(round(seconds)))
# Maximum number of characters shown in a panel title.
TITLE_LENGTH = 32
PROMPT = '$'
ELLIPSIS = '\u2025'       # two-dot leader, used when truncating
LONG_ELLIPSIS = '\u2026'  # horizontal ellipsis
def _get_package_location(winvar):
    # type: (dict[str, str]) -> str
    """Return this package's path relative to the parent of Sublime's packages dir."""
    packages_root = winvar['packages']
    package_dir = os.path.dirname(__file__)
    assert package_dir.startswith(packages_root)
    unwanted_prefix = os.path.dirname(packages_root)
    # +1 also removes the pathname delimiter '/'
    return package_dir[len(unwanted_prefix) + 1:]
# Registry mapping a view id to the panel object that drives it.
panel_cache = {}  # type: dict[int, DisplayPanel|LivePanel]
def cache_panel(view, panel):
    # type: (sublime.View, DisplayPanel|LivePanel) -> None
    """Register *panel* as the handler for *view*."""
    panel_cache[view.id()] = panel
def uncache_panel(view):
    # type: (sublime.View) -> None
    """Drop the panel registered for *view*; warn if it was never cached."""
    missing = object()
    if panel_cache.pop(view.id(), missing) is missing:
        warn('panel not found: {}'.format(panel_cache))
def get_panel(view):
    # type: (sublime.View) -> DisplayPanel|LivePanel|None
    """Return the cached panel for *view*.

    If no panel is cached but the view is flagged as a GidTerm display
    (e.g. after a plugin reload), a DisplayPanel is recreated and cached.
    Returns None for unrelated views.
    """
    cached = panel_cache.get(view.id())
    if cached is not None:
        return cached
    if view.settings().get('is_gidterm_display'):
        cached = DisplayPanel(view)
        cache_panel(view, cached)
    return cached
def get_display_panel(view):
    # type: (sublime.View) -> DisplayPanel
    """Like get_panel, but the caller guarantees the view is a display panel."""
    panel = get_panel(view)
    assert isinstance(panel, DisplayPanel)
    return panel
def gidterm_decode_error(e):
    # type: (...) -> tuple[str, int]
    """Codec error handler: recover bytes that are not valid UTF-8.

    Such text is most likely Latin-1; Windows-1252 is a superset of Latin-1
    and may be present in downloaded files, so try that first and fall back
    to Unicode replacement characters.
    """
    # TODO: Use the LANG setting to select appropriate fallback encoding
    bad_bytes = e.object[e.start:e.end]
    try:
        repaired = bad_bytes.decode('windows-1252')
    except UnicodeDecodeError:
        # If even that can't decode, fallback to using Unicode replacement char
        repaired = bad_bytes.decode('utf8', 'replace')
    warn('{}: replacing {!r} with {!r}'.format(e.reason, bad_bytes, repaired.encode('utf8')))
    return repaired, e.end
codecs.register_error('gidterm', gidterm_decode_error)
class Terminal:
    """Bash subprocess running on a pseudo-terminal.

    Owns the child pid and the master pty file descriptor, and incrementally
    decodes the child's byte stream as UTF-8 (with the registered 'gidterm'
    fallback handler for stray Windows-1252 bytes).
    """
    def __init__(self):
        # type: () -> None
        self.pid = None  # type: int|None
        self.fd = None  # type: int|None
        utf8_decoder_factory = codecs.getincrementaldecoder('utf8')
        self.decoder = utf8_decoder_factory(errors='gidterm')
    def __del__(self):
        # type: () -> None
        # Best-effort cleanup in case the owner never called stop().
        self.stop()
    def start(self, workdir, init_file):
        # type: (str, str) -> None
        """Fork a bash shell on a new pty, started in *workdir*.

        *init_file* is passed as bash's --rcfile, so the shell sources the
        GidTerm profile instead of the user's ~/.bashrc.
        """
        args = [
            'bash', '--rcfile', init_file
        ]
        env = os.environ.copy()
        env.update({
            # If COLUMNS is the default of 80, the shell will break long
            # prompts over two lines, making them harder to search for. It also
            # allows the shell to use UP control characters to edit lines
            # during command history navigation, which is difficult to replicate
            # correctly. Setting COLUMNS to a very large value avoids these
            # behaviours.
            #
            # When displaying command completion lists, bash pages them based
            # on the LINES variable. A large LINES value avoids paging.
            #
            # Note that we tell bash that we have a very large terminal, then,
            # through the init script, tell applications started by bash that
            # they have a more typical terminal size.
            'COLUMNS': '32767',
            'LINES': '32767',
            'TERM': 'ansi',
        })
        self.pid, self.fd = pty.fork()
        if self.pid == 0:
            # child
            try:
                os.chdir(os.path.expanduser(workdir))
            except Exception:
                # Keep going in the inherited cwd rather than dying before
                # exec; the traceback shows up in the terminal output.
                traceback.print_exc()
            os.execvpe('bash', args, env)
        else:
            # Prevent this file descriptor ending up opened in any subsequent
            # child processes, blocking the close(fd) in this process from
            # terminating the shell.
            state = fcntl.fcntl(self.fd, fcntl.F_GETFD)
            fcntl.fcntl(self.fd, fcntl.F_SETFD, state | fcntl.FD_CLOEXEC)
    def stop(self):
        # type: () -> None
        """Close the pty and reap the child process (blocks in waitpid)."""
        if self.fd is not None:
            os.close(self.fd)
            self.fd = None
        if self.pid is not None:
            # Only forget the pid once the child has really terminated.
            pid, status = os.waitpid(self.pid, 0)
            if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                self.pid = None
    def send(self, s):
        # type: (str) -> bool
        """Write *s* to the shell's input; returns False if the pty is closed."""
        if self.fd is None:
            return False
        if s:
            os.write(self.fd, s.encode('utf8'))
        return True
    def ready(self):
        # type: () -> bool
        """True when a read would not block (always True once closed)."""
        fd = self.fd
        if fd is None:
            return True
        rfds, wfds, xfds = select((fd,), (), (), 0)
        return fd in rfds
    def receive(self):
        # type: () -> str
        """Read and decode pending output; '' signals the channel closed."""
        fd = self.fd
        if fd is None:
            return ''
        try:
            buf = os.read(fd, 2048)
        except OSError as e:
            if e.errno == errno.EIO:
                # Linux raises EIO on the master side once the slave closes:
                # flush the incremental decoder and report end-of-output.
                return self.decoder.decode(b'', final=True)
            raise
        return self.decoder.decode(buf, final=not buf)
class TerminalOutput:
    """Parses a Terminal's raw output stream into a sequence of events.

    Iterating an instance yields the namedtuple events declared below,
    produced by splitting the stream on the escape sequences emitted by
    ordinary programs and by the markers the GidTerm bash profile embeds
    in PS0/PS1/PS2 (the private CSI ...p / ...!p sequences).
    """
    # Pattern to match control characters from the terminal that
    # need to be handled specially.
    _escape_pat = re.compile(
        r'('
        r'\x07|'  # BEL
        r'\x08+|'  # BACKSPACE's
        r'\r+|'  # CR's
        r'\n|'  # NL
        r'\x1b(?:'  # Escapes:
        r'[()*+]B|'  # - codeset
        r'\]0;.*?(?:\x07|\x1b\\)|'  # - set title
        r'\[[\x30-\x3f]*[\x20-\x2f]*[\x40-\x7e]'  # - CSI
        r'))'
    )
    # Pattern to match the prefix of above. If it occurs at the end of
    # text, wait for more text to find escape.
    _partial_pat = re.compile(
        r'\x1b([()*+]|\](?:0;?)?.*|\[[\x30-\x3f]*[\x20-\x2f]*)?$'
    )
    # Event types yielded by the iterator.
    NotReady = namedtuple('NotReady', ())  # no output pending right now
    Text = namedtuple('Text', 'text')  # run of plain displayable text
    Prompt1Starts = namedtuple('Prompt1Starts', ())
    Prompt1Stops = namedtuple('Prompt1Stops', ())
    Prompt2Starts = namedtuple('Prompt2Starts', ())
    Prompt2Stops = namedtuple('Prompt2Stops', ())
    OutputStarts = namedtuple('OutputStarts', ())
    OutputStops = namedtuple('OutputStops', ('status', 'pwd'))  # exit status + cwd
    CursorUp = namedtuple('CursorUp', 'n')
    CursorDown = namedtuple('CursorDown', 'n')
    CursorLeft = namedtuple('CursorLeft', 'n')
    CursorRight = namedtuple('CursorRight', 'n')
    CursorMoveTo = namedtuple('CursorMoveTo', 'row col')
    CursorReturn = namedtuple('CursorReturn', 'n')
    LineFeed = namedtuple('LineFeed', ())
    ClearToEndOfLine = namedtuple('ClearToEndOfLine', ())
    ClearToStartOfLine = namedtuple('ClearToStartOfLine', ())
    ClearLine = namedtuple('ClearLine', ())
    Insert = namedtuple('Insert', 'n')
    Delete = namedtuple('Delete', 'n')
    SelectGraphicRendition = namedtuple('SelectGraphicRendition', ('foreground', 'background'))
    def __init__(self, terminal):
        # type: (Terminal) -> None
        self.saved = ''  # unconsumed tail of the stream (possibly a partial escape)
        self.prompt_text = ''  # text accumulated while inside a prompt
        # Prompt kind currently being collected, or None when outside a
        # prompt (set from the prompt escape's argument -- see handle_prompt).
        self.in_prompt = None  # type: str|None
        # Dispatch table: final CSI byte -> handler method.
        self._csi_map = {
            '@': self.handle_insert,
            'A': self.handle_cursor_up,
            'B': self.handle_cursor_down,
            'C': self.handle_cursor_right,
            'D': self.handle_cursor_left,
            'H': self.handle_cursor_moveto,
            'K': self.handle_clear_line,
            'P': self.handle_delete,
            'f': self.handle_cursor_moveto,
            'm': self.handle_rendition,
        }
        # Single shared generator backing __iter__.
        self.iterator = self.loop(terminal)
    def __iter__(self):
        # All iteration shares one generator, so events are consumed once.
        return self.iterator
def loop(self, terminal):
# (Terminal) -> Iterator[namedtuple]
while terminal:
if terminal.ready():
s = terminal.receive()
if s:
yield from self.handle_output(s)
else:
# terminal closed output channel
terminal = None
else:
yield TerminalOutput.NotReady()
    def handle_output(self, text):
        # (str) -> Iterator[namedtuple]
        # Add any saved text from previous iteration, split text on control
        # characters that are handled specially, then save any partial control
        # characters at end of text.
        text = self.saved + text
        parts = self._escape_pat.split(text)
        last = parts[-1]
        match = self._partial_pat.search(last)
        if match:
            # Trailing bytes may be the start of an escape sequence: keep
            # them in self.saved until the next chunk completes them.
            i = match.start()
            parts[-1], self.saved = last[:i], last[i:]
        else:
            self.saved = ''
        # Loop over alternating plain and control items.  re.split with a
        # capturing group yields plain, escape, plain, escape, ... so the
        # `plain` flag toggles each iteration (parts[0] is plain).
        plain = False
        for part in parts:
            plain = not plain
            if self.in_prompt is None:
                if plain:
                    if part:
                        yield TerminalOutput.Text(part)
                else:
                    if part[0] == '\x1b':
                        command = part[-1]
                        if command == 'p':
                            # Private CSI used by the GidTerm bash profile
                            # to mark prompt boundaries.
                            yield from self.handle_prompt(part)
                        else:
                            yield from self.handle_escape(part)
                    else:
                        # BEL / backspaces / CR / NL
                        yield from self.handle_control(part)
            else:
                # Inside a prompt: accumulate everything up to the end marker.
                if not plain and part == '\x1b[~':
                    yield from self.handle_prompt_end(part)
                else:
                    self.prompt_text += part
def handle_prompt(self, part):
# (str) -> Iterator[namedtuple]
arg = part[2:-1]
if arg.endswith('!'):
# standalone prompt
in_prompt = arg[0]
| |
Constraint(expr= - m.b439 + m.b919 <= 0)
# Auto-generated implication constraints: each line enforces
# m.b(914+i) <= m.b(434+i) for i in 0..23 (for b-variables, presumably
# binaries -- TODO confirm against the model generator).
# NOTE(review): the identical 24-constraint group repeats verbatim under new
# constraint names (c3074.., c3098.., ...); redundant but harmless.
m.c3056 = Constraint(expr= - m.b440 + m.b920 <= 0)
m.c3057 = Constraint(expr= - m.b441 + m.b921 <= 0)
m.c3058 = Constraint(expr= - m.b442 + m.b922 <= 0)
m.c3059 = Constraint(expr= - m.b443 + m.b923 <= 0)
m.c3060 = Constraint(expr= - m.b444 + m.b924 <= 0)
m.c3061 = Constraint(expr= - m.b445 + m.b925 <= 0)
m.c3062 = Constraint(expr= - m.b446 + m.b926 <= 0)
m.c3063 = Constraint(expr= - m.b447 + m.b927 <= 0)
m.c3064 = Constraint(expr= - m.b448 + m.b928 <= 0)
m.c3065 = Constraint(expr= - m.b449 + m.b929 <= 0)
m.c3066 = Constraint(expr= - m.b450 + m.b930 <= 0)
m.c3067 = Constraint(expr= - m.b451 + m.b931 <= 0)
m.c3068 = Constraint(expr= - m.b452 + m.b932 <= 0)
m.c3069 = Constraint(expr= - m.b453 + m.b933 <= 0)
m.c3070 = Constraint(expr= - m.b454 + m.b934 <= 0)
m.c3071 = Constraint(expr= - m.b455 + m.b935 <= 0)
m.c3072 = Constraint(expr= - m.b456 + m.b936 <= 0)
m.c3073 = Constraint(expr= - m.b457 + m.b937 <= 0)
m.c3074 = Constraint(expr= - m.b434 + m.b914 <= 0)
m.c3075 = Constraint(expr= - m.b435 + m.b915 <= 0)
m.c3076 = Constraint(expr= - m.b436 + m.b916 <= 0)
m.c3077 = Constraint(expr= - m.b437 + m.b917 <= 0)
m.c3078 = Constraint(expr= - m.b438 + m.b918 <= 0)
m.c3079 = Constraint(expr= - m.b439 + m.b919 <= 0)
m.c3080 = Constraint(expr= - m.b440 + m.b920 <= 0)
m.c3081 = Constraint(expr= - m.b441 + m.b921 <= 0)
m.c3082 = Constraint(expr= - m.b442 + m.b922 <= 0)
m.c3083 = Constraint(expr= - m.b443 + m.b923 <= 0)
m.c3084 = Constraint(expr= - m.b444 + m.b924 <= 0)
m.c3085 = Constraint(expr= - m.b445 + m.b925 <= 0)
m.c3086 = Constraint(expr= - m.b446 + m.b926 <= 0)
m.c3087 = Constraint(expr= - m.b447 + m.b927 <= 0)
m.c3088 = Constraint(expr= - m.b448 + m.b928 <= 0)
m.c3089 = Constraint(expr= - m.b449 + m.b929 <= 0)
m.c3090 = Constraint(expr= - m.b450 + m.b930 <= 0)
m.c3091 = Constraint(expr= - m.b451 + m.b931 <= 0)
m.c3092 = Constraint(expr= - m.b452 + m.b932 <= 0)
m.c3093 = Constraint(expr= - m.b453 + m.b933 <= 0)
m.c3094 = Constraint(expr= - m.b454 + m.b934 <= 0)
m.c3095 = Constraint(expr= - m.b455 + m.b935 <= 0)
m.c3096 = Constraint(expr= - m.b456 + m.b936 <= 0)
m.c3097 = Constraint(expr= - m.b457 + m.b937 <= 0)
m.c3098 = Constraint(expr= - m.b434 + m.b914 <= 0)
m.c3099 = Constraint(expr= - m.b435 + m.b915 <= 0)
m.c3100 = Constraint(expr= - m.b436 + m.b916 <= 0)
m.c3101 = Constraint(expr= - m.b437 + m.b917 <= 0)
m.c3102 = Constraint(expr= - m.b438 + m.b918 <= 0)
m.c3103 = Constraint(expr= - m.b439 + m.b919 <= 0)
m.c3104 = Constraint(expr= - m.b440 + m.b920 <= 0)
m.c3105 = Constraint(expr= - m.b441 + m.b921 <= 0)
m.c3106 = Constraint(expr= - m.b442 + m.b922 <= 0)
m.c3107 = Constraint(expr= - m.b443 + m.b923 <= 0)
m.c3108 = Constraint(expr= - m.b444 + m.b924 <= 0)
m.c3109 = Constraint(expr= - m.b445 + m.b925 <= 0)
m.c3110 = Constraint(expr= - m.b446 + m.b926 <= 0)
m.c3111 = Constraint(expr= - m.b447 + m.b927 <= 0)
m.c3112 = Constraint(expr= - m.b448 + m.b928 <= 0)
m.c3113 = Constraint(expr= - m.b449 + m.b929 <= 0)
m.c3114 = Constraint(expr= - m.b450 + m.b930 <= 0)
m.c3115 = Constraint(expr= - m.b451 + m.b931 <= 0)
m.c3116 = Constraint(expr= - m.b452 + m.b932 <= 0)
m.c3117 = Constraint(expr= - m.b453 + m.b933 <= 0)
m.c3118 = Constraint(expr= - m.b454 + m.b934 <= 0)
m.c3119 = Constraint(expr= - m.b455 + m.b935 <= 0)
m.c3120 = Constraint(expr= - m.b456 + m.b936 <= 0)
m.c3121 = Constraint(expr= - m.b457 + m.b937 <= 0)
m.c3122 = Constraint(expr= - m.b434 + m.b914 <= 0)
m.c3123 = Constraint(expr= - m.b435 + m.b915 <= 0)
m.c3124 = Constraint(expr= - m.b436 + m.b916 <= 0)
m.c3125 = Constraint(expr= - m.b437 + m.b917 <= 0)
m.c3126 = Constraint(expr= - m.b438 + m.b918 <= 0)
m.c3127 = Constraint(expr= - m.b439 + m.b919 <= 0)
m.c3128 = Constraint(expr= - m.b440 + m.b920 <= 0)
m.c3129 = Constraint(expr= - m.b441 + m.b921 <= 0)
m.c3130 = Constraint(expr= - m.b442 + m.b922 <= 0)
m.c3131 = Constraint(expr= - m.b443 + m.b923 <= 0)
m.c3132 = Constraint(expr= - m.b444 + m.b924 <= 0)
m.c3133 = Constraint(expr= - m.b445 + m.b925 <= 0)
m.c3134 = Constraint(expr= - m.b446 + m.b926 <= 0)
m.c3135 = Constraint(expr= - m.b447 + m.b927 <= 0)
m.c3136 = Constraint(expr= - m.b448 + m.b928 <= 0)
m.c3137 = Constraint(expr= - m.b449 + m.b929 <= 0)
m.c3138 = Constraint(expr= - m.b450 + m.b930 <= 0)
m.c3139 = Constraint(expr= - m.b451 + m.b931 <= 0)
m.c3140 = Constraint(expr= - m.b452 + m.b932 <= 0)
m.c3141 = Constraint(expr= - m.b453 + m.b933 <= 0)
m.c3142 = Constraint(expr= - m.b454 + m.b934 <= 0)
m.c3143 = Constraint(expr= - m.b455 + m.b935 <= 0)
m.c3144 = Constraint(expr= - m.b456 + m.b936 <= 0)
m.c3145 = Constraint(expr= - m.b457 + m.b937 <= 0)
m.c3146 = Constraint(expr= - m.b434 + m.b914 <= 0)
m.c3147 = Constraint(expr= - m.b435 + m.b915 <= 0)
m.c3148 = Constraint(expr= - m.b436 + m.b916 <= 0)
m.c3149 = Constraint(expr= - m.b437 + m.b917 <= 0)
m.c3150 = Constraint(expr= - m.b438 + m.b918 <= 0)
m.c3151 = Constraint(expr= - m.b439 + m.b919 <= 0)
m.c3152 = Constraint(expr= - m.b440 + m.b920 <= 0)
m.c3153 = Constraint(expr= - m.b441 + m.b921 <= 0)
m.c3154 = Constraint(expr= - m.b442 + m.b922 <= 0)
m.c3155 = Constraint(expr= - m.b443 + m.b923 <= 0)
m.c3156 = Constraint(expr= - m.b444 + m.b924 <= 0)
m.c3157 = Constraint(expr= - m.b445 + m.b925 <= 0)
m.c3158 = Constraint(expr= - m.b446 + m.b926 <= 0)
m.c3159 = Constraint(expr= - m.b447 + m.b927 <= 0)
m.c3160 = Constraint(expr= - m.b448 + m.b928 <= 0)
m.c3161 = Constraint(expr= - m.b449 + m.b929 <= 0)
m.c3162 = Constraint(expr= - m.b450 + m.b930 <= 0)
m.c3163 = Constraint(expr= - m.b451 + m.b931 <= 0)
m.c3164 = Constraint(expr= - m.b452 + m.b932 <= 0)
m.c3165 = Constraint(expr= - m.b453 + m.b933 <= 0)
m.c3166 = Constraint(expr= - m.b454 + m.b934 <= 0)
m.c3167 = Constraint(expr= - m.b455 + m.b935 <= 0)
m.c3168 = Constraint(expr= - m.b456 + m.b936 <= 0)
m.c3169 = Constraint(expr= - m.b457 + m.b937 <= 0)
m.c3170 = Constraint(expr= - m.b434 + m.b914 <= 0)
m.c3171 = Constraint(expr= - m.b435 + m.b915 <= 0)
m.c3172 = Constraint(expr= - m.b436 + m.b916 <= 0)
m.c3173 = Constraint(expr= - m.b437 + m.b917 <= 0)
m.c3174 = Constraint(expr= - m.b438 + m.b918 <= 0)
m.c3175 = Constraint(expr= - m.b439 + m.b919 <= 0)
m.c3176 = Constraint(expr= - m.b440 + m.b920 <= 0)
m.c3177 = Constraint(expr= - m.b441 + m.b921 <= 0)
m.c3178 = Constraint(expr= - m.b442 + m.b922 <= 0)
m.c3179 = Constraint(expr= - m.b443 + m.b923 <= 0)
m.c3180 = Constraint(expr= - m.b444 + m.b924 <= 0)
m.c3181 = Constraint(expr= - m.b445 + m.b925 <= 0)
m.c3182 = Constraint(expr= - m.b446 + m.b926 <= 0)
m.c3183 = Constraint(expr= - m.b447 + m.b927 <= 0)
m.c3184 = Constraint(expr= - m.b448 + m.b928 <= 0)
m.c3185 = Constraint(expr= - m.b449 + m.b929 <= 0)
m.c3186 = Constraint(expr= - m.b450 + m.b930 <= 0)
m.c3187 = Constraint(expr= - m.b451 + m.b931 <= 0)
m.c3188 = Constraint(expr= - m.b452 + m.b932 <= 0)
m.c3189 = Constraint(expr= - m.b453 + m.b933 <= 0)
m.c3190 = Constraint(expr= - m.b454 + m.b934 <= 0)
m.c3191 = Constraint(expr= - m.b455 + m.b935 <= 0)
m.c3192 = Constraint(expr= - m.b456 + m.b936 <= 0)
m.c3193 = Constraint(expr= - m.b457 + m.b937 <= 0)
m.c3194 = Constraint(expr= - m.b434 + m.b914 <= 0)
m.c3195 = Constraint(expr= - m.b435 + m.b915 <= 0)
m.c3196 = Constraint(expr= - m.b436 + m.b916 <= 0)
m.c3197 = Constraint(expr= - m.b437 + m.b917 <= 0)
m.c3198 = Constraint(expr= - m.b438 + m.b918 <= 0)
m.c3199 = Constraint(expr= - m.b439 + m.b919 <= 0)
m.c3200 = Constraint(expr= - m.b440 + m.b920 <= 0)
m.c3201 = Constraint(expr= - m.b441 + m.b921 <= 0)
m.c3202 = Constraint(expr= - m.b442 + m.b922 <= 0)
m.c3203 = Constraint(expr= - m.b443 + m.b923 <= 0)
m.c3204 = Constraint(expr= - m.b444 + m.b924 <= 0)
m.c3205 = Constraint(expr= - m.b445 + m.b925 <= 0)
m.c3206 = Constraint(expr= - m.b446 + m.b926 <= 0)
m.c3207 = Constraint(expr= - m.b447 + m.b927 <= 0)
m.c3208 = Constraint(expr= - m.b448 + m.b928 <= 0)
m.c3209 = Constraint(expr= - m.b449 + m.b929 <= 0)
m.c3210 = Constraint(expr= - m.b450 + m.b930 <= 0)
m.c3211 = Constraint(expr= - m.b451 + m.b931 <= 0)
m.c3212 = Constraint(expr= - m.b452 + m.b932 <= 0)
m.c3213 = Constraint(expr= - m.b453 + m.b933 <= 0)
m.c3214 = Constraint(expr= - m.b454 + m.b934 <= 0)
m.c3215 = | |
= 0
addButton = QPushButton("+")
addButton.setMaximumSize(25, 25)
addButton.clicked.connect(self._on_add_dynamic_entry)
self.options_layout.addWidget(addButton)
self.count_label = QLabel('0')
self.options_layout.addWidget(self.count_label)
remButton = QPushButton("-")
remButton.setMaximumSize(25, 25)
remButton.clicked.connect(self._on_rem_dynamic_entry)
self.options_layout.addWidget(remButton)
def _on_add_dynamic_entry(self, checked=False, value=None):
self.setUpdatesEnabled(False)
try:
val = value
if val is None:
val = self._dynamic_value
if val is not None:
self._create_dynamic_frame(val)
finally:
self.setUpdatesEnabled(True)
def _create_dynamic_frame(self, value):
entry_frame = ArrayEntry(self._dynamic_items_count, self.type_msg)
self.param_widget.layout().addRow(entry_frame)
entry_frame._createFieldFromDict(value)
self._dynamic_items_count += 1
self.count_label.setText(utf8(self._dynamic_items_count))
    def _on_rem_dynamic_entry(self):
        """Remove the last array entry row and unregister its parameters."""
        if self._dynamic_items_count > 0:
            self._dynamic_items_count -= 1
            item = self.param_widget.layout().takeAt(self._dynamic_items_count)
            self.param_widget.layout().removeItem(item)
            try:
                # remove the referenced parameter, too
                for child in item.widget().children():
                    if isinstance(child, MyComboBox):
                        child.parameter_description.setWidget(None)
                        self.params.remove(child.parameter_description)
                    elif isinstance(child, MainBox):
                        # nested group: recursively drop its fields first
                        child.removeAllFields()
                        self.param_widget.layout().removeWidget(child)
                        child.parameter_description.setWidget(None)
                        self.params.remove(child.parameter_description)
                # detach the row widget so Qt can delete it
                item.widget().setParent(None)
                del item
            except Exception:
                print(traceback.format_exc(3))
            self.count_label.setText(utf8(self._dynamic_items_count))
def createFieldFromValue(self, value, clear_origin_value=False):
self.setUpdatesEnabled(False)
try:
if self._is_dynamic:
self.addDynamicBox()
# Set value used to add dynamic array fields.
# On republish there is an array filled array. So only last enry will be used on add new entry.
if isinstance(value, list):
if value:
self._dynamic_value = value[-1]
else:
self._dynamic_value = value
self.set_values(value)
except Exception:
print(traceback.format_exc())
finally:
self.setUpdatesEnabled(True)
def value(self, with_tags=False, only_changed=False):
'''
Goes through the list and creates dictionary with values of each element.
Returns a list with dictionaries, e.g. [{name: value}, {name: value}].
If with_tags is True the result is a dictionary, e.g. {':type': type[], ':value': [{name: value}, {name: value}]}
:rtype: list or dict, if with_tags==True
'''
result_list = list()
for i in range(self.param_widget.layout().rowCount()):
item = self.param_widget.layout().itemAt(i, QFormLayout.SpanningRole)
if item and isinstance(item.widget(), ArrayEntry):
value = item.widget().value(with_tags=with_tags, only_changed=only_changed)
result_list.append(value)
result = result_list
if with_tags:
result = {}
result[':type'] = self.type_msg
result[':value'] = result_list
return result
    def set_values(self, values):
        '''
        Create a list of the elements and sets their values.
        :param list values: The list of dictionaries with parameter values
        '''
        if isinstance(values, list):
            count_entries = 0
            # determine the count of existing elements
            for i in range(self.param_widget.layout().rowCount()):
                item = self.param_widget.layout().itemAt(i, QFormLayout.SpanningRole)
                if item and isinstance(item.widget(), ArrayEntry):
                    count_entries += 1
            # grow or shrink the entry list to match len(values)
            if count_entries < len(values):
                for i in range(len(values) - count_entries):
                    # use array entry; values[i] is only an initial fill --
                    # every row is overwritten by the loop below anyway
                    self._on_add_dynamic_entry(value=values[i])
            elif count_entries > len(values):
                for i in range(count_entries - len(values)):
                    self._on_rem_dynamic_entry()
            # set the values
            # NOTE(review): assumes every row in the layout is an ArrayEntry so
            # the row index matches the index into values -- TODO confirm.
            for i in range(self.param_widget.layout().rowCount()):
                item = self.param_widget.layout().itemAt(i, QFormLayout.SpanningRole)
                if item and isinstance(item.widget(), ArrayEntry):
                    item.widget().set_values(values[i])
class ScrollArea(QScrollArea):
    '''
    Scroll area that clamps its inner widget's maximum width to the viewport
    width, so the parameter form wraps instead of scrolling horizontally.
    '''
    def viewportEvent(self, arg):
        inner = self.widget()
        if inner is not None:
            viewport_width = self.viewport().size().width()
            if viewport_width != inner.maximumWidth():
                inner.setMaximumWidth(viewport_width)
        return QScrollArea.viewportEvent(self, arg)
class ParameterDialog(QDialog):
'''
This dialog creates an input mask for the given parameter and their types.
'''
def __init__(self, params=dict(), buttons=QDialogButtonBox.Cancel | QDialogButtonBox.Ok, sidebar_var='', parent=None, store_geometry=''):
'''
Creates an input dialog.
:param dict params: a (recursive) dictionary with parameter names and their values.
A value can be of primitive type (int, bool, string), a list or dictionary. If it is
of list type, the list should contains dictionaries with parameter and values.
If value is of dictionary type it is a recursive include or value with tags.
If it is a recursive include a group will be created. The key is the name of the group.
If it is a value with tags it should contains at least a ':value' tag.
All attributes begin with ':'. Other key attributes:
-':type': type, overwrites the autodetection
-':ro': read only
-':hint': description of the parameter
-':default': default value
-':min': minimum value
-':max': maximum value
-':alt': a list of alternative values
-'path': 'dir' or 'file'
:param str sidebar_var: the name of the key in first level of params. Creates a sidebar if
it is not empty. Cached and alternative values are used to fill the sidebar.
'''
QDialog.__init__(self, parent=parent)
self.setObjectName('ParameterDialog - %s' % utf8(params))
self.__current_path = nm.settings().current_dialog_path
self.horizontalLayout = QHBoxLayout(self)
self.horizontalLayout.setObjectName("horizontalLayout")
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(0)
self.verticalLayout = QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.verticalLayout.setContentsMargins(3, 3, 3, 3)
# add filter row
self.filter_field = EnhancedLineEdit(self)
self.filter_field.setPlaceholderText("filter")
self.filter_field.textChanged.connect(self._on_filter_changed)
self.filter_visible = True
self.verticalLayout.addWidget(self.filter_field)
# create area for the parameter
self.scrollArea = scrollArea = ScrollArea(self)
scrollArea.setObjectName("scrollArea")
self.content = MainBox('/', 'string', False, self)
scrollArea.setFrameStyle(QFrame.NoFrame)
scrollArea.setWidget(self.content)
scrollArea.setWidgetResizable(True)
self.verticalLayout.addWidget(scrollArea)
# add info text field
self.info_field = QTextEdit(self)
palette = QPalette()
brush = QBrush(QColor(255, 254, 242))
brush.setStyle(Qt.SolidPattern)
palette.setBrush(QPalette.Active, QPalette.Base, brush)
brush = QBrush(QColor(255, 254, 242))
brush.setStyle(Qt.SolidPattern)
palette.setBrush(QPalette.Inactive, QPalette.Base, brush)
brush = QBrush(QColor(244, 244, 244))
brush.setStyle(Qt.SolidPattern)
palette.setBrush(QPalette.Disabled, QPalette.Base, brush)
self.info_field.setPalette(palette)
self.info_field.setFrameShadow(QFrame.Plain)
self.info_field.setReadOnly(True)
self.info_field.setTextInteractionFlags(Qt.LinksAccessibleByKeyboard | Qt.LinksAccessibleByMouse | Qt.TextBrowserInteraction | Qt.TextSelectableByKeyboard | Qt.TextSelectableByMouse)
self.info_field.setObjectName("dialog_info_field")
self.verticalLayout.addWidget(self.info_field)
self.info_field.setVisible(False)
# create buttons
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setObjectName("buttonBox")
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(buttons)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.verticalLayout.addWidget(self.buttonBox)
self.horizontalLayout.addLayout(self.verticalLayout)
# add side bar for checklist
values = nm.history().cachedParamValues('/%s' % sidebar_var)
self.sidebar_frame = QFrame(self)
self.sidebar_frame.setObjectName(sidebar_var)
sidebarframe_verticalLayout = QVBoxLayout(self.sidebar_frame)
sidebarframe_verticalLayout.setObjectName("sidebarframe_verticalLayout")
sidebarframe_verticalLayout.setContentsMargins(3, 3, 3, 3)
self._sidebar_selected = 0
if len(values) > 0 and sidebar_var in params:
self.horizontalLayout.addWidget(self.sidebar_frame)
try:
if ':value' in params[sidebar_var]:
self.sidebar_default_val = params[sidebar_var][':value']
else:
self.sidebar_default_val = params[sidebar_var][1]
# add default value to sidebar
if self.sidebar_default_val and self.sidebar_default_val not in values:
values.append(self.sidebar_default_val)
except Exception:
self.sidebar_default_val = ''
values.sort()
for v in values:
checkbox = QCheckBox(v)
checkbox.setObjectName(v)
checkbox.stateChanged.connect(self._on_sidebar_stateChanged)
self.sidebar_frame.layout().addWidget(checkbox)
self.sidebar_frame.layout().addItem(QSpacerItem(100, 20, QSizePolicy.Minimum, QSizePolicy.Expanding))
# set the input fields
if params:
try:
self.content.createFieldFromValue(params)
self.setInfoActive(False)
except Exception:
print(traceback.format_exc())
if self.filter_field.isVisible():
self.filter_field.setFocus()
# restore from configuration file
self._geometry_name = store_geometry
if store_geometry and nm.settings().store_geometry:
settings = nm.settings().qsettings(nm.settings().CFG_GUI_FILE)
self._history_selected_robot = settings.value("selected_robot", '')
settings.beginGroup(store_geometry)
self.resize(settings.value("size", QSize(600, 300)))
pos = settings.value("pos", QPoint(0, 0))
if pos.x() != 0 and pos.y() != 0:
self.move(pos)
settings.endGroup()
    def __del__(self):
        # Detach all created input fields so their parameter descriptions do
        # not keep widget references alive after the dialog is gone.
        self.content.removeAllFields()
def _on_sidebar_stateChanged(self, state):
if state == Qt.Checked:
self._sidebar_selected += 1
elif state == Qt.Unchecked:
self._sidebar_selected -= 1
if self._sidebar_selected in [0, 1]:
try:
field = self.content.getField(self.sidebar_frame.objectName())
if field is not None and field.currentText() == self.sidebar_default_val:
field.setEnabled(True if self._sidebar_selected == 0 else False)
except Exception:
pass
def showLoadSaveButtons(self):
self.load_button = QPushButton()
self.load_button.setIcon(nm.settings().icon('load.png'))
self.load_button.clicked.connect(self._load_parameter)
self.load_button.setToolTip('Load parameters from YAML file')
self.load_button.setFlat(True)
self.buttonBox.addButton(self.load_button, QDialogButtonBox.ActionRole)
self.save_button = QPushButton()
self.save_button.clicked.connect(self._save_parameter)
self.save_button.setIcon(nm.settings().icon('save.png'))
self.save_button.setToolTip('Save parameters to YAML file')
self.save_button.setFlat(True)
self.buttonBox.addButton(self.save_button, QDialogButtonBox.ActionRole)
    def _on_filter_changed(self):
        # Apply the filter text (case-insensitive) to all parameter fields.
        self.content.filter(self.filter_field.text().lower())
def setFilterVisible(self, val):
'''
Shows or hides the filter row.
'''
self.filter_visible = val
self.filter_field.setVisible(val & self.scrollArea.isHidden())
def add_warning(self, message):
label = QLabel(self)
label.setWordWrap(True)
label.setText(''.join(["<font color='red'>Warning!\n", message, "</font>"]))
self.verticalLayout.insertWidget(1, label)
    def setText(self, text):
        '''
        Shows the given text in the info field and switches the dialog into
        info mode (hides the filter row and the input fields).
        :param str text: the text to show in the dialog
        '''
        self.info_field.setText(text)
        self.setInfoActive(True)
def setInfoActive(self, val):
'''
Activates or deactivates the info field of this dialog. If info field is
activated, the filter frame and the input field are deactivated.
:param bool val: state
'''
if val and self.info_field.isHidden():
self.filter_field.setVisible(False & self.filter_visible)
self.scrollArea.setVisible(False)
self.info_field.setVisible(True)
elif not val and self.scrollArea.isHidden():
self.filter_field.setVisible(True & self.filter_visible)
self.scrollArea.setVisible(True)
self.info_field.setVisible(False)
if self.filter_field.isVisible():
self.filter_field.setFocus()
def setFocusField(self, field_label):
field = self.content.getField(field_label, recursive=True)
if field is not None:
field.setFocus()
    def getKeywords(self, only_changed=False, with_tags=False):
        '''
        :param bool only_changed: returns changed parameter only (Default: False)
        :param bool with_tags: returns parameter attributes (e.g. :ro, :hint,...) (Default: False)
        :returns: a dictionary with parameter and value for entered fields.
        :rtype: dict
        '''
        # get the results of sidebar
        sidebar_list = []
        sidebar_name = self.sidebar_frame.objectName()
        # the last layout item is the expanding spacer, hence count() - 1
        for j in range(self.sidebar_frame.layout().count() - 1):
            w = self.sidebar_frame.layout().itemAt(j).widget()
            if isinstance(w, QCheckBox):
                if w.checkState() == Qt.Checked:
                    sidebar_list.append(w.objectName())
        result_value = self.content.value(with_tags, only_changed)
        # add the sidebar results
        if sidebar_name in result_value:
            # skip the default value, if elements are selected in the side_bar
            sidebar_value = ''
            if with_tags:
                sidebar_value = result_value[sidebar_name][':value']
            else:
                sidebar_value = result_value[sidebar_name]
            if len(sidebar_list) == 0 or self.sidebar_default_val != sidebar_value:
                sidebar_list.append(sidebar_value)
            # deduplicate the selection; NOTE(review): set() loses the original
            # order -- TODO confirm callers do not rely on ordering.
            if with_tags:
                result_value[sidebar_name][':value'] = [v for v in set(sidebar_list)]
            else:
                result_value[sidebar_name] = [v for v in set(sidebar_list)]
        return result_value
def keywords2params(self, keywords):
'''
Resolves the dictionary values to ROS parameter names.
:param keywords: the result of the getKeywords
:return: dictionary of (ROS parameter name : value)
'''
result = dict()
for param, value in keywords.items():
if isinstance(value, dict):
r = self.keywords2params(value)
for p, v in r.items():
result[roslib.names.ns_join(param, p)] = v
else:
result[param] = value
return result
@classmethod
def remove_attributes(cls, keywords):
# it it is a value dictionary, we need only :value attribute
if ':value' in keywords:
return keywords[':value']
# remove all attributes which starts with ':'
result = {}
for key, val in keywords.items():
clean_val = val
if isinstance(val, dict):
clean_val = cls.remove_attributes(val)
if not key.startswith(':'):
result[key] = | |
"""Initial processing of lib2to3's AST into an easier form.
The AST that lib2to3 produces is messy to process, so we convert it
into an easier format, defined in ast_cooked. While doing this, we
also mark all bindings (Python requires two passes to resolve local
variables, so this does the first pass).
By default, lib2to3 collapses parent-child nodes where there's a
single child; this is convenient for people writing 2to3 filters but
makes things more complicated for the kind of detailed AST analysis in
this module. Therefore, we define our own _convert function.
Lib2to3 supports both Python2 and Python3 syntax.
The basic usage is:
src_file = ast_node.make_file(path='...')
parse_tree = ast_raw.parse(src_content, python_version)
cooked_nodes = ast_raw.cvt_parse_tree(parse_tree, python_version, src_file)
The processing is driven off _DISPATCH[node.type]. Each function is
named cvt_XXX, where XXX is usually derived from the name of the
corresponding grammar rule.
"""
# TODO: change to using asttokens -- see the "#-#" comments
# pylint: disable=too-many-lines
# pylint: disable=too-many-public-methods
import collections
from dataclasses import dataclass
import dataclasses
import enum
import hashlib
import logging
import re
from lib2to3 import pygram
from lib2to3 import pytree
from lib2to3.pygram import python_symbols as syms
from lib2to3.pgen2 import driver, grammar as pgen2_grammar, parse as pgen2_parse, token, tokenize
from typing import Any, Callable, Dict, FrozenSet, List, Optional, Sequence, Tuple, Union
import typing
# The following requires pip3 install mypy_extensions
# and possibly symlinking into /usr/local/lib/python3.6/dist-packages
# TODO: can mypy run with python3.7?
from mypy_extensions import Arg
from . import ast_node, ast_cooked, fakesys, pod, typing_debug
from .typing_debug import cast as xcast
def cvt_parse_tree(parse_tree: Union['Node', 'Leaf'], python_version: int,
                   src_file: ast_node.File) -> ast_cooked.Base:
    """Convert a lib2to3.pytree to ast_cooked.Base."""
    ctx = new_ctx(python_version, src_file)
    return cvt(parse_tree, ctx)
# pylint: disable=too-few-public-methods
# pylint: disable=no-else-return
class NameCtx(enum.Enum):
    """Context for resolving names. See Ctx.name_ctx.

    Values:
      BINDING: The name appears on the left-hand side of an assignment
        in a position that creates a binding (e.g., `x = 1` binds `x`;
        `foo.f = 2` binds `f` but not `foo`; `bar[i] = 3` binds neither
        `bar` nor `i`).
      REF: The name appears on the right-hand side of an assignment, or
        on the left-hand side in a non-binding position.
      BARE: The name appears in an `import` statement in a position
        where it does not get a fully qualified name. For example, in
        `from foo.bar import qqsv as zork`: `foo`, `bar`, `qqsv` are
        `BARE` while `zork` is `BINDING` (and gets a FQN).
    """
    # String values (rather than enum.auto()) are kept for readable reprs.
    BINDING = 'BINDING'  # TODO: enum.auto()
    REF = 'REF'  # TODO: enum.auto()
    BARE = 'BARE'  # TODO: enum.auto()
@dataclass(frozen=True)
class Ctx(pod.PlainOldData):
    """Context for traversing the lib2to3 AST.

    Note that scope_bindings, global_vars, nonlocal_vars are dicts, so
    they can be updated and therefore Ctx behaves somewhat like a
    mutable object (name_ctx should not be updated; instead a new Ctx
    object should be created using the replace method). For those who
    like functional programming, this is cheating; but Python doesn't
    make it easy to have "accumulators" in the Prolog DCG or Haskell
    sense.

    Attributes:
      name_ctx: Used to mark ast_cooked.NameNode items as being in a
          binding context (left-hand-side), ref context or raw. See
          NameCtx for details of these. It is the responsibility of
          the parent of a node to set this appropriately -- e.g.,
          for an assignment statement, the parent would set name_ctx
          = NameCtx.BINDING for the node(s) to the left of the "="
          and would leave it as name_ctx = NameCtx.REF for node(s)
          on the right. For something like a dotted name on the
          left, the name_ctx would be changed from NameCtx.BINDING
          to NameCtx.REF for all except the last dotted name. The
          normal value for name_ctx is NameCtx.REF; it only becomes
          NameCtx.BINDING on the left-hand side of assignments, for
          parameters in a function definition, and a few other
          similar situations (e.g., a with_item or an
          except_clause). Within import statements, name_ctx can be
          NameCtx.BARE.
      scope_bindings: A set of names that are bindings within this
          "scope". This attribute is set to empty when entering a
          new scope. To ensure consistent results, an OrderedDict
          is used, with the value ignored.
      global_vars: A set of names that appear in "global" statements
          within the current scope.
      nonlocal_vars: A set of names that appear in "nonlocal"
          statements within the current scope.
      python_version: The Python major version being parsed; must
          currently be 3. TODO: make this into a triple - see
          fakesys.FAKE_SYS.
      src_file: Source and offset information for the file being
          processed.
    """
    name_ctx: NameCtx
    scope_bindings: Dict[str, None]  # Set[str] (OrderedSet[str])
    global_vars: Dict[str, None]  # Set[str] (OrderedSet[str])
    nonlocal_vars: Dict[str, None]  # Set[str] (OrderedSet[str])
    python_version: int  # TODO: make this into a triple - see fakesys.FAKE_SYS
    src_file: ast_node.File
    __slots__ = [
        'name_ctx', 'scope_bindings', 'global_vars', 'nonlocal_vars', 'python_version',
        'src_file']
    def __post_init__(self) -> None:
        # scope_bindings should be collections.OrderedDicts if you want
        # deterministic results.
        assert self.python_version in (3, )  # TODO: make this a triple: see fakesys.FAKE_SYS
    def to_BINDING(self) -> 'Ctx':  # pylint: disable=invalid-name
        """Return a copy of this Ctx with name_ctx set to NameCtx.BINDING."""
        return dataclasses.replace(self, name_ctx=NameCtx.BINDING)
    def to_BARE(self) -> 'Ctx':  # pylint: disable=invalid-name
        """Return a copy of this Ctx with name_ctx set to NameCtx.BARE."""
        return dataclasses.replace(self, name_ctx=NameCtx.BARE)
    def to_REF(self) -> 'Ctx':  # pylint: disable=invalid-name
        """Return a copy of this Ctx with name_ctx set to NameCtx.REF."""
        return dataclasses.replace(self, name_ctx=NameCtx.REF)
    @property
    def is_BINDING(self) -> bool:  # pylint: disable=invalid-name
        """True if this context is a binding (left-hand-side) context."""
        return self.name_ctx is NameCtx.BINDING
    @property
    def is_REF(self) -> bool:  # pylint: disable=invalid-name
        """True if this context is a reference (right-hand-side) context."""
        return self.name_ctx is NameCtx.REF
def new_ctx(python_version: int, src_file: ast_node.File) -> Ctx:
    """Wrapper that creates a new Ctx object."""
    # OrderedDicts give deterministic iteration order for the name sets.
    empty_set = collections.OrderedDict
    return Ctx(name_ctx=NameCtx.REF,
               scope_bindings=empty_set(),
               global_vars=empty_set(),
               nonlocal_vars=empty_set(),
               python_version=python_version,
               src_file=src_file)
def new_ctx_from(ctx: Ctx) -> Ctx:
    """Wrapper that creates a Ctx object for a new scope.

    Only python_version and src_file carry over from `ctx`; every
    other field is reset to its initial value.
    """
    return new_ctx(python_version=ctx.python_version, src_file=ctx.src_file)
def cvt_annassign(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
    """annassign: ':' test ['=' test]"""
    #-# AnnAssign(expr target, expr annotation, expr? value, int simple)
    # TODO: test case
    assert ctx.is_REF, [node]
    # The '=' part is optional: with only two children there is no value.
    if len(node.children) > 2:
        value = cvt(node.children[3], ctx)
    else:
        value = ast_cooked.OMITTED_NODE
    return ast_cooked.BareAnnAssignNode(
        left_annotation=cvt(node.children[1], ctx),
        expr=value,
    )
def cvt_arglist(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
    """arglist: argument (',' argument)* [',']"""
    assert ctx.is_REF, [node]
    # Commas are punctuation only; convert just the argument children.
    converted_args = cvt_children_skip_commas(node, ctx)
    return ast_cooked.BareArgListNode(args=converted_args)
def cvt_argument(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
    """
    argument: ( test [comp_for] |
                test ':=' test |
                test '=' test |
                '**' test |
                '*' test )
    """
    #-# Assign(expr* targets, expr value)
    assert ctx.is_REF, [node]
    if node.children[0].type == SYMS_TEST:
        if len(node.children) == 1:
            # plain positional argument
            return cvt(node.children[0], ctx)
        if node.children[1].type == token.COLONEQUAL:
            # walrus (`:=`) assignment expression used as an argument
            return ast_cooked.AssignMultipleExprStmt(
                left_list=[cvt(node.children[0], ctx)],
                expr=cvt(node.children[2], ctx))
        if node.children[1].type == token.EQUAL:
            # keyword argument (name=expr)
            # According to the grammar, the name is a `test`, which
            # should always simplify to a single name, so use cvt() to
            # get that name, and then extract the astn:
            name_cvt = cvt(node.children[0], ctx)
            if isinstance(name_cvt, ast_cooked.NameRefNode):
                return ast_cooked.ArgumentNode(name=name_cvt.name, arg=cvt(node.children[2], ctx))
            # logger 'pykythe' is defined in __main__
            logging.getLogger('pykythe').warning(
                'argument not in form name=expr: %r', node) # pragma: no cover
            return cvt(node.children[2], ctx) # pragma: no cover
        assert node.children[1].type == syms.comp_for, [node] # pylint: disable=no-member
        assert len(node.children) == 2, [node]
        # the arg is a generator
        return ast_cooked.DictGenListSetMakerCompForNode(
            value_expr=cvt(node.children[0], ctx),
            comp_for=xcast(ast_cooked.CompForNode, cvt(node.children[1], ctx)),
        )
    if node.children[0].type == token.DOUBLESTAR:
        return cvt(node.children[1], ctx) # Ignore the `**`
    assert node.children[0].type in (SYMS_STAR_EXPR, token.STAR), dict(ch0=node.children[0],
                                                                      node=node)
    # TODO: need a syntax test of "'*' test" (star_expr)
    return cvt(node.children[1], ctx) # Ignores the `*`
def cvt_assert_stmt(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
    """assert_stmt: 'assert' test [',' test]"""
    #-# Assert(expr test, expr? msg)
    assert ctx.is_REF, [node]
    condition = cvt(node.children[1], ctx)
    # The message expression (after the comma) is optional.
    if len(node.children) > 2:
        message = cvt(node.children[3], ctx)
    else:
        message = ast_cooked.OMITTED_NODE
    return ast_cooked.AssertStmt(items=[condition, message])
def cvt_async_funcdef(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
    """async_funcdef: ASYNC funcdef"""
    # TODO: test case
    assert ctx.is_REF, [node]
    # The ASYNC keyword carries no binding information: convert only the
    # wrapped funcdef.
    return cvt(node.children[1], ctx)
def cvt_async_stmt(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
    """async_stmt: ASYNC (funcdef | with_stmt | for_stmt)"""
    # TODO: test case
    assert ctx.is_REF, [node]
    # The ASYNC keyword carries no binding information: convert only the
    # wrapped statement.
    return cvt(node.children[1], ctx)
def cvt_atom(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
"""
atom: ('(' [yield_expr|testlist_gexp] ')' |
'[' [listmaker] ']' |
'{' [dictsetmaker] '}' |
'`' testlist1 '`' |
NAME | NUMBER | STRING+ | '.' '.' '.')
"""
# Can appear on left of assignment
ch0 = node.children[0]
if ch0.type in _EMPTY_PAIR:
if len(node.children) == 3:
result = cvt(node.children[1], ctx)
else:
assert len(node.children) == 2, [node]
if ch0.type == token.LSQB:
result = ast_cooked.ListMakerNode(items=[], binds=ctx.is_BINDING)
elif ch0.type == token.LBRACE:
# TODO: test case to ensure grammar doesn't allow
# dictsetmaker on l.h.s. (probaly it does, so
# the following assert should | |
# -*- coding: utf-8 -*-
"""The config functions."""
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import atexit
from functools import partial
import json
import os
import os.path as op
import platform
import shutil
import sys
import tempfile
import re
import numpy as np
from .check import _validate_type, _check_pyqt5_version
from ._logging import warn, logger
_temp_home_dir = None  # lazily set by _get_extra_data_path() when MNE_DONTWRITE_HOME is active
def set_cache_dir(cache_dir):
    """Set the directory to be used for temporary file storage.

    This directory is used by joblib to store memmapped arrays,
    which reduces memory requirements and speeds up parallel
    computation.

    Parameters
    ----------
    cache_dir : str or None
        Directory to use for temporary file storage. None disables
        temporary file storage.
    """
    # None is allowed (it disables caching); anything else must exist on disk.
    if cache_dir is None:
        set_config('MNE_CACHE_DIR', None, set_env=False)
        return
    if not op.exists(cache_dir):
        raise IOError('Directory %s does not exist' % cache_dir)
    set_config('MNE_CACHE_DIR', cache_dir, set_env=False)
def set_memmap_min_size(memmap_min_size):
    """Set the minimum size for memmaping of arrays for parallel processing.

    Parameters
    ----------
    memmap_min_size : str or None
        Threshold on the minimum size of arrays that triggers automated memory
        mapping for parallel processing, e.g., '1M' for 1 megabyte.
        Use None to disable memmaping of large arrays.
    """
    # None disables memmapping and needs no validation.
    if memmap_min_size is None:
        set_config('MNE_MEMMAP_MIN_SIZE', None, set_env=False)
        return
    if not isinstance(memmap_min_size, str):
        raise ValueError('\'memmap_min_size\' has to be a string.')
    # the trailing character encodes the unit
    if memmap_min_size[-1] not in ('K', 'M', 'G'):
        raise ValueError('The size has to be given in kilo-, mega-, or '
                         'gigabytes, e.g., 100K, 500M, 1G.')
    set_config('MNE_MEMMAP_MIN_SIZE', memmap_min_size, set_env=False)
# List the known configuration values; get_config('') returns this tuple so
# callers can discover the valid keys, and set_config() warns for keys that
# are neither listed here nor matched by known_config_wildcards below.
known_config_types = (
    'MNE_3D_OPTION_ANTIALIAS',
    'MNE_BROWSE_RAW_SIZE',
    'MNE_CACHE_DIR',
    'MNE_COREG_ADVANCED_RENDERING',
    'MNE_COREG_COPY_ANNOT',
    'MNE_COREG_GUESS_MRI_SUBJECT',
    'MNE_COREG_HEAD_HIGH_RES',
    'MNE_COREG_HEAD_OPACITY',
    'MNE_COREG_INTERACTION',
    'MNE_COREG_MARK_INSIDE',
    'MNE_COREG_PREPARE_BEM',
    'MNE_COREG_PROJECT_EEG',
    'MNE_COREG_ORIENT_TO_SURFACE',
    'MNE_COREG_SCALE_LABELS',
    'MNE_COREG_SCALE_BY_DISTANCE',
    'MNE_COREG_SCENE_SCALE',
    'MNE_COREG_WINDOW_HEIGHT',
    'MNE_COREG_WINDOW_WIDTH',
    'MNE_COREG_SUBJECTS_DIR',
    'MNE_CUDA_DEVICE',
    'MNE_CUDA_IGNORE_PRECISION',
    'MNE_DATA',
    'MNE_DATASETS_BRAINSTORM_PATH',
    'MNE_DATASETS_EEGBCI_PATH',
    'MNE_DATASETS_HF_SEF_PATH',
    'MNE_DATASETS_MEGSIM_PATH',
    'MNE_DATASETS_MISC_PATH',
    'MNE_DATASETS_MTRF_PATH',
    'MNE_DATASETS_SAMPLE_PATH',
    'MNE_DATASETS_SOMATO_PATH',
    'MNE_DATASETS_MULTIMODAL_PATH',
    'MNE_DATASETS_FNIRS_MOTOR_PATH',
    'MNE_DATASETS_OPM_PATH',
    'MNE_DATASETS_SPM_FACE_DATASETS_TESTS',
    'MNE_DATASETS_SPM_FACE_PATH',
    'MNE_DATASETS_TESTING_PATH',
    'MNE_DATASETS_VISUAL_92_CATEGORIES_PATH',
    'MNE_DATASETS_KILOWORD_PATH',
    'MNE_DATASETS_FIELDTRIP_CMC_PATH',
    'MNE_DATASETS_PHANTOM_4DBTI_PATH',
    'MNE_DATASETS_LIMO_PATH',
    'MNE_DATASETS_REFMEG_NOISE_PATH',
    'MNE_FORCE_SERIAL',
    'MNE_KIT2FIFF_STIM_CHANNELS',
    'MNE_KIT2FIFF_STIM_CHANNEL_CODING',
    'MNE_KIT2FIFF_STIM_CHANNEL_SLOPE',
    'MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD',
    'MNE_LOGGING_LEVEL',
    'MNE_MEMMAP_MIN_SIZE',
    'MNE_SKIP_FTP_TESTS',
    'MNE_SKIP_NETWORK_TESTS',
    'MNE_SKIP_TESTING_DATASET_TESTS',
    'MNE_STIM_CHANNEL',
    'MNE_TQDM',
    'MNE_USE_CUDA',
    'MNE_USE_NUMBA',
    'SUBJECTS_DIR',
)
# These allow for partial (substring) matches: e.g. 'MNE_STIM_CHANNEL_1' is
# an okay key because it contains 'MNE_STIM_CHANNEL'.
known_config_wildcards = (
    'MNE_STIM_CHANNEL',
)
def _load_config(config_path, raise_error=False):
    """Safely load a config file.

    Returns an empty dict (after warning) if the file is not valid JSON,
    unless ``raise_error`` is True, in which case a RuntimeError is raised.
    """
    with open(config_path, 'r') as fid:
        try:
            return json.load(fid)
        except ValueError:
            # No JSON object could be decoded --> corrupt file?
            msg = ('The MNE-Python config file (%s) is not a valid JSON '
                   'file and might be corrupted' % config_path)
            if raise_error:
                raise RuntimeError(msg)
            warn(msg)
            return dict()
def get_config_path(home_dir=None):
    r"""Get path to standard mne-python config file.

    Parameters
    ----------
    home_dir : str | None
        The folder that contains the .mne config folder.
        If None, it is found automatically.

    Returns
    -------
    config_path : str
        The path to the mne-python configuration file. On windows, this
        will be '%USERPROFILE%\.mne\mne-python.json'. On every other
        system, this will be ~/.mne/mne-python.json.
    """
    base_dir = _get_extra_data_path(home_dir=home_dir)
    return op.join(base_dir, 'mne-python.json')
def get_config(key=None, default=None, raise_error=False, home_dir=None,
               use_env=True):
    """Read MNE-Python preferences from environment or config file.

    Parameters
    ----------
    key : None | str
        The preference key to look for. The os environment is searched first,
        then the mne-python config file is parsed.
        If None, all the config parameters present in environment variables or
        the path are returned. If key is an empty string, a list of all valid
        keys (but not values) is returned.
    default : str | None
        Value to return if the key is not found.
    raise_error : bool
        If True, raise an error if the key is not found (instead of returning
        default).
    home_dir : str | None
        The folder that contains the .mne config folder.
        If None, it is found automatically.
    use_env : bool
        If True, consider env vars, if available.
        If False, only use MNE-Python configuration file values.

        .. versionadded:: 0.18

    Returns
    -------
    value : dict | str | None
        The preference key value.

    See Also
    --------
    set_config
    """
    _validate_type(key, (str, type(None)), "key", 'string or None')
    if key == '':
        return known_config_types
    # first, check to see if key is in env
    if use_env and key is not None and key in os.environ:
        return os.environ[key]
    # second, look for it in mne-python config file
    config_path = get_config_path(home_dir=home_dir)
    if not op.isfile(config_path):
        config = {}
    else:
        config = _load_config(config_path)
    if key is None:
        # update config with environment variables
        if use_env:
            env_keys = (set(config).union(known_config_types).
                        intersection(os.environ))
            config.update({key: os.environ[key] for key in env_keys})
        return config
    # NOTE: `raise_error` is treated as a boolean (was `raise_error is True`,
    # which silently ignored truthy non-True values).
    elif raise_error and key not in config:
        # Mention the environment in the message only when it was consulted.
        # BUGFIX: loc_env previously ended with 'in the ', producing
        # "... in the the mne-python config file".
        loc_env = 'the environment or in ' if use_env else ''
        meth_env = ('either os.environ["%s"] = VALUE for a temporary '
                    'solution, or ' % key) if use_env else ''
        extra_env = (' You can also set the environment variable before '
                     'running python.' if use_env else '')
        meth_file = ('mne.utils.set_config("%s", VALUE, set_env=True) '
                     'for a permanent one' % key)
        raise KeyError('Key "%s" not found in %s'
                       'the mne-python config file (%s). '
                       'Try %s%s.%s'
                       % (key, loc_env, config_path, meth_env, meth_file,
                          extra_env))
    else:
        return config.get(key, default)
def set_config(key, value, home_dir=None, set_env=True):
    """Set a MNE-Python preference key in the config file and environment.

    Parameters
    ----------
    key : str
        The preference key to set.
    value : str | None
        The value to assign to the preference key. If None, the key is
        deleted.
    home_dir : str | None
        The folder that contains the .mne config folder.
        If None, it is found automatically.
    set_env : bool
        If True (default), update :data:`os.environ` in addition to
        updating the MNE-Python config file.

    See Also
    --------
    get_config
    """
    _validate_type(key, 'str', "key")
    # While JSON allows non-string types, we allow users to override config
    # settings using env, which are strings, so we enforce that here
    _validate_type(value, (str, 'path-like', type(None)), 'value')
    if value is not None:
        value = str(value)
    # Warn (but still proceed) for keys that are neither known nor match a
    # wildcard prefix; see known_config_types/known_config_wildcards above.
    if key not in known_config_types and not \
            any(k in key for k in known_config_wildcards):
        warn('Setting non-standard config type: "%s"' % key)
    # Read all previous values
    config_path = get_config_path(home_dir=home_dir)
    if op.isfile(config_path):
        # raise_error=True: refuse to overwrite a corrupt (non-JSON) file
        config = _load_config(config_path, raise_error=True)
    else:
        config = dict()
        logger.info('Attempting to create new mne-python configuration '
                    'file:\n%s' % config_path)
    if value is None:
        # None means delete: remove from the file and (optionally) the env
        config.pop(key, None)
        if set_env and key in os.environ:
            del os.environ[key]
    else:
        config[key] = value
        if set_env:
            os.environ[key] = value
    # Write all values. This may fail if the default directory is not
    # writeable.
    directory = op.dirname(config_path)
    if not op.isdir(directory):
        os.mkdir(directory)
    with open(config_path, 'w') as fid:
        json.dump(config, fid, sort_keys=True, indent=0)
def _get_extra_data_path(home_dir=None):
    """Get path to extra data (config, tables, etc.).

    Parameters
    ----------
    home_dir : str | None
        The folder that contains the .mne config folder; if None, it is
        determined from the environment/platform (see below).

    Returns
    -------
    str
        Path to the ``.mne`` folder inside the resolved home directory.
    """
    global _temp_home_dir
    if home_dir is None:
        # test fixtures can redirect the home dir via this variable
        home_dir = os.environ.get('_MNE_FAKE_HOME_DIR')
    if home_dir is None:
        # this has been checked on OSX64, Linux64, and Win32
        if 'nt' == os.name.lower():
            # Windows: prefer %APPDATA% if it already holds a .mne folder,
            # otherwise fall back to %USERPROFILE%.
            # NOTE(review): os.getenv('APPDATA') could be None if the env var
            # is unset, which would make op.join raise — confirm this cannot
            # happen on supported Windows setups.
            if op.isdir(op.join(os.getenv('APPDATA'), '.mne')):
                home_dir = os.getenv('APPDATA')
            else:
                home_dir = os.getenv('USERPROFILE')
        else:
            # This is a more robust way of getting the user's home folder on
            # Linux platforms (not sure about OSX, Unix or BSD) than checking
            # the HOME environment variable. If the user is running some sort
            # of script that isn't launched via the command line (e.g. a script
            # launched via Upstart) then the HOME environment variable will
            # not be set.
            if os.getenv('MNE_DONTWRITE_HOME', '') == 'true':
                # use a throwaway temp dir, created once and removed at exit
                if _temp_home_dir is None:
                    _temp_home_dir = tempfile.mkdtemp()
                    atexit.register(partial(shutil.rmtree, _temp_home_dir,
                                            ignore_errors=True))
                home_dir = _temp_home_dir
            else:
                home_dir = os.path.expanduser('~')
    if home_dir is None:
        raise ValueError('mne-python config file path could '
                         'not be determined, please report this '
                         'error to mne-python developers')
    return op.join(home_dir, '.mne')
def get_subjects_dir(subjects_dir=None, raise_error=False):
    """Safely use subjects_dir input to return SUBJECTS_DIR.

    Parameters
    ----------
    subjects_dir : str | None
        If a value is provided, return subjects_dir. Otherwise, look for
        SUBJECTS_DIR config and return the result.
    raise_error : bool
        If True, raise a KeyError if no value for SUBJECTS_DIR can be found
        (instead of returning None).

    Returns
    -------
    value : str | None
        The SUBJECTS_DIR value.
    """
    # an explicitly provided value always wins over the config lookup
    if subjects_dir is not None:
        return subjects_dir
    return get_config('SUBJECTS_DIR', raise_error=raise_error)
def _get_stim_channel(stim_channel, info, raise_error=True):
"""Determine the appropriate stim_channel.
First, 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', etc.
are read. If these are not found, it will fall back to 'STI 014' if
present, then fall back to the first channel of type 'stim', if present.
Parameters
----------
stim_channel : str | list of str | None
The stim channel selected by the user.
info : instance of Info
An information structure containing information about the channels.
Returns
-------
stim_channel : str | list of str
The name of the | |
= np.logical_and(these_t, these_can_adjust)
cNrmNow[these], MPCnow[these] = self.solution[t].cFunc[0][0].eval_with_derivative(self.mNrmNow[these])
if any(these_cant_adjust):
for portfolio_index, portfolio_value in enumerate(self.ShareNow):
these_portfolio = np.equal(portfolio_value, self.RiskySharePrev)
these = np.logical_and(these_t, these_portfolio)
cNrmNow[these], MPCnow[these] = self.solution[t].cFunc[1][portfolio_index].eval_with_derivative(self.mNrmNow[these])
self.cNrmNow = cNrmNow
self.MPCnow = MPCnow
return None
def getRisky(self):
return self.drawRiskyFunc()
class ConsIndShockPortfolioSolver(ConsIndShockSolver):
'''
A class for solving a one period consumption-saving problem with portfolio choice.
An instance of this class is created by the function solveConsPortfolio in each period.
'''
    def __init__(self, solution_next, IncomeDstn, LivPrb, DiscFac, CRRA, Rfree,
                 PermGroFac, BoroCnstArt, aXtraGrid, vFuncBool, CubicBool,
                 approxRiskyDstn, RiskyCount, RiskyShareCount, RiskyShareLimitFunc,
                 AdjustPrb, PortfolioGrid, AdjustCount, PortfolioDomain):
        '''
        Construct a one-period portfolio-choice solver.

        The standard consumption-saving inputs (solution_next through
        CubicBool) are passed straight to ConsIndShockSolver.__init__;
        the remaining inputs configure the portfolio sub-problem:
        approxRiskyDstn is called with RiskyCount to build the risky
        return distribution, RiskyShareLimitFunc maps that distribution
        to a limiting share, and PortfolioDomain determines whether the
        share choice is discrete (DiscreteDomain) or continuous.
        '''
        ConsIndShockSolver.__init__(self, solution_next, IncomeDstn, LivPrb, DiscFac, CRRA, Rfree,
                                    PermGroFac, BoroCnstArt, aXtraGrid, vFuncBool, CubicBool)
        self.PortfolioDomain = PortfolioDomain
        # Discrete vs continuous share choice drives branching throughout
        # this solver (prepareToCalcRiskyShare*, calcRiskyShare*, ...).
        if isinstance(self.PortfolioDomain, DiscreteDomain):
            self.DiscreteCase = True
        else:
            self.DiscreteCase = False
        self.AdjustPrb = AdjustPrb
        self.PortfolioGrid = PortfolioGrid
        self.AdjustCount = AdjustCount
        # ShareNowCount[0] == 1 (adjusters); in the discrete case a second
        # entry holds the number of possible non-adjuster share levels.
        self.ShareNowCount = [1]
        if self.DiscreteCase:
            self.ShareNow = self.PortfolioDomain.getPoints()
            self.ShareNowCount.append(len(self.PortfolioDomain.getPoints()))
        # Store the Risky asset shock distribution
        self.RiskyDstn = approxRiskyDstn(RiskyCount)
        self.RiskyShareLimit = RiskyShareLimitFunc(self.RiskyDstn)
        # Store the number of grid points used approximate the FOC in the port-
        # folio sub-problem.
        self.RiskyShareCount = RiskyShareCount
        self.vFuncsNext = solution_next.vFunc
        self.vPfuncsNext = solution_next.vPfunc
        self.updateShockDstn()
        self.makeRshareGrid()
    def makeEndOfPrdvFunc(self, AdjustIndex, ShareIndex):
        '''
        Construct the end-of-period value function for this period, storing it
        as an attribute of self (self.EndOfPrdvFunc) for use by other methods.
        Only supported in the discrete portfolio-choice case.

        Parameters
        ----------
        AdjustIndex : int
            Index into the adjuster/non-adjuster dimension of the
            next-period value functions and resources.
        ShareIndex : int
            Index into the portfolio-share dimension of the next-period
            value functions and resources.

        Returns
        -------
        none
        '''
        if not self.DiscreteCase:
            raise Exception("vFuncBool == True is not supported for continuous portfolio choice.")
        # We will need to index vFuncNext wrt the state next period given choices
        # today.
        VLvlNext = (self.PermShkVals_temp**(1.0-self.CRRA)*\
                    self.PermGroFac**(1.0-self.CRRA))*self.vFuncsNext[AdjustIndex][ShareIndex](self.mNrmNext[AdjustIndex][ShareIndex])
        EndOfPrdv = self.DiscFacEff*np.sum(VLvlNext*self.ShkPrbs_temp,axis=0)
        EndOfPrdvNvrs = self.uinv(EndOfPrdv) # value transformed through inverse utility
        # Manually input (0,0) pair
        EndOfPrdvNvrs = np.insert(EndOfPrdvNvrs,0,0.0)
        aNrm_temp = np.insert(self.aNrmNow,0,0.0)
        EndOfPrdvNvrsFunc = LinearInterp(aNrm_temp,EndOfPrdvNvrs)
        self.EndOfPrdvFunc = ValueFunc(EndOfPrdvNvrsFunc,self.CRRA)
    def makevFunc(self,solution, AdjustIndex, ShareIndex):
        '''
        Creates the value function for this period, defined over market resources m.
        self must have the attribute EndOfPrdvFunc in order to execute.

        Parameters
        ----------
        solution : ConsumerSolution
            The solution to this single period problem, which must include the
            consumption function.
        AdjustIndex : int
            Index into the adjuster/non-adjuster dimension of solution.cFunc.
        ShareIndex : int
            Index into the portfolio-share dimension of solution.cFunc.

        Returns
        -------
        vFuncNow : ValueFunc
            A representation of the value function for this period, defined over
            normalized market resources m: v = vFuncNow(m).
        '''
        # Compute expected value and marginal value on a grid of market resources
        mNrm_temp = self.mNrmMinNow + self.aXtraGrid
        cNrmNow = solution.cFunc[AdjustIndex][ShareIndex](mNrm_temp)
        aNrmNow = mNrm_temp - cNrmNow
        # value today = utility of consumption + end-of-period continuation value
        vNrmNow = self.u(cNrmNow) + self.EndOfPrdvFunc(aNrmNow)
        # Construct the beginning-of-period value function
        vNvrs = self.uinv(vNrmNow) # value transformed through inverse utility
        # Manually insert (0,0) pair.
        mNrm_temp = np.insert(mNrm_temp,0,0.0) # np.insert(mNrm_temp,0,self.mNrmMinNow)
        vNvrs = np.insert(vNvrs,0,0.0)
        vNvrsFuncNow = LinearInterp(mNrm_temp,vNvrs)
        vFuncNow = ValueFunc(vNvrsFuncNow,self.CRRA)
        return vFuncNow
def addvFunc(self,solution):
'''
Creates the value function for this period and adds it to the solution.
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, likely including the
consumption function, marginal value function, etc.
Returns
-------
solution : ConsumerSolution
The single period solution passed as an input, but now with the
value function (defined over market resources m) as an attribute.
'''
if not self.DiscreteCase:
raise Exception('You\'re not supposed to be here. Continuous choice portfolio domain does not support vFuncBool == True or AdjustPrb < 1.0.')
vFunc = self.AdjustCount*[[]]
for AdjustIndex in range(self.AdjustCount): # nonadjuster possible!
# this is where we add to vFunc based on non-adjustment.
# Basically repeat the above with the share updated to be the "prev"
# share an. We need to keep mNrmNext at two major indeces: adjust and
# non-adjust. Adjust will just have one element, but non-adjust will need
# one for each of the possible current ("prev") values.
for ShareIndex in range(self.ShareNowCount[AdjustIndex]): # for all share level indeces in the adjuster (1) case
self.makeEndOfPrdvFunc(AdjustIndex, ShareIndex)
vFunc[AdjustIndex].append(self.makevFunc(solution, AdjustIndex, ShareIndex))
solution.vFunc = vFunc
return solution
def updateShockDstn(self):
self.ShockDstn = combineIndepDstns(self.IncomeDstn, self.RiskyDstn)
def makeRshareGrid(self):
# We set this up such that attempts to use RshareGrid will fail hard
# if we're in the discrete case
if not self.DiscreteCase:
self.RshareGrid = np.linspace(0, 1, self.RiskyShareCount)
return self.RshareGrid
return []
def prepareToCalcRiskyShare(self):
"""
Prepare variables used to find optimal portfolio shares. Branches to either
the discrete or continuous portfolio choice set.
"""
if self.DiscreteCase:
self.prepareToCalcRiskyShareDiscrete()
else:
self.prepareToCalcRiskyShareContinuous()
def prepareToCalcRiskyShareContinuous(self):
# Hard restriction on aNrm. We'd need to define more elaborate model
# specifics if a could become negative (or a positive return shock
# would make you worse off!)
aNrmPort = self.aXtraGrid[self.aXtraGrid >= 0]
self.aNrmPort = aNrmPort
RshareGrid = self.makeRshareGrid()
self.RshareNow = np.array([])
vHatP = np.zeros((len(aNrmPort), len(RshareGrid)))
# Evaluate the non-constant part of the first order conditions wrt the
# portfolio share. This requires the implied resources tomorrow given
# todays shocks to be evaluated.
i_a = 0
for a in aNrmPort:
# for all possible a's today
i_s = 0
for s in RshareGrid:
Rtilde = self.RiskyShkValsNext - self.Rfree
Reff = self.Rfree + Rtilde*s
mNext = a*Reff/(self.PermGroFac*self.PermShkValsNext) + self.TranShkValsNext
vHatP_a_s = Rtilde*self.PermShkValsNext**(-self.CRRA)*self.vPfuncNext(mNext)
vHatP[i_a, i_s] = np.dot(vHatP_a_s, self.ShkPrbsNext)
i_s += 1
i_a += 1
self.vHatP = vHatP
def prepareToCalcRiskyShareDiscrete(self):
# Hard restriction on aNrm. We'd need to define more elaborate model
# specifics if a could become negative (or a positive return shock
# would make you worse off!)
aNrmPort = self.aXtraGrid[self.aXtraGrid >= 0]
self.aNrmPort = aNrmPort
RshareGrid = self.ShareNow
self.RshareNow = np.array([])
vHat = np.zeros((len(aNrmPort), len(RshareGrid)))
# Evaluate the non-constant part of the first order conditions wrt the
# portfolio share. This requires the implied resources tomorrow given
# todays shocks to be evaluated.
i_a = 0
for a in aNrmPort:
# for all possible a's today
i_s = 0
for s in RshareGrid:
Rtilde = self.RiskyShkValsNext - self.Rfree
Reff = self.Rfree + Rtilde*s
mNrmNext = a*Reff/(self.PermGroFac*self.PermShkValsNext) + self.TranShkValsNext
VLvlNext = (self.PermShkValsNext**(1.0-self.CRRA)*\
self.PermGroFac**(1.0-self.CRRA))*self.vFuncNext(mNrmNext)
vHat_a_s = self.DiscFacEff*np.sum(VLvlNext*self.ShkPrbsNext,axis=0)
vHat[i_a, i_s] = vHat_a_s
i_s += 1
i_a += 1
self.vHat = vHat
def calcRiskyShare(self):
if self.DiscreteCase:
RiskyShareFunc = self.calcRiskyShareDiscrete()
else:
RiskyShareFunc = self.calcRiskyShareContinuous()
return RiskyShareFunc
def calcRiskyShareContinuous(self):
# This should be fixed by an insert 0
aGrid = np.array([0.0,])
Rshare = np.array([1.0,])
i_a = 0
for a in self.aNrmPort:
aGrid = np.append(aGrid, a)
if self.vHatP[i_a, -1] >= 0.0:
Rshare = np.append(Rshare, 1.0)
elif self.vHatP[i_a, 0] < 0.0:
Rshare = np.append(Rshare, 0.0)
else:
residual = LinearInterp(self.RshareGrid, self.vHatP[i_a, :])
zero = sciopt.fsolve(residual, Rshare[-1])
Rshare = np.append(Rshare, zero)
i_a += 1
RiskyShareFunc = LinearInterp(aGrid, Rshare,intercept_limit=self.RiskyShareLimit, slope_limit=0) # HAVE to specify the slope limit
return RiskyShareFunc
def calcRiskyShareDiscrete(self):
# Based on the end-of-period value function, we calculate the best
# choice today for a range of a values (those given in aNrmPort).
# Should just use insert below ( at 0)
aGrid = np.array([0.0,])
Rshare = np.array([1.0,]) # is it true for AdjustPrb < 1?
i_a = 0
# For all positive aNrms
for a in self.aNrmPort:
# all values at portfolio shares should be calculated
# argmax gives optimal portfolio
share_argmax = np.argmax(self.vHat[i_a, :])
Rshare = np.append(Rshare, self.ShareNow[share_argmax])
i_a += 1
# TODO FIXME find limiting share for perf foresight
RiskyShareFunc = scipy.interpolate.interp1d(np.insert(self.aNrmPort, 0, 0.0), Rshare, kind='zero',bounds_error=False, fill_value=Rshare[-1])
return RiskyShareFunc
def prepareToCalcEndOfPrdvP(self):
'''
Prepare to calculate end-of-period marginal value by creating an array
of market resources that the agent could have next period, considering
the grid of end-of-period assets and the distribution of shocks he might
experience next period. This method adds extra steps because it first
solves the portfolio problem given the end-of-period assets to be able
to get next period resources.
Parameters
----------
none
Returns
-------
aNrmNow : np.array
A 1D array of end-of-period assets; also stored as attribute of self.
'''
# We define aNrmNow all the way from BoroCnstNat up to max(self.aXtraGrid)
# even if BoroCnstNat < BoroCnstArt, so we can construct the consumption
# function as the lower envelope of the (by the artificial borrowing con-
# straint) uconstrained consumption function, and the artificially con-
# strained consumption function.
aNrmNow = np.asarray(self.aXtraGrid)
ShkCount = self.TranShkValsNext.size
aNrm_temp = np.tile(aNrmNow,(ShkCount,1))
# Tile arrays of the income shocks and put them into useful shapes
aNrmCount = aNrmNow.shape[0]
PermShkVals_temp = (np.tile(self.PermShkValsNext,(aNrmCount,1))).transpose()
TranShkVals_temp = (np.tile(self.TranShkValsNext,(aNrmCount,1))).transpose()
RiskyShkVals_temp = | |
This will be matched.
children.append(elt)
return Tree(production.lhs().symbol(), children)
def trace(self, trace=2):
"""
Set the level of tracing output that should be generated when
parsing a text.
:type trace: int
:param trace: The trace level. A trace level of ``0`` will
generate no tracing output; and higher trace levels will
produce more verbose tracing output.
:rtype: None
"""
self._trace = trace
def _trace_fringe(self, tree, treeloc=None):
"""
Print trace output displaying the fringe of ``tree``. The
fringe of ``tree`` consists of all of its leaves and all of
its childless subtrees.
:rtype: None
"""
if treeloc == (): print("*", end=' ')
if isinstance(tree, Tree):
if len(tree) == 0:
print(unicode_repr(Nonterminal(tree.label())), end=' ')
for i in range(len(tree)):
if treeloc is not None and i == treeloc[0]:
self._trace_fringe(tree[i], treeloc[1:])
else:
self._trace_fringe(tree[i])
else:
print(unicode_repr(tree), end=' ')
def _trace_tree(self, tree, frontier, operation):
"""
Print trace output displaying the parser's current state.
:param operation: A character identifying the operation that
generated the current state.
:rtype: None
"""
if self._trace == 2: print(' %c [' % operation, end=' ')
else: print(' [', end=' ')
if len(frontier) > 0: self._trace_fringe(tree, frontier[0])
else: self._trace_fringe(tree)
print(']')
def _trace_start(self, tree, frontier, text):
print('Parsing %r' % " ".join(text))
if self._trace > 2: print('Start:')
if self._trace > 1: self._trace_tree(tree, frontier, ' ')
def _trace_expand(self, tree, frontier, production):
if self._trace > 2: print('Expand: %s' % production)
if self._trace > 1: self._trace_tree(tree, frontier, 'E')
def _trace_match(self, tree, frontier, tok):
if self._trace > 2: print('Match: %r' % tok)
if self._trace > 1: self._trace_tree(tree, frontier, 'M')
def _trace_succeed(self, tree, frontier):
if self._trace > 2: print('GOOD PARSE:')
if self._trace == 1: print('Found a parse:\n%s' % tree)
if self._trace > 1: self._trace_tree(tree, frontier, '+')
def _trace_backtrack(self, tree, frontier, toks=None):
if self._trace > 2:
if toks: print('Backtrack: %r match failed' % toks[0])
else: print('Backtrack')
##//////////////////////////////////////////////////////
## Stepping Recursive Descent Parser
##//////////////////////////////////////////////////////
class SteppingRecursiveDescentParser(RecursiveDescentParser):
"""
A ``RecursiveDescentParser`` that allows you to step through the
parsing process, performing a single operation at a time.
The ``initialize`` method is used to start parsing a text.
``expand`` expands the first element on the frontier using a single
CFG production, and ``match`` matches the first element on the
frontier against the next text token. ``backtrack`` undoes the most
recent expand or match operation. ``step`` performs a single
expand, match, or backtrack operation. ``parses`` returns the set
of parses that have been found by the parser.
    :ivar _history: A list of ``(rtext, tree, frontier)`` triples,
containing the previous states of the parser. This history is
used to implement the ``backtrack`` operation.
:ivar _tried_e: A record of all productions that have been tried
for a given tree. This record is used by ``expand`` to perform
the next untried production.
:ivar _tried_m: A record of what tokens have been matched for a
given tree. This record is used by ``step`` to decide whether
or not to match a token.
:see: ``nltk.grammar``
"""
def __init__(self, grammar, trace=0):
self._grammar = grammar
self._trace = trace
self._rtext = None
self._tree = None
self._frontier = [()]
self._tried_e = {}
self._tried_m = {}
self._history = []
self._parses = []
# [XX] TEMPORARY HACK WARNING! This should be replaced with
# something nicer when we get the chance.
def _freeze(self, tree):
c = tree.copy()
# for pos in c.treepositions('leaves'):
# c[pos] = c[pos].freeze()
return ImmutableTree.convert(c)
def parse(self, tokens):
tokens = list(tokens)
self.initialize(tokens)
while self.step() is not None:
pass
return self.parses()
def initialize(self, tokens):
"""
Start parsing a given text. This sets the parser's tree to
the start symbol, its frontier to the root node, and its
remaining text to ``token['SUBTOKENS']``.
"""
self._rtext = tokens
start = self._grammar.start().symbol()
self._tree = Tree(start, [])
self._frontier = [()]
self._tried_e = {}
self._tried_m = {}
self._history = []
self._parses = []
if self._trace:
self._trace_start(self._tree, self._frontier, self._rtext)
def remaining_text(self):
"""
:return: The portion of the text that is not yet covered by the
tree.
:rtype: list(str)
"""
return self._rtext
def frontier(self):
"""
:return: A list of the tree locations of all subtrees that
have not yet been expanded, and all leaves that have not
yet been matched.
:rtype: list(tuple(int))
"""
return self._frontier
def tree(self):
"""
:return: A partial structure for the text that is
currently being parsed. The elements specified by the
frontier have not yet been expanded or matched.
:rtype: Tree
"""
return self._tree
def step(self):
"""
Perform a single parsing operation. If an untried match is
possible, then perform the match, and return the matched
token. If an untried expansion is possible, then perform the
expansion, and return the production that it is based on. If
backtracking is possible, then backtrack, and return True.
Otherwise, return None.
:return: None if no operation was performed; a token if a match
was performed; a production if an expansion was performed;
and True if a backtrack operation was performed.
:rtype: Production or String or bool
"""
# Try matching (if we haven't already)
if self.untried_match():
token = self.match()
if token is not None: return token
# Try expanding.
production = self.expand()
if production is not None: return production
# Try backtracking
if self.backtrack():
self._trace_backtrack(self._tree, self._frontier)
return True
# Nothing left to do.
return None
def expand(self, production=None):
"""
Expand the first element of the frontier. In particular, if
the first element of the frontier is a subtree whose node type
is equal to ``production``'s left hand side, then add a child
to that subtree for each element of ``production``'s right hand
side. If ``production`` is not specified, then use the first
untried expandable production. If all expandable productions
have been tried, do nothing.
:return: The production used to expand the frontier, if an
expansion was performed. If no expansion was performed,
return None.
:rtype: Production or None
"""
# Make sure we *can* expand.
if len(self._frontier) == 0:
return None
if not isinstance(self._tree[self._frontier[0]], Tree):
return None
# If they didn't specify a production, check all untried ones.
if production is None:
productions = self.untried_expandable_productions()
else: productions = [production]
parses = []
for prod in productions:
# Record that we've tried this production now.
self._tried_e.setdefault(self._freeze(self._tree), []).append(prod)
# Try expanding.
for _result in self._expand(self._rtext, self._tree, self._frontier, prod):
return prod
# We didn't expand anything.
return None
def match(self):
"""
Match the first element of the frontier. In particular, if
the first element of the frontier has the same type as the
next text token, then substitute the text token into the tree.
:return: The token matched, if a match operation was
performed. If no match was performed, return None
:rtype: str or None
"""
# Record that we've tried matching this token.
tok = self._rtext[0]
self._tried_m.setdefault(self._freeze(self._tree), []).append(tok)
# Make sure we *can* match.
if len(self._frontier) == 0:
return None
if isinstance(self._tree[self._frontier[0]], Tree):
return None
for _result in self._match(self._rtext, self._tree, self._frontier):
# Return the token we just matched.
return self._history[-1][0][0]
return None
def backtrack(self):
"""
Return the parser to its state before the most recent
match or expand operation. Calling ``undo`` repeatedly return
the parser to successively earlier states. If no match or
expand operations have been performed, ``undo`` will make no
changes.
:return: true if an operation was successfully undone.
:rtype: bool
"""
if len(self._history) == 0: return False
(self._rtext, self._tree, self._frontier) = self._history.pop()
return True
def expandable_productions(self):
"""
:return: A list of all the productions for which expansions
are available for the current parser state.
:rtype: list(Production)
"""
# Make sure we *can* expand.
if len(self._frontier) == 0: return []
frontier_child = self._tree[self._frontier[0]]
if (len(self._frontier) == 0 or
not isinstance(frontier_child, Tree)):
return []
return [p for p in self._grammar.productions()
if p.lhs().symbol() == frontier_child.label()]
def untried_expandable_productions(self):
"""
:return: A list of all the untried productions for which
expansions are available for the current parser state.
:rtype: list(Production)
"""
tried_expansions = self._tried_e.get(self._freeze(self._tree), [])
return [p for p in self.expandable_productions()
if p not in tried_expansions]
def untried_match(self):
"""
:return: Whether the first element of | |
> 0 and not panels:
break
ax = axes.ravel()[j]
order_labels = []
for i, n in enumerate(cycle_orders):
z = i / 20
if n not in self.orders_not_excluded and not show_excluded:
# Don't plot orders if we've excluded them
continue
order_label = n if n in [0, 1] else n - 1
if order_label == 0:
order_str = 'LO'
elif order_label == 1:
order_str = 'NLO'
else:
order_str = fr'N$^{order_label}$LO'
order_labels.append(order_str)
ax.plot(x, y[:, i], c=colors[i], label=order_str, zorder=z)
# ax.plot(kf[train], self.y[train, i], marker='o', ls='', c=colors[i], zorder=z)
if show_process:
_, std = model.predict(self.X, order=n, return_std=True, kind='trunc')
if self.body == 'Appended':
n_3bf = n if n >= 3 else 3 # 3-body forces don't enter until N3LO
_, std_3bf = model.predict(self.X, order=n_3bf, return_std=True, kind='trunc')
try:
ref3_vals = self.ref3(self.X)
except TypeError:
ref3_vals = self.ref3
try:
ref2_vals = self.ref2(self.X)
except TypeError:
ref2_vals = self.ref2
# For appended, the standard reference is the 2-body one. So swap for the 3-body ref
std_3bf *= ref3_vals / ref2_vals
std = np.sqrt(std**2 + std_3bf**2)
# ax.plot(x, y[:, i], c=colors[i], zorder=z, ls='--')
ax.fill_between(
x, y[:, i] + std, y[:, i] - std, zorder=z,
lw=0.5, alpha=1, facecolor=light_colors[i], edgecolor=colors[i]
)
# ax2.plot(d, self.y[:, 0], ls='', c=gray, zorder=-1) # Dummy data to set up ticks
# ax.axhline(0, 0, 1, ls='--', c=gray, zorder=-1)
# if self.system == 'neutron':
# y_label = fr'Energy per Neutron '
# elif self.system == 'symmetric':
# y_label = 'Energy per Particle '
# elif self.system == 'difference':
# y_label = 'Symmetry Energy '
# else:
# raise ValueError('system has wrong value')
#
# y_label += fr'${self.system_math_strings[self.system]}$'
# y_label = self.compute_y_label()
# ax.set_ylabel(y_label)
# ax.set_xlabel(r'Fermi Momentum $k_\mathrm{F}$ [fm$^{-1}$]')
# ax.set_xticks(self.X_valid.ravel(), minor=True)
# if self.system == 'neutron':
# kf_ticks = np.array([1.2, 1.4, 1.6, 1.8])
# elif self.system == 'symmetric':
# kf_ticks = np.array([1., 1.2, 1.4])
# else:
# kf_ticks = np.array([1., 1.2, 1.4])
# ax.set_xticks(kf_ticks)
for ax in axes.ravel():
ax.xaxis.set_major_locator(MultipleLocator(0.2))
# ax2 = ax.twiny()
# ax2.margins(x=0.)
ax.set_xlim(x[0], x[-1])
if self.system == 'symmetric':
self.plot_empirical_saturation(ax, is_density_primary=is_density_primary)
if panels:
# both_axes = self.setup_ticks(
# ax, is_density_primary, train=self.train, valid=self.valid, show_2nd_axis=False)
for ax in axes.ravel():
if is_density_primary:
ax.xaxis.set_major_locator(MultipleLocator(0.1))
else:
ax.xaxis.set_major_locator(MultipleLocator(0.2))
ax.xaxis.set_minor_locator(AutoMinorLocator(2))
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.tick_params(right=True, top=True, which='both')
d_label = r'Density $n$ [fm$^{-3}$]'
axes[1, 0].set_xlabel(d_label)
axes[1, 1].set_xlabel(d_label)
from .graphs import add_top_order_legend
fig = plt.gcf()
dark_colors = [darken_color(color) for color in colors]
add_top_order_legend(fig, axes[0, 0], axes[0, 1], order_labels, colors, light_colors, dark_colors)
else:
ax.legend()
both_axes = self.setup_ticks(
ax, is_density_primary, train=self.train, valid=self.valid, show_2nd_axis=show_2nd_axis)
if show_2nd_axis:
both_axes[-1].set_xlim(x[0], x[-1])
if savefig is None:
savefig = self.savefigs
if savefig:
fig = plt.gcf()
name = self.figure_name('obs_', breakdown=breakdown)
fig.savefig(name)
if return_info:
info = self.model_info(breakdown=breakdown)
info['name'] = path.relpath(name, self.fig_path)
return ax, info
return ax
def plot_joint_breakdown_ls(self, max_idx, return_info=False):
system_str = fr'${self.system_math_string}$'
order_str = fr'N$^{max_idx}$LO'
fig = joint2dplot(self.df_ls, self.df_breakdown, self.df_joint, system=system_str,
order=order_str, data_str=self.system_math_string)
breakdown = (self.breakdown_min, self.breakdown_max, self.breakdown_num)
ls = (self.ls_min, self.ls_max, self.ls_num)
if self.savefigs:
name = self.figure_name('ls-Lb-2d_', breakdown=breakdown, ls=ls, max_idx=max_idx)
fig.savefig(name)
if return_info:
info = self.model_info(max_idx=max_idx)
info['name'] = path.relpath(name, self.fig_path)
return fig, info
return fig
    def plot_md_squared(
            self, breakdown=None, ax=None, savefig=None, return_info=False, interp=False, kernel=None,
            show_excluded=False
    ):
        R"""Plots the squared Mahalanobis distance.
        Parameters
        ----------
        breakdown : float, optional
            The value for the breakdown scale to use in the diagnostics. If `None`, then its MAP value is used.
        ax : matplotlib.axes.Axes, optional
            The axis on which to draw the coefficient plots and diagnostics
        savefig : bool, optional
            Whether to save the figure. If `None`, this is taken from `self.savefigs`.
        return_info : bool, optional
            Whether to also return a dict of model info, including the saved figure name.
            Only takes effect when the figure is actually saved.
        interp : bool, optional
            Passed to `compute_underlying_graphical_diagnostic` -- presumably interpolates the
            coefficients before computing the diagnostic; TODO confirm there.
        kernel : optional
            The kernel passed to `compute_underlying_graphical_diagnostic`.
        show_excluded : bool, optional
            Whether excluded orders are included in the diagnostic.
        Returns
        -------
        ax : matplotlib.axes.Axes
            The axis object
        """
        if ax is None:
            fig, ax = plt.subplots(figsize=(1, 3.2))
        if breakdown is None:
            # Fall back to the MAP value of the breakdown scale.
            breakdown = self.breakdown_map[-1]
            print('Using breakdown =', breakdown, 'MeV')
        graph = self.compute_underlying_graphical_diagnostic(
            breakdown=breakdown, interp=interp, kernel=kernel, show_excluded=show_excluded)
        obs = self.system_math_string
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        ax.margins(y=0)
        ax = graph.md_squared(type='box', trim=False, title=None, xlabel=rf'${self.MD_label}({obs})$', ax=ax)
        # Single dummy x tick whose label is drawn in white, effectively hiding it
        # while keeping the tick spacing consistent.
        ax.set_xticks([0])
        ax.set_xticklabels(['0'], fontdict=dict(color='w'))
        ax.tick_params(width=0, axis='x')
        # plt.xticklabels()
        # Clamp the lower y limit at zero: the squared distance is non-negative.
        ymin, ymax = ax.get_ylim()
        ax.set_ylim(np.max([np.floor(ymin), 0]), np.ceil(ymax))
        if savefig is None:
            savefig = self.savefigs
        if savefig:
            fig = plt.gcf()
            name = self.figure_name('md_under_', breakdown=breakdown)
            fig.savefig(name)
            if return_info:
                info = self.model_info(breakdown=breakdown)
                info['name'] = path.relpath(name, self.fig_path)
                return ax, info
        return ax
    def plot_pchol(
            self, breakdown=None, ax=None, savefig=None, return_info=False, interp=False, kernel=None,
            show_excluded=False
    ):
        R"""Plots the pivoted Cholesky diagnostic.
        Parameters
        ----------
        breakdown : float, optional
            The value for the breakdown scale to use in the diagnostic. If `None`, then its MAP value is used.
        ax : matplotlib.axes.Axes, optional
            The axis on which to draw the coefficient plots and diagnostics
        savefig : bool, optional
            Whether to save the figure. If `None`, this is taken from `self.savefigs`.
        return_info : bool, optional
            Whether to also return a dict of model info, including the saved figure name.
            Only takes effect when the figure is actually saved.
        interp : bool, optional
            Passed to `compute_underlying_graphical_diagnostic` -- TODO confirm its semantics there.
        kernel : optional
            The kernel passed to `compute_underlying_graphical_diagnostic`.
        show_excluded : bool, optional
            Whether excluded orders are included in the diagnostic.
        Returns
        -------
        ax : matplotlib.axes.Axes
            The axis object
        """
        if ax is None:
            fig, ax = plt.subplots(figsize=(3.2, 3.2))
        if breakdown is None:
            # Fall back to the MAP value of the breakdown scale.
            breakdown = self.breakdown_map[-1]
            print('Using breakdown =', breakdown, 'MeV')
        graph = self.compute_underlying_graphical_diagnostic(
            breakdown=breakdown, interp=interp, kernel=kernel, show_excluded=show_excluded
        )
        obs = self.system_math_string
        # NOTE(review): 'text.latex.preview' was removed in newer matplotlib
        # releases -- confirm the pinned matplotlib version supports it.
        with plt.rc_context({"text.usetex": True, "text.latex.preview": True}):
            ax = graph.pivoted_cholesky_errors(ax=ax, title=None)
            # ax = graph.individual_errors(ax=ax, title=None)
            # ax.text(0.5, 0.95, rf'${self.PC_label}({obs})$', bbox=text_bbox, transform=ax.transAxes, va='top',
            #         ha='center')
            # Hijack a legend to get the 'best' location to place the text
            line, = ax.plot([])
            # Remove the handle from the legend box.
            ax.legend(
                [line], [rf'${self.PC_label}({obs})$'], handlelength=0,
                loc='best', handletextpad=0)
            fig = plt.gcf()
        if savefig is None:
            savefig = self.savefigs
        if savefig:
            name = self.figure_name('pc_under_', breakdown=breakdown)
            fig.savefig(name)
            if return_info:
                info = self.model_info(breakdown=breakdown)
                info['name'] = path.relpath(name, self.fig_path)
                return ax, info
        return ax
    def plot_coeff_diagnostics(
            self, breakdown=None, fig=None, savefig=None, return_info=False,
            interp=False, kernel=None, show_excluded=False):
        R"""Plots coefficients, the squared Mahalanobis distance, and the pivoted Cholesky diagnostic.
        Parameters
        ----------
        breakdown : float, optional
            The value for the breakdown scale to use in the diagnostics. If `None`, then its MAP value is used.
        fig : matplotlib.figure.Figure, optional
            The Figure on which to draw the coefficient plots and diagnostics
        savefig : bool, optional
            Whether to save the figure. If `None`, this is taken from `self.savefigs`.
        return_info : bool, optional
            Whether to also return a dict of model info, including the saved figure name.
            Only takes effect when the figure is actually saved.
        interp : bool, optional
            Forwarded to `plot_md_squared` and `plot_pchol` -- TODO confirm its semantics there.
        kernel : optional
            The kernel forwarded to the individual diagnostic plotters.
        show_excluded : bool, optional
            Whether excluded orders are included in the plots and diagnostics.
        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure object
        """
        if fig is None:
            fig = plt.figure(figsize=(7, 3.2), constrained_layout=True)
        if breakdown is None:
            # Fall back to the MAP value of the breakdown scale.
            breakdown = self.breakdown_map[-1]
            print('Using breakdown =', breakdown, 'MeV')
        # Layout: coefficients fill the left 3/7, Mahalanobis the middle 1/7,
        # and the pivoted Cholesky diagnostic the right 3/7 of the figure.
        spec = fig.add_gridspec(nrows=1, ncols=7)
        ax_cs = fig.add_subplot(spec[:, :3])
        ax_md = fig.add_subplot(spec[:, 3])
        ax_pc = fig.add_subplot(spec[:, 4:])
        show_2nd_axis = self.system != self.system_strings['difference']
        self.plot_coefficients(
            breakdown=breakdown, ax=ax_cs, show_process=True, savefig=False, show_2nd_axis=show_2nd_axis,
            kernel=kernel, show_excluded=show_excluded,
        )
        self.plot_md_squared(
            breakdown=breakdown, ax=ax_md, savefig=False, interp=interp, kernel=kernel,
            show_excluded=show_excluded,
        )
        self.plot_pchol(
            breakdown=breakdown, ax=ax_pc, savefig=False, interp=interp, kernel=kernel,
            show_excluded=show_excluded,
        )
        if savefig is None:
            savefig = self.savefigs
        if savefig:
            name = self.figure_name('cn_diags_', breakdown=breakdown)
            # fig.savefig(name, metadata={'hi': [1, 2, 3], 'wtf': 7})
            fig.savefig(name)
            if return_info:
                info = self.model_info(breakdown=breakdown)
                info['name'] = path.relpath(name, self.fig_path)
                return fig, info
        return fig
def plot_credible_diagnostic(
self, breakdown=None, ax=None, savefig=None, truncation=False, show_excluded=False, all_points=False,
show_legend=True, ylabel=r'Empirical Coverage [$\%$]',
):
if ax is None:
fig, ax = plt.subplots(figsize=(3.2, 3.2))
if breakdown is None:
breakdown = self.breakdown_map[-1]
print('Using breakdown =', breakdown, 'MeV')
if truncation:
model = gm.TruncationGP(
ratio=self.ratio, ref=self.ref, excluded=self.excluded,
ratio_kws=dict(breakdown=breakdown), **self.kwargs
)
model.fit(self.X_train, y=self.y_train, orders=self.orders)
if all_points:
X = self.X
y = self.y
else:
X = self.X_valid
y = self.y_valid
if show_excluded:
orders = self.orders
colors = self.colors
else:
y = y[:, self.excluded_mask]
orders = self.orders_not_excluded
colors = self.colors_not_excluded
# Get the covariance without any Q junk
# norm_trunc_cov = model.cov(X, start=0, end=0)
ref = model.ref(X)
norm_trunc_cov = ref[:, None] * ref * model.coeffs_process.cov(X=X)
# Get the between-order residuals
residuals = np.diff(y)
Q = self.ratio(X)
# Normalize them based on the approximate size of the next order correction
# This is so that we can use the same Q-less covariance for each correction
norm_residuals = residuals / Q[:, None] ** orders[1:]
graph = gm.GraphicalDiagnostic(
norm_residuals, mean=np.zeros(X.shape[0]),
cov=norm_trunc_cov, colors=colors, gray=gray, black=softblack
)
else:
graph = self.compute_underlying_graphical_diagnostic(breakdown=breakdown, show_excluded=show_excluded)
obs = self.system_math_string
intervals = np.linspace(1e-5, 1, 100)
band_perc = [0.68, 0.95]
if show_excluded:
linestyles = self.linestyles
else:
linestyles = self.linestyles_not_excluded
ax = graph.credible_interval(
intervals=intervals, band_perc=band_perc,
# title=rf'${self.CI_label}({obs})$',
title=None,
ax=ax,
xlabel=r'Credible Interval [$\%$]', ylabel=ylabel,
linestyles=linestyles
)
ax.set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1])
ax.set_xticklabels([0, 20, 40, 60, 80, 100])
ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
ax.set_yticklabels([0, 20, 40, 60, 80, 100])
if truncation and show_legend:
handles, labels = ax.get_legend_handles_labels()
ax.set_title('')
ax.legend(handles=handles, labels=[r'LO', r'NLO', r'N$^{2}$LO'], title=rf'${self.CI_label}({obs})$')
fig = plt.gcf()
if savefig is None:
savefig | |
running on neighbour device. Thus Interface and MAC
addresses fields could be filled of data without LLDP neighbour
            device. Data will be considered as LLDP information if there are
other fields than Interface and MAC addresses are found.
:return: LLDP information of the device
:rtype: dict of list of dict
"""
# Display info message
log.info("get_lldp_neighbors")
# By default nothing is returned
returned_output = {}
# Send a command
output = await self.send_command(self.cmd_get_lldp_neighbors)
# Display info message
log.info(f"get_lldp_neighbors:\n'{output}'")
# Convert a string into a list of strings
lines = output.splitlines()
# Read each line
for line in lines:
# Default value for local interface (no interface)
local_interface = None
# Initialize potential LLDP data with default values
chassis_id = ""
port_id = ""
ttl = None
port_description = ""
system_name = ""
system_description = ""
system_capabilities = []
management_address = ""
# Get local interface
if " interface=" in line:
local_interface = line.split(" interface=")[-1].split()[0].split(",")[0]
# Display info message
log.info(f"get_lldp_neighbors: local_interface: {local_interface}")
# Get Chassis ID - TLV type 1
if " mac-address=" in line:
chassis_id = line.split(" mac-address=")[-1].split()[0]
# Convert the MAC address of the Chassis ID into a lower case string
chassis_id = chassis_id.lower()
# Display info message
log.info(f"get_lldp_neighbors: chassis_id: {chassis_id}")
# Get Port ID - TLV type 2
if " interface-name=" in line:
port_id = (
line.split(" interface-name=")[-1].split("=")[0].rsplit(" ", 1)[0]
)
# Display info message
log.info(f"get_lldp_neighbors: port_id: {port_id}")
# Get Time To Live - TLV type 3
# Not available on RouterOS. "age" parameter is a decreasing counter
# Get Port description - TLV type 4
# Not available on RouterOS.
# Get System name - TLV type 5
if " identity=" in line:
system_name = line.split(" identity=")[-1].split()[0]
# Check if return value is a string "" (just double quotes which means empty data)
if system_name == '""':
# Yes, empty string
system_name = ""
# Display info message
log.info(f"get_lldp_neighbors: system_name: {system_name}")
# Get System description - TLV type 6
if " system-description=" in line:
system_description = (
line.split(" system-description=")[-1]
.split("=")[0]
.rsplit(" ", 1)[0]
)
# Display info message
log.info(
f"get_lldp_neighbors: system_description: {system_description}"
)
# Get System capabilities - TLV type 7
if " system-caps=" in line:
# First get the capablities as a string separated by commas
# e.g.: 'bridge,wlan-ap,router,station-only'
string_capability = line.split(" system-caps=")[-1].split()[0]
# Then convert them into a list of characters
# Code Capability
# B Bridge (Switch)
# C DOCSIS Cable Device
# O Other
# P Repeater
# R Router
# S Station
# T Telephone
# W WLAN Access Point
# Read each capability
for capability in string_capability.split(","):
# Check if string is not null
if len(capability) > 0:
# Get the first letter of the capability, convert this character in uppercase
# and add it to a list
system_capabilities.append(capability[0].upper())
# Display info message
log.info(
f"get_lldp_neighbors: system_capabilities: {system_capabilities}"
)
# Get Management address - TLV type 8
if " address=" in line:
management_address = line.split(" address=")[-1].split()[0]
# LLDP TLV Type 9 to 127 are currently not supported by this method
# Check if data can be considered as LLDP
if local_interface and (
port_id or system_name or system_description or management_address
):
# Probably LLDP
# Create a dictionary
returned_dict = {
"chassis_id": chassis_id,
"port_id": port_id,
"ttl": ttl,
"port_description": port_description,
"system_name": system_name,
"system_description": system_description,
"system_capabilities": system_capabilities,
"management_address": management_address,
}
# Add the information to the dict
# Each interface can get several returned_dict in a list
returned_output[local_interface] = returned_output.get(
local_interface, []
) + [returned_dict]
# Return data
return returned_output
async def get_interfaces(self):
"""
        Async method used to get the information of ALL the interfaces of the device
some commands are used to collect interface data:
- one for status
- one for duplex/speed
- one for mode (access / trunk / hybrid)
:return: Interfaces of the device
:rtype: dict of dict
"""
# Display info message
log.info("get_interfaces")
# By default nothing is returned
returned_output = {}
# Command for the status of the interfaces
# Send a command
output_status = await self.send_command(self.cmd_get_interfaces[0])
# Display info message
log.info(f"get_interfaces: status command\n'{output_status}'")
# Command for the speed and the duplex mode of the interfaces
# Send a command
output_bitrate = await self.send_command(self.cmd_get_interfaces[1])
# Display info message
log.info(f"get_interfaces: speed duplex command\n'{output_bitrate}'")
# Command for the mode of the interfaces (access or trunk)
# Send a command
output_mode = await self.send_command(self.cmd_get_interfaces[2])
# Display info message
log.info(f"get_interfaces: mode command\n'{output_mode}'")
# Convert a string into a list of strings (status)
lines = output_status.splitlines()
# Convert a string into a list of block of strings (duplex/speed)
block_of_strings_bitrate = output_bitrate.split("\n\n")
# Convert a string into a list of block of strings (mode)
block_of_strings_mode = output_mode.splitlines()
# By default there is no trunk interface
dict_trunk_interface = {}
# Read all tagged interfaces line by line
for line in block_of_strings_mode:
# Check if a " frame-types=" is inside the string
if " frame-types=" in line:
# Yes
# Save the string with the name of the interfaces separated with a comma
frame_types = line.split(" frame-types=")[-1].split()[0]
# Mikrotik devices have 3 modes:
# access, trunk or hybrid
# (FrameTypes ::= admit-all | admit-only-untagged-and-priority-tagged | admit-only-vlan-tagged)
#
# self.interface_mode = {
# "access": "admit-only-untagged-and-priority-tagged",
# "trunk": "admit-only-vlan-tagged",
# "hybrid": "admit-all",
# }
# Check all modes an interface can get
for mode in self.interface_mode:
# Does this interface is in the current mode?
if frame_types == self.interface_mode[mode]:
# Yes
# Display info message
log.info(
f"get_interfaces: frame-types: mode found: '{frame_types}'"
)
# Get the name of the interface
interface_trunk = line.split(" interface=")[-1].split()[0]
# Display info message
log.info(
f"get_interfaces: frame-types: interface: '{interface_trunk}'"
)
# So save the interface mode with a conventional name
dict_trunk_interface[interface_trunk] = mode
# Leave the loop
break
# # Check if value is not empty
# if tagged_interfaces != '""':
# # Not empty
# # Read all trunk interfaces found and separate them
# for interface_trunk in tagged_interfaces.split(","):
# # Save the trunk interface
# dict_trunk_interface[interface_trunk] = True
# Read each line
for line in lines:
# Initialize data with default values
interface_name = ""
operational = False
admin_state = False
maximum_frame_size = 0
full_duplex = False
speed = 0 # speed is in Mbit/s
mode = "access"
description = ""
# Get interface name
if " name=" in line:
interface_name = line.split(" name=")[-1].split()[0]
# Display info message
log.info(f"get_interfaces: interface_name: {interface_name}")
# Get operational and admin_state status
if len(line) > 3:
data = line[3].upper()
# operational + admin_state = "up"?
if data == "R":
# Yes
operational = True
admin_state = True
# operational = "down" and admin_state = "up"?
elif data == " ":
# Yes
admin_state = True
# operational + admin_state = "down" means data == "X"
# No need to compare since default values are already fine
# Display info message
log.info(f"get_interfaces: operational: {operational}, admin_state")
# Get maximum frame size
if " l2mtu=" in line:
maximum_frame_size = int(line.split(" l2mtu=")[-1].split()[0])
# Display info message
log.info(
f"get_interfaces: maximum_frame_size : {maximum_frame_size}"
)
# Get speed and duplex information
for index, data_block in enumerate(block_of_strings_bitrate):
# Display info message
log.info(
f"get_interfaces: get_speed: index: {index} [{len(block_of_strings_bitrate)}]"
)
# Is the name of interface found in the block of strings?
if f"name: {interface_name}" in data_block:
# Yes, so this block of strings has information on the interface
# Display info message
log.info(f"get_interfaces: get_speed: index found: {index}")
# " rate: " field found in the block of strings? (speed)
if " rate: " in data_block:
# Yes
# Then extract the string data
rate_string = (
data_block.split(" rate: ")[-1].split()[0].lower()
)
# Is is mbps?
if "mbps" in rate_string:
# Yes
# Then speed is saved
speed = int(float(rate_string.split("mbps")[0]))
# Is is gbps?
elif "gbps" in rate_string:
# Yes
# Then speed is saved in mpbs
speed = int(float(rate_string.split("gbps")[0]) * 1000)
# Is is tbps? (not seen on current | |
from enum import IntEnum
from util import *
class CPU6502_FLAG(IntEnum):
    """Bit masks for the individual flags of the 6502 status register."""
    C = 0x01  # bit 0
    Z = 0x02  # bit 1
    I = 0x04  # bit 2
    D = 0x08  # bit 3
    B = 0x10  # bit 4
    U = 0x20  # bit 5
    V = 0x40  # bit 6
    N = 0x80  # bit 7
class OP():
    """One entry of the 6502 opcode lookup table.

    Bundles the instruction implementation, the addressing-mode handler,
    and the base cycle count for a single opcode.
    """

    def __init__(self, operate, addr_mode, cycles):
        self.operate = operate      # instruction implementation (callable)
        self.addr_mode = addr_mode  # addressing-mode handler (callable)
        self.cycles = cycles        # base number of clock cycles

    def __repr__(self):
        # Added for debuggability when inspecting the 256-entry lookup table.
        return (f'OP(operate={self.operate!r}, addr_mode={self.addr_mode!r}, '
                f'cycles={self.cycles!r})')
class CPU6502():
    def __init__(self):
        """Initialize the register file, emulation helper state, and the
        256-entry opcode dispatch table (one OP entry per opcode byte)."""
        # Connected bus is attached later; None until then.
        self.bus = None
        # Registers
        self.acc = 0x00
        self.reg_x = 0x00
        self.reg_y = 0x00
        self.stack = 0x00
        self.pcount = 0x0000
        self.status = 0x00
        # Assistive variables to facilitate emulation
        self.fetched = 0x00
        self.addr_abs = 0x0000
        self.addr_rel = 0x0000
        self.opcode = 0x00
        self.cycles = 0x00
        self.clock_count = 0
        # Dispatch table indexed by opcode value: each OP pairs the
        # instruction handler with its addressing mode and base cycle count.
        # XXX marks illegal/unimplemented opcodes.
        self.lookup = [
            # opcodes 0x00 - 0x0F
            OP(self.BRK, self.IMM, 7 ),
            OP(self.ORA, self.IZX, 6 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 8 ),
            OP(self.NOP, self.IMP, 3 ),
            OP(self.ORA, self.ZP0, 3 ),
            OP(self.ASL, self.ZP0, 5 ),
            OP(self.XXX, self.IMP, 5 ),
            OP(self.PHP, self.IMP, 3 ),
            OP(self.ORA, self.IMM, 2 ),
            OP(self.ASL, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.NOP, self.IMP, 4 ),
            OP(self.ORA, self.ABS, 4 ),
            OP(self.ASL, self.ABS, 6 ),
            OP(self.XXX, self.IMP, 6 ),
            # opcodes 0x10 - 0x1F
            OP(self.BPL, self.REL, 2 ),
            OP(self.ORA, self.IZY, 5 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 8 ),
            OP(self.NOP, self.IMP, 4 ),
            OP(self.ORA, self.ZPX, 4 ),
            OP(self.ASL, self.ZPX, 6 ),
            OP(self.XXX, self.IMP, 6 ),
            OP(self.CLC, self.IMP, 2 ),
            OP(self.ORA, self.ABY, 4 ),
            OP(self.NOP, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 7 ),
            OP(self.NOP, self.IMP, 4 ),
            OP(self.ORA, self.ABX, 4 ),
            OP(self.ASL, self.ABX, 7 ),
            OP(self.XXX, self.IMP, 7 ),
            # opcodes 0x20 - 0x2F
            OP(self.JSR, self.ABS, 6 ),
            OP(self.AND, self.IZX, 6 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 8 ),
            OP(self.BIT, self.ZP0, 3 ),
            OP(self.AND, self.ZP0, 3 ),
            OP(self.ROL, self.ZP0, 5 ),
            OP(self.XXX, self.IMP, 5 ),
            OP(self.PLP, self.IMP, 4 ),
            OP(self.AND, self.IMM, 2 ),
            OP(self.ROL, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.BIT, self.ABS, 4 ),
            OP(self.AND, self.ABS, 4 ),
            OP(self.ROL, self.ABS, 6 ),
            OP(self.XXX, self.IMP, 6 ),
            # opcodes 0x30 - 0x3F
            OP(self.BMI, self.REL, 2 ),
            OP(self.AND, self.IZY, 5 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 8 ),
            OP(self.NOP, self.IMP, 4 ),
            OP(self.AND, self.ZPX, 4 ),
            OP(self.ROL, self.ZPX, 6 ),
            OP(self.XXX, self.IMP, 6 ),
            OP(self.SEC, self.IMP, 2 ),
            OP(self.AND, self.ABY, 4 ),
            OP(self.NOP, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 7 ),
            OP(self.NOP, self.IMP, 4 ),
            OP(self.AND, self.ABX, 4 ),
            OP(self.ROL, self.ABX, 7 ),
            OP(self.XXX, self.IMP, 7 ),
            # opcodes 0x40 - 0x4F
            OP(self.RTI, self.IMP, 6 ),
            OP(self.EOR, self.IZX, 6 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 8 ),
            OP(self.NOP, self.IMP, 3 ),
            OP(self.EOR, self.ZP0, 3 ),
            OP(self.LSR, self.ZP0, 5 ),
            OP(self.XXX, self.IMP, 5 ),
            OP(self.PHA, self.IMP, 3 ),
            OP(self.EOR, self.IMM, 2 ),
            OP(self.LSR, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.JMP, self.ABS, 3 ),
            OP(self.EOR, self.ABS, 4 ),
            OP(self.LSR, self.ABS, 6 ),
            OP(self.XXX, self.IMP, 6 ),
            # opcodes 0x50 - 0x5F
            OP(self.BVC, self.REL, 2 ),
            OP(self.EOR, self.IZY, 5 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 8 ),
            OP(self.NOP, self.IMP, 4 ),
            OP(self.EOR, self.ZPX, 4 ),
            OP(self.LSR, self.ZPX, 6 ),
            OP(self.XXX, self.IMP, 6 ),
            OP(self.CLI, self.IMP, 2 ),
            OP(self.EOR, self.ABY, 4 ),
            OP(self.NOP, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 7 ),
            OP(self.NOP, self.IMP, 4 ),
            OP(self.EOR, self.ABX, 4 ),
            OP(self.LSR, self.ABX, 7 ),
            OP(self.XXX, self.IMP, 7 ),
            # opcodes 0x60 - 0x6F
            OP(self.RTS, self.IMP, 6 ),
            OP(self.ADC, self.IZX, 6 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 8 ),
            OP(self.NOP, self.IMP, 3 ),
            OP(self.ADC, self.ZP0, 3 ),
            OP(self.ROR, self.ZP0, 5 ),
            OP(self.XXX, self.IMP, 5 ),
            OP(self.PLA, self.IMP, 4 ),
            OP(self.ADC, self.IMM, 2 ),
            OP(self.ROR, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.JMP, self.IND, 5 ),
            OP(self.ADC, self.ABS, 4 ),
            OP(self.ROR, self.ABS, 6 ),
            OP(self.XXX, self.IMP, 6 ),
            # opcodes 0x70 - 0x7F
            OP(self.BVS, self.REL, 2 ),
            OP(self.ADC, self.IZY, 5 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 8 ),
            OP(self.NOP, self.IMP, 4 ),
            OP(self.ADC, self.ZPX, 4 ),
            OP(self.ROR, self.ZPX, 6 ),
            OP(self.XXX, self.IMP, 6 ),
            OP(self.SEI, self.IMP, 2 ),
            OP(self.ADC, self.ABY, 4 ),
            OP(self.NOP, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 7 ),
            OP(self.NOP, self.IMP, 4 ),
            OP(self.ADC, self.ABX, 4 ),
            OP(self.ROR, self.ABX, 7 ),
            OP(self.XXX, self.IMP, 7 ),
            # opcodes 0x80 - 0x8F
            OP(self.NOP, self.IMP, 2 ),
            OP(self.STA, self.IZX, 6 ),
            OP(self.NOP, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 6 ),
            OP(self.STY, self.ZP0, 3 ),
            OP(self.STA, self.ZP0, 3 ),
            OP(self.STX, self.ZP0, 3 ),
            OP(self.XXX, self.IMP, 3 ),
            OP(self.DEY, self.IMP, 2 ),
            OP(self.NOP, self.IMP, 2 ),
            OP(self.TXA, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.STY, self.ABS, 4 ),
            OP(self.STA, self.ABS, 4 ),
            OP(self.STX, self.ABS, 4 ),
            OP(self.XXX, self.IMP, 4 ),
            # opcodes 0x90 - 0x9F
            OP(self.BCC, self.REL, 2 ),
            OP(self.STA, self.IZY, 6 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 6 ),
            OP(self.STY, self.ZPX, 4 ),
            OP(self.STA, self.ZPX, 4 ),
            OP(self.STX, self.ZPY, 4 ),
            OP(self.XXX, self.IMP, 4 ),
            OP(self.TYA, self.IMP, 2 ),
            OP(self.STA, self.ABY, 5 ),
            OP(self.TXS, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 5 ),
            OP(self.NOP, self.IMP, 5 ),
            OP(self.STA, self.ABX, 5 ),
            OP(self.XXX, self.IMP, 5 ),
            OP(self.XXX, self.IMP, 5 ),
            # opcodes 0xA0 - 0xAF
            OP(self.LDY, self.IMM, 2 ),
            OP(self.LDA, self.IZX, 6 ),
            OP(self.LDX, self.IMM, 2 ),
            OP(self.XXX, self.IMP, 6 ),
            OP(self.LDY, self.ZP0, 3 ),
            OP(self.LDA, self.ZP0, 3 ),
            OP(self.LDX, self.ZP0, 3 ),
            OP(self.XXX, self.IMP, 3 ),
            OP(self.TAY, self.IMP, 2 ),
            OP(self.LDA, self.IMM, 2 ),
            OP(self.TAX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.LDY, self.ABS, 4 ),
            OP(self.LDA, self.ABS, 4 ),
            OP(self.LDX, self.ABS, 4 ),
            OP(self.XXX, self.IMP, 4 ),
            # opcodes 0xB0 - 0xBF
            OP(self.BCS, self.REL, 2 ),
            OP(self.LDA, self.IZY, 5 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 5 ),
            OP(self.LDY, self.ZPX, 4 ),
            OP(self.LDA, self.ZPX, 4 ),
            OP(self.LDX, self.ZPY, 4 ),
            OP(self.XXX, self.IMP, 4 ),
            OP(self.CLV, self.IMP, 2 ),
            OP(self.LDA, self.ABY, 4 ),
            OP(self.TSX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 4 ),
            OP(self.LDY, self.ABX, 4 ),
            OP(self.LDA, self.ABX, 4 ),
            OP(self.LDX, self.ABY, 4 ),
            OP(self.XXX, self.IMP, 4 ),
            # opcodes 0xC0 - 0xCF
            OP(self.CPY, self.IMM, 2 ),
            OP(self.CMP, self.IZX, 6 ),
            OP(self.NOP, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 8 ),
            OP(self.CPY, self.ZP0, 3 ),
            OP(self.CMP, self.ZP0, 3 ),
            OP(self.DEC, self.ZP0, 5 ),
            OP(self.XXX, self.IMP, 5 ),
            OP(self.INY, self.IMP, 2 ),
            OP(self.CMP, self.IMM, 2 ),
            OP(self.DEX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.CPY, self.ABS, 4 ),
            OP(self.CMP, self.ABS, 4 ),
            OP(self.DEC, self.ABS, 6 ),
            OP(self.XXX, self.IMP, 6 ),
            # opcodes 0xD0 - 0xDF
            OP(self.BNE, self.REL, 2 ),
            OP(self.CMP, self.IZY, 5 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 8 ),
            OP(self.NOP, self.IMP, 4 ),
            OP(self.CMP, self.ZPX, 4 ),
            OP(self.DEC, self.ZPX, 6 ),
            OP(self.XXX, self.IMP, 6 ),
            OP(self.CLD, self.IMP, 2 ),
            OP(self.CMP, self.ABY, 4 ),
            OP(self.NOP, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 7 ),
            OP(self.NOP, self.IMP, 4 ),
            OP(self.CMP, self.ABX, 4 ),
            OP(self.DEC, self.ABX, 7 ),
            OP(self.XXX, self.IMP, 7 ),
            # opcodes 0xE0 - 0xEF
            OP(self.CPX, self.IMM, 2 ),
            OP(self.SBC, self.IZX, 6 ),
            OP(self.NOP, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 8 ),
            OP(self.CPX, self.ZP0, 3 ),
            OP(self.SBC, self.ZP0, 3 ),
            OP(self.INC, self.ZP0, 5 ),
            OP(self.XXX, self.IMP, 5 ),
            OP(self.INX, self.IMP, 2 ),
            OP(self.SBC, self.IMM, 2 ),
            OP(self.NOP, self.IMP, 2 ),
            OP(self.SBC, self.IMP, 2 ),
            OP(self.CPX, self.ABS, 4 ),
            OP(self.SBC, self.ABS, 4 ),
            OP(self.INC, self.ABS, 6 ),
            OP(self.XXX, self.IMP, 6 ),
            # opcodes 0xF0 - 0xFF
            OP(self.BEQ, self.REL, 2 ),
            OP(self.SBC, self.IZY, 5 ),
            OP(self.XXX, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 8 ),
            OP(self.NOP, self.IMP, 4 ),
            OP(self.SBC, self.ZPX, 4 ),
            OP(self.INC, self.ZPX, 6 ),
            OP(self.XXX, self.IMP, 6 ),
            OP(self.SED, self.IMP, 2 ),
            OP(self.SBC, self.ABY, 4 ),
            OP(self.NOP, self.IMP, 2 ),
            OP(self.XXX, self.IMP, 7 ),
            OP(self.NOP, self.IMP, 4 ),
            OP(self.SBC, self.ABX, 4 ),
            OP(self.INC, self.ABX, 7 ),
            OP(self.XXX, self.IMP, 7 ),
            ]
def reset(self):
self.addr_abs = 0xFFFC
lo = to_16_bits(self.read(self.addr_abs + 0))
hi = to_16_bits(self.read(self.addr_abs + 1))
self.pcount = (hi << 8) | lo
self.acc = 0
self.reg_x = 0
self.reg_y = 0
self.stack = 0xFD
self.status = 0x00 | CPU6502_FLAG.U
self.addr_rel = 0x0000
self.addr_abs = 0x0000
self.fetched = 0x00
self.cycles = 8
def irq(self):
if (self.get_flag(CPU6502_FLAG.I) == 0):
self.write(0x0100 + self.stack, (self.pcount >> 8) & 0x00FF)
self.stack = 0xFF & (self.stack - 1)
self.write(0x0100 + self.stack, self.pcount & 0x00FF)
self.stack = 0xFF & (self.stack - 1)
self.set_flag(CPU6502_FLAG.B, 0)
self.set_flag(CPU6502_FLAG.U, 1)
self.set_flag(CPU6502_FLAG.I, 1)
self.write(0x0100 + self.stack, self.status)
self.stack = 0xFF & (self.stack - 1)
self.addr_abs = 0xFFFE
lo = self.read(self.addr_abs + 0) & 0XFFFF
hi = self.read(self.addr_abs + 1) & 0XFFFF
self.pcount = (hi << 8) | lo
self.cycles = 7
def nmi(self):
self.write(0x0100 + self.stack, to_8_bits(self.pcount >> 8))
| |
# <reponame>jiangzoi/incubator-tvm
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, line-too-long, unused-argument
"""Caffe2 frontend"""
import tvm
from tvm.ir import IRModule
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from ... import nd as _nd
from .common import AttrCvt, Renamer
from .common import get_relay_op, new_var, infer_channels
__all__ = ['from_caffe2']  # public API of this module
def dimension_picker(prefix, surfix=''):
    """Return a closure that maps an attr dict to a 2-D op name.

    The closure appends ``'2d'`` (plus the optional suffix) to *prefix* when
    the attr dict describes a two-dimensional kernel, and raises otherwise.
    """
    def _impl(attr):
        rank = len(attr['kernel_shape'])
        if rank != 2:
            raise tvm.error.OpAttributeUnImplemented(
                'Non-2D kernels are not supported for operator {}2d'.format(prefix))
        return prefix + '2d' + surfix
    return _impl
def revert_caffe2_pad(pads):
    """Halve Caffe2's doubled padding specification.

    Caffe2 states padding twice (begin/end); a 4-element list is reduced to
    its first two entries, a 2-element list is passed through unchanged.
    """
    count = len(pads)
    if count == 4:
        return pads[:2]
    if count == 2:
        return pads
    raise tvm.error.OpAttributeInvalid(
        'Number of pads must equal 2 or 4.')
def dimension_constraint():
    """Build the (predicate, error message) pair used as an AttrCvt check."""
    def _dim_check(args):
        # Accept only two-dimensional kernels.
        return len(args['kernel_shape']) == 2
    return _dim_check, "Only 2d kernel supported."
def _clean_up_pool_args(args):
""" A helper function to clean up common arguments in conv and pooling ops.
"""
assert isinstance(args, dict)
if 'stride_h' in args and 'stride_w' in args:
assert 'stride' not in args and 'strides' not in args
args['strides'] = [args['stride_h'], args['stride_w']]
args.pop('stride_h')
args.pop('stride_w')
elif 'stride' in args:
args['strides'] = [args['stride'], args['stride']]
args.pop('stride')
# rename 'kernel', 'kernels', to 'kernel_shape'
if 'kernel_h' in args and 'kernel_w' in args:
assert 'kernel' not in args and 'kernels' not in args
args['kernel_shape'] = [args['kernel_h'], args['kernel_w']]
args.pop('kernel_h')
args.pop('kernel_w')
elif 'kernel' in args:
args['kernel_shape'] = [args['kernel'], args['kernel']]
args.pop('kernel')
elif 'kernels' in args:
args['kernel_shape'] = args['kernels']
args.pop('kernels')
if 'pad_t' in args and 'pad_l' in args and 'pad_b' in args and 'pad_r' in args:
assert 'pad' not in args and 'pads' not in args
args['pads'] = [
args['pad_t'], args['pad_l'], args['pad_b'], args['pad_r']
]
for pad in ['pad_t', 'pad_l', 'pad_b', 'pad_r']:
args.pop(pad)
elif 'pad' in args:
args['pads'] = [args['pad'], args['pad']]
args.pop('pad')
if 'dilation_h' in args and 'dilation_w' in args:
assert 'dilation' not in args and 'dilations' not in args
args['dilations'] = [args['dilation_h'], args['dilation_w']]
args.pop('dilation_h')
args.pop('dilation_w')
elif 'dilation' in args:
args['dilations'] = [args['dilation'], args['dilation']]
args.pop('dilation')
return args
class Caffe2OpConverter(object):
    """Base class for Caffe2 operator converters.

    Subclasses implement the conversion as a classmethod named ``_impl``.
    """

    @classmethod
    def get_converter(cls):
        """Return the subclass's ``_impl`` callable.

        Raises ``tvm.error.OpNotImplemented`` when the subclass does not
        define ``_impl``.
        """
        converter = getattr(cls, '_impl', None)
        if converter is not None:
            return converter
        raise tvm.error.OpNotImplemented(
            'Operator {} is not supported in frontend Caffe2.'.format(cls.__name__))
# Caffe2 attribute names that are internal implementation details; they are
# fed to AttrCvt's `ignores` list and dropped during conversion.
_caffe2_internal_args = [
    # nnpack args
    'algo',
    'convolution_transform_strategy',
    'float16_compute',
    'shared_buffer',
    # training args
    'init_params',
    'cudnn_exhaustive_search',
    'exhaustive_search',
    # other internal args (deconv adjustment, quantization — not used here)
    'adj',
    'hwgq',
    # args that we don't care
    'legacy_pad',
]
class Elemwise(Caffe2OpConverter):
    """Base converter for binary element-wise ops; subclasses set ``name``
    to the target Relay op name."""
    name = ''
    @classmethod
    def _impl(cls, inputs, args, params):
        assert len(inputs) == 2, "Math op take 2 inputs, {} given".format(
            len(inputs))
        conv_ops = ["conv2d", "conv2d_transpose"]
        # When broadcasting onto a conv output, pad the second operand with
        # two trailing axes so it lines up with the NCHW layout.
        if args.get('broadcast', 0) and any(x in str(inputs[0]) for x in conv_ops):
            # TODO(zhreshold): remove hard coded infershape
            inputs[1] = _op.expand_dims(
                inputs[1], axis=int(args.get('axis', 0)), num_newaxis=2)
        return get_relay_op(cls.name)(*inputs)
class Add(Elemwise):
    """ Operator converter for Add.

    Maps Caffe2 'Add' to the Relay 'add' op via the Elemwise base class.
    """
    name = 'add'
class Mul(Elemwise):
    """ Operator converter for Mul.

    Maps Caffe2 'Mul' to the Relay 'multiply' op via the Elemwise base class.
    """
    name = 'multiply'
class Pool(Caffe2OpConverter):
    """Base converter for pooling ops; subclasses set ``name`` to the Relay
    pooling-op family prefix (e.g. 'avg_pool', 'max_pool')."""
    name = ''
    @classmethod
    def _impl(cls, inputs, args, params):
        _clean_up_pool_args(args)
        if args.get('global_pooling') == 1:
            # Global pooling maps onto the dedicated global_* Relay op and
            # needs none of the kernel/stride/pad attributes.
            pick = dimension_picker('global_' + cls.name)
            return get_relay_op(pick(args))(*inputs)
        return AttrCvt(
            op_name=dimension_picker(cls.name),
            transforms={
                'kernel_shape': 'pool_size',
                'pads': ('padding', (0, 0), revert_caffe2_pad),
                'strides': 'strides',
            },
            ignores=['dilations', 'order', 'legacy_pad', 'global_pooling'],
            extras={'ceil_mode': False},
            custom_check=dimension_constraint())(inputs, args, params)
class AveragePool(Pool):
    # Relay op family prefix; dimension_picker appends '2d'.
    name = 'avg_pool'
class MaxPool(Pool):
    # Relay op family prefix; dimension_picker appends '2d'.
    name = 'max_pool'
class Conv(Caffe2OpConverter):
    """Operator converter for Conv (2-D convolution, optional bias)."""
    @classmethod
    def _impl(cls, inputs, args, params):
        # The weight tensor (inputs[1]) determines the output channel count.
        args['channels'] = infer_channels(inputs[1])
        _clean_up_pool_args(args)
        conv_out = AttrCvt(
            op_name=dimension_picker('conv'),
            transforms={
                'group': ('groups', 1),
                'kernel_shape': 'kernel_size',
                'pads': ('padding', (0, 0), revert_caffe2_pad),
                'strides': 'strides',
                'dilations': ('dilation', (1, 1)),
                'order': ('data_layout', ("NCHW"), lambda x: x if isinstance(x, str) else x.decode('UTF-8')),
            },
            excludes=[],
            ignores=_caffe2_internal_args,
            custom_check=dimension_constraint())(inputs[:2], args, params)
        # A third input, when present, is the bias vector.
        if len(inputs) == 3:
            conv_out = _op.nn.bias_add(conv_out, inputs[2])
        return conv_out
class ConvTranspose(Caffe2OpConverter):
    """Operator converter for ConvTranspose (2-D deconvolution, optional bias)."""
    @classmethod
    def _impl(cls, inputs, args, params):
        # NOTE(review): the True flag presumably selects the transposed
        # weight layout for channel inference — confirm in common.infer_channels.
        args['channels'] = infer_channels(inputs[1], True)
        _clean_up_pool_args(args)
        result = AttrCvt(
            op_name=dimension_picker('conv', '_transpose'),
            transforms={
                'kernel_shape': 'kernel_size',
                'pads': ('padding', (0, 0), revert_caffe2_pad),
                'dilations': ('dilation', (1, 1)),
                'order': ('data_layout', ("NCHW"), lambda x: x if isinstance(x, str) else x.decode('UTF-8')),
            },
            excludes=[],
            ignores=_caffe2_internal_args,
            custom_check=dimension_constraint())(inputs[:2], args, params)
        # A third input, when present, is the bias vector.
        if len(inputs) == 3:
            result = _op.nn.bias_add(result, inputs[2])
        return result
class Concat(Caffe2OpConverter):
    """Operator converter for Concat."""
    @classmethod
    def _impl(cls, inputs, args, params):
        def _get_axis_from_order_str(order):
            # The attribute may arrive as bytes; normalize to str first.
            decoded = order if isinstance(order, str) else order.decode('UTF-8')
            axis = {'NCHW': 1, 'NHWC': 3}.get(decoded)
            if axis is None:
                raise tvm.error.OpAttributeUnImplemented(
                    'Order {} is not supported in operator Concat.'.format(decoded))
            return axis
        return AttrCvt(
            op_name='concatenate',
            transforms={
                'order': ('axis', (1), _get_axis_from_order_str),
            },
            excludes=['add_axis'])((inputs,), args, params)
class NormalizePlanarYUV(Caffe2OpConverter):
    """ Operator converter for NormalizePlanarYUV.
    caffe2 definition: https://github.com/pytorch/pytorch/blob/master/caffe2/operators/norm_planar_yuv_op.cc
    """
    @classmethod
    def _impl(cls, inputs, args, params):
        assert len(inputs) == 3
        data, mean, std = inputs
        # Broadcast the per-channel mean/std across the two spatial axes.
        mean_b = _op.expand_dims(mean, axis=2, num_newaxis=2)
        std_b = _op.expand_dims(std, axis=2, num_newaxis=2)
        return _op.divide(_op.subtract(data, mean_b), std_b)
class ResizeNearest(Caffe2OpConverter):
    """ Operator converter for Upsample (nearest mode).
    """
    @classmethod
    def _impl(cls, inputs, args, params):
        width_scale = args.get('width_scale', 1)
        height_scale = args.get('height_scale', 1)
        # Only uniform scaling is supported by this converter.
        assert width_scale == height_scale
        scale = int(width_scale)
        return _op.nn.upsampling(
            inputs[0], scale_h=scale, scale_w=scale, method="NEAREST_NEIGHBOR")
class Sum(Caffe2OpConverter):
    """ Operator converter for Sum (n-ary add).
    """
    @classmethod
    def _impl(cls, inputs, args, params):
        # Chain pairwise adds; note this deliberately stores each running
        # total back into `inputs`, matching the original accumulation.
        for idx in range(1, len(inputs)):
            inputs[idx] = _op.add(inputs[idx - 1], inputs[idx])
        return inputs[-1]
class Softmax(Caffe2OpConverter):
    """ Operator converter for Softmax.
    """
    @classmethod
    def _impl(cls, inputs, args, params):
        # Caffe2 defaults the softmax axis to 1 when the model omits it.
        axis = args.setdefault('axis', 1)
        return AttrCvt('softmax', transforms={'axis': ('axis', axis)})(inputs, args, params)
class FC(Caffe2OpConverter):
    """ Operator converter for FC (fully connected / dense layer).
    """
    @classmethod
    def _impl(cls, inputs, args, params):
        # Flatten everything but the batch axis before the dense layer.
        inputs[0] = _op.nn.batch_flatten(inputs[0])
        out = _op.nn.dense(
            inputs[0], inputs[1], units=infer_channels(inputs[1]))
        # A third input, when present, is the bias vector.
        if len(inputs) == 3:
            out = _op.nn.bias_add(out, inputs[2])
        return out
class SpatialBN(Caffe2OpConverter):
    """ Operator converter for SpatialBN.

    Maps Caffe2 spatial batch normalization onto Relay's batch_norm,
    dropping attributes that only matter during training.
    """
    @classmethod
    def _impl(cls, inputs, args, params):
        return AttrCvt(
            op_name='batch_norm',
            # NOTE(review): momentum appears to be training-only, hence
            # disabled for inference conversion — confirm.
            disables=['momentum'],
            ignores=[
                'order', 'spatial', 'is_test', 'consumed_inputs', 'num_batches'
            ])(inputs, args, params)
# Compatible operators that do NOT require any conversion.
_identity_list = []
# _convert_map defines maps of name to converter functor (callable):
# * for 1-to-1 mapping, use Renamer if nothing but the name differs
# * use AttrCvt if attributes need to be converted
# * for 1-to-N (composed) mapping, use custom callable functions
# * N-to-1 mapping is currently not supported (?)
# This is the minimal set of ops needed for squeezenet and resnet50.
def _get_convert_map():
    """Return the mapping from Caffe2 op names to Relay converter callables."""
    return {
        # caffe2 common operators
        'Add': Add.get_converter(),
        'Sum': Sum.get_converter(),
        'Mul': Mul.get_converter(),
        'Softmax': Softmax.get_converter(),
        # nn
        'AveragePool': AveragePool.get_converter(),
        'MaxPool': MaxPool.get_converter(),
        'Conv': Conv.get_converter(),
        'ConvTranspose': ConvTranspose.get_converter(),
        'Concat': Concat.get_converter(),
        'FC': FC.get_converter(),
        'SpatialBN': SpatialBN.get_converter(),
        'ResizeNearest': ResizeNearest.get_converter(),
        'Relu': AttrCvt('relu', {}, ignores=['order']),
        'Sigmoid': Renamer('sigmoid'),
        'Dropout': AttrCvt('dropout', {'ratio': 'rate'}, ignores=['is_test']),
        # c2 image preprocessing ops
        'NormalizePlanarYUV': NormalizePlanarYUV.get_converter(),
    }
class Caffe2NetDef(object):
"""A helper class for handling Relay expression copying from pb2.GraphProto.
Definition: https://github.com/pytorch/pytorch/blob/master/caffe2/proto/caffe2.proto
"""
def __init__(self, shape, dtype):
self._nodes = {}
self._params = {}
self._visited_nodes = set()
self._ops = {}
self._shape = shape
self._dtype = dtype
self._mod = IRModule({})
def from_caffe2(self, init_net, predict_net):
"""Construct Relay expression from caffe2 graph.
Parameters
----------
init_net : protobuf object
predict_net : protobuf object
Returns
-------
mod : tvm.IRModule
The module that optimizations will be performed on.
params : dict
A dict of name: tvm.nd.array pairs, used as pretrained weights
"""
# pylint: disable=import-outside-toplevel
from caffe2.python import workspace
workspace.RunNetOnce(init_net)
# Input
input_name = predict_net.op[0].input[0]
# Params
self._params = {}
| |
# <filename>katcp/test/test_resource_client.py
# Copyright 2014 National Research Foundation (South African Radio Astronomy Observatory)
# BSD license - see LICENSE for details
from __future__ import absolute_import, division, print_function
from future import standard_library
standard_library.install_aliases() # noqa: E402
import copy
import gc
import logging
import time
import unittest
import weakref
from builtins import object
from concurrent.futures import TimeoutError
import mock
import tornado
# module under test
from katcp import (Message, Sensor, ioloop_manager,
resource, resource_client)
from katcp.core import AsyncEvent, AttrDict, ProtocolFlags
from katcp.testutils import (DeviceTestSensor, DeviceTestServer,
TimewarpAsyncTestCase,
TimewarpAsyncTestCaseTimeAdvancer,
start_thread_with_cleanup)
logger = logging.getLogger(__name__)  # module-level logger, named after this module
class test_transform_future(tornado.testing.AsyncTestCase):
    """Tests for resource_client.transform_future."""

    def test_transform(self):
        # The transformed future must resolve to transform(result).
        source_future = tornado.concurrent.Future()
        transform = mock.Mock()
        transformed = resource_client.transform_future(transform, source_future)
        result = mock.Mock()
        source_future.set_result(result)
        self.assertIs(transformed.result(), transform.return_value)
        transform.assert_called_once_with(result)

    @tornado.testing.gen_test
    def test_exception_in_future(self):
        # An exception raised by the source future must propagate unchanged.
        class AnException(Exception):
            pass

        @tornado.gen.coroutine
        def raiser():
            raise AnException

        source_future = raiser()
        transform = mock.Mock()
        transformed = resource_client.transform_future(transform, source_future)
        with self.assertRaises(AnException):
            transformed.result()

    def test_exception_in_transform(self):
        # An exception raised inside the transform must propagate unchanged.
        source_future = tornado.concurrent.Future()
        transform = mock.Mock()

        class AnException(Exception):
            pass

        transform.side_effect = AnException
        transformed = resource_client.transform_future(transform, source_future)
        result = mock.Mock()
        source_future.set_result(result)
        transform.assert_called_once_with(result)
        with self.assertRaises(AnException):
            transformed.result()
class test_KATCPClientResourceRequest(unittest.TestCase):
    """Tests for resource_client.KATCPClientResourceRequest."""

    def setUp(self):
        # Fresh mock client and device-under-test for each test case.
        self.mock_client = mock.Mock()
        self.DUT = resource_client.KATCPClientResourceRequest(
            {'name': 'the-request',
             'description': 'The description',
             'timeout_hint': 33.34},
            self.mock_client)

    def test_init(self):
        # Constructor arguments must surface as attributes.
        self.assertEqual(self.DUT.name, 'the-request')
        self.assertEqual(self.DUT.description, 'The description')
        self.assertEqual(self.DUT.timeout_hint, 33.34)
        # Check that we are registered to the correct ABC
        self.assertIsInstance(self.DUT, resource.KATCPRequest)

    def test_request_with_timeout_hint(self):
        # Calling the request object forwards to the client's
        # wrapped_request, passing the configured timeout hint.
        response = self.DUT('parm1', 2)
        self.mock_client.wrapped_request.assert_called_once_with(
            'the-request', 'parm1', 2, timeout=33.34)
        self.assertIs(response, self.mock_client.wrapped_request.return_value)

    def test_request_no_timeout_hint(self):
        # Without a timeout hint, timeout=None must be forwarded.
        request_no_hint = resource_client.KATCPClientResourceRequest(
            {'name': 'the-other-request',
             'description': 'The other description',
             'timeout_hint': None},
            self.mock_client)
        response = request_no_hint('aparm', 3)
        self.mock_client.wrapped_request.assert_called_once_with(
            'the-other-request', 'aparm', 3, timeout=None)
        self.assertIs(response, self.mock_client.wrapped_request.return_value)
class test_KATCPClientResource(tornado.testing.AsyncTestCase):
def test_init(self):
resource_spec = dict(
name='testdev',
description='resource for testing',
address=('testhost', 12345),
controlled=True)
DUT = resource_client.KATCPClientResource(dict(resource_spec))
self.assertEqual(DUT.address, resource_spec['address'])
self.assertEqual(DUT.state, 'disconnected')
self.assertEqual(DUT.name, resource_spec['name'])
self.assertEqual(DUT.description, resource_spec['description'])
self.assertEqual(DUT.parent, None)
self.assertEqual(DUT.children, {})
self.assertEqual(DUT.controlled, True)
# Now try with a parent and no control
resource_spec['controlled'] = False
parent = mock.Mock()
DUT = resource_client.KATCPClientResource(
dict(resource_spec), parent=parent)
self.assertEqual(DUT.parent, parent)
self.assertEqual(DUT.controlled, False)
@tornado.testing.gen_test
def test_dummy_requests(self):
resource_spec_nodummy = dict(
name='testdev',
description='resource for testing',
address=('testhost', 12345),
controlled=True)
resource_spec_dummy = dict(resource_spec_nodummy)
resource_spec_dummy['dummy_unknown_requests'] = True
requests = ('req-one', 'req_two')
DUT_nodummy = self.get_DUT_mock_inspecting_client(
resource_spec_nodummy)
DUT_dummy = self.get_DUT_mock_inspecting_client(
resource_spec_dummy)
yield DUT_dummy._add_requests(requests)
yield DUT_nodummy._add_requests(requests)
# Check dummy flag
self.assertFalse(DUT_nodummy.dummy_unknown_requests)
self.assertTrue(DUT_dummy.dummy_unknown_requests)
# First check that actual requests are handled correctly
for DUT in (DUT_nodummy, DUT_dummy):
# For real requests we expect a string, see
# get_DUT_mock_inspecting_client() below.
req = DUT_nodummy.req.req_one
self.assertEqual(req.name, 'req-one')
# Check that the non-dummy client doesn't have non-existing requests
with self.assertRaises(AttributeError):
DUT_nodummy.req.blah
# Check that we get a dummy request for the dummied client
dummy_req = DUT_dummy.req.blah
dummy_reply = yield dummy_req('abc', 'def', 123)
self.assertTrue(dummy_reply.succeeded)
# Repeat dummy tests for a simple ClientGroup
group = resource_client.ClientGroup('group', (DUT_nodummy, DUT_dummy))
# A real request should appear on the group level too
req = group.req.req_one
self.assertEqual(req.name, 'req_one')
# Since group contains at least one dummy client, it too has dummy requests
dummy_req = group.req.blah
dummy_reply = yield dummy_req('abc', 'def', 123)
self.assertTrue(dummy_reply.succeeded)
# Check that group without dummy clients doesn't have non-existing requests
group_nodummy = resource_client.ClientGroup('group', (DUT_nodummy,))
with self.assertRaises(AttributeError):
group_nodummy.req.blah
def get_DUT_mock_inspecting_client(self, resource_spec, *args, **kwargs):
"""Return a KATCPClientResource instance with a mocked inspecting client
Note that the inspecting client request factory is moced to return a
string matching the name of the request rather than a KATCPRequest object
"""
DUT = resource_client.KATCPClientResource(
dict(resource_spec), *args, **kwargs)
ic = DUT._inspecting_client = mock.Mock()
def future_get_request(key):
f = tornado.concurrent.Future()
req_obj = resource_client.KATCPClientResourceRequest(
dict(name=key, description=key, timeout_hint=None), ic)
f.set_result(req_obj)
return f
ic.future_get_request.side_effect = future_get_request
return DUT
@tornado.testing.gen_test
def test_control(self):
always_allow = ('req-one', 'req_two', 'exclude_one')
always_exclude = ('exclude_one', 'exclude-two')
normal = ('normal', 'another-normal')
def katcp_form(reqs):
return tuple(r.replace('_', '-') for r in reqs)
dev_requests = set(katcp_form(always_allow + always_exclude + normal))
resource_spec = dict(
name='testdev',
address=('testhost', 12345),
always_allowed_requests=always_allow,
always_excluded_requests=always_exclude,
controlled=True)
DUT = self.get_DUT_mock_inspecting_client(resource_spec)
yield DUT._add_requests(dev_requests)
# We expect all the requests, except for those in the always_exclude list to be
# available. Note, exclude-one should not be available even though it is in
# always_allow, since always_exclude overrides always_allow.
self.assertEqual(sorted(DUT.req),
sorted(['req_one', 'req_two', 'normal', 'another_normal']))
# Now try one with no control, only req-one and req-two should be available
resource_spec['controlled'] = False
DUT = self.get_DUT_mock_inspecting_client(resource_spec)
yield DUT._add_requests(dev_requests)
self.assertEqual(sorted(DUT.req), sorted(['req_one', 'req_two']))
@tornado.testing.gen_test
def test_lowlevel_client_attributes(self):
resource_spec = dict(
name='testdev',
description='resource for testing',
address=('testhost', 12345),
controlled=True)
DUT = resource_client.KATCPClientResource(dict(resource_spec))
with self.assertRaises(RuntimeError):
# Before calling start() a runtime error should be raised since the inspecting
# client has not yet been instantiated
DUT.versions
with self.assertRaises(RuntimeError):
DUT.last_connect_time
ic = DUT.inspecting_client_factory(DUT.address[0], DUT.address[1], None)
DUT._inspecting_client = mock.Mock(spec_set=ic)
DUT._inspecting_client.katcp_client = mock.Mock(spec_set=ic.katcp_client)
v = DUT._inspecting_client.katcp_client.versions = mock.Mock()
l = DUT._inspecting_client.katcp_client.last_connect_time = mock.Mock()
self.assertIs(DUT.versions, v)
self.assertIs(DUT.last_connect_time, l)
@tornado.testing.gen_test
def test_list_sensors(self):
resource_spec = dict(
name='testdev',
address=('testhost', 12345))
DUT = resource_client.KATCPClientResource(resource_spec)
sens_manager = mock.create_autospec(
resource_client.KATCPClientResourceSensorsManager(mock.Mock(), "test"))
test_sensors_info = AttrDict(
sens_one=AttrDict(name='sens-one', description='sensor one', value=1),
sens_two=AttrDict(name='sens.two', description='sensor one', value=2),
sens_three=AttrDict(name='sens_three', description='sensor three', value=3))
sensor_strategies = dict(sens_one='event', sens_three='period 10')
def make_test_sensors(sensors_info):
test_sensors = AttrDict()
for sens_pyname, info in sensors_info.items():
info = dict(info)
info['sensor_type'] = Sensor.INTEGER
val = info.pop('value')
timestamp = val*10
received_timestamp = timestamp + 1
sens = test_sensors[sens_pyname] = resource.KATCPSensor(
info, sens_manager)
sens._reading = resource.KATCPSensorReading(
received_timestamp, timestamp, Sensor.NOMINAL, val)
test_sensors[sens_pyname] = sens
return test_sensors
test_sensors = make_test_sensors(test_sensors_info)
sens_manager.get_sampling_strategy.side_effect = (
lambda sens_name: resource.normalize_strategy_parameters(
sensor_strategies.get(
resource.escape_name(sens_name), 'none')))
DUT.sensor.update(test_sensors)
# Simple search based on python identifier
result = yield DUT.list_sensors('sens_one')
self.assertEqual(len(result), 1)
self.assertEqual(result[0], resource.SensorResultTuple(
test_sensors.sens_one, test_sensors_info.sens_one.name,
'sens_one', test_sensors_info.sens_one.description, 'integer', '',
test_sensors.sens_one.reading))
# Now get all the sensors
result = yield DUT.list_sensors('')
# built-in `sorted()` and `list.sort()` use __cmp__ for ordering in Python2.
# However, this breaks compatibility in Python3 due to
# https://docs.python.org/3/whatsnew/3.0.html#ordering-comparisons
result.sort(key=lambda obj: obj.name)
expected_result = sorted([
resource.SensorResultTuple(
test_sensors[s_id], test_sensors_info[s_id].name,
s_id, test_sensors_info[s_id].description, 'integer', '',
test_sensors[s_id].reading
)
for s_id in test_sensors_info
], key=lambda obj: obj.name)
self.assertEqual(result, expected_result)
# Test that all sensors are found using their Python identifiers
result = yield DUT.list_sensors('sens_two')
self.assertEqual(len(result), 1)
self.assertEqual(result[0].object, test_sensors.sens_two)
result = yield DUT.list_sensors('sens_three')
self.assertEqual(len(result), 1)
self.assertEqual(result[0].object, test_sensors.sens_three)
# Test using actual sensor name
result = yield DUT.list_sensors('sens_one', use_python_identifiers=False)
self.assertEqual(len(result), 0)
result = yield DUT.list_sensors('sens-one', use_python_identifiers=False)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].name, 'sens-one')
# Now test with strategy filter
result = yield DUT.list_sensors('', strategy=True)
self.assertEqual(len(result), len(sensor_strategies))
def test_until_sync_states(self):
    """State futures must track the disconnected/syncing/synced lifecycle."""
    spec = dict(name='testdev', address=('testhost', 12345))
    DUT = resource_client.KATCPClientResource(spec)
    # Initial state should be 'disconnected': until_synced() must hand back
    # a pending future while until_not_synced() resolves immediately.
    self.assertEqual(DUT.state, 'disconnected')
    self.assertFalse(DUT.until_synced().done())
    self.assertTrue(DUT.until_not_synced().done())
    # While 'syncing' the expectations are the same as for 'disconnected'.
    DUT._state.set_state('syncing')
    self.assertFalse(DUT.until_synced().done())
    self.assertTrue(DUT.until_not_synced().done())
    # Once 'synced' the two futures swap roles.
    DUT._state.set_state('synced')
    self.assertTrue(DUT.until_synced().done())
    self.assertFalse(DUT.until_not_synced().done())
class test_KATCPClientResource_Integrated(tornado.testing.AsyncTestCase):
def setUp(self):
    """Start a DeviceTestServer and record a resource spec pointing at it."""
    super(test_KATCPClientResource_Integrated, self).setUp()
    self.server = DeviceTestServer('', 0)
    # Ensure the server thread is stopped and joined when the test finishes.
    start_thread_with_cleanup(self, self.server)
    self.host, self.port = self.server.bind_address
    # 'controlled=True' lets the client resource issue requests to the server.
    self.default_resource_spec = dict(
        name='thething',
        address=self.server.bind_address,
        controlled=True)
@tornado.gen.coroutine
def _get_DUT_and_sync(self, resource_spec):
    """Create a KATCPClientResource, start it and wait until it is synced.

    Yields the started DUT via tornado's coroutine Return mechanism.
    """
    DUT = resource_client.KATCPClientResource(resource_spec)
    DUT.start()
    yield DUT.until_state('synced')
    raise tornado.gen.Return(DUT)
@tornado.testing.gen_test(timeout=1)
def test_requests(self):
    """Every server request appears on the DUT with dashes escaped."""
    DUT = yield self._get_DUT_and_sync(self.default_resource_spec)
    expected = sorted(name.replace('-', '_')
                      for name in self.server.request_names)
    self.assertEqual(sorted(DUT.req), expected)
@tornado.testing.gen_test(timeout=1)
def test_active(self):
    """Requests succeed while active and raise while inactive."""
    DUT = yield self._get_DUT_and_sync(self.default_resource_spec)
    self.assertTrue(DUT.is_active(), 'Expect DUT to be active initialy')
    reply = yield DUT.req.new_command()
    self.assertTrue(reply.succeeded, 'Expect request to be succesful in active state')
    # Deactivate: the resource must now refuse to send requests.
    DUT.set_active(False)
    with self.assertRaises(resource.KATCPResourceInactive):
        yield DUT.req.new_command()
    # Reactivating restores normal request behaviour.
    DUT.set_active(True)
    reply = yield DUT.req.new_command()
    self.assertTrue(reply.succeeded, 'Expect request to be succesful in active state')
@tornado.testing.gen_test(timeout=1)
def test_sensors(self):
    """Every server sensor appears on the DUT with dashes/dots escaped."""
    DUT = yield self._get_DUT_and_sync(self.default_resource_spec)
    expected = sorted(name.replace('-', '_').replace('.', '_')
                      for name in self.server.sensor_names)
    self.assertEqual(sorted(DUT.sensor), expected)
@tornado.testing.gen_test(timeout=1)
def test_interface_change(self):
    """Sensors/requests added to (or removed from) the server must appear on
    (or disappear from) the client after an #interface-changed inform forces
    a resync (state goes syncing -> synced)."""
    DUT = yield self._get_DUT_and_sync(self.default_resource_spec)
    sensors_before = set(DUT.sensor)
    reqs_before = set(DUT.req)
    # Add a new sensor to the server
    sensor = DeviceTestSensor(DeviceTestSensor.INTEGER, "another.int",
                              "An Integer.",
                              "count", [-5, 5], timestamp=self.io_loop.time(),
                              status=DeviceTestSensor.NOMINAL, value=3)
    self.server.add_sensor(sensor)
    # Check that the sensor does not exist currently
    self.assertNotIn(resource.escape_name(sensor.name), sensors_before)
    # Add a new request to the server
    def request_sparkling_new(self, req, msg):
        """A new command."""
        return Message.reply(msg.name, "ok", "bling1", "bling2")
    self.server._request_handlers['sparkling-new'] = request_sparkling_new
    # Check that the request did not exist before
    self.assertNotIn('sparkling-new', reqs_before)
    # Issue #interface-changed; the client should transition through
    # 'syncing' and rebuild its sensor/request lists.
    self.server.mass_inform(Message.inform('interface-changed'))
    yield DUT.until_state('syncing')
    yield DUT.until_state('synced')
    # Check if sensor/request was added (names are escaped client-side)
    self.assertEqual(set(DUT.sensor) - sensors_before, set(['another_int']))
    self.assertEqual(set(DUT.req) - reqs_before, set(['sparkling_new']))
    # And now remove them again
    self.server._request_handlers.pop('sparkling-new')
    self.server.remove_sensor('another.int')
    # Issue #interface-changed
    self.server.mass_inform(Message.inform('interface-changed'))
    yield DUT.until_state('syncing')
    yield DUT.until_state('synced')
    # Check if sensor/request was removed
    self.assertEqual(set(DUT.sensor), sensors_before)
    self.assertEqual(set(DUT.req), reqs_before)
@tornado.testing.gen_test
def test_no_memory_leak_after_usage(self):
DUT = yield self._get_DUT_and_sync(self.default_resource_spec)
wr = weakref.ref(DUT)
| |
# Watch out for this one! We're not using typical Python 2 floor division in
# this file, but rather floating point division.
from __future__ import division
from math import ceil
from google.appengine.api import namespace_manager
from google.appengine.api import users as app_engine_users
from google.appengine.ext import ndb
from webapp2_extras import sessions
from webapp2_extras.routes import RedirectRoute
import datetime
import json
import logging
import os
import re
import webapp2
from model import get_sql_models, User
import jwt_helper
import mysql_connection
import config
import util
class BaseHandler(webapp2.RequestHandler):
"""Ancestor of all other views/handlers."""
@classmethod
def using_sessions(klass):
    """Whether sessions are enabled, i.e. a session cookie name is configured."""
    cookie_name = getattr(config, 'session_cookie_name', False)
    return bool(cookie_name)
def dispatch(self):
    """Wraps the other request handlers.

    * Manages sessions
    * Manages request profiling
    * Sets per-environment domain/protocol env vars and the datastore
      namespace, and enforces the `requires_auth` class flag.
    """
    util.profiler.add_event("BaseHandler.dispatch()")
    # ** Code to run before all handlers goes here. ** #
    # The App Engine runtime does weird caching of classes and class
    # properties such that you can't expect them to be cleanly segregated
    # or reset between requests. But we want to use this property to avoid
    # multiple lookups of the same user within a request. So make sure it
    # has a clean start.
    # https://cloud.google.com/appengine/docs/standard/python/how-requests-are-handled#app-caching
    self._user = None
    if util.is_localhost():
        # ports are arbitrary, but convenient
        os.environ['YELLOWSTONE_DOMAIN'] = 'localhost:9080'
        os.environ['YELLOWSTONE_PROTOCOL'] = 'http'
        os.environ['NEPTUNE_DOMAIN'] = 'localhost:8080'
        os.environ['NEPTUNE_PROTOCOL'] = 'http'
        os.environ['TRITON_DOMAIN'] = 'localhost:10080'
        os.environ['TRITON_PROTOCOL'] = 'http'
    else:
        # Various DOMAINs remain set as in app.yaml
        os.environ['YELLOWSTONE_PROTOCOL'] = 'https'
        os.environ['NEPTUNE_PROTOCOL'] = 'https'
        os.environ['TRITON_PROTOCOL'] = 'https'
    # Set the namespace, which varies by branch.
    namespace = os.environ['NAMESPACE']
    if namespace:
        logging.info("Setting namespace: {}".format(namespace))
        namespace_manager.set_namespace(namespace)
    # Newly deployed dev branches might not have a database in their
    # namespace yet.
    self.init_database()
    if self.using_sessions():
        # Get a session store for this request.
        self.session_store = sessions.get_store(request=self.request)
    # Allow load testing services to log in quickly.
    if util.is_development() and self.request.get('demo_login', None) == 'wamxdkrwnkgey':
        user = User.get_by_id('User_demo')
        self.log_in(user)
        self.redirect(self.request.path)
    # Handler classes may set a class property `requires_auth` which triggers a check
    # for an authenticated user. If there isn't one, the request is immediately
    # rejected with a 401. This does not apply to preflight OPTIONS calls which never
    # include Authorization headers (they're about figuring out the server's CORS
    # rules, not taking any actions).
    authed = getattr(self, 'requires_auth', False)
    options = self.request.method == 'OPTIONS'
    # This may be used by downstream handlers to override permissions if
    # necessary.
    self.allowed_by_jwt = self.jwt_allows_endpoint(self.get_endpoint_str())
    if self.allowed_by_jwt:
        logging.info("BaseHandler: this request is ALLOWED by the jwt.")
    if authed and not options:
        user = self.get_current_user()
        if user.user_type == 'public' and not self.allowed_by_jwt:
            return self.http_unauthorized()
    try:
        # Call the overridden dispatch(), which has the effect of running
        # the get() or post() etc. of the inheriting class.
        webapp2.RequestHandler.dispatch(self)
    finally:
        # ** Code to run after all handlers goes here. ** #
        if self.using_sessions():
            # Save all sessions.
            self.session_store.save_sessions(self.response)
        util.profiler.add_event("END")
        # Turn on for debugging/profiling.
        # logging.info(util.profiler)
        util.profiler.clear()
def head(self, *args, **kwargs):
    """Answer HEAD by running GET, then stripping the response body.

    Per the HTTP spec, a HEAD response carries the headers (including
    Content-Length) the matching GET would have, but no body.
    https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/head
    """
    get_handler = getattr(self, 'get', None)
    if get_handler is None:
        # It's against the spec to 405 a GET or HEAD. Cheat and just
        # pretend the resource doesn't exist.
        self.error(404)
        return
    get_handler(*args, **kwargs)
    # Webapp recalculates Content-Length when the body is cleared, which
    # would always be zero. A HEAD response must advertise the length the
    # GET body _would_ have had, so restore it explicitly.
    body = self.response.body
    self.response.clear()
    self.response.headers['Content-Length'] = str(len(body))
def options(self, *args, **kwargs):
    """Answer preflight OPTIONS with the supported methods.

    OPTION Response based on
    http://zacstewart.com/2012/04/14/http-options-method.html
    """
    self.response.headers['Allow'] = 'GET,HEAD,OPTIONS'
    self.response.set_status(200)
def get_current_user(self):
    """Get the logged in user.

    Resolution order: (1) the per-request cache on self._user, (2) a JWT
    from the Authorization header, (3) the session cookie. Falls back to
    a public (anonymous) user when none of these yields a user.
    """
    cached_user = getattr(self, '_user', None)
    if cached_user:
        logging.info("BaseHandler.get_current_user() returning {} from "
                     "cache.".format(cached_user))
        return cached_user
    # Jwt overrides session. I.e. if your session said "User_A", but your
    # jwt says "User_B", we go with User B and change the session cookie.
    jwt_user, error = self.get_jwt_user()
    if jwt_user:
        logging.info("BaseHandler.get_current_user() returning {} from "
                     "jwt.".format(jwt_user))
        self.log_in(jwt_user)
        return jwt_user
    if self.using_sessions():
        session_user = User.get_by_id(self.session.get('user', None))
        if session_user:
            logging.info(
                "BaseHandler.get_current_user() returning {} from "
                "session cookie.".format(session_user)
            )
            self.log_in(session_user)
            return session_user
        if 'user' not in self.session:
            # Make sure the session keys always exist, even if they are
            # empty.
            self.session['user'] = None
    logging.info("BaseHandler.get_current_user() returning public user.")
    return User.create_public()
def get_jwt(self):
    """Attempt to read JWT from Authorization header."""
    auth_header = self.request.headers.get('Authorization', '')
    match = re.match(r'^Bearer (\S+)$', auth_header)
    if match is None:
        # There was no recognizable JWT header.
        return None
    return match.group(1)
def get_jwt_user(self, jwt_kwargs=None, token=None):
    """Is there a JWT that authenticates the user?

    Args:
        jwt_kwargs: dict, optional keyword arguments forwarded to
            jwt_helper.decode(). Default None, meaning no extra arguments.
        token: str, optional raw JWT; defaults to the Authorization header.

    Returns a tuple as (User or None, error str or None) where the error
    may be 'not found', 'used', or 'expired', just like
    AuthToken.checkTokenString. Error will only be not None if there is
    a JWT present, i.e. if the client isn't even trying to use JWT, the
    return value is (None, None).
    """
    # BUGFIX: the default used to be a mutable dict literal, which is a
    # single object shared across all calls; use a None sentinel instead.
    if jwt_kwargs is None:
        jwt_kwargs = {}
    token = token or self.get_jwt()
    payload, error = jwt_helper.decode(token, **jwt_kwargs)
    if not payload:
        # No valid token, so no user.
        return (None, error)
    if 'user_id' not in payload or 'email' not in payload:
        # No user in the token; this may only specify allowed_endpoints.
        return (None, jwt_helper.NO_USER)
    # Retrieve or create the users information.
    user = self.sync_user_with_token(payload)
    return (user, None)
def jwt_allows_endpoint(self, endpoint_str=None):
    """Certain handlers are designed to be called from other platforms but
    require explicit permission from that platform to use.

    Returns boolean.
    """
    payload, error = jwt_helper.decode(self.get_jwt())
    if error or not payload:
        return False
    if endpoint_str is None:
        endpoint_str = self.get_endpoint_str()
    allowed = payload.get('allowed_endpoints', [])
    return endpoint_str in allowed
def get_endpoint_str(self, method=None, platform=None, path=None):
    """Describe the current request with a formalized string.

    NOT domain-specific, rather it's platform-specific, i.e. all neptune
    environments have the same endpoint description.
    """
    request_method = method or self.request.method
    request_path = path or self.request.path
    return util.get_endpoint_str(
        method=request_method,
        platform=platform,
        path=request_path,
    )
def sync_user_with_token(self, payload):
    """Fetch or create the local User matching a verified jwt payload.

    The token is correctly signed and has valid structure by the time
    this is called.
    """
    def create_user(payload):
        # Materialize a local User from the jwt payload.
        short_uid = User.convert_uid(payload['user_id'])
        kwargs = {k: v for k, v in payload.items()
                  if k in ('email', 'user_type')}
        # Setting the user type is a potential security hole, so this
        # should only be used after the jwt has been verified.
        user = User.create(id=short_uid, **kwargs)
        user.put()
        return user
    is_auth_server = getattr(config, 'is_auth_server', False)
    if is_auth_server:
        # We are the auth server, the arbiter of what id goes with what
        # email, so we never _change_ ids. But do create the user if
        # necessary to help solve bad sync states with other systems.
        user = User.get_by_id(payload['user_id'])
        if not user:
            user = create_user(payload)
    else:
        # Not the auth server, defer to the id in the payload.
        if User.email_exists(payload['email']):
            user = User.get_by_auth('email', payload['email'])
            if user.uid != payload['user_id']:
                logging.error("User id mismatch found, more info in logs.")
                logging.info("Original user: {}".format(user.to_dict()))
                logging.info("Received token payload: {}".format(payload))
                user = User.resolve_id_mismatch(user, payload['user_id'])
        else:
            user = create_user(payload)
    return user
def get_third_party_auth(self, auth_type):
    """Wrangle and return authentication data from third parties.

    Args:
        auth_type: str, currently only 'google'

    Returns:
        dictionary of user information, which will always contain
        the key 'auth_id', or None if no third-party info is found.
    """
    if auth_type != 'google':
        # Unknown auth types fall through to None, as before.
        return None
    gae_user = app_engine_users.get_current_user()
    if not gae_user:
        logging.error("No google login found.")
        return None
    # Get user first and last names from nickname
    first_name = None
    last_name = None
    nickname = gae_user.nickname()
    if nickname:
        if ' ' in nickname:
            # BUGFIX: split on the first space only so multi-word last
            # names (e.g. "Ann van Dyk") are not truncated to one word.
            first_name, last_name = nickname.split(' ', 1)
        elif '@' in nickname:
            # Nickname looks like an email address; use the local part.
            first_name = nickname.split('@')[0]
        else:
            first_name = nickname
    # Combine fields in user keyword arguments
    user_kwargs = {
        'email': gae_user.email(),
        'google_id': gae_user.user_id(),
        'first_name': first_name,
        'last_name': last_name,
    }
    return user_kwargs
def authenticate(self, auth_type, email=None, password=None):
"""Takes various kinds of credentials (email/password, google
account) and logs you in.
Returns:
User entity the user has been successfully authenticated
'credentials_invalid' either because a password is wrong or no
account exists for those credentials
'credentials_missing' looked for credentials but didn't find any of
the appropriate kind.
'email_exists:[auth_type]' the supplied credentials are invalid AND
a user with the same email exists with
another auth type.
"""
# fetch matching | |
list(
self.labelblocksgroups[0].data.keys() - set(self.keys_ctrl)
)
def fit(
    self: Any,
    kind: str,
    ini: int = 0,
    fin: Optional[int] = None,
    no_weight: bool = False,
    **kwargs: Any
) -> None:
    """Fit titrations.

    Here is less general. It is for 2 labelblocks.

    Parameters
    ----------
    kind
        Titration type {'pH'|'Cl'}
    ini
        Initial point (default: 0).
    fin
        Final point (default: None).
    no_weight
        Do not use residues from single Labelblock fit as weight for global fitting.
    **kwargs
        Only for tval different from default=0.95 for the confint calculation.
    """
    # Select the model function for the titration type.
    # NOTE(review): any other `kind` silently keeps a previously assigned
    # self.fz (or fails later with AttributeError) — confirm whether this
    # should raise ValueError instead.
    if kind == 'Cl':
        self.fz = fz_Kd_singlesite
    elif kind == 'pH':
        self.fz = fz_pK_singlesite
    x = self.conc
    # First pass: fit each labelblock group independently, one row per well.
    fittings = []
    for lbg in self.labelblocksgroups:
        fitting = pd.DataFrame()
        for k, y in lbg.data.items():
            res = fit_titration(kind, x[ini:fin], np.array(y[ini:fin]), **kwargs)
            res.index = [k]
            # fitting = fitting.append(res, sort=False) DDD
            fitting = pd.concat([fitting, res], sort=False)
            # TODO assert (fitting.columns == res.columns).all()
            # better to refactor this function
        fittings.append(fitting)
    # Second pass: global fit, weighted on relative residues of the single
    # labelblock fittings computed above.
    fitting = pd.DataFrame()
    for k, y in self.labelblocksgroups[0].data.items():
        y2 = np.array(self.labelblocksgroups[1].data[k])
        y = np.array(y)
        # Relative residue of the first labelblock fit for this well.
        residue = y - self.fz(
            fittings[0]['K'].loc[k],
            np.array([fittings[0]['SA'].loc[k], fittings[0]['SB'].loc[k]]),
            x,
        )
        residue /= y  # TODO residue or
        # log(residue/y) https://www.tandfonline.com/doi/abs/10.1080/00031305.1985.10479385
        # Relative residue of the second labelblock fit for this well.
        residue2 = y2 - self.fz(
            fittings[1]['K'].loc[k],
            np.array([fittings[1]['SA'].loc[k], fittings[1]['SB'].loc[k]]),
            x,
        )
        residue2 /= y2
        if no_weight:
            # Uniform weighting requested: overwrite the residues with ones.
            for i, _rr in enumerate(residue):
                residue[i] = 1  # TODO use np.ones() but first find a way to test
                residue2[i] = 1
        res = fit_titration(
            kind,
            x[ini:fin],
            y[ini:fin],
            y2=y2[ini:fin],
            residue=residue[ini:fin],
            residue2=residue2[ini:fin],
            **kwargs
        )
        res.index = [k]
        # fitting = fitting.append(res, sort=False) DDD
        fitting = pd.concat([fitting, res], sort=False)
    fittings.append(fitting)
    # Attach the control-scheme label ('ctrl' column) to each fitted well.
    for fitting in fittings:
        for ctrl, v in self.scheme.items():
            for k in v:
                fitting.loc[k, 'ctrl'] = ctrl
    # self.fittings and self.fz
    self.fittings = fittings
    self._get_keys()
def plot_K(
    self,
    lb: int,
    xlim: Optional[Tuple[float, float]] = None,
    title: Optional[str] = None,
) -> plt.figure:
    """Plot K values as stripplot.

    Parameters
    ----------
    lb
        Labelblock index.
    xlim
        Range.
    title
        To name the plot.

    Returns
    -------
    The figure.

    Raises
    ------
    Exception
        When no fitting results are available (in this object).
    """
    if not hasattr(self, 'fittings'):
        raise Exception('run fit first')
    sb.set(style="whitegrid")
    f = plt.figure(figsize=(12, 16))
    # Ctrls
    ax1 = plt.subplot2grid((8, 1), loc=(0, 0))
    res_ctrl = None
    if len(self.keys_ctrl) > 0:
        res_ctrl = self.fittings[lb].loc[self.keys_ctrl]
        sb.stripplot(
            x=res_ctrl['K'],
            y=res_ctrl.index,
            size=8,
            orient='h',
            hue=res_ctrl.ctrl,
            ax=ax1,
        )
        plt.errorbar(
            res_ctrl.K,
            range(len(res_ctrl)),
            xerr=res_ctrl.sK,  # xerr=res_ctrl.sK*res_ctrl.tval,
            fmt='.',
            c="lightgray",
            lw=8,
        )
        plt.grid(1, axis='both')
    # Unks
    # FIXME keys_unk is an attribute or a property
    res_unk = self.fittings[lb].loc[self.keys_unk]
    ax2 = plt.subplot2grid((8, 1), loc=(1, 0), rowspan=7)
    sb.stripplot(
        x=res_unk['K'].sort_index(),
        y=res_unk.index,
        size=12,
        orient='h',
        palette="Greys",
        hue=res_unk['SA'].sort_index(),
        ax=ax2,
    )
    plt.legend('')
    plt.errorbar(
        res_unk['K'].sort_index(),
        range(len(res_unk)),
        xerr=res_unk['sK'].sort_index(),
        fmt='.',
        c="gray",
        lw=2,
    )
    plt.yticks(range(len(res_unk)), res_unk.index.sort_values())
    plt.ylim(-1, len(res_unk))
    plt.grid(1, axis='both')
    if not xlim:
        # BUGFIX: res_ctrl was previously referenced here unconditionally,
        # raising NameError when there are no control wells. Fall back to
        # the unknown wells alone in that case.
        kmin = res_unk['K'].min()
        kmax = res_unk['K'].max()
        if res_ctrl is not None:
            kmin = min(kmin, res_ctrl['K'].min())
            kmax = max(kmax, res_ctrl['K'].max())
        xlim = (0.99 * kmin, 1.01 * kmax)
    ax1.set_xlim(xlim)
    ax2.set_xlim(xlim)
    ax1.set_xticklabels([])
    ax1.set_xlabel('')
    title = title if title else ''
    title += ' label:' + str(lb)
    f.suptitle(title, fontsize=16)
    f.tight_layout(pad=1.2, w_pad=0.1, h_pad=0.5, rect=(0, 0, 1, 0.97))
    return f
def plot_well(self, key: str) -> plt.figure:
    """Plot global fitting using 2 labelblocks.

    Here is less general. It is for 2 labelblocks.

    Parameters
    ----------
    key
        Well position as dictionary key like "A01".

    Returns
    -------
    Pointer to mpl.figure.

    Raises
    ------
    Exception
        When fit is not yet run.
    """
    if not hasattr(self, 'fittings'):
        raise Exception('run fit first')
    plt.style.use(['seaborn-ticks', 'seaborn-whitegrid'])
    # Column layouts for the per-label and global parameter tables.
    out = ['K', 'sK', 'SA', 'sSA', 'SB', 'sSB']
    out2 = ['K', 'sK', 'SA', 'sSA', 'SB', 'sSB', 'SA2', 'sSA2', 'SB2', 'sSB2']
    x = self.conc
    # Slightly widened x range for plotting the fitted curves.
    xfit = np.linspace(min(x) * 0.98, max(x) * 1.02, 50)
    residues = []
    colors = []
    lines = []
    f = plt.figure(figsize=(10, 7))
    ax_data = plt.subplot2grid((3, 1), loc=(0, 0), rowspan=2)
    # labelblocks: plot each label's data and its single-label fit.
    for i, (lbg, df) in enumerate(zip(self.labelblocksgroups, self.fittings)):
        y = lbg.data[key]
        # ## data
        colors.append(plt.cm.Set2((i + 2) * 10))
        ax_data.plot(
            x, y, 'o', color=colors[i], markersize=12, label='label' + str(i)
        )
        ax_data.plot(
            xfit,
            self.fz(df.K.loc[key], [df.SA.loc[key], df.SB.loc[key]], xfit),
            '-',
            lw=2,
            color=colors[i],
            alpha=0.8,
        )
        ax_data.set_xticks(ax_data.get_xticks()[1:-1])
        # MAYBE ax_data.set_yscale('log')
        residues.append(
            y - self.fz(df.K.loc[key], [df.SA.loc[key], df.SB.loc[key]], x)
        )
        # Print out.
        line = ['%1.2f' % v for v in list(df[out].loc[key])]
        # Pad with empty cells for the SA2/SB2 columns the per-label
        # fits do not have.
        for _i in range(4):
            line.append('')
        lines.append(line)
    # ## residues
    ax1 = plt.subplot2grid((3, 1), loc=(2, 0))
    ax1.plot(
        x, residues[0], "o-", lw=2.5, color=colors[0], alpha=0.6, markersize=12
    )
    ax2 = plt.twinx(ax1)
    ax2.plot(
        x, residues[1], "o-", lw=2.5, color=colors[1], alpha=0.6, markersize=12
    )
    plt.subplots_adjust(hspace=0)
    ax1.set_xlim(ax_data.get_xlim())
    ax_data.legend()
    # global fit: last element of self.fittings.
    df = self.fittings[-1]
    lines.append(['%1.2f' % v for v in list(df[out2].loc[key])])
    ax_data.plot(
        xfit,
        self.fz(df.K.loc[key], [df.SA.loc[key], df.SB.loc[key]], xfit),
        'b--',
        lw=0.5,
    )
    ax_data.plot(
        xfit,
        self.fz(df.K.loc[key], [df.SA2.loc[key], df.SB2.loc[key]], xfit),
        'b--',
        lw=0.5,
    )
    ax_data.table(cellText=lines, colLabels=out2, loc='top')
    ax1.grid(0, axis='y')  # switch off horizontal
    ax2.grid(1, axis='both')
    # ## only residues (of the global fit, dashed)
    y = self.labelblocksgroups[0].data[key]
    ax1.plot(
        x,
        (y - self.fz(df.K.loc[key], [df.SA.loc[key], df.SB.loc[key]], x)),
        "--",
        lw=1.5,
        color=colors[0],
    )
    y = self.labelblocksgroups[1].data[key]
    ax2.plot(
        x,
        (y - self.fz(df.K.loc[key], [df.SA2.loc[key], df.SB2.loc[key]], x)),
        "--",
        lw=1.5,
        color=colors[1],
    )
    if key in self.keys_ctrl:
        plt.title(
            "Ctrl: " + df['ctrl'].loc[key] + " [" + key + "]", {'fontsize': 16}
        )
    else:
        plt.title(key, {'fontsize': 16})
    # Close so the figure does not display implicitly; caller keeps the ref.
    plt.close()
    return f
def plot_all_wells(self, path: str) -> None:
    """Plot all wells into a pdf.

    Parameters
    ----------
    path
        Where the pdf file is saved.

    Raises
    ------
    Exception
        When fit is not yet run.
    """
    if not hasattr(self, 'fittings'):
        raise Exception('run fit first')
    # BUGFIX: use PdfPages as a context manager so the file is properly
    # closed (and not leaked/corrupted) even if plotting a well raises.
    with PdfPages(path) as out:
        # Control wells first, then unknowns in sorted order.
        for k in self.fittings[0].loc[self.keys_ctrl].index:
            out.savefig(self.plot_well(k))
        for k in self.fittings[0].loc[self.keys_unk].sort_index().index:
            out.savefig(self.plot_well(k))
def plot_ebar(
    self,
    lb: int,
    x: str = 'K',
    y: str = 'SA',
    xerr: str = 'sK',
    yerr: str = 'sSA',
    xmin: Optional[float] = None,
    ymin: Optional[float] = None,
    xmax: Optional[float] = None,
    title: Optional[str] = None,
) -> plt.figure:
    """Plot SA vs. K with errorbar for the whole plate.

    Parameters
    ----------
    lb
        Labelblock index.
    x, y
        Column names for the two axes (default 'K' and 'SA').
    xerr, yerr
        Column names holding the corresponding uncertainties.
    xmin, ymin, xmax
        Optional value filters applied before plotting.
    title
        To name the plot.

    Returns
    -------
    The figure.

    Raises
    ------
    Exception
        When fit is not yet run.
    """
    if not hasattr(self, 'fittings'):
        raise Exception('run fit first')
    df = self.fittings[lb]
    with plt.style.context('fivethirtyeight'):
        f = plt.figure(figsize=(10, 10))
        if xmin:
            df = df[df[x] > xmin]
        if xmax:
            df = df[df[x] < xmax]
        if ymin:
            df = df[df[y] > ymin]
        try:
            plt.errorbar(
                df[x],
                df[y],
                xerr=df[xerr],
                yerr=df[yerr],
                fmt='o',
                elinewidth=1,
                markersize=10,
                alpha=0.7,
            )
        except ValueError:
            # Error columns may be absent/empty; skip the error bars.
            pass
        if 'ctrl' not in df:
            df['ctrl'] = 0
        # Drop wells whose coordinates are NaN before labelling points.
        df = df[~np.isnan(df[x])]
        df = df[~np.isnan(df[y])]
        for idx, xv, yv, label in zip(df.index, df[x], df[y], df['ctrl']):
            # BUGFIX: use isinstance() rather than `type(l) == str`; also
            # renamed the ambiguous loop variable `l`.
            if isinstance(label, str):
                # Deterministic per-label color derived from its md5 digest.
                color = '#' + hashlib.md5(label.encode()).hexdigest()[2:8]
                plt.text(xv, yv, label, fontsize=13, color=color)
            else:
                plt.text(xv, yv, idx, fontsize=12)
        plt.yscale('log')
        # min(x) can be = NaN
        min_x = min(max([0.01, df[x].min()]), 14)
        min_y = min(max([0.01, df[y].min()]), 5000)
        plt.xlim(0.99 * min_x, 1.01 * df[x].max())
        plt.ylim(0.90 * min_y, 1.10 * df[y].max())
        plt.grid(1, axis='both')
        plt.ylabel(y)
        plt.xlabel(x)
        title = title if title else ''
        title += ' label:' + str(lb)
        plt.title(title, fontsize=15)
    return f
def print_fitting(self, lb: int) -> None:
    """Print fitting parameters for the whole plate."""
    df = self.fittings[lb]
    # Global fittings carry the extra SA2/SB2 columns.
    if 'SA2' in df.keys():
        out = ['K', 'sK', 'SA', 'sSA', 'SB', 'sSB', 'SA2', 'sSA2', 'SB2', 'sSB2']
    else:
        out = ['K', 'sK', 'SA', 'sSA', 'SB', 'sSB']

    def df_print(sub: pd.DataFrame) -> None:
        # K/sK with two decimals, plateau values as integers; every line
        # ends with a trailing space (matching the historical output).
        for idx, row in sub.iterrows():
            fields = ['{:s}'.format(idx)]
            fields.extend('{:7.2f}'.format(row[c]) for c in out[:2])
            fields.extend('{:7.0f}'.format(row[c]) for c in out[2:])
            print(' '.join(fields), end=' \n')

    header = ' ' + ' '.join(['{:>7s}'.format(c) for c in out])
    if len(self.keys_ctrl) > 0:
        res_ctrl = df.loc[self.keys_ctrl]
        print(header)
        # One group of rows per control label.
        for name, group in res_ctrl.groupby('ctrl'):
            print(' ', name)
            df_print(group[out])
    res_unk = df.loc[self.keys_unk]
    print()
    print(header)
    print(' UNK')
    df_print(res_unk.sort_index())
def plot_buffer(self, title: Optional[str] = None) -> plt.figure:
"""Plot buffers (indicated in scheme) for all labelblocksgroups."""
x = self.conc
f, ax = plt.subplots(2, 1, figsize=(10, 10))
for i, lbg in enumerate(self.labelblocksgroups):
buf = copy.deepcopy(lbg.buffer)
bg = buf.pop('bg')
bg_sd = buf.pop('bg_sd')
rowlabel = ['Temp']
lines = [['{:6.1f}'.format(x) for x in lbg.temperatures]]
colors = plt.cm.Set3(np.linspace(0, 1, len(buf) + 1))
for j, (k, v) in enumerate(buf.items(), start=1):
rowlabel.append(k)
lines.append(['{:6.1f}'.format(x) for x in v])
ax[i].plot(x, v, 'o-', alpha=0.8, lw=2, markersize=3, color=colors[j])
ax[i].errorbar(
x,
bg,
yerr=bg_sd,
fmt='o-.',
markersize=15,
lw=1,
elinewidth=3,
alpha=0.8,
color='grey',
label='label' + str(i),
)
plt.subplots_adjust(hspace=0.0)
ax[i].legend(fontsize=22)
if x[0] > x[-1]: # reverse
for line in lines:
line.reverse()
ax[i].table(
cellText=lines,
rowLabels=rowlabel,
loc='top',
rowColours=colors,
alpha=0.4,
)
ax[i].set_xlim(min(x) * 0.96, max(x) * 1.02)
ax[i].set_yticks(ax[i].get_yticks()[:-1])
ax[0].set_yticks(ax[0].get_yticks()[1:])
| |
layer (nodes, anchors etc.)
def ObjectInLayer_selected(self):
    """Return True when this object is part of its layer's selection.

    Objects without a usable layer/selection are simply reported as not
    selected rather than raising.
    """
    try:
        return self in self.layer.selection
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; catch only ordinary exceptions.
        return False
def SetObjectInLayer_selected(self, state):
    """Add this object to, or remove it from, its layer's selection."""
    selection = self.layer.selection
    currently_selected = self in selection
    if state and not currently_selected:
        # Select: append only when not already present.
        selection.append(self)
    elif not state and currently_selected:
        # Deselect: remove only when actually present.
        selection.remove(self)
##################################################################################
#
#
#
# GSFont
#
#
#
##################################################################################
# These no-op defs are navigation markers so that editors listing function
# definitions show a visual "GSFont" section divider; they are never called.
def ______________(): pass
def ____GSFont____(): pass
def ______________(): pass
'''
:mod:`GSFont`
===============================================================================
Implementation of the font object. This object is host to the :class:`masters <GSFontMaster>` used for interpolation. Even when no interpolation is involved, for the sake of object model consistency there will still be one master and one instance representing a single font.
Also, the :class:`glyphs <GSGlyph>` are attached to the Font object right here, not one level down to the masters. The different masters’ glyphs are available as :class:`layers <GSLayer>` attached to the glyph objects which are attached here.
.. class:: GSFont()
Properties
.. autosummary::
parent
masters
axes
properties
stems
instances
glyphs
classes
features
featurePrefixes
copyright
copyrights
license
licenses
designer
designers
designerURL
manufacturer
manufacturers
manufacturerURL
familyNames
trademark
trademarks
sampleText
sampleTexts
description
descriptions
compatibleFullName
compatibleFullNames
versionMajor
versionMinor
date
familyName
upm
note
kerning
userData
grid
gridSubDivisions
gridLength
keyboardIncrement
keyboardIncrementBig
keyboardIncrementHuge
snapToObjects
disablesNiceNames
customParameters
selection
selectedLayers
selectedFontMaster
masterIndex
currentText
tabs
fontView
currentTab
filepath
tool
tools
appVersion
Functions
.. autosummary::
save()
close()
show()
disableUpdateInterface()
enableUpdateInterface()
kerningForPair()
setKerningForPair()
removeKerningForPair()
newTab()
updateFeatures()
compileFeatures()
**Properties**
'''
def Font__new__(typ, *args, **kwargs):
    """Constructor hook for GSFont.

    When called with a path string, load the font from disk: a .glyphs
    file is opened directly via GSFont, any other recognized file type is
    routed through a GSDocument. With no path a fresh native GSFont is
    returned.
    """
    if len(args) > 0 and isString(args[0]):
        path = args[0]
        URL = NSURL.fileURLWithPath_(path)
        if path.endswith(".glyphs"):
            result = GSFont.alloc().initWithURL_error_(URL, None)
            # PyObjC may return an (object, error) tuple for the
            # by-reference error parameter; unwrap the object.
            if isinstance(result, tuple):
                result = result[0]
            return result
        typeName = NSWorkspace.sharedWorkspace().typeOfFile_error_(path, None)[0]
        if typeName is not None:
            Doc = GSDocument.alloc().initWithContentsOfURL_ofType_error_(URL, typeName, None)
            if Doc is not None:
                return Doc[0].font
        # NOTE(review): the "%s" here is never interpolated — the path is
        # passed as a second Exception argument, not formatted in.
        raise Exception("Unable to open font: %s", path)
    return GSFont.alloc().init()
GSFont.__new__ = staticmethod(Font__new__)
def Font__init__(self, path=None):
    # Intentionally empty: all construction (including loading from
    # *path*) happens in Font__new__, which may return an already
    # initialized native object.
    pass
GSFont.__init__ = python_method(Font__init__)
def Font__repr__(self):
    """Debug representation: family name, version and object counts."""
    return '<GSFont "{}" v{}.{} with {} masters and {} instances>'.format(
        self.familyName,
        self.versionMajor,
        self.versionMinor,
        len(self.masters),
        len(self.instances),
    )
GSFont.__repr__ = python_method(Font__repr__)
def Font__copy__(self, memo=None):
    """copy/deepcopy hook: delegate to the native copy() implementation.

    The *memo* argument is accepted for deepcopy compatibility but unused.
    """
    font = self.copy()
    # Preserve the owning document so the copy behaves like the original.
    font.setParent_(self.parent)
    return font
GSFont.mutableCopyWithZone_ = Font__copy__
GSFont.__copy__ = Font__copy__
GSFont.__deepcopy__ = Font__copy__
def GSFont__contains__(self, key):
    """Membership testing (`x in font`) is unsupported; always raises."""
    message = "Font can't access values like this"
    raise NotImplementedError(message)
GSFont.__contains__ = GSFont__contains__
GSFont.parent = property(lambda self: self.pyobjc_instanceMethods.parent())
'''
.. attribute:: parent
Returns the internal NSDocument document. Read-only.
:type: NSDocument
'''
GSFont.masters = property(lambda self: FontFontMasterProxy(self),
lambda self, value: FontFontMasterProxy(self).setter(value))
'''
.. attribute:: masters
Collection of :class:`GSFontMaster` objects.
:type: list
'''
GSInterpolationFontProxy.masters = property(lambda self: FontFontMasterProxy(self))
GSFont.instances = property(lambda self: FontInstancesProxy(self),
lambda self, value: FontInstancesProxy(self).setter(value))
'''
.. attribute:: instances
Collection of :class:`GSInstance` objects.
:type: list
'''
GSProjectDocument.instances = property(lambda self: FontInstancesProxy(self),
lambda self, value: FontInstancesProxy(self).setter(value))
# TODO: This needs to be updated to reflect the change to a dedicated GSAxis class (elsewhere too?!)
GSFont.axes = property(lambda self: FontAxesProxy(self),
lambda self, value: FontAxesProxy(self).setter(value))
'''
.. attribute:: axes
Collection of :class:`GSAxis`:
:type: list
.. versionadded:: 2.5
.. versionchanged:: 3
'''
GSFont.properties = property(lambda self: self.mutableArrayValueForKey_("properties"),
lambda self, values: self.setProperties_(values))
'''
.. attribute:: properties
Holds the fonts info properties. Can be instances of :class:`GSFontInfoValueSingle` and :class:`GSFontInfoValueLocalized`.
The localised values use language tags defined in the middle column of `Language System Tags table`: <https://docs.microsoft.com/en-us/typography/opentype/spec/languagetags>.
To find specific values, use font.propertyForName_(name) or font.propertyForName_languageTag_(name, languageTag).
:type: list
.. versionadded:: 3
'''
GSFont.metrics = property(lambda self: self.pyobjc_instanceMethods.metrics())
'''
.. attribute:: metrics
a list of all :class:`GSMetric` objects.
:type: list
'''
GSFont.stems = property(lambda self: FontStemsProxy(self),
lambda self, value: FontStemsProxy(self).setter(value))
'''
.. attribute:: stems
The stems. A list of :class:`GSMetric` objects. For each metric, there is a metricsValue in the masters, linked by the `id`.
:type: list, dict
.. code-block:: python
font.stems[0].horizontal = False
'''
def __GSFont_getitem__(self, value):
	# font[...] subscripting delegates to Glyphs' native glyphForName_()
	# (presumably accepts the same keys as font.glyphs — confirm against app API).
	return self.glyphForName_(value)
GSFont.__getitem__ = python_method(__GSFont_getitem__)
GSFont.glyphs = property(lambda self: FontGlyphsProxy(self),
lambda self, value: FontGlyphsProxy(self).setter(value))
GSInterpolationFontProxy.glyphs = property(lambda self: FontGlyphsProxy(self),
lambda self, value: FontGlyphsProxy(self).setter(value))
'''
.. attribute:: glyphs
Collection of :class:`GSGlyph` objects. Returns a list, but you may also call glyphs using index or glyph name or character as key.
:type: list, dict
.. code-block:: python
# Access all glyphs
for glyph in font.glyphs:
print(glyph)
<GSGlyph "A" with 4 layers>
<GSGlyph "B" with 4 layers>
<GSGlyph "C" with 4 layers>
...
# Access one glyph
print(font.glyphs['A'])
<GSGlyph "A" with 4 layers>
# Access a glyph by character (new in v2.4.1)
print(font.glyphs[u'Ư'])
<GSGlyph "Uhorn" with 4 layers>
# Access a glyph by unicode (new in v2.4.1)
print(font.glyphs['01AF'])
<GSGlyph "Uhorn" with 4 layers>
# Access a glyph by index
print(font.glyphs[145])
<GSGlyph "Uhorn" with 4 layers>
# Add a glyph
font.glyphs.append(GSGlyph('adieresis'))
# Duplicate a glyph under a different name
newGlyph = font.glyphs['A'].copy()
newGlyph.name = 'A.alt'
font.glyphs.append(newGlyph)
# Delete a glyph
del(font.glyphs['A.alt'])
'''
GSFont.classes = property(lambda self: FontClassesProxy(self),
lambda self, value: FontClassesProxy(self).setter(value))
'''
.. attribute:: classes
Collection of :class:`GSClass` objects, representing OpenType glyph classes.
:type: list
.. code-block:: python
# add a class
font.classes.append(GSClass('uppercaseLetters', 'A B C D E'))
# access all classes
for class in font.classes:
print(class.name)
# access one class
print(font.classes['uppercaseLetters'].code)
# delete a class
del(font.classes['uppercaseLetters'])
'''
GSFont.features = property(lambda self: FontFeaturesProxy(self),
lambda self, value: FontFeaturesProxy(self).setter(value))
'''
.. attribute:: features
Collection of :class:`GSFeature` objects, representing OpenType features.
:type: list
.. code-block:: python
# add a feature
font.features.append(GSFeature('liga', 'sub f i by fi;'))
# access all features
for feature in font.features:
print(feature.code)
# access one feature
print(font.features['liga'].code)
# delete a feature
del(font.features['liga'])
'''
GSFont.featurePrefixes = property(lambda self: FontFeaturePrefixesProxy(self),
lambda self, value: FontFeaturePrefixesProxy(self).setter(value))
'''
.. attribute:: featurePrefixes
Collection of :class:`GSFeaturePrefix` objects, containing stuff that needs to be outside of the OpenType features.
:type: list
.. code-block:: python
# add a prefix
font.featurePrefixes.append(GSFeaturePrefix('LanguageSystems', 'languagesystem DFLT dflt;'))
# access all prefixes
for prefix in font.featurePrefixes:
print(prefix.code)
# access one prefix
print(font.featurePrefixes['LanguageSystems'].code)
# delete
del(font.featurePrefixes['LanguageSystems'])
'''
GSFont.copyright = property(lambda self: self.defaultPropertyForName_("copyrights"),
lambda self, value: self.setProperty_value_languageTag_("copyrights", value, None))
'''
.. attribute:: copyright
This accesses the default value only. The localisations can be accessed by :attr:`GSFont.properties`
:type: str
'''
GSFont.copyrights = property(lambda self: FontInfoPropertiesProxy(self, "copyrights"))
'''
.. attribute:: copyrights
This accesses all localised copyright values.
For details :attr:`GSFont.properties`
:type: dict
.. code-block:: python
Font.copyrights["ENG"] = "All rights reserved"
.. versionadded:: 3.0.3
'''
GSFont.license = property(lambda self: self.defaultPropertyForName_("licenses"),
lambda self, value: self.setProperty_value_languageTag_("licenses", value, None))
'''
.. attribute:: license
This accesses the default value only. The localisations can be accessed by :attr:`GSFont.properties`
:type: str
.. versionadded:: 3.0.3
'''
GSFont.licenses = property(lambda self: FontInfoPropertiesProxy(self, "licenses"))
'''
.. attribute:: licenses
This accesses all localised license values.
For details :attr:`GSFont.properties`
:type: dict
.. code-block:: python
Font.licenses["ENG"] = "This font may be installed on all of your machines and printers, but you may not sell or give these fonts to anyone else."
.. versionadded:: 3.0.3
'''
GSFont.compatibleFullName = property(lambda self: self.defaultPropertyForName_("compatibleFullNames"),
lambda self, value: self.setProperty_value_languageTag_("compatibleFullNames", value, None))
'''
.. attribute:: compatibleFullName
This accesses the default value only. The localisations can be accessed by :attr:`GSFont.properties`
:type: str
.. versionadded:: 3.0.3
'''
GSFont.compatibleFullNames = property(lambda self: FontInfoPropertiesProxy(self, "compatibleFullNames"))
'''
.. attribute:: compatibleFullNames
This accesses all localised designer values.
For details :attr:`GSFont.properties`
:type: dict
.. code-block:: python
Font.compatibleFullNames["ENG"] = "MyFont Condensed Bold"
.. versionadded:: 3.0.3
'''
GSFont.sampleText = property(lambda self: self.defaultPropertyForName_("sampleTexts"),
lambda self, value: self.setProperty_value_languageTag_("sampleTexts", value, None))
'''
.. attribute:: sampleText
This accesses the default value only. The localisations can be accessed by :attr:`GSFont.properties`
:type: str
.. versionadded:: 3.0.3
'''
GSFont.sampleTexts = property(lambda self: FontInfoPropertiesProxy(self, "sampleTexts"))
'''
.. attribute:: sampleTexts
This accesses all localised designer values.
For details :attr:`GSFont.properties`
:type: dict
.. code-block:: python
Font.sampleTexts["ENG"] = "This is my sample text"
.. versionadded:: 3.0.3
'''
GSFont.description = property(lambda self: self.defaultPropertyForName_("descriptions"),
lambda self, value: self.setProperty_value_languageTag_("descriptions", value, None))
'''
.. attribute:: description
This accesses the default value only. The localisations can be accessed by :attr:`GSFont.properties`
:type: str
.. versionadded:: 3.0.3
'''
GSFont.descriptions = property(lambda self: FontInfoPropertiesProxy(self, "descriptions"))
'''
.. attribute:: descriptions
This accesses all localised designer values.
For details :attr:`GSFont.properties`
:type: dict
.. code-block:: python
Font.descriptions["ENG"] = "This is my description"
.. versionadded:: 3.0.3
'''
GSFont.designer = property(lambda self: self.defaultPropertyForName_("designers"),
lambda self, value: self.setProperty_value_languageTag_("designers", value, None))
'''
.. attribute:: designer
This accesses the default value only. The localisations can be accessed by :attr:`GSFont.properties`
:type: str
'''
GSFont.designers = property(lambda self: FontInfoPropertiesProxy(self, "designers"))
'''
.. attribute:: designers
This accesses all localised designer values.
For details :attr:`GSFont.properties`
:type: dict
.. code-block:: python
Font.designers["ENG"] = "<NAME>"
.. versionadded:: 3.0.3
'''
GSFont.trademark = property(lambda self: self.defaultPropertyForName_("trademarks"),
lambda self, value: self.setProperty_value_languageTag_("trademarks", value, None))
'''
.. attribute:: trademark
This accesses the default value only. The localisations can be accessed by :attr:`GSFont.properties`
:type: str
.. versionadded:: 3.0.3
'''
GSFont.trademarks = property(lambda self: FontInfoPropertiesProxy(self, "trademarks"))
'''
.. attribute:: trademarks
This accesses all localised trademark values.
For details :attr:`GSFont.properties`
:type: dict
.. code-block:: python
Font.trademarks["ENG"] = "ThisFont is a trademark by MyFoundry.com"
.. versionadded:: 3.0.3
'''
GSFont.designerURL = property(lambda self: self.defaultPropertyForName_("designerURL"),
lambda self, value: self.setProperty_value_languageTag_("designerURL", value, None))
'''
.. attribute:: designerURL
:type: str
'''
GSFont.manufacturer = property(lambda self: self.defaultPropertyForName_("manufacturers"),
lambda self, value: self.setProperty_value_languageTag_("manufacturers", value, None))
'''
.. attribute:: manufacturer
This accesses the default value only. The localisations can be accessed by :attr:`GSFont.properties`
:type: str
'''
GSFont.manufacturers = property(lambda self: FontInfoPropertiesProxy(self, "manufacturers"))
'''
.. attribute:: manufacturers
This accesses all localised manufacturer values.
For details :attr:`GSFont.properties`
:type: dict
.. code-block:: python
Font.manufacturers["ENG"] = "My English Corporation"
.. versionadded:: 3.0.3
'''
GSFont.manufacturerURL = property(lambda self: self.defaultPropertyForName_("manufacturerURL"),
lambda self, value: self.setProperty_value_languageTag_("manufacturerURL", value, None))
'''
.. attribute:: manufacturerURL
:type: str
'''
GSFont.versionMajor = property(lambda self: self.pyobjc_instanceMethods.versionMajor(),
lambda self, value: self.setVersionMajor_(value))
'''
.. attribute:: versionMajor
:type: int
'''
GSFont.versionMinor = property(lambda self: self.pyobjc_instanceMethods.versionMinor(),
lambda self, value: self.setVersionMinor_(value))
'''
.. attribute:: versionMinor
:type: int
'''
def __get_date__(self):
	# Bridge NSDate -> datetime via the Unix-epoch timestamp (local time).
	return datetime.datetime.fromtimestamp(self.pyobjc_instanceMethods.date().timeIntervalSince1970())
def __set_date__(self, date):
	# Accepts datetime, NSDate, or a numeric Unix timestamp.
	if isinstance(date, datetime.datetime):
		# datetime.timestamp() keeps sub-second precision and handles tz-aware
		# datetimes; the previous time.mktime(date.timetuple()) dropped
		# microseconds and misinterpreted aware datetimes as local time.
		self.setDate_(NSDate.alloc().initWithTimeIntervalSince1970_(date.timestamp()))
	elif isinstance(date, (int, float)):
		self.setDate_(NSDate.alloc().initWithTimeIntervalSince1970_(date))
	elif isinstance(date, NSDate):
		self.setDate_(date)
	else:
		raise TypeError("date must be a datetime object, NSDate object, int or float, not %s" % type(date).__name__)
GSFont.date = property(lambda self: __get_date__(self),
	lambda self, value: __set_date__(self, value))
'''
.. attribute:: date
:type: datetime.datetime
.. code-block:: python
print(font.date)
2015-06-08 09:39:05
# set date to now
font.date = datetime.datetime.now()
# using NSDate
font.date = NSDate.date()
# or in seconds since Epoch
font.date = time.time()
'''
GSFont.familyName = property(lambda self: self.pyobjc_instanceMethods.fontName(),
lambda self, value: self.setFontName_(value))
GSFont.fontName = property(lambda self: self.pyobjc_instanceMethods.fontName(),
lambda self, value: self.setFontName_(value))
'''
.. attribute:: familyName
Family name of the typeface.
:type: str
'''
GSFont.familyNames = property(lambda self: FontInfoPropertiesProxy(self, "familyNames"))
'''
.. attribute:: familyNames
This accesses all localised family name values.
For details :attr:`GSFont.properties`
:type: dict
.. code-block:: python
Font.familyNames["ENG"] = "MyFamilyName"
.. versionadded:: 3.0.3
'''
GSFont.upm = property(lambda self: self.unitsPerEm(),
lambda self, value: self.setUnitsPerEm_(value))
'''
.. attribute:: upm
Units per Em
:type: int
'''
GSFont.note = property(lambda self: self.pyobjc_instanceMethods.note(),
lambda self, value: self.setNote_(value))
'''
.. attribute:: note
:type: str
'''
GSFont.kerning = property(lambda self: self.kerningLTR(),
lambda self, value: self.setKerningLTR_(value))
'''
.. attribute:: kerning
Kerning for LTR writing
A multi-level dictionary. The first level's key is the :attr:`GSFontMaster.id` (each master has its own kerning), the second level's key is the :attr:`GSGlyph.id` or class id (@MMK_L_XX) of the first glyph, the third level's key is a glyph id or class id (@MMK_R_XX) for the | |
worst
and 1 the best score). defaults to false
:returns:
mean_iou : float, the mean intersection over union of the targets and preds array
:example:
>>> from fastdist import fastdist
>>> import numpy as np
>>> true = np.random.RandomState(seed=0).randint(2, size=10000)
>>> pred = np.random.RandomState(seed=1).randint(2, size=10000)
>>> fastdist.mean_iou(true, pred)
0.49030739883826424
by saskra
"""
w = init_w(w, len(targets))
if cm is None:
cm = confusion_matrix(targets, preds, w=w)
n = cm.shape[0]
diag, rows_sums, columns_sums = np.zeros(n), np.zeros(n), np.zeros(n)
for i in range(n):
for j in range(n):
if i == j:
diag[i] = cm[i][j] # sum of the diagonal = true results
else:
rows_sums[i] += cm[i][j] # rest of the row = false negative results
columns_sums[j] += cm[i][j] # rest of the column = false positive results
class_div = diag / (columns_sums + rows_sums + diag) # intersection over union (Jaccard) per class
div_mean = 0
for i in range(n):
div_mean += class_div[i]
div_mean /= n # mean intersection over union
if adjusted:
div_mean -= 1 / n
div_mean /= 1 - 1 / n
return div_mean
@jit(nopython=True, fastmath=True)
def brier_score_loss(targets, probs, w=None):
    """
    :purpose:
    Calculates the Brier score loss between an array of discrete targets and an array of probabilities

    :params:
    targets : discrete input array of shape (n,)
    probs : input array of predicted probabilities for sample of shape (n,)
    w : weights at each index of true and pred. array of shape (n,)
        if no w is set, it is initialized as an array of ones
        such that it will have no impact on the output

    :returns:
    brier_score_loss : float, the weighted mean squared difference between probs and targets

    :example:
    >>> from fastdist import fastdist
    >>> import numpy as np
    >>> true = np.random.RandomState(seed=0).randint(2, size=10000)
    >>> prob = np.random.RandomState(seed=0).uniform(size=10000)
    >>> fastdist.brier_score_loss(true, prob)
    0.5097
    """
    w = init_w(w, len(targets))
    # Weighted mean of squared (probability - target) differences.
    weighted_sq_sum = 0
    weight_sum = 0
    for i in range(len(targets)):
        diff = probs[i] - targets[i]
        weighted_sq_sum += diff * diff * w[i]
        weight_sum += w[i]
    return weighted_sq_sum / weight_sum
@jit(nopython=True, fastmath=True)
def precision_score(targets, preds, cm=None, w=None, average='binary'):
    """
    :purpose:
    Calculates the precision score between a discrete target and pred array

    :params:
    targets, preds : discrete input arrays, both of shape (n,)
    cm : if you have previously calculated a confusion matrix, pass it here to save the computation.
         set as None, which makes the function calculate the confusion matrix.
         note that for your specific average (i.e., micro, macro, none, or binary), you must compute the confusion
         matrix correctly corresponding to the one you would like to use. so, for "macro" or "none", the cm
         must be computed with normalize="pred"
    w : weights at each index of true and pred. array of shape (n,)
        if no w is set, it is initialized as an array of ones
        such that it will have no impact on the output
    average : str, either "micro", "macro", "none", or "binary".
        if "micro", computes precision globally
        if "macro", take the mean of precision for each class (unweighted)
        if "none", return a list of the precision for each class
        if "binary", return precision in a binary classification problem
        defaults to "binary", so for multi-class problems, you must change this

    :returns:
    precision_score : np.array, the precision score of the targets and preds array

    :raises:
    ValueError : if average is not one of "micro", "macro", "none", or "binary"
        (previously an unknown average silently returned None)

    :example:
    >>> from fastdist import fastdist
    >>> import numpy as np
    >>> true = np.random.RandomState(seed=0).randint(2, size=10000)
    >>> pred = np.random.RandomState(seed=1).randint(2, size=10000)
    >>> fastdist.precision_score(true, pred)
    array([0.49879856])
    """
    w = init_w(w, len(targets))
    if average == 'binary':
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w)
        # precision = TP / (TP + FP); rows are true labels, columns predictions
        return np.array([cm[1][1] / (cm[1][1] + cm[0][1])])
    if average == 'micro':
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w)
        # Global precision: diagonal mass over the total mass of the matrix.
        # (The original accumulated the grand total into every slot of an
        # n-sized "row_sums" array via a scalar broadcast; a single scalar
        # total is equivalent and O(n^2) instead of O(n^3) scalar work.)
        n = cm.shape[0]
        total = 0.
        for i in range(n):
            for j in range(n):
                total += cm[i][j]
        score = 0.
        for i in range(n):
            score += cm[i][i] / total
        return np.array([score])
    if average == 'macro':
        # With normalize='pred' each column sums to 1, so total == n and
        # cm[i][i] / total * n recovers the per-class precision.
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w, normalize='pred')
        n = cm.shape[0]
        total = 0.
        for i in range(n):
            for j in range(n):
                total += cm[i][j]
        score = 0.
        for i in range(n):
            score += cm[i][i] / total * n
        return np.array([score / n])
    if average == 'none':
        # Same per-class values as "macro", but returned unaveraged.
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w, normalize='pred')
        n = cm.shape[0]
        total = 0.
        for i in range(n):
            for j in range(n):
                total += cm[i][j]
        per_class = np.zeros(n)
        for i in range(n):
            per_class[i] = cm[i][i] / total * n
        return per_class
    raise ValueError("average must be one of 'micro', 'macro', 'none' or 'binary'")
@jit(nopython=True, fastmath=True)
def recall_score(targets, preds, cm=None, w=None, average='binary'):
    """
    :purpose:
    Calculates the recall score between a discrete target and pred array

    :params:
    targets, preds : discrete input arrays, both of shape (n,)
    cm : if you have previously calculated a confusion matrix, pass it here to save the computation.
         set as None, which makes the function calculate the confusion matrix.
         note that for your specific average (i.e., micro, macro, none, or binary), you must compute the confusion
         matrix correctly corresponding to the one you would like to use. so, for "macro" or "none", the cm
         must be computed with normalize="true"
    w : weights at each index of true and pred. array of shape (n,)
        if no w is set, it is initialized as an array of ones
        such that it will have no impact on the output
    average : str, either "micro", "macro", "none", or "binary".
        if "micro", computes recall globally
        if "macro", take the mean of recall for each class (unweighted)
        if "none", return a list of the recall for each class
        if "binary", return recall in a binary classification problem
        defaults to "binary", so for multi-class problems, you must change this

    :returns:
    recall_score : np.array, the recall score of the targets and preds array

    :raises:
    ValueError : if average is not one of "micro", "macro", "none", or "binary"
        (previously an unknown average silently returned None)

    :example:
    >>> from fastdist import fastdist
    >>> import numpy as np
    >>> true = np.random.RandomState(seed=0).randint(2, size=10000)
    >>> pred = np.random.RandomState(seed=1).randint(2, size=10000)
    >>> fastdist.recall_score(true, pred)
    array([0.48987217])
    """
    w = init_w(w, len(targets))
    if average == 'binary':
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w)
        # recall = TP / (TP + FN); rows are true labels, columns predictions
        return np.array([cm[1][1] / (cm[1][1] + cm[1][0])])
    if average == 'micro':
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w)
        # Global recall: diagonal mass over the total mass of the matrix
        # (scalar total replaces the original's accidental array broadcast).
        n = cm.shape[0]
        total = 0.
        for i in range(n):
            for j in range(n):
                total += cm[i][j]
        score = 0.
        for i in range(n):
            score += cm[i][i] / total
        return np.array([score])
    if average == 'macro':
        # With normalize='true' each row sums to 1, so total == n and
        # cm[i][i] / total * n recovers the per-class recall.
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w, normalize='true')
        n = cm.shape[0]
        total = 0.
        for i in range(n):
            for j in range(n):
                total += cm[i][j]
        score = 0.
        for i in range(n):
            score += cm[i][i] / total * n
        return np.array([score / n])
    if average == 'none':
        # Same per-class values as "macro", but returned unaveraged.
        if cm is None:
            cm = confusion_matrix(targets, preds, w=w, normalize='true')
        n = cm.shape[0]
        total = 0.
        for i in range(n):
            for j in range(n):
                total += cm[i][j]
        per_class = np.zeros(n)
        for i in range(n):
            per_class[i] = cm[i][i] / total * n
        return per_class
    raise ValueError("average must be one of 'micro', 'macro', 'none' or 'binary'")
@jit(nopython=True, fastmath=True)
def f1_score(targets, preds, cm=None, w=None, average='binary'):
"""
:purpose:
Calculates the F1 score between a discrete target and pred array
:params:
targets, preds : discrete input arrays, both of shape (n,)
w : weights at each index of true and pred. array of shape (n,)
if no w is set, it is initialized as an array of ones
such that it will have no impact on the output
average : str, either "micro", "macro", "none", or "binary".
if "micro", computes F1 globally
if "macro", take the mean of F1 for each class (unweighted)
if "none", return a list of the F1 for each class
if "binary", return F1 in a binary classification problem
defaults to "binary", so for multi-class problems, you must change this
:returns:
f1_score : np.array, the F1 score of the targets and preds array
:example:
>>> from fastdist import fastdist
>>> import numpy | |
<filename>sqlitely/importexport.py
# -*- coding: utf-8 -*-
"""
Functionality for exporting SQLite data to external files.
------------------------------------------------------------------------------
This file is part of SQLitely - SQLite database tool.
Released under the MIT License.
@author <NAME>
@created 21.08.2019
@modified 03.07.2020
------------------------------------------------------------------------------
"""
import collections
import csv
import datetime
import functools
import itertools
import json
import logging
import os
import re
# ImageFont for calculating column widths in Excel export, not required.
try: from PIL import ImageFont
except ImportError: ImageFont = None
try: import openpyxl
except ImportError: openpyxl = None
try: import xlrd
except ImportError: xlrd = None
try: import xlsxwriter
except ImportError: xlsxwriter = None
from . lib import util
from . lib.vendor import step
from . import conf
from . import grammar
from . import templates
try: # Used in measuring text extent for Excel column auto-width
    FONT_XLSX = ImageFont.truetype(conf.FontXlsxFile, 15)
    FONT_XLSX_BOLD = ImageFont.truetype(conf.FontXlsxBoldFile, 15)
except IOError: # Fall back to PIL default font if font files not on disk
    FONT_XLSX = FONT_XLSX_BOLD = ImageFont.load_default()
except Exception: # Fall back to a simple mono-spaced calculation if no PIL
    FONT_MONO = type('', (), {"getsize": lambda self, s: (8*len(s), 12)})()
    FONT_XLSX = FONT_XLSX_BOLD = FONT_MONO
# NOTE(review): the bare triple-quoted strings below PRECEDE the constants they
# describe; Sphinx-style attribute docs normally follow the assignment — confirm intended.
"""Wildcards for import file dialog."""
EXCEL_EXTS = (["xls"] if xlrd else []) + (["xlsx"] if openpyxl else [])
IMPORT_WILDCARD = "All supported formats (%s)|%s|%s%s"\
                  "CSV spreadsheet (*.csv)|*.csv|JSON data (*.json)|*.json" % (
    ";".join("*." + x for x in EXCEL_EXTS + ["csv"] + ["json"]),
    ";".join("*." + x for x in EXCEL_EXTS + ["csv"] + ["json"]),
    "All spreadsheets ({0})|{0}|".format(";".join("*." + x for x in EXCEL_EXTS + ["csv"])),
    "Excel workbook ({0})|{0}|".format(";".join("*." + x for x in EXCEL_EXTS))
    if EXCEL_EXTS else ""
)
"""FileDialog wildcard strings, matching extensions lists and default names."""
XLSX_WILDCARD = "Excel workbook (*.xlsx)|*.xlsx|" if xlsxwriter else ""
"""Wildcards for export file dialog."""
EXPORT_WILDCARD = ("CSV spreadsheet (*.csv)|*.csv|%s"
                   "HTML document (*.html)|*.html|"
                   "JSON data (*.json)|*.json|"
                   "SQL INSERT statements (*.sql)|*.sql|"
                   "Text document (*.txt)|*.txt" % XLSX_WILDCARD)
# Export extensions depend on whether xlsxwriter is importable.
EXPORT_EXTS = ["csv", "xlsx", "html", "json", "sql", "txt"] if xlsxwriter \
              else ["csv", "html", "json", "sql", "txt"]
"""Maximum file size to do full row count for."""
MAX_IMPORT_FILESIZE_FOR_COUNT = 10 * 1e6
logger = logging.getLogger(__name__)
def export_data(make_iterable, filename, title, db, columns,
                query="", category="", name="", progress=None):
    """
    Exports database data to file.

    @param   make_iterable  function returning iterable sequence yielding rows
    @param   filename       full path and filename of resulting file, file extension
                            .html|.csv|.sql|.xslx determines file format
    @param   title          title used in HTML and spreadsheet
    @param   db             Database instance
    @param   columns        iterable columns, as [name, ] or [{"name": name}, ]
    @param   query          the SQL query producing the data, if any
    @param   category       category producing the data, if any, "table" or "view"
    @param   name           name of the table or view producing the data, if any
    @param   progress       callback(count) to report progress,
                            returning false if export should cancel
    @return                 truthy on success; falsy if cancelled (the partial
                            output file is deleted in that case)
    """
    result = False
    f, cursor = None, None
    # Output format is chosen purely by filename extension.
    is_csv = filename.lower().endswith(".csv")
    is_html = filename.lower().endswith(".html")
    is_json = filename.lower().endswith(".json")
    is_sql = filename.lower().endswith(".sql")
    is_txt = filename.lower().endswith(".txt")
    is_xlsx = filename.lower().endswith(".xlsx")
    # Normalize columns to [{"name": name}, ] form.
    columns = [{"name": c} if isinstance(c, basestring) else c for c in columns]
    colnames = [c["name"] for c in columns]
    tmpfile, tmpname = None, None # Temporary file for exported rows
    try:
        with open(filename, "wb") as f:
            # Guard the table/view against concurrent changes for the duration.
            if category and name: db.lock(category, name, make_iterable, label="export")
            count = 0
            cursor = make_iterable()
            if is_csv or is_xlsx:
                if is_csv:
                    # ";" delimiter: the default "," is not actually used by Excel.
                    # Passed as an override to csv.writer instead of assigning to
                    # csv.excel, which mutated the shared dialect class process-wide.
                    writer = csv.writer(f, csv.excel, delimiter=";")
                    if query:
                        flat = query.replace("\r", " ").replace("\n", " ")
                        query = flat.encode("latin1", "replace")
                    header = [c.encode("latin1", "replace") for c in colnames]
                else:
                    props = {"title": title, "comments": templates.export_comment()}
                    writer = xlsx_writer(filename, name or "SQL Query", props=props)
                    writer.set_header(True)
                    header = colnames
                if query:
                    # First row carries the producing SQL (bold, unwrapped in Excel).
                    a = [[query]] + (["bold", 0, False] if is_xlsx else [])
                    writer.writerow(*a)
                writer.writerow(*([header, "bold"] if is_xlsx else [header]))
                writer.set_header(False) if is_xlsx else 0
                for i, row in enumerate(cursor, 1):
                    values = []
                    for col in colnames:
                        val = "" if row[col] is None else row[col]
                        if is_csv:
                            val = val if isinstance(val, unicode) else str(val)
                            val = val.encode("latin1", "replace")
                        values.append(val)
                    writer.writerow(values)
                    count = i
                    # Poll progress callback every 100 rows; cancel on falsy return.
                    if not i % 100 and progress and not progress(count=i):
                        break # for i, row
                if is_xlsx: writer.close()
            else:
                # Template-rendered formats: HTML, JSON, SQL, TXT.
                namespace = {
                    "db_filename": db.name,
                    "title":       title,
                    "columns":     columns,
                    "rows":        cursor,
                    "row_count":   0,
                    "sql":         query,
                    "category":    category,
                    "name":        name,
                    "progress":    progress,
                }
                namespace["namespace"] = namespace # To update row_count
                if is_txt: # Run through rows once, to populate text-justify options
                    widths = {c: len(util.unprint(c)) for c in colnames}
                    justs  = {c: True for c in colnames}
                    try:
                        cursor2 = make_iterable()
                        for i, row in enumerate(cursor2):
                            for col in colnames:
                                v = row[col]
                                # Numeric columns get right-justified.
                                if isinstance(v, (int, long, float)): justs[col] = False
                                v = "" if v is None \
                                    else v if isinstance(v, basestring) else str(v)
                                v = templates.SAFEBYTE_RGX.sub(templates.SAFEBYTE_REPL, unicode(v))
                                widths[col] = max(widths[col], len(v))
                            # Early return on cancel: finally-clause deletes output.
                            if not i % 100 and progress and not progress(): return
                    finally: util.try_until(lambda: cursor2.close())
                    namespace["columnwidths"] = widths # {col: char length}
                    namespace["columnjusts"]  = justs  # {col: True if ljust}
                if progress and not progress(): return
                # Write out data to temporary file first, to populate row count.
                tmpname = util.unique_path("%s.rows" % filename)
                tmpfile = open(tmpname, "wb+")
                template = step.Template(templates.DATA_ROWS_HTML if is_html else
                           templates.DATA_ROWS_SQL if is_sql else templates.DATA_ROWS_JSON
                           if is_json else templates.DATA_ROWS_TXT,
                           strip=False, escape=is_html)
                template.stream(tmpfile, namespace)
                if progress and not progress(): return
                if is_sql and "table" != category:
                    # Add CREATE statement for saving view AS table
                    meta = {"__type__": grammar.SQL.CREATE_TABLE, "name": name,
                            "columns": columns}
                    namespace["create_sql"], _ = grammar.generate(meta)
                elif name:
                    # Add CREATE statement
                    transform = {"flags": {"exists": True}} if is_sql else None
                    create_sql = db.get_sql(category, name, transform=transform)
                    namespace["create_sql"] = create_sql
                tmpfile.flush(), tmpfile.seek(0)
                # Stream the buffered rows back through the outer document template.
                namespace["data_buffer"] = iter(lambda: tmpfile.read(65536), "")
                template = step.Template(templates.DATA_HTML if is_html else
                           templates.DATA_SQL if is_sql else templates.DATA_JSON
                           if is_json else templates.DATA_TXT,
                           strip=False, escape=is_html)
                template.stream(f, namespace)
                count = namespace["row_count"]
        result = progress(count=count) if progress else True
    finally:
        if tmpfile: util.try_until(tmpfile.close)
        if tmpname: util.try_until(lambda: os.unlink(tmpname))
        # Cancelled or failed exports leave no partial output behind.
        if not result: util.try_until(lambda: os.unlink(filename))
        if cursor: util.try_until(lambda: cursor.close())
        if category and name: db.unlock(category, name, make_iterable)
    return result
def export_data_multiple(filename, title, db, category, progress=None):
    """
    Exports database data from multiple tables/views to a single spreadsheet.

    @param   filename  full path and filename of resulting file
    @param   title     spreadsheet title
    @param   db        Database instance
    @param   category  category producing the data, "table" or "view"
    @param   progress  callback(name, count) to report progress,
                       returning false if export should cancel
    @return            True on success, False if cancelled or errored
                       (the output file is deleted in that case)
    """
    result = True
    items, cursor = db.schema[category], None
    try:
        props = {"title": title, "comments": templates.export_comment()}
        writer = xlsx_writer(filename, props=props)
        # Lock every exported item up front for the duration of the export.
        for n in items: db.lock(category, n, filename, label="export")
        for name, item in items.items():
            count = 0
            if progress and not progress(name=name, count=count):
                result = False
                break # for name, item
            try:
                cursor = db.execute("SELECT * FROM %s" % grammar.quote(name))
                # Pull the first row before add_sheet, then chain it back in —
                # presumably to force query execution/validation early; confirm.
                row = next(cursor, None)
                iterable = itertools.chain([] if row is None else [row], cursor)
                writer.add_sheet(name)
                colnames = [x["name"] for x in item["columns"]]
                writer.set_header(True)
                writer.writerow(colnames, "bold")
                writer.set_header(False)
                for i, row in enumerate(iterable, 1):
                    count = i
                    writer.writerow([row[c] for c in colnames])
                    # Poll progress every 100 rows; cancel on falsy return.
                    if not i % 100 and progress and not progress(name=name, count=i):
                        result = False
                        break # for i, row
            except Exception as e:
                logger.exception("Error exporting %s %s from %s.", category, grammar.quote(name), db)
                if progress and not progress(name=name, error=util.format_exc(e)):
                    result = False
            # NOTE(review): cursor may still be None here on early failure;
            # util.try_until presumably swallows the resulting error — confirm.
            finally: util.try_until(lambda: cursor.close())
            if not result: break # for name, item
            if progress and not progress(name=name, count=count):
                result = False
                break # for name, item
        writer.close()
        if progress: progress(done=True)
    except Exception as e:
        logger.exception("Error exporting %s from %s to %s.",
                         util.plural(category), db, filename)
        if progress: progress(error=util.format_exc(e), done=True)
        result = False
    finally:
        for n in items: db.unlock(category, n, filename)
        util.try_until(lambda: cursor.close())
        # Cancelled or failed exports leave no partial output behind.
        if not result: util.try_until(lambda: os.unlink(filename))
    return result
def export_sql(filename, db, sql, title=None):
    """Exports arbitrary SQL to file, rendered through the CREATE_SQL template."""
    namespace = {"title": title, "db_filename": db.name, "sql": sql}
    renderer = step.Template(templates.CREATE_SQL, strip=False)
    with open(filename, "wb") as out:
        renderer.stream(out, namespace)
    return True
def export_stats(filename, db, data):
    """Exports statistics to HTML or SQL file; format chosen by file extension."""
    extension = os.path.splitext(filename)[1][1:].lower()
    # Template body and template options, keyed by output extension.
    TEMPLATE_ARGS = {
        "html": (templates.DATA_STATISTICS_HTML, {"escape": True, "strip": False}),
        "sql":  (templates.DATA_STATISTICS_SQL,  {"strip": False}),
        "txt":  (templates.DATA_STATISTICS_TXT,  {"strip": False}),
    }
    body, opts = TEMPLATE_ARGS[extension]
    renderer = step.Template(body, **opts)
    namespace = {
        "title":  "Database statistics",
        "db":     db,
        "pragma": db.get_pragma_values(stats=True),
        "sql":    db.get_sql(),
        "stats":  data.get("data", {}),
    }
    with open(filename, "wb") as out:
        renderer.stream(out, namespace)
    return True
def export_dump(filename, db, progress=None):
"""
Exports full | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 5 12:42:22 2020
Read NFWF whale and boat observation files and
create Julian Date identifiers and detect passbys in the whale file.
Then, find corresponding boat observations and
build and save Whale and Boat data structures for each passby
@author: val
"""
import os.path
from os import path
import numpy as np
#from jdcal import gcal2jd, jd2gcal
import helpers
import globalParameters as gp
import WhaleBoatObj
###################################################################
# Scan Ocean Initiative csv file(s) and construct whale and boats passby and tracks lists
#csv file structure
# YEAR TrackID MONTH DAY HOUR MINUTE SECOND ID Sex Age Calf X Y meters E meters N bearing distance longitude lat ActivityCode ActivityState Site Original Track ID
# 2003 7300501 7 30 5 9 24 J1 M 52 No 870 1715 597.5052301 -1827.871029 161.8981624 1923.050961 -123.1334529 48.49290019 5 Forage North
# 2003 7300501 7 30 5 10 20 J1 M 52 No 856 1552 492.1463197 -1702.713129 163.8787737 1772.410788 -123.1348834 48.49402661 5 Forage North
# Output data file structure:
# classtype trackID trackIDroberin site whaleID age year month day hr minute sec jDay wCalf activityCode ActivityState Xroberin Yroberin latitude longitude utmE utmN vE vN v a tortuosity
# whaleObs 0 7300501 North J1 52 2003 7 30 5 9 24 52850.2148611 No 5 Forage 870 1715 48.492900 -123.133453 490141 5371095 -1.875 2.232 2.915 0.052 0.000
# whaleObs 0 7300501 North J1 52 2003 7 30 5 10 20 52850.2155093 No 5 Forage 856 1552 48.494027 -123.134883 490036 5371220 -1.875 2.232 2.915 0.052 2.593
# Xroberin and Yroberin Note: Rob requested that the X and the Y columns in original Excel sheet be maintained
# save_CVS_format(whalePassbyList, boatsPassbyList)
# helpers.save_obj(whalePassbyList,"whalePassbys_2003_2005")
# helpers.save_obj(boatsPassbyList,"boatsPassbys_2003_2005")
# helpers.save_obj(tracksList,"tracksList_2003_2005")
### Note Bene -- IMPORTANT globals ****************************************************************************
# Output files for parser diagnostics (relative to the working directory set below).
parseErrorFileName = "analysisResults/parseErrors.txt"
parserLogFileName = "analysisResults/parserLog.txt"
# NOTE(review): hard-coded absolute path ties this script to one machine --
# consider making it configurable (e.g. via globalParameters.py).
os.chdir("/home/val/Documents/NFWF_Files/2020_Analysis/")
print("Working directory is ",os.getcwd())
BUILD_DICTs = True # set to True to rebuild dictionaries
#ff_fileName = "csvFiles/utmTest.csv" Note Bene file name is stored in globalParameters.py
# Note Bene: Make sure whale file has been sorted by year, mo, day, hr, min, sec and is TAB delimited
# lat lon utmE utmN
# Reference locations: North site: 48.50935 -123.1415667 489544 5372925
# South site: 48.45701667 -122.9900167 500738 5367098
R_Earth = 6.373e6 # radius of Earth in meters
# Observation site reference coordinates (lat/lon and UTM easting/northing),
# matching the reference-location table in the comment above.
lat_northSite = 48.50935
lon_northSite = -123.1415667
lat_southSite = 48.45701667
lon_southSite = -122.9900167
utmE_northSite = 489544
utmN_northSite = 5372925
utmE_southSite = 500738
utmN_southSite = 5367098
#################################Helpers
def unique(list1):
    """Return the distinct elements of list1 as a list (order not preserved)."""
    return list(set(list1))
##########################################
# Open the whale observation file and consume its header row.
# Latin-1 avoids decode errors on non-ASCII bytes -- presumably the file's
# actual encoding; TODO confirm.
nfwf_ff_file=open(gp.ff_fileName, encoding="latin-1")
data = nfwf_ff_file.readline()[:-2].split('\t') # read header data line; [:-2] assumes a trailing CRLF -- TODO confirm line endings
print("whale header items \n",data)
# dictionaries (module globals, populated by buildDictionaries below)
anonBoatsDict = {}     # anonymized name -> (boatID, codeDef, jascoType, commBoatName)
codeCountDict = {}     # boat code -> running count used to number anonymized names
boatsDict = {}         # boat code -> (codeDef, jascoType)
activityCodeDict = {}  # activity code -> (label, description)
oneTimeDict = {}       # raw boat ID -> anonymized name
priorDataLine = ''
#############################################################################
def loadAllBoats(jdays):
    """Read the whole boat observation file.

    Appends the Julian day of every data line to ``jdays`` (mutated in place)
    and returns the raw tab-delimited lines in file order.  Exact duplicates
    of the immediately preceding line are skipped.
    """
    allBoatlines = []
    priorDataLine = ''
    with open(gp.boatFileName, encoding="latin-1") as boatFile:
        line = boatFile.readline()
        print("Boat Header\n",line)
        # NOTE(review): this second readline discards the first line after the
        # header -- presumably the file has two header rows; confirm, otherwise
        # the first data record is silently dropped.
        line = boatFile.readline()
        for line in boatFile:
            if line != priorDataLine: # this skips a line if it happens to be exactly equal to the prior data line
                priorDataLine = line
                items = line.split('\t')
                jday = WhaleBoatObj.getJulianDay(1,items)
                jdays.append(jday)
                allBoatlines.append(line)
    return allBoatlines
def buildDictionaries(allBoatLines): # anonBoatsDict has link from BoatID_boatCode to all details abt this specific vessel
    """Populate the module-level boat dictionaries from raw boat-file lines.

    Side effects (module globals): anonBoatsDict, codeCountDict, boatsDict,
    oneTimeDict are updated in place; activityCodeDict is (re)assigned.
    """
    global activityCodeDict
    for line in allBoatLines: # anonBoatsDict.get('pcdist_BARGE')
        items = line.split("\t") # ('BARGE_3', 'Barge', 'VTUG', 'Mark on the barge the tug was pulling')
        #this last field is specific to the specific vessel
        boatID = "%s_%s" % (items[8], items[9]) # BARGE_3 will be working id of this vessel
        boatCode = items[9] # boatsDict.get('BARGE')
        codeDef = items[10] # ('Barge', 'VTUG') VTUG will be for the JASCO source levels
        jascoType = items[11]
        commBoatName = items[20]
        # print(boatID, boatCode, codeDef, jascoType, commBoatName)
        # see if boatID is already in dictionaries
        dictVal = anonBoatsDict.get(boatID)
        # print("-----------", boatID, boatCode, dictVal)
        if dictVal is None:
            #build new dictionary entries
            # print("codeCountDict[boatCode]",codeCountDict.get(boatCode),boatCode)
            # bump the per-code counter so each vessel of a code gets a unique number
            cnt = codeCountDict.get(boatCode)
            if cnt is None:
                codeCountDict[boatCode] = 1
                cnt = 1
            else:
                codeCountDict[boatCode] = codeCountDict[boatCode] + 1
                cnt = codeCountDict.get(boatCode)
            thisCode = "%s_%d" % (boatCode, cnt) # HERE IS THE CONSTRUCTION OF ANONIMIZED BOAT NAME
            rename = oneTimeDict.get(items[8])
            if rename is None:
                oneTimeDict[items[8]] = thisCode # this dictionary will be used to rename boats to anonomized form
            anonBoatsDict[thisCode]=(boatID, codeDef, jascoType, commBoatName)
            dictVal = boatsDict.get(boatCode)
            if dictVal is None:
                boatsDict[boatCode]=(codeDef, jascoType)
    # Static lookup: NFWF activity code -> (short label, description).
    activityCodeDict = {1: ('resting', '(deep rest, hanging, logging at the surface: whales do not progress through the water)'),\
                        2: ('slow trav', '(whales progress through the water, although they may not make forward progress over ground)'),\
                        3: ('moderate trav','( travel in which whales do not porpoise)'),\
                        4: ('fast trav', '(includes porpoising)'),\
                        5: ('dispersed trav', '(foraging in a directional manner)'),
                        6: ('milling', '(feeding, pursuit of prey, involving changes in directions)'),\
                        7: ('tactile', '(socializing that involves touching another whale, such as petting, rolling or nudging)'),\
                        8: ('display', '(socializing that does not involve touching, but may include spyhops, tail-lobs and breaches)'),\
                        9: ('kelping object play', '(note when kelping also involves tactile interaction count it as tactile, rather than object play)')}
    #now need to apply anonimized name to the boat objects
    # print("anonBoatsDict",anonBoatsDict,"\n")
    # print("codeCountDict",codeCountDict,"\n")
    # input("rrrr")
    # print("boatsDict",boatsDict)
    # input("kkk")
def addLine(lineCnt, focalID, line, IDsList, linesLists):
    """File `line` under its focal animal ID in two parallel lists.

    IDsList and linesLists are mutated in place and stay parallel:
    IDsList[k] is a focal ID and linesLists[k] is the list of all data
    lines seen for that ID so far.

    The original implementation computed an index one past the end after
    appending a new ID (`idx = len(IDsList)`) and compensated with a
    `len(linesLists) < idx` check; this version keeps the two lists in
    lockstep directly, which is equivalent but not fragile.

    Parameters
    ----------
    lineCnt : int
        Running line counter; values > 9999 enable debug prints.
    focalID : str
        Focal animal ID parsed from the data line.
    line : str
        The raw data line to file.
    IDsList : list of str
        Distinct focal IDs seen so far (mutated).
    linesLists : list of list of str
        Per-ID lists of data lines, parallel to IDsList (mutated).
    """
    if lineCnt > 9999:
        print("lineCnt",lineCnt, line)
        print("len linesList", len(linesLists))
    if focalID not in IDsList:
        # First observation of this focal ID: open a new parallel slot.
        IDsList.append(focalID)
        if lineCnt > 9999:
            print("id not in list", focalID, IDsList)
        linesLists.append([line])
    else:
        idx = IDsList.index(focalID)
        if lineCnt > 9999:
            print("id in list", focalID, IDsList, idx)
        linesLists[idx].append(line)
    return
def scanForNextTimeGap(maxObsGapMins, gapList): # Note Bene do I have to check for too large a jump in X or Y, say from North to South or
    """Read whale-file lines from the global ``nfwf_ff_file`` until a time gap
    of at least ``maxObsGapMins`` minutes is found (one "passby").

    Returns a list of per-focal-ID line lists (built via addLine).  The gap
    size in minutes is appended to ``gapList``.  The file pointer is rewound
    one line so the line that started the gap is re-read by the next call.
    """
    linesLists = [] # or some sort of measurement error?????
    IDsList = []
    foundTimeGap = False
    jdayPrior = -1
    lineCnt = 0
    priorDataLine = 'init'
    while not foundTimeGap:
        filePos = nfwf_ff_file.tell() # save file pointer so we can back up ONE LINE when passby has ended
        line = nfwf_ff_file.readline()
        if line == '':
            # end of file: return whatever has accumulated for this passby
            return linesLists
        if line != priorDataLine: # this skips a line if it happens to be exactly equal to the prior data line
            priorDataLine = line
            lineCnt += 1
            if len(line) == 0:
                break # reached end of data file
            items = line.split("\t")
            jday = WhaleBoatObj.getJulianDay(0,items)
            focalID = items[7] # focal animal for this data file line
            if jdayPrior > 0 and (jday - jdayPrior)*24*60 >= maxObsGapMins: # a passby has surely ended
                foundTimeGap = True
                nfwf_ff_file.seek(filePos) # move file pointer back on line in data file
                gapList.append((jday - jdayPrior)*24*60)
                # input("???????")
            else:
                addLine(lineCnt, focalID, line, IDsList, linesLists) # THIS IS A COMPLICATED FUNCTION THAT BUILDS LISTS OF LISTS
                jdayPrior = jday
    return linesLists
def getBoats(passbyCnt, jDayStart, jDayStop, priorOrPostMin): # boatsJdays is a list of the ys for each line in boat file
    """Collect boat observations overlapping a whale passby time window.

    Scans the module globals ``boatsJdays``/``allBoatLines`` for lines whose
    Julian day falls within [jDayStart - dt, jDayStop + dt], where dt is
    ``priorOrPostMin`` expressed as a fraction of a day, and returns one
    WhaleBoatObj.boatObs per distinct vessel seen in that window.

    Fix: the original opened ``gp.theoTracks_2019_FileName`` on every
    iteration of the start-index search loop and never closed it -- a
    file-handle leak from leftover debug code; that has been removed.
    """
    boatsObjList = []
    boat_IDs = []
    dt = priorOrPostMin/(60*24) # window padding as a fraction of a day
    # Empty window, or window past the end of the boat data: nothing to do.
    if jDayStop < jDayStart or jDayStop > boatsJdays[-1]:
        return boatsObjList
    # Advance to the first boat line inside the padded window.
    idxStart = 0
    while boatsJdays[idxStart] < jDayStart - dt:
        idxStart += 1
    # Advance to one past the last boat line inside the padded window.
    idxStop = idxStart
    while boatsJdays[idxStop] < jDayStop + dt:
        idxStop += 1
    # boats gets the data lines for each boat observed during this whale passby
    boats = []
    for i in range(idxStart, idxStop):
        boats.append(allBoatLines[i])
        items = allBoatLines[i].split('\t')
        boat_IDs.append(items[8])
    uniqueBoats = unique(boat_IDs)
    for boatID in uniqueBoats:
        b_lines = []
        for b in boats: # run over all the boat lines for this passby
            items = b.split('\t')
            if items[8] == boatID:
                b_lines.append(b) # all the obs for this specific boat during this passby
        thisID = boatID.split('_') # anonimized ID will split while raw one will not
        if len(thisID) == 1:
            thisID = oneTimeDict[boatID] # HERE WE ANONIMIZE THE BOAT ID if it has not already been done
        # NOTE(review): when boatID already contains '_', thisID stays a LIST
        # (the split result) rather than a string -- confirm boatObs accepts that.
        thisBoatsObs = WhaleBoatObj.boatObs(passbyCnt, thisID, b_lines)
        boatsObjList.append(thisBoatsObs)
    return boatsObjList
def writeErrorToFile(dataFileName, lineNo, errTxt):
    """Append a parse-error record to the global ``parseErrorFileName``.

    Creates the file with a one-line header on first use, then appends
    "file <name> line <n> :: <error>" records.  Also echoes the error
    to stdout.

    Fix: the original opened the file without a ``with`` block (leaked the
    handle if write() raised) and pointlessly initialized the handle to [].
    Using append mode for both branches is equivalent: 'a' also creates a
    missing file, and 'w+' on a missing file starts empty just like 'a'.
    """
    newFile = not path.exists(parseErrorFileName)
    print("file", dataFileName, "lineNo", lineNo, "Error is", errTxt)
    line = "file %s line %d :: %s\n" % (dataFileName, lineNo, errTxt)
    with open(parseErrorFileName, 'a') as outputFile:
        if newFile:
            outputFile.write("Errors found in parsing NFWF file\n")
        outputFile.write(line)
def logPassbyLists(theLists):
    """Write one summary line per passby group to the global ``logFile``.

    Each summary gives the group number, the focal whale ID (column 7 of the
    group's first line) and the start/stop date-time stamps built from the
    year/month/day/hour/minute/second columns of the first and last lines.
    """
    print("in logPassbyLists with N lists=",len(theLists))
    for groupNo, group in enumerate(theLists, start=1):
        first = group[0].split("\t")
        last = group[-1].split("\t")
        focus = first[7]
        startDT = "_".join([first[0], first[2], first[3], first[4], first[5], first[6]])
        stopDT = "_".join([last[0], last[2], last[3], last[4], last[5], last[6]])
        logdata = "# in group %d \twhale = %s \tStart = %s \tStop = %s\n" % (groupNo, focus, startDT, stopDT)
        logFile.write(logdata)
        print("logdata=",logdata)
def save_CVS_format(whalePassbyList, boatsPassbyList):
# write out tab delimited text file for all the whale data
debug = 0
whaleFile = open(gp.whaleCVSfileName,"w")
header = "classtype\ttrackID\ttrackIDroberin\tsite\twhaleID\tage\tyear\tmonth\tday\thr\tminute\tsec\tjDay\twCalf\tactivityCode\tActivityState\tXroberin\tYroberin\tlatitude\tlongitude\tutmE\tutmN\tvE\tvN\tv\ta\ttortuosity\n"
whaleFile.write(header)
for w in whalePassbyList:
fileline = "%s\t%d\t%d\t%s\t%s\t%d" % (w.classType, w.trackID, w.trackIDroberin,w.site,w.whaleID,w.age)
for i in range(w.Nobs):
theDate = | |
# <filename>ur5e.py<gh_stars>0  -- extraction metadata, kept as a comment so the file parses
from vrep_api import vrep
import numpy as np
import time
import os
from urx.robot import Robot
import model
import utils
class UR5E(Robot):
    def __init__(self):
        """
        UR5E Class: Control the Robot
        CoppeliaSim(V-rep): vrep-api in Simulation
        urx(third party package): urx in Real World

        Connects to a local V-REP remote API server on port 19997, restarts
        the simulation, drops the object meshes into the scene and sets up
        the camera.  Exits the process if the connection fails.
        """
        # NOTE(review): Robot.__init__ (urx base class) is never called --
        # presumably intentional since only the simulator is used here; confirm.
        #? Initialize data logger
        logging_directory = os.path.abspath('logs')
        self.datalogger = utils.Logger(logging_directory)
        #! Set up grasp params (gripper heights in meters above the table)
        self.pre_grasp_high = 0.1
        self.grasp_high = 0.02
        #! Setup some params
        # workspace_limits rows are [min, max] for x, y, z (robot base frame)
        self.workspace_limits = np.asarray([[-0.75, -0.25], [-0.25, 0.25], [0.0001, 0.4]])
        # poses are [x, y, z, rx, ry, rz]
        self.home_pose = [-0.25, 0.0, 0.30, 0.0, 0.0, 0.0]
        self.put_pose = [[-0.5, -0.3, self.pre_grasp_high, 0.0, 0.0, 0.0],
                        [-0.5, -0.3, self.grasp_high, 0.0, 0.0, 0.0]]
        self.workstart_pose = [-0.25, 0.0, 0.1, 0.0, 0.0, 0.0]
        self.explore_start_pose = [-0.25, 0.0, self.grasp_high, 0.0, 0.0, 0.0]
        # force threshold for contact detection -- units depend on the V-REP
        # force sensor readout (presumably Newtons; confirm)
        self.detected_threshold = 2.0
        self.detect_iterations = 5000
        #! Define colors for object meshes (Tableau palette)
        self.color_space = np.asarray([[78.0, 121.0, 167.0], # blue
                                       [89.0, 161.0, 79.0], # green
                                       [156, 117, 95], # brown
                                       [242, 142, 43], # orange
                                       [237.0, 201.0, 72.0], # yellow
                                       [186, 176, 172], # gray
                                       [255.0, 87.0, 89.0], # red
                                       [176, 122, 161], # purple
                                       [118, 183, 178], # cyan
                                       [255, 157, 167]])/255.0 #pink
        #? Initialize trainer
        self.resolutions = (32,32)
        self.heatmap = model.Map(self.workspace_limits, resolutions=self.resolutions)
        # self.frontierSearch = FrontierSearch(self.workspace_limits, self.resolutions)
        # self.RL = QLearningTable(actions=list(range(self.frontierSearch.n_actions)))
        #? Initialize filter
        self.forceFilter = utils.Filter()
        self.torqueFilter = utils.Filter()
        # Make sure to have the server side running in V-REP:
        # in a child script of a V-REP scene, add following command
        # to be executed just once, at simulation start:
        #
        # simExtRemoteApiStart(19999)
        #
        # then start simulation, and run this program.
        #
        # IMPORTANT: for each successful call to simxStart, there
        # should be a corresponding call to simxFinish at the end!
        # MODIFY remoteApiConnections.txt
        # Connect to simulator
        vrep.simxFinish(-1) # Just in case, close all opened connections
        self.sim_client = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5) # Connect to V-REP on port 19997
        if self.sim_client == -1:
            print('Failed to connect to simulation (V-REP remote API server). Exiting.')
            exit()
        else:
            print('[ENVIRONMENT STATE]: Connected to simulation.')
            self.restart_sim()
        #! Read files in object mesh directory
        self.obj_mesh_dir = os.path.abspath('simBindings/objects/blocks')
        self.num_obj = 2
        self.mesh_list = os.listdir(self.obj_mesh_dir)
        # fixed drop positions used by add_objects (one per object)
        self.object_pos = [[-0.6, 0.1, 0.2],[-0.4, -0.1, 0.2]]
        #! Randomly choose objects to add to scene
        self.obj_mesh_ind = np.random.randint(0, len(self.mesh_list), size=self.num_obj)
        self.obj_mesh_color = self.color_space[np.asarray(range(10)), :]
        # Add objects to simulation environment
        self.add_objects()
        # Setup virtual camera in simulation
        self.setup_sim_camera()
        # runtime state updated by DetectObject / Explore
        self.force_data = []
        self.torque_data = []
        self.Detected = False
        self.Detect_num = 0
        self.Check = None
        # grasp_pose = grasp_predict_pose + current_pose
        self.grasp_predict_pose = None
        self.grasp_pose = [0.0, 0.0, 0.0]
        self.grasp_param = 0.1
    def add_objects(self):
        """
        Add random object automously
        Only in Simulation

        Imports each chosen mesh into the V-REP scene via the
        'remoteApiCommandServer' child script and records the returned
        shape handles in self.object_handles.  Exits the process if the
        script call reports failure (return code 8).
        """
        # Add each object to robot workspace at x,y location and orientation (random or pre-loaded)
        self.object_handles = []
        sim_obj_handles = []  # NOTE(review): never used -- leftover?
        i = 0
        for object_idx in range(len(self.obj_mesh_ind)):
            curr_mesh_file = os.path.join(self.obj_mesh_dir, self.mesh_list[self.obj_mesh_ind[object_idx]])
            curr_shape_name = 'shape_%02d' % object_idx
            # random drop point inside the workspace with a 0.1 m margin
            # (computed but unused while the fixed-position branch is active)
            drop_x = (self.workspace_limits[0][1] - self.workspace_limits[0][0] - 0.2) * np.random.random_sample() + self.workspace_limits[0][0] + 0.1
            drop_y = (self.workspace_limits[1][1] - self.workspace_limits[1][0] - 0.2) * np.random.random_sample() + self.workspace_limits[1][0] + 0.1
            #? Drop in Random position and orientation
            # object_position = [drop_x, drop_y, 0.15]
            # object_orientation = [2*np.pi*np.random.random_sample(), 2*np.pi*np.random.random_sample(), 2*np.pi*np.random.random_sample()]
            #? Drop in Fixed position and orientation
            object_position = self.object_pos[i]
            object_orientation = [np.pi/2, 0, 0]
            object_color = [self.obj_mesh_color[object_idx][0], self.obj_mesh_color[object_idx][1], self.obj_mesh_color[object_idx][2]]
            ret_resp,ret_ints,ret_floats,ret_strings,ret_buffer = vrep.simxCallScriptFunction(self.sim_client, 'remoteApiCommandServer',vrep.sim_scripttype_childscript,'importShape',[0,0,255,0], object_position + object_orientation + object_color, [curr_mesh_file, curr_shape_name], bytearray(), vrep.simx_opmode_blocking)
            if ret_resp == 8:
                print('Failed to add new objects to simulation. Please restart.')
                exit()
            curr_shape_handle = ret_ints[0]
            self.object_handles.append(curr_shape_handle)
            i += 1
        # let the objects settle in the physics engine before continuing
        time.sleep(2)
    def restart_sim(self):
        """
        Restart the simulation

        Fetches the UR5 target/sensor handles, parks the target at a known
        pose, then stop/starts the simulation until the gripper tip reports
        a sane position (works around a V-REP restart quirk).
        """
        sim_ret, self.UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)
        sim_ret, self.Sensor_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_connection', vrep.simx_opmode_blocking)
        vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (-0.5,0,0.3), vrep.simx_opmode_blocking)
        vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)
        vrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)
        time.sleep(1)
        sim_ret, self.RG2_tip_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_tip', vrep.simx_opmode_blocking)
        sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)
        while gripper_position[2] > 0.4: # V-REP bug requiring multiple starts and stops to restart
            vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)
            vrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)
            time.sleep(1)
            sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)
    def Go(self, pose):
        """
        Let the Robot move to
        the input pose data

        pose is [x, y, z, rx, ry, rz]; the target dummy is stepped toward it
        in small increments, then snapped exactly onto the requested pose.
        """
        sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle,-1,vrep.simx_opmode_blocking)
        sim_ret, UR5_target_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
        # Compute gripper position and linear movement increments
        move_direction = np.asarray([pose[0] - UR5_target_position[0], pose[1] - UR5_target_position[1], pose[2] - UR5_target_position[2]])
        move_magnitude = np.linalg.norm(move_direction)
        # NOTE(review): divides by zero if the target is already at `pose`
        # (move_magnitude == 0) -- unlike the rotation step below, no epsilon
        # is added here; confirm callers never request a null move.
        move_step = 0.01*move_direction/move_magnitude
        num_move_steps = max(int(np.floor((move_direction[0])/(move_step[0]+1e-5))),
                             int(np.floor((move_direction[1])/(move_step[1]+1e-5))),
                             int(np.floor((move_direction[2])/(move_step[2]+1e-5))))
        # Compute gripper orientation and rotation increments
        rotate_direction = np.asarray([pose[3] - UR5_target_orientation[0], pose[4] - UR5_target_orientation[1], pose[5] - UR5_target_orientation[2]])
        rotate_magnitude = np.linalg.norm(rotate_direction)
        rotate_step = 0.0005*rotate_direction/(rotate_magnitude+1e-5)
        # NOTE(review): the denominator `rotate_step[2]+1` looks like a typo
        # for `rotate_step[2]+1e-5`; as written num_rotate_steps is ~0 for any
        # small rotation, so the incremental rotation loop rarely runs and the
        # final snap below does all the work -- confirm intent before changing.
        num_rotate_steps = int(np.floor((rotate_direction[2]+1e-5)/(rotate_step[2]+1)))
        # Simultaneously move and rotate gripper
        # (min(step_iter, num_move_steps) never clamps since step_iter < num_move_steps)
        for step_iter in range(num_rotate_steps):
            vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (pose[3], UR5_target_orientation[1] + rotate_step[1]*min(step_iter,num_rotate_steps), pose[5]), vrep.simx_opmode_blocking)
        vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (pose[3],pose[4],pose[5]), vrep.simx_opmode_blocking)
        for step_iter in range(num_move_steps):
            vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle,-1,(UR5_target_position[0] + move_step[0]*min(step_iter,num_move_steps), UR5_target_position[1] + move_step[1]*min(step_iter,num_move_steps), UR5_target_position[2] + move_step[2]*min(step_iter,num_move_steps)),vrep.simx_opmode_blocking)
        vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle,-1,(pose[0],pose[1],pose[2]),vrep.simx_opmode_blocking)
        time.sleep(1)
def GoHome(self):
"""
Let the Robot move to
the defined home pose
"""
self.Go(self.home_pose)
def GoWork(self):
"""
Let the Robot move to
the start pose of work
"""
self.Go(self.workstart_pose)
    def DetectObject(self):
        """
        Check the tcp_force and
        return if detect the object

        Reads the wrist force sensor, low-pass filters force and torque, and
        reports contact when the filtered lateral (x or y) force magnitude
        exceeds self.detected_threshold.  Updates self.Detected, and on
        contact also self.force_data and self.Detect_num.
        """
        # NOTE(review): simx_opmode_streaming returns stale/empty data on the
        # first call until the stream is primed -- confirm the filter masks this.
        sim_ret,state,forceVector,torqueVector = vrep.simxReadForceSensor(self.sim_client,self.Sensor_handle,vrep.simx_opmode_streaming)
        forceVector = self.forceFilter.LowPassFilter(forceVector)
        torqueVector = self.torqueFilter.LowPassFilter(torqueVector)
        # Output the force of XYZ -- only x/y are thresholded; z (pressing
        # into the table) is deliberately ignored here
        if((np.fabs(forceVector[0]) > self.detected_threshold) or (np.fabs(forceVector[1]) > self.detected_threshold)):
            self.force_data = forceVector
            self.Detected = True
            self.Detect_num += 1
            return True
        else:
            self.Detected = False
            return False
def Explore(self):
"""
Expore and Grasp
"""
# Pre: close the gripper
self.gripper_close()
time.sleep(1)
"""
Pre-Trainging
"""
self. Go(self.explore_start_pose)
_, depth_map = self.get_camera_data()
self.heatmap.add_depth(depth_map)
for i in range(self.num_obj):
_, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle,-1,vrep.simx_opmode_blocking)
start_pos = self.heatmap.WorldToMap((UR5_target_position[0],UR5_target_position[1]))
print("[DYN_Q INFO]: Start Pos is ", start_pos)
goal_pos = []
goal_pos.append(self.heatmap.WorldToMap(self.object_pos[i]))
print("[DYN_Q INFO]: Goal Pos is ", goal_pos)
actions = model.Dyn_Q(Start=start_pos, Goal=goal_pos, Maze_Width=self.resolutions[0], Maze_Height=self.resolutions[1])
for i in range(len(actions)):
# Get Current end state
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle,-1,vrep.simx_opmode_blocking)
sim_ret, UR5_target_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
move_pos = self.heatmap.step(action=actions[i], current_pos=UR5_target_position)
# Compute gripper position and linear movement increments
move_direction = np.asarray([move_pos[0] - UR5_target_position[0], move_pos[1] - UR5_target_position[1], 0.0])
move_magnitude = np.linalg.norm(move_direction)
move_step = 0.00075*move_direction/(move_magnitude+1e-10)
num_move_steps = max(int(np.floor((move_direction[0])/(move_step[0]+1e-10))),
int(np.floor((move_direction[1])/(move_step[1]+1e-10))),
int(np.floor((move_direction[2])/(move_step[2]+1e-10))))
# Simultaneously move and rotate gripper
for step_iter in range(num_move_steps):
vrep.simxSetObjectPosition(self.sim_client,self.UR5_target_handle,-1,(UR5_target_position[0] + move_step[0]*min(step_iter,num_move_steps), UR5_target_position[1] + move_step[1]*min(step_iter,num_move_steps), UR5_target_position[2] + move_step[2]*min(step_iter,num_move_steps)),vrep.simx_opmode_blocking)
if self.DetectObject() :
print("[ENVIRONMENT STATE]: Touch a Object.")
vrep.simxSetObjectPosition(self.sim_client,self.UR5_target_handle,-1,(UR5_target_position[0], UR5_target_position[1], self.pre_grasp_high),vrep.simx_opmode_blocking)
break
# Check the Object to Grasp
if self.Detected:
print("[ENVIRONMENT STATE]: Pre to Grasp it.")
# vrep.simxSetObjectPosition(self.sim_client,self.UR5_target_handle,-1,(UR5_target_position[0], UR5_target_position[1], self.pre_grasp_high),vrep.simx_opmode_blocking)
# # if self.Detect_num == 4:
# # print("[STRATEGY INFO]: Try to Grasp the object.")
# # grasp_point, grasp_angle = self.frontierSearch.grasp_point_angle()
# # self.Grasp(pos_data=grasp_point, ori_data=grasp_angle)
else:
vrep.simxSetObjectPosition(self.sim_client,self.UR5_target_handle,-1,(move_pos[0], move_pos[1], UR5_target_position[2]),vrep.simx_opmode_blocking)
self.Grasp()
self.Go((UR5_target_position[0], UR5_target_position[1], self.pre_grasp_high, 0.0, 0.0, 0.0))
self.Go((UR5_target_position[0], UR5_target_position[1], self.grasp_high, 0.0, 0.0, 0.0))
# for i in range(self.detect_iterations):
# # Pre: close the gripper
# self.gripper_close()
# time.sleep(1)
# # Get Current end state
# sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle,-1,vrep.simx_opmode_blocking)
# sim_ret, UR5_target_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
# # RL
# w2m_pos = self.frontierSearch.map.WorldToMap((UR5_target_position[0],UR5_target_position[1]))
# heatmap = self.frontierSearch.map.heatmap
# self.action = self.RL.choose_action(map_pos=w2m_pos, explore_complete=self.frontierSearch.map.explore_complete, resolutions=self.resolutions)
# move_pos = self.frontierSearch.step(action=self.action, current_pos=(UR5_target_position[0], UR5_target_position[1]), unit=self.unit)
# # Compute gripper position and linear movement increments
# move_direction = np.asarray([move_pos[0] - UR5_target_position[0], move_pos[1] - UR5_target_position[1], 0.0])
# move_magnitude = np.linalg.norm(move_direction)
# move_step = 0.0005*move_direction/(move_magnitude+1e-10)
# num_move_steps = max(int(np.floor((move_direction[0])/(move_step[0]+1e-10))),
# int(np.floor((move_direction[1])/(move_step[1]+1e-10))),
# int(np.floor((move_direction[2])/(move_step[2]+1e-10))))
# # Simultaneously move and rotate gripper
# for step_iter in range(num_move_steps):
# vrep.simxSetObjectPosition(self.sim_client,self.UR5_target_handle,-1,(UR5_target_position[0] + move_step[0]*min(step_iter,num_move_steps), UR5_target_position[1] + move_step[1]*min(step_iter,num_move_steps), UR5_target_position[2] + move_step[2]*min(step_iter,num_move_steps)),vrep.simx_opmode_blocking)
# # build new free heatmap
# self.frontierSearch.buildNewFree(
# initial_cell=(UR5_target_position[0] + move_step[0]*min(step_iter,num_move_steps), UR5_target_position[1] + move_step[1]*min(step_iter,num_move_steps)),
# initial_angle=UR5_target_orientation[2]
# )
# if self.DetectObject() :
# # print("[ENVIRONMENT STATE]: Touch a Object")
# self.reward = 100
# self.RL.learn(s=w2m_pos,a=self.action,r=self.reward)
# break
# # Check the Object to Grasp
# if self.Detected:
# sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle,-1,vrep.simx_opmode_blocking)
# self.frontierSearch.buildNewFrontier(initial_cell=(UR5_target_position[0], UR5_target_position[1]),
# initial_force=self.force_data, initial_angle=UR5_target_orientation[2])
# vrep.simxSetObjectPosition(self.sim_client,self.UR5_target_handle,-1,(UR5_target_position[0] - move_step[0]*min(step_iter,num_move_steps), UR5_target_position[1] - move_step[1]*min(step_iter,num_move_steps), UR5_target_position[2] - move_step[2]*min(step_iter,num_move_steps)),vrep.simx_opmode_blocking)
# self.datalogger.save_heatmaps(self.frontierSearch.map.heatmap)
# if self.Detect_num == 4:
# print("[STRATEGY INFO]: Try to Grasp the object.")
# grasp_point, grasp_angle = self.frontierSearch.grasp_point_angle()
# self.Grasp(pos_data=grasp_point, ori_data=grasp_angle)
# else:
# vrep.simxSetObjectPosition(self.sim_client,self.UR5_target_handle,-1,(move_pos[0], move_pos[1], UR5_target_position[2]),vrep.simx_opmode_blocking)
# self.reward = 1
# self.RL.learn(s=w2m_pos,a=self.action,r=self.reward)
def Grasp(self):
"""
Grasp Strategy
"""
_, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle,-1,vrep.simx_opmode_blocking)
print("[PREDICT RESULT]: Desired Grasp Position: [{}, {}].".format(UR5_target_position[0], UR5_target_position[1]))
# backdata, taskcontinue = self.DesiredPositionScore(pos_data)
# if taskcontinue:
# Open the Gripper
self.gripper_open()
time.sleep(1)
| |
series_id = pheno_row_dict["series"]
participant_tuple = (participant_id, series_id)
else:
participant_tuple = (participant_id)
pheno_row_dict[measure] = measure_dict[participant_tuple]
ev_selections["demean"].append(measure)
if "Custom_ROI_Mean" in formula:
# include the means of the specified ROIs as regressors
if roi_means_dict == None:
err = "\n\n[!] You included 'Custom_ROI_Mean' in your model " \
"design, but there are no mean of ROI values provided." \
"\n\n"
raise Exception(err)
# roi_dict_dict is a dictionary of dictionaries, with each dictionary
# holding all of the means for one ROI, with each entry being a mean
# for a participant (the keys are the participant IDs)
# ex. {participant_01: 35.15, participant_02: 50.00}
# with the float values being all of the means of one of
# the ROIs specified
# there will be a dictionary for each ROI specified
roi_dict_dict = get_custom_roi_info(roi_means_dict)
add_formula_string = ""
for roi_column in roi_dict_dict.keys():
roi_dict = roi_dict_dict[roi_column]
for pheno_row_dict in pheno_file_rows:
participant_id = pheno_row_dict[subject_id_label]
if ("session" in pheno_row_dict.keys()) and \
("series" in pheno_row_dict.keys()):
session_id = pheno_row_dict["session"]
series_id = pheno_row_dict["series"]
participant_tuple = \
(participant_id, session_id, series_id)
elif "session" in pheno_row_dict.keys():
session_id = pheno_row_dict["session"]
participant_tuple = (participant_id, session_id)
elif "series" in pheno_row_dict.keys():
series_id = pheno_row_dict["series"]
participant_tuple = (participant_id, series_id)
else:
participant_tuple = (participant_id)
pheno_row_dict[roi_column] = roi_dict[participant_tuple]
ev_selections["demean"].append(roi_column)
# create a string of all the new custom ROI regressor column names
# to be inserted into the design formula, so that Patsy will
# accept the phenotypic data dictionary that now has these columns
if add_formula_string == "":
add_formula_string = add_formula_string + roi_column
else:
add_formula_string = add_formula_string + " + " + roi_column
# a regressor column of ROI means for each custom-specified ROI has
# now been added to the model with appropriate column labels
formula = formula.replace("Custom_ROI_Mean",add_formula_string)
# return the data from the phenotype file processed properly for Patsy
# and load it into 'pheno_data_dict'
# format: dictionary, each key is the name of an EV, and its value is
# a LIST of values in order of the subjects
# - categorical EVs are already renamed from '0,1,..' to
# 'EV0,EV1,..' with EV being the EV name
# - EVs to be demeaned are already demeaned
# - numerical EVs (non-categorical) are in a list which
# have been converted into a NumPy array
pheno_data_dict = create_pheno_dict(pheno_file_rows, ev_selections, \
subject_id_label)
# handle modeling group variances separately (if enabled), then edit the
# formula to be in Patsy language
if grouping_var != None:
pheno_data_dict, formula, grouping_var_id_dict = \
model_group_var_separately(grouping_var, \
formula, pheno_data_dict, \
ev_selections, coding_scheme)
else:
grouping_var_id_dict = None
if 'categorical' in ev_selections.keys():
for EV_name in ev_selections['categorical']:
if coding_scheme == 'Treatment':
formula = formula.replace(EV_name, 'C(' + EV_name + ')')
elif coding_scheme == 'Sum':
formula = formula.replace(EV_name, 'C(' + EV_name + \
', Sum)')
# create the Patsy design matrix!
try:
dmatrix = patsy.dmatrix(formula, pheno_data_dict, NA_action='raise')
except:
print('\n\n[!] CPAC says: Design matrix creation wasn\'t ' \
'successful - do the terms in your formula correctly ' \
'correspond to the EVs listed in your phenotype file?\n')
print('Phenotype file provided: ')
print(pheno_file, '\n')
print("Phenotypic data columns (regressors): ", list(pheno_data_dict.keys()))
print("Formula: %s\n\n" % formula)
raise Exception
# check the model for multicollinearity - Patsy takes care of this, but
# just in case
check_multicollinearity(np.array(dmatrix))
# prepare for final stages
design_matrix = np.array(dmatrix, dtype=np.float16)
column_names = dmatrix.design_info.column_names
# check to make sure there are more time points than EVs!
if len(column_names) >= num_subjects:
err = "\n\n[!] CPAC says: There are more EVs than there are " \
"subjects currently included in the model for %s. There must " \
"be more subjects than EVs in the design.\n\nNumber of " \
"subjects: %d\nNumber of EVs: %d\n\nNote: An 'Intercept' " \
"column gets added to the design as an EV, so there will be " \
"one more EV than you may have specified in your design. In " \
"addition, if you specified to model group variances " \
"separately, an Intercept column will not be included, but " \
"the amount of EVs can nearly double once they are split " \
"along the grouping variable.\n\n" \
"If the number of subjects is lower than the number of " \
"subjects in your group analysis subject list, this may be " \
"because not every subject in the subject list has an output " \
"for %s in the individual-level analysis output directory.\n\n"\
% (current_output, num_subjects, len(column_names), \
current_output)
raise Exception(err)
# remove the header formatting Patsy creates for categorical variables
# because we are going to use depatsified_EV_names in the "Available EVs
# for Contrasts" list on the next page, and also to test user-made custom
# contrast files
depatsified_EV_names = []
for column in column_names:
# if using Sum encoding, a column name may look like this:
# C(adhd, Sum)[S.adhd0]
# this loop leaves it with only "adhd0" in this case, for the
# contrasts list for the next GUI page
column_string = column
string_for_removal = ''
for char in column_string:
string_for_removal = string_for_removal + char
if char == '.':
column_string = column_string.replace(string_for_removal, '')
string_for_removal = ''
column_string = column_string.replace(']', '')
depatsified_EV_names.append(column_string)
# write the .mat file finally
write_mat_file(design_matrix, output_dir, model_name, \
depatsified_EV_names, current_output)
# write the .grp file also
create_grp_file(design_matrix, grouping_var_id_dict, output_dir, \
model_name, current_output)
# return the PATSY OBJECT of dmatrix, not the Numpy array "design_matrix"
return dmatrix, depatsified_EV_names
def positive(dmat, a, coding, group_sep, grouping_var):
    """Build a one-sided (positive) contrast vector for EV ``a``.

    Returns a numpy vector with one entry per design-matrix column:
    +1 on the column(s) matching ``a``, or -1 on the sibling columns of
    the same categorical term when ``a`` is a dropped reference level.
    This is also where the "Intercept" column gets introduced into the
    contrast columns, for when the user uses the model builder's
    contrast builder.
    """
    import numpy as np

    # patsy mapping: design-matrix column name -> column index
    evs = dmat.design_info.column_name_indexes
    con = np.zeros(dmat.shape[1])

    def flag_dropped_term(label):
        # ``label`` is a dropped (reference) level, so every other column
        # belonging to the same categorical term gets -1
        prefix = label.split('[')[0]
        for name in evs:
            if name.startswith(prefix):
                con[evs[name]] = -1

    if group_sep == True:
        if "__" in a and grouping_var in a:
            descriptors = a.split("__")
            for name in evs:
                # the column must contain every descriptor piece
                if all(piece in name for piece in descriptors):
                    con[evs[name]] = 1
                    break
            else:
                flag_dropped_term(a)
        elif len(a.split(grouping_var)) > 2:
            # the parsed contrast is the grouping variable itself: the
            # patsified name contains the variable's name string twice
            wanted = a.split(".")[1]
            for name in evs:
                if wanted in name:
                    con[evs[name]] = 1
                    break
            else:
                flag_dropped_term(a)
    # else not modeling group variances separately
    else:
        if a in evs:
            con[evs[a]] = 1
        else:
            flag_dropped_term(a)

    if coding == "Treatment":
        # zero out the Intercept column
        con[0] = 0
    elif coding == "Sum":
        # NOTE(review): this sets index 1 although the original comment
        # said "make Intercept 1" (the Intercept is usually column 0);
        # kept as-is to preserve behavior -- confirm intent.
        con[1] = 1

    return con
def greater_than(dmat, a, b, coding, group_sep, grouping_var):
    """Contrast for "a > b": the difference of the two one-sided contrasts."""
    con_a = positive(dmat, a, coding, group_sep, grouping_var)
    con_b = positive(dmat, b, coding, group_sep, grouping_var)
    return con_a - con_b
def negative(dmat, a, coding, group_sep, grouping_var):
    """Return the negated one-sided contrast vector for EV ``a``."""
    return -positive(dmat, a, coding, group_sep, grouping_var)
def create_dummy_string(length):
    """Return ``length`` tab-prefixed "1.00000e+00" cells plus a newline.

    Used as a placeholder row (e.g. /PPheights) in FSL model files.
    """
    cells = ('\t' + '%1.5e' % 1.0) * length
    return cells + '\n'
def create_con_file(con_dict, col_names, file_name, current_output, out_dir):
    """Write an FSL-style .con contrast file for the given contrasts.

    Parameters
    ----------
    con_dict : dict
        Maps contrast name -> sequence of per-EV contrast weights.  All
        sequences are assumed to share the same length (one weight per EV).
    col_names : list
        EV/column names, written into the file as a label row for
        double-checking the model.
    file_name : str
        Base name of the output file (".con" is appended).
    current_output : str
        Unused here; kept for call-signature compatibility.
    out_dir : str
        Directory the .con file is written into.

    Raises
    ------
    ValueError
        If ``con_dict`` is empty.  (The previous implementation read the
        loop-leaked variable ``key`` after the header loop, which raised
        an obscure NameError for an empty dict.)
    """
    import os
    print("col names: ")
    print(col_names)
    if not con_dict:
        raise ValueError("con_dict is empty: no contrasts to write")
    # All contrast vectors have the same length; take it from the first
    # instead of relying on the header loop's leaked ``key`` variable.
    num_waves = len(next(iter(con_dict.values())))
    with open(os.path.join(out_dir, file_name) + ".con", 'w+') as f:
        # write header
        for num, key in enumerate(con_dict, start=1):
            f.write("/ContrastName%s\t%s\n" % (num, key))
        f.write("/NumWaves\t%d\n" % num_waves)
        f.write("/NumContrasts\t%d\n" % len(con_dict))
        f.write("/PPheights%s" % create_dummy_string(num_waves))
        f.write("/RequiredEffect%s" % create_dummy_string(num_waves))
        f.write("\n\n")
        # print labels for the columns - mainly for double-checking your
        # model
        col_string = '\n'
        for col in col_names:
            col_string = col_string + col + '\t'
        print(col_string, '\n', file=f)
        # write data
        f.write("/Matrix\n")
        for key in con_dict:
            for v in con_dict[key]:
                f.write("%1.5e\t" % v)
            f.write("\n")
def create_fts_file(ftest_list, con_dict, model_name, current_output,
out_dir):
import os
import numpy as np
try:
print("\nFound f-tests in | |
labels to network
for item in labels:
network['pore.'+item] = False
network['throat.'+item] = False
# Add connections between parents and clones
if mode == 'parents':
tclone = sp.vstack((parents, clones)).T
extend(network=network, pore_coords=pclone, throat_conns=tclone)
if mode == 'siblings':
ts = network.find_neighbor_throats(pores=pores, mode='xnor')
tclone = network['throat.conns'][ts] + network.num_pores()
extend(network=network, pore_coords=pclone, throat_conns=tclone)
if mode == 'isolated':
extend(network=network, pore_coords=pclone)
# Apply provided labels to cloned pores
for item in labels:
network['pore.'+item][network.pores('all') >= Np] = True
network['throat.'+item][network.throats('all') >= Nt] = True
# Clear adjacency and incidence matrices which will be out of date now
network._am.clear()
network._im.clear()
def merge_networks(network, donor=None):
    r"""
    Combine multiple networks into one without doing any topological
    manipulations (such as stitching nearby pores to each other).

    Parameters
    ----------
    network : OpenPNM Network Object
        The network to which all the other networks should be added.
    donor : OpenPNM Network Object or list of Objects, optional
        The network object(s) to add to the given network.  Defaults to
        no donors, in which case ``network`` is left unchanged.

    Notes
    -----
    This method does *not* attempt to stitch the networks topologically.

    See Also
    --------
    extend
    trim
    stitch

    """
    # Normalize ``donor`` to a list.  (A mutable default argument ``[]``
    # was previously used here -- a classic Python pitfall; ``None`` is
    # behaviorally equivalent for callers relying on the default.)
    if donor is None:
        donors = []
    elif isinstance(donor, list):
        donors = donor
    else:
        donors = [donor]
    for donor in donors:
        # NOTE: pore coords are extended before the throat offset below
        # reads ``network.Np`` -- presumably Np is derived from
        # 'pore.all' (updated later), so the original order is preserved.
        network['pore.coords'] = sp.vstack((network['pore.coords'],
                                            donor['pore.coords']))
        network['throat.conns'] = sp.vstack((network['throat.conns'],
                                             donor['throat.conns'] +
                                             network.Np))
        # Refresh the 'all' labels to span the merged network
        p_all = sp.ones((sp.shape(network['pore.coords'])[0],), dtype=bool)
        t_all = sp.ones((sp.shape(network['throat.conns'])[0],), dtype=bool)
        network.update({'pore.all': p_all})
        network.update({'throat.all': t_all})
        for key in set(network.keys()).union(set(donor.keys())):
            if key.split('.')[1] not in ['conns', 'coords', '_id', 'all']:
                if key in network.keys():
                    pop_flag = False
                    if key not in donor.keys():
                        logger.debug('Adding ' + key + ' to donor')
                        # If key not on donor add it first
                        if network[key].dtype == bool:
                            donor[key] = False
                        else:
                            donor[key] = sp.nan
                        pop_flag = True
                    # Then merge it with existing array on network
                    try:
                        temp = sp.hstack((network[key], donor[key]))
                    except ValueError:
                        temp = sp.vstack((network[key], donor[key]))
                    network[key] = temp
                    if pop_flag:
                        # Remove the temporary placeholder from the donor
                        donor.pop(key, None)
                else:
                    # If key not on network add it first
                    logger.debug('Adding ' + key + ' to network')
                    if donor[key].dtype == bool:
                        network[key] = False
                    else:
                        network[key] = sp.nan
                    # Then append donor values to network
                    s = sp.shape(donor[key])[0]
                    network[key][-s:] = donor[key]
    # Clear adjacency and incidence matrices which will be out of date now
    network._am.clear()
    network._im.clear()
def stitch(network, donor, P_network, P_donor, method='nearest',
           len_max=sp.inf, len_min=0, label_suffix=''):
    r'''
    Stitches a second network to the current network.

    Parameters
    ----------
    network : OpenPNM Network Object
        The Network to which the donor Network will be attached
    donor : OpenPNM Network Object
        The Network to stitch on to the current Network
    P_network : array_like
        The pores on the current Network
    P_donor : array_like
        The pores on the donor Network
    label_suffix : string or None
        Some text to append to each label in the donor Network before
        inserting them into the recipient.  The default is to append no
        text, but a common option would be to append the donor Network's
        name. To insert none of the donor labels, use None.
    len_max : float
        Set a length limit on length of new throats
    len_min : float
        NOTE(review): accepted but never used by the body of this
        function -- confirm whether a minimum-length filter was intended.
    method : string (default = 'nearest')
        The method to use when making pore to pore connections.  Only
        'nearest' is implemented here (pairs of receptor/donor pores
        within ``len_max`` of each other are connected); any other value
        raises an Exception.

    Notes
    -----
    Before stitching it is necessary to translate the pore coordinates of
    one of the Networks so that it is positioned correctly relative to the
    other.

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 5, 5])
    >>> pn2 = op.network.Cubic(shape=[5, 5, 5])
    >>> [pn.Np, pn.Nt]
    [125, 300]
    >>> [pn2.Np, pn2.Nt]
    [125, 300]
    >>> pn2['pore.coords'][:, 2] += 5.0
    >>> op.topotools.stitch(network=pn, donor=pn2, P_network=pn.pores('top'),
    ...                     P_donor=pn2.pores('bottom'), method='nearest',
    ...                     len_max=1.0)
    >>> [pn.Np, pn.Nt]
    [250, 625]
    '''
    # Ensure Networks have no associated objects yet
    if (len(network.project) > 1) or (len(donor.project) > 1):
        raise Exception('Cannot stitch a Network with active objects')
    network['throat.stitched'] = False
    # Get the initial number of pores and throats
    N_init = {}
    N_init['pore'] = network.Np
    N_init['throat'] = network.Nt
    if method == 'nearest':
        P1 = P_network
        P2 = P_donor + N_init['pore']  # Increment pores on donor
        C1 = network['pore.coords'][P_network]
        C2 = donor['pore.coords'][P_donor]
        # Pairwise distances between the two pore sets; every pair
        # within len_max becomes a candidate stitch throat
        D = sp.spatial.distance.cdist(C1, C2)
        [P1_ind, P2_ind] = sp.where(D <= len_max)
        conns = sp.vstack((P1[P1_ind], P2[P2_ind])).T
    else:
        raise Exception('<{}> method not supported'.format(method))
    # Enter donor's pores into the Network
    extend(network=network, pore_coords=donor['pore.coords'])
    # Enter donor's throats into the Network
    extend(network=network, throat_conns=donor['throat.conns'] +
           N_init['pore'])
    # Trim throats that are longer than given len_max
    # (re-applies the len_max cut already used on D above)
    C1 = network['pore.coords'][conns[:, 0]]
    C2 = network['pore.coords'][conns[:, 1]]
    L = sp.sum((C1 - C2)**2, axis=1)**0.5
    conns = conns[L <= len_max]
    # Add donor labels to recipient network
    if label_suffix is not None:
        if label_suffix != '':
            label_suffix = '_'+label_suffix
        for label in donor.labels():
            element = label.split('.')[0]
            locations = sp.where(network._get_indices(element) >=
                                 N_init[element])[0]
            if label + label_suffix not in network.keys():
                network[label + label_suffix] = False
            network[label+label_suffix][locations] = donor[label]
    # Add the new stitch throats to the Network
    extend(network=network, throat_conns=conns, labels='stitched')
    # Remove donor from Workspace, if present
    # This check allows for the reuse of a donor Network multiple times
    for sim in list(ws.values()):
        if donor in sim:
            del ws[sim.name]
def connect_pores(network, pores1, pores2, labels=[], add_conns=True):
    r'''
    Returns the possible connections between two groups of pores, and
    optionally makes the connections.

    See ``Notes`` for advanced usage.

    Parameters
    ----------
    network : OpenPNM Network Object
    pores1 : array_like
        The first group of pores on the network
    pores2 : array_like
        The second group of pores on the network
    labels : list of strings
        The labels to apply to the new throats.  Only used when
        ``add_conns`` is True.
    add_conns : bool
        If True (default) the connections are added to the supplied
        network; otherwise they are returned as an Nt x 2 array that can
        be passed directly to ``extend``.

    Notes
    -----
    (1) The method also works if ``pores1`` and ``pores2`` are lists of
    lists, in which case it consecutively connects corresponding members
    of the two lists in a 1-to-1 fashion.  Example: pores1 = [[0, 1], [2, 3]]
    and pores2 = [[5], [7, 9]] leads to creation of the following
    connections:
        0 --> 5     2 --> 7     3 --> 7
        1 --> 5     2 --> 9     3 --> 9
    (2) If you want to use the batch functionality, make sure that each
    element within ``pores1`` and ``pores2`` are of type list or ndarray.
    (3) The connections are built in the format expected by the default
    OpenPNM connection array ('throat.conns').

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 5, 5])
    >>> pn.Nt
    300
    >>> op.topotools.connect_pores(network=pn, pores1=[22, 32],
    ...                            pores2=[16, 80, 68])
    >>> pn.Nt
    306
    >>> pn['throat.conns'][300:306]
    array([[16, 22],
           [22, 80],
           [22, 68],
           [16, 32],
           [32, 80],
           [32, 68]])
    '''
    def _as_batch(group):
        # Wrap a flat group so that both inputs become lists-of-lists
        try:
            len(group[0])
        except (TypeError, IndexError):
            group = [group]
        return group

    pores1 = _as_batch(pores1)
    pores2 = _as_batch(pores2)
    if len(pores1) != len(pores2):
        raise Exception('Running in batch mode! pores1 and pores2 must be'
                        ' of the same length.')
    heads, tails = [], []
    for group1, group2 in zip(pores1, pores2):
        # Cartesian product of the two groups, in throat.conns order
        heads.append(sp.repeat(group1, sp.size(group2)))
        tails.append(sp.tile(group2, sp.size(group1)))
    conns = sp.vstack([sp.concatenate(heads), sp.concatenate(tails)]).T
    if not add_conns:
        return conns
    extend(network=network, throat_conns=conns, labels=labels)
def find_pore_to_pore_distance(network, pores1=None, pores2=None):
r'''
Find the distance between all pores on set one to each pore in set 2
Parameters
----------
network : OpenPNM Network Object
The network object containing the pore coordinates
pores1 : array_like
The pore indices of the first set
pores2 : array_Like
The pore indices of the second set. It's OK if these indices are
partially or completely duplicating ``pores``.
Returns
-------
A distance matrix with ``len(pores1)`` rows and ``len(pores2)`` columns.
The distance between pore *i* in ``pores1`` and *j* in ``pores2`` is
located at *(i, j)* and *(j, i)* in the distance matrix.
'''
from scipy.spatial.distance import cdist
p1 = sp.array(pores1, ndmin=1)
p2 = sp.array(pores2, ndmin=1)
coords = network['pore.coords']
| |
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
    def append(self, draw_func):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def as_pointer(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def bl_rna_get_subclass(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def bl_rna_get_subclass_py(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def draw(self, _context):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def draw_collapsible(self, context, layout):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def draw_preset(self, _context):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def driver_add(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def driver_remove(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def get(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def is_extended(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def is_property_hidden(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def is_property_overridable_library(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def is_property_readonly(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def is_property_set(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def items(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def keyframe_delete(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def keyframe_insert(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def keys(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def path_from_id(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def path_resolve(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def pop(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def prepend(self, draw_func):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def property_overridable_library_set(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def property_unset(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def remove(self, draw_func):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def type_recast(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def values(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
class VIEW3D_MT_object_rigid_body(bpy_types.Menu, bpy_types._GenericUI):
    """Stub of the ``VIEW3D_MT_object_rigid_body`` menu.

    Attributes default to ``None`` and every method body is ``pass``;
    presumably an auto-generated API skeleton (e.g. for autocomplete) --
    confirm against the stub generator.
    """
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''
        '''
        pass
    def as_pointer(self):
        '''
        '''
        pass
    def bl_rna_get_subclass(self):
        '''
        '''
        pass
    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass
    def draw(self, _context):
        '''
        '''
        pass
    def draw_collapsible(self, context, layout):
        '''
        '''
        pass
    def draw_preset(self, _context):
        '''
        '''
        pass
    def driver_add(self):
        '''
        '''
        pass
    def driver_remove(self):
        '''
        '''
        pass
    def get(self):
        '''
        '''
        pass
    def is_extended(self):
        '''
        '''
        pass
    def is_property_hidden(self):
        '''
        '''
        pass
    def is_property_overridable_library(self):
        '''
        '''
        pass
    def is_property_readonly(self):
        '''
        '''
        pass
    def is_property_set(self):
        '''
        '''
        pass
    def items(self):
        '''
        '''
        pass
    def keyframe_delete(self):
        '''
        '''
        pass
    def keyframe_insert(self):
        '''
        '''
        pass
    def keys(self):
        '''
        '''
        pass
    def path_from_id(self):
        '''
        '''
        pass
    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''
        '''
        pass
    def path_resolve(self):
        '''
        '''
        pass
    def pop(self):
        '''
        '''
        pass
    def prepend(self, draw_func):
        '''
        '''
        pass
    def property_overridable_library_set(self):
        '''
        '''
        pass
    def property_unset(self):
        '''
        '''
        pass
    def remove(self, draw_func):
        '''
        '''
        pass
    def type_recast(self):
        '''
        '''
        pass
    def values(self):
        '''
        '''
        pass
class VIEW3D_MT_object_shading(bpy_types.Menu, bpy_types._GenericUI):
    """Stub of the ``VIEW3D_MT_object_shading`` menu.

    Attributes default to ``None`` and every method body is ``pass``;
    presumably an auto-generated API skeleton (e.g. for autocomplete) --
    confirm against the stub generator.
    """
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''
        '''
        pass
    def as_pointer(self):
        '''
        '''
        pass
    def bl_rna_get_subclass(self):
        '''
        '''
        pass
    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass
    def draw(self, _context):
        '''
        '''
        pass
    def draw_collapsible(self, context, layout):
        '''
        '''
        pass
    def draw_preset(self, _context):
        '''
        '''
        pass
    def driver_add(self):
        '''
        '''
        pass
    def driver_remove(self):
        '''
        '''
        pass
    def get(self):
        '''
        '''
        pass
    def is_extended(self):
        '''
        '''
        pass
    def is_property_hidden(self):
        '''
        '''
        pass
    def is_property_overridable_library(self):
        '''
        '''
        pass
    def is_property_readonly(self):
        '''
        '''
        pass
    def is_property_set(self):
        '''
        '''
        pass
    def items(self):
        '''
        '''
        pass
    def keyframe_delete(self):
        '''
        '''
        pass
    def keyframe_insert(self):
        '''
        '''
        pass
    def keys(self):
        '''
        '''
        pass
    def path_from_id(self):
        '''
        '''
        pass
    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''
        '''
        pass
    def path_resolve(self):
        '''
        '''
        pass
    def pop(self):
        '''
        '''
        pass
    def prepend(self, draw_func):
        '''
        '''
        pass
    def property_overridable_library_set(self):
        '''
        '''
        pass
    def property_unset(self):
        '''
        '''
        pass
    def remove(self, draw_func):
        '''
        '''
        pass
    def type_recast(self):
        '''
        '''
        pass
    def values(self):
        '''
        '''
        pass
class VIEW3D_MT_object_showhide(bpy_types.Menu, bpy_types._GenericUI):
    """Stub of the ``VIEW3D_MT_object_showhide`` menu.

    Attributes default to ``None`` and every method body is ``pass``;
    presumably an auto-generated API skeleton (e.g. for autocomplete) --
    confirm against the stub generator.
    """
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''
        '''
        pass
    def as_pointer(self):
        '''
        '''
        pass
    def bl_rna_get_subclass(self):
        '''
        '''
        pass
    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass
    def draw(self, _context):
        '''
        '''
        pass
    def draw_collapsible(self, context, layout):
        '''
        '''
        pass
    def draw_preset(self, _context):
        '''
        '''
        pass
    def driver_add(self):
        '''
        '''
        pass
    def driver_remove(self):
        '''
        '''
        pass
    def get(self):
        '''
        '''
        pass
    def is_extended(self):
        '''
        '''
        pass
    def is_property_hidden(self):
        '''
        '''
        pass
    def is_property_overridable_library(self):
        '''
        '''
        pass
    def is_property_readonly(self):
        '''
        '''
        pass
    def is_property_set(self):
        '''
        '''
        pass
    def items(self):
        '''
        '''
        pass
    def keyframe_delete(self):
        '''
        '''
        pass
    def keyframe_insert(self):
        '''
        '''
        pass
    def keys(self):
        '''
        '''
        pass
    def path_from_id(self):
        '''
        '''
        pass
    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''
        '''
        pass
    def path_resolve(self):
        '''
        '''
        pass
    def pop(self):
        '''
        '''
        pass
    def prepend(self, draw_func):
        '''
        '''
        pass
    def property_overridable_library_set(self):
        '''
        '''
        pass
    def property_unset(self):
        '''
        '''
        pass
    def remove(self, draw_func):
        '''
        '''
        pass
    def type_recast(self):
        '''
        '''
        pass
    def values(self):
        '''
        '''
        pass
class VIEW3D_MT_object_track(bpy_types.Menu, bpy_types._GenericUI):
    """Stub of the ``VIEW3D_MT_object_track`` menu.

    Attributes default to ``None`` and every method body is ``pass``;
    presumably an auto-generated API skeleton (e.g. for autocomplete) --
    confirm against the stub generator.
    """
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''
        '''
        pass
    def as_pointer(self):
        '''
        '''
        pass
    def bl_rna_get_subclass(self):
        '''
        '''
        pass
    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass
    def draw(self, _context):
        '''
        '''
        pass
    def draw_collapsible(self, context, layout):
        '''
        '''
        pass
    def draw_preset(self, _context):
        '''
        '''
        pass
    def driver_add(self):
        '''
        '''
        pass
    def driver_remove(self):
        '''
        '''
        pass
    def get(self):
        '''
        '''
        pass
    def is_extended(self):
        '''
        '''
        pass
    def is_property_hidden(self):
        '''
        '''
        pass
    def is_property_overridable_library(self):
        '''
        '''
        pass
    def is_property_readonly(self):
        '''
        '''
        pass
    def is_property_set(self):
        '''
        '''
        pass
    def items(self):
        '''
        '''
        pass
    def keyframe_delete(self):
        '''
        '''
        pass
    def keyframe_insert(self):
        '''
        '''
        pass
    def keys(self):
        '''
        '''
        pass
    def path_from_id(self):
        '''
        '''
        pass
    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''
        '''
        pass
    def path_resolve(self):
        '''
        '''
        pass
    def pop(self):
        '''
        '''
        pass
    def prepend(self, draw_func):
        '''
        '''
        pass
    def property_overridable_library_set(self):
        '''
        '''
        pass
    def property_unset(self):
        '''
        '''
        pass
    def remove(self, draw_func):
        '''
        '''
        pass
    def type_recast(self):
        '''
        '''
        pass
    def values(self):
        '''
        '''
        pass
class VIEW3D_MT_orientations_pie(bpy_types.Menu, bpy_types._GenericUI):
    """Stub of the ``VIEW3D_MT_orientations_pie`` pie menu.

    Attributes default to ``None`` and every method body is ``pass``;
    presumably an auto-generated API skeleton (e.g. for autocomplete) --
    confirm against the stub generator.
    """
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''
        '''
        pass
    def as_pointer(self):
        '''
        '''
        pass
    def bl_rna_get_subclass(self):
        '''
        '''
        pass
    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass
    def draw(self, context):
        '''
        '''
        pass
    def draw_collapsible(self, context, layout):
        '''
        '''
        pass
    def draw_preset(self, _context):
        '''
        '''
        pass
    def driver_add(self):
        '''
        '''
        pass
    def driver_remove(self):
        '''
        '''
        pass
    def get(self):
        '''
        '''
        pass
    def is_extended(self):
        '''
        '''
        pass
    def is_property_hidden(self):
        '''
        '''
        pass
    def is_property_overridable_library(self):
        '''
        '''
        pass
    def is_property_readonly(self):
        '''
        '''
        pass
    def is_property_set(self):
        '''
        '''
        pass
    def items(self):
        '''
        '''
        pass
    def keyframe_delete(self):
        '''
        '''
        pass
    def keyframe_insert(self):
        '''
        '''
        pass
    def keys(self):
        '''
        '''
        pass
    def path_from_id(self):
        '''
        '''
        pass
    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''
        '''
        pass
    def path_resolve(self):
        '''
        '''
        pass
    def pop(self):
        '''
        '''
        pass
    def prepend(self, draw_func):
        '''
        '''
        pass
    def property_overridable_library_set(self):
        '''
        '''
        pass
    def property_unset(self):
        '''
        '''
        pass
    def remove(self, draw_func):
        '''
        '''
        pass
    def type_recast(self):
        '''
        '''
        pass
    def values(self):
        '''
        '''
        pass
class VIEW3D_MT_paint_gpencil(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
    def append(self, draw_func):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def as_pointer(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def bl_rna_get_subclass(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def bl_rna_get_subclass_py(self):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
    def draw(self, _context):
        '''No-op stub (body is ``pass``); presumably an auto-generated API skeleton.
        '''
        pass
| |
# hallucinator/code_rib/bvh_skeleton/humanoid_1205_skeleton.py
from . import math3d # for debug
# from . import math3dkh as math3d # scipy too slow
from . import bvh_helper
# from . import math3dV1 # for debug
import numpy as np
from scipy.spatial.transform import Rotation
class SkeletonConverter(object):
    """Maps a 16-joint pose array onto the 21-joint humanoid layout.

    The five extra joints (Spine, Spine1, Spine3, LeftShoulder,
    RightShoulder) are synthesized by linear interpolation between
    existing joints.
    """

    def __init__(self):
        self.root = 'Hips'
        # 16-joint layout: joint name -> column index in the pose array
        self.keypoint2index = {
            'Hips': 0,
            'RightUpLeg': 1,
            'RightLeg': 2,
            'RightFoot': 3,
            'LeftUpLeg': 4,
            'LeftLeg': 5,
            'LeftFoot': 6,
            'Spine2': 7,
            'Neck': 8,
            'Head': 9,
            'LeftArm': 10,
            'LeftForeArm': 11,
            'LeftHand': 12,
            'RightArm': 13,
            'RightForeArm': 14,
            'RightHand': 15,
        }
        self.index2keypoint = dict(
            (idx, name) for name, idx in self.keypoint2index.items())
        self.keypoint_num = len(self.keypoint2index)
        # 21-joint layout produced by convert_to_21joint()
        self.keypoint2index_21joint = {
            'Hips': 0,
            'RightUpLeg': 1,
            'RightLeg': 2,
            'RightFoot': 3,
            'LeftUpLeg': 4,
            'LeftLeg': 5,
            'LeftFoot': 6,
            'Spine': 7,
            'Spine1': 8,
            'Spine2': 9,
            'Spine3': 10,
            'Neck': 11,
            'Head': 12,
            'LeftShoulder': 13,
            'LeftArm': 14,
            'LeftForeArm': 15,
            'LeftHand': 16,
            'RightShoulder': 17,
            'RightArm': 18,
            'RightForeArm': 19,
            'RightHand': 20,
        }
        self.index2keypoint_21joint = dict(
            (idx, name) for name, idx in self.keypoint2index_21joint.items())
        self.keypoint_num_21joint = len(self.keypoint2index_21joint)

    def convert_to_21joint(self, poses_3d):
        """Expand a (t, 16, 3) pose array to the (t, 21, 3) layout.

        Adds Spine, Spine1, Spine3, LeftShoulder and RightShoulder by
        interpolating along existing bones.

        :param poses_3d: array of shape (t, 16, 3)
        :return: float32 array of shape (t, 21, 3)
        """
        idx = self.keypoint2index

        def lerp(start, end, t):
            # point a fraction ``t`` of the way from joint start to end
            a = poses_3d[:, idx[start]]
            b = poses_3d[:, idx[end]]
            return a + t * (b - a)

        joints = {name: poses_3d[:, i] for name, i in idx.items()}
        # spine, spine1 <- Hips, Spine2
        joints['Spine'] = lerp('Hips', 'Spine2', 1 / 3)
        joints['Spine1'] = lerp('Hips', 'Spine2', 2 / 3)
        # spine3 <- Spine2, Neck
        joints['Spine3'] = lerp('Spine2', 'Neck', 1 / 2)
        # shoulders <- Neck, (Left|Right)Arm
        joints['LeftShoulder'] = lerp('Neck', 'LeftArm', 1 / 6)
        joints['RightShoulder'] = lerp('Neck', 'RightArm', 1 / 6)
        # re-order into the 21-joint index layout
        out = np.zeros((poses_3d.shape[0], 21, 3), dtype='float32')
        for i, name in self.index2keypoint_21joint.items():
            out[:, i] = joints[name]
        return out
class H36mSkeleton(object):
    def __init__(self):
        """Define the 21-joint skeleton: names/indices, tree topology and
        the rest-pose bone directions used when exporting BVH.
        """
        self.root = 'Hips'
        # Joint name -> column index in the pose array; EndSite joints
        # carry no data of their own and share the sentinel index -1.
        self.keypoint2index = {
            'Hips': 0,
            'RightUpLeg': 1,
            'RightLeg': 2,
            'RightFoot': 3,
            'LeftUpLeg': 4,
            'LeftLeg': 5,
            'LeftFoot': 6,
            'Spine': 7,
            'Spine1': 8,
            'Spine2': 9,
            'Spine3': 10,
            'Neck': 11,
            'Head': 12,
            'HeadEndSite': -1,
            'LeftShoulder': 13,
            'LeftArm': 14,
            'LeftForeArm': 15,
            'LeftHand': 16,
            'RightShoulder': 17,
            'RightArm': 18,
            'RightForeArm': 19,
            'RightHand': 20,
            'RightFootEndSite': -1,
            'LeftFootEndSite': -1,
            'LeftHandEndSite': -1,
            'RightHandEndSite': -1
        }
        # NOTE: all EndSite entries share index -1, so this reverse map
        # keeps only the last EndSite name for key -1.
        self.index2keypoint = {v: k for k, v in self.keypoint2index.items()}
        self.keypoint_num = len(self.keypoint2index)
        # Parent -> children adjacency describing the skeleton tree.
        self.children = {
            'Hips': ['RightUpLeg', 'LeftUpLeg', 'Spine'],
            'RightUpLeg': ['RightLeg'],
            'RightLeg': ['RightFoot'],
            'RightFoot': ['RightFootEndSite'],
            'RightFootEndSite': [],
            'LeftUpLeg': ['LeftLeg'],
            'LeftLeg': ['LeftFoot'],
            'LeftFoot': ['LeftFootEndSite'],
            'LeftFootEndSite': [],
            'Spine': ['Spine1'],
            'Spine1': ['Spine2'],
            'Spine2': ['Spine3'],
            'Spine3': ['Neck', 'LeftShoulder', 'RightShoulder'],
            'Neck': ['Head'],
            'Head': ['HeadEndSite'],
            'HeadEndSite': [],
            'LeftShoulder': ['LeftArm'],
            'LeftArm': ['LeftForeArm'],
            'LeftForeArm': ['LeftHand'],
            'LeftHand': ['LeftHandEndSite'],
            'LeftHandEndSite': [],
            'RightShoulder': ['RightArm'],
            'RightArm': ['RightForeArm'],
            'RightForeArm': ['RightHand'],
            'RightHand': ['RightHandEndSite'],
            'RightHandEndSite': []
        }
        # Invert children into a child -> parent map (root has no parent).
        self.parent = {self.root: None}
        for parent, children in self.children.items():
            for child in children:
                self.parent[child] = parent
        self.left_joints = [
            joint for joint in self.keypoint2index
            if 'Left' in joint
        ]
        self.right_joints = [
            joint for joint in self.keypoint2index
            if 'Right' in joint
        ]
        # Unit-ish direction of each bone in the rest pose; scaled by the
        # estimated bone lengths in get_initial_offset().
        self.initial_directions = {
            'Hips': [0, 0, 0],
            'RightUpLeg': [-1, 0, 0],
            'RightLeg': [0, 0, -1],
            'RightFoot': [0, 0, -1],
            'RightFootEndSite': [0, -1, 0],
            'LeftUpLeg': [1, 0, 0],
            'LeftLeg': [0, 0, -1],
            'LeftFoot': [0, 0, -1],
            'LeftFootEndSite': [0, -1, 0],
            'Spine': [0, 0, 1],
            'Spine1': [0, 0, 1],
            'Spine2': [0, 0, 1],
            'Spine3': [0, 0, 1],
            'Neck': [0, 0, 1],
            'Head': [0, 0, 1],
            'HeadEndSite': [0, 0, 1],
            'LeftShoulder': [1, 0, 0],
            'LeftArm': [1, 0, 0],
            'LeftForeArm': [1, 0, 0],
            'LeftHand': [1, 0, 0],
            'LeftHandEndSite': [1, 0, 0],
            'RightShoulder': [-1, 0, 0],
            'RightArm': [-1, 0, 0],
            'RightForeArm': [-1, 0, 0],
            'RightHand': [-1, 0, 0],
            'RightHandEndSite': [-1, 0, 0]
        }
        # Per-joint direction-cosine matrices cached by pose2euler().
        self.dcms = {}
        for joint in self.keypoint2index:
            self.dcms[joint] = None
def get_initial_offset(self, poses_3d):
# TODO: RANSAC
bone_lens = {self.root: [0]}
stack = [self.root]
while stack:
parent = stack.pop()
p_idx = self.keypoint2index[parent]
for child in self.children[parent]:
if 'EndSite' in child:
bone_lens[child] = 0.4 * bone_lens[parent]
continue
stack.append(child)
c_idx = self.keypoint2index[child]
bone_lens[child] = np.linalg.norm(
poses_3d[:, p_idx] - poses_3d[:, c_idx],
axis=1
)
bone_len = {}
for joint in self.keypoint2index:
if 'Left' in joint or 'Right' in joint:
base_name = joint.replace('Left', '').replace('Right', '')
left_len = np.mean(bone_lens['Left' + base_name])
right_len = np.mean(bone_lens['Right' + base_name])
bone_len[joint] = (left_len + right_len) / 2
else:
bone_len[joint] = np.mean(bone_lens[joint])
initial_offset = {}
for joint, direction in self.initial_directions.items():
direction = np.array(direction) / max(np.linalg.norm(direction), 1e-12)
initial_offset[joint] = direction * bone_len[joint]
return initial_offset
def get_bvh_header(self, poses_3d):
initial_offset = self.get_initial_offset(poses_3d)
nodes = {}
for joint in self.keypoint2index:
is_root = joint == self.root
is_end_site = 'EndSite' in joint
nodes[joint] = bvh_helper.BvhNode(
name=joint,
offset=initial_offset[joint],
rotation_order='xyz' if not is_end_site else '', # default zxy
is_root=is_root,
is_end_site=is_end_site,
)
for joint, children in self.children.items():
nodes[joint].children = [nodes[child] for child in children]
for child in children:
nodes[child].parent = nodes[joint]
header = bvh_helper.BvhHeader(root=nodes[self.root], nodes=nodes)
return header
def pose2euler(self, pose, header):
channel = []
quats = {}
# quatsV1 = {}
eulers = {}
stack = [header.root]
index = self.keypoint2index
LeftForeArm_angle = math3d.anglefrom3points(pose[index['LeftArm']], pose[index['LeftForeArm']], pose[index['LeftHand']])
LeftForeArm_straight = np.abs(LeftForeArm_angle - 180) < 10
RightForeArm_angle = math3d.anglefrom3points(pose[index['RightArm']], pose[index['RightForeArm']], pose[index['RightHand']])
RightForeArm_straight = np.abs(RightForeArm_angle - 180) < 10
while stack:
node = stack.pop()
joint = node.name
joint_idx = self.keypoint2index[joint]
if node.is_root:
channel.extend(pose[joint_idx])
index = self.keypoint2index
order = None
if joint == 'Hips':
# debug_1 = pose[index['Hips']]
# debug_2 = pose[index['Spine']]
# debug_3 = pose[index['LeftUpLeg']]
# debug_4 = pose[index['RightUpLeg']]
x_dir = pose[index['LeftUpLeg']] - pose[index['RightUpLeg']]
y_dir = None
z_dir = pose[index['Spine']] - pose[joint_idx]
order = 'zyx'
# order = 'xyz'
elif joint in ['RightUpLeg', 'RightLeg']:
child_idx = self.keypoint2index[node.children[0].name]
x_dir = pose[index['Hips']] - pose[index['RightUpLeg']]
y_dir = None
z_dir = pose[joint_idx] - pose[child_idx]
order = 'zyx'
elif joint in ['LeftUpLeg', 'LeftLeg']:
child_idx = self.keypoint2index[node.children[0].name]
x_dir = pose[index['LeftUpLeg']] - pose[index['Hips']]
y_dir = None
z_dir = pose[joint_idx] - pose[child_idx]
order = 'zyx'
elif joint == 'Spine':
x_dir = pose[index['LeftUpLeg']] - pose[index['RightUpLeg']]
y_dir = None
z_dir = pose[index['Spine1']] - pose[joint_idx]
order = 'zyx'
elif joint == 'Spine3':
x_dir = pose[index['LeftArm']] - \
pose[index['RightArm']]
y_dir = None
z_dir = pose[joint_idx] - pose[index['Spine2']]
order = 'zyx'
elif joint == 'Neck':
x_dir = None
y_dir = pose[index['Spine3']] - pose[joint_idx]
z_dir = pose[index['Head']] - pose[index['Spine3']]
order = 'zxy'
elif joint == 'LeftShoulder':
x_dir = pose[index['LeftArm']] - pose[joint_idx]
y_dir = pose[index['LeftArm']] - pose[index['LeftForeArm']]
z_dir = None
order = 'xzy'
elif joint == 'LeftArm':
if LeftForeArm_straight and self.dcms['LeftForeArm'] is not None:
x_dir = pose[index['LeftForeArm']] - pose[joint_idx]
y_dir = None
z_dir = self.dcms['LeftForeArm'][2] * 1.
order = 'xyz'
else:
x_dir = pose[index['LeftForeArm']] - pose[joint_idx]
y_dir = pose[index['LeftForeArm']] - pose[index['LeftHand']]
z_dir = None
order = 'xzy'
elif joint == 'LeftForeArm':
if LeftForeArm_straight and self.dcms['LeftForeArm'] is not None:
x_dir = pose[index['LeftHand']] - pose[joint_idx]
y_dir = None
z_dir = self.dcms['LeftForeArm'][2] * 1.
order = 'xyz'
else:
x_dir = pose[index['LeftHand']] - pose[joint_idx]
y_dir = pose[joint_idx] - pose[index['LeftArm']]
z_dir = None
order = 'xzy'
elif joint == 'RightShoulder':
x_dir = pose[joint_idx] - pose[index['RightArm']]
y_dir = pose[index['RightArm']] - pose[index['RightForeArm']]
z_dir = None
order = 'xzy'
elif joint == 'RightArm':
if RightForeArm_straight and self.dcms['RightForeArm'] is not None:
x_dir = pose[joint_idx] - pose[index['RightForeArm']]
y_dir = None
z_dir = self.dcms['RightForeArm'][2] * 1.
order = 'xyz'
else:
x_dir = pose[joint_idx] - pose[index['RightForeArm']]
y_dir = pose[index['RightForeArm']] - pose[index['RightHand']]
z_dir = None
order = 'xzy'
elif joint == 'RightForeArm':
if RightForeArm_straight and self.dcms['RightForeArm'] is not None:
x_dir = pose[joint_idx] - pose[index['RightHand']]
y_dir = None
z_dir = self.dcms['RightForeArm'][2] * 1.
order = 'xyz'
else:
x_dir = pose[joint_idx] - pose[index['RightHand']]
y_dir = pose[joint_idx] - pose[index['RightArm']]
z_dir = None
order = 'xzy'
if order:
dcm = math3d.dcm_from_axis(x_dir, y_dir, z_dir, order) # 3x3 [axis['x'], axis['y'], axis['z']]
self.dcms[joint] = dcm.copy()
quats[joint] = math3d.dcm2quat(dcm)
else:
quats[joint] = quats[self.parent[joint]].copy()
local_quat = quats[joint].copy()
# local_quatV1 = quatsV1[joint].copy()
if node.parent:
local_quat = math3d.quat_divide(
q=quats[joint], r=quats[node.parent.name]
)
euler = math3d.quat2euler(
q=local_quat, order=node.rotation_order
)
if joint in ['LeftShoulder', 'RightShoulder', 'Neck']:
tmp_idx = 2 if joint == 'Neck' else 0
euler[tmp_idx] = tmp_idx # 3
local_quat = math3d.euler2quat(euler)
quat = | |
from .data import CovidData
import datetime as dt
from matplotlib.offsetbox import AnchoredText
import pandas as pd
import seaborn as sns
import geopandas as gpd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def pan_duration(date):
    """Return the duration in days of the pandemic.

    As calculated from the gov.uk API, whose records are ordered newest
    first: the oldest date entry (last element) is subtracted from the
    most recent date entry (first element).

    Args:
        date (sequence or pandas.Series): date field as downloaded from
            the gov.uk API by the get_national_data() method of the
            CovidData class, newest first.

    Returns:
        int: duration of the pandemic in days.
    """
    # Normalise to a plain list so positional indexing works for both
    # lists and pandas Series: Series[-1] is label-based lookup and
    # raises KeyError on the default RangeIndex.
    dates = list(date)
    return (dates[0] - dates[-1]).days
def validate_input(df):
    """Check that input into the plotting functions is of the correct type.

    Args:
        df (pandas.DataFrame): the object a plotting function was given;
            expected to be output from a CovidData class method.

    Raises:
        TypeError: if ``df`` is not a pandas DataFrame.
    """
    # Commented-out column-schema checks removed; only the type check
    # was ever active.
    if not isinstance(df, pd.DataFrame):
        raise TypeError('Parameter must be DataFrame, use get_regional_data'
                        + ' method from CovidData class.')
def my_path():
    """Locate the ``geo_data`` directory that ships with this module.

    Returns:
        pathlib.Path: absolute path of the module-level ``geo_data``
        sub-folder holding the shapefiles used by the map plots.
    """
    from pathlib import Path

    return Path(__file__).resolve().parent / 'geo_data'
def daily_case_plot(df, pan_duration=pan_duration, save=False):
    """Create a matplotlib plot of case numbers in the UK.

    Calculated over the duration of the pandemic. Displays text
    information giving the most recent daily number, the highest daily
    number and the date recorded, the total cumulative number of cases
    and the duration of the pandemic in days.

    Args:
        df (DataFrame): containing covid data retrieved from CovidData
            class using get_national_data() or get_UK_data() method.
        pan_duration (function, optional): Defaults to pan_duration.
        save (bool, optional): set True to save plot. Defaults to False.

    Returns:
        Matplotlib plot, styled using matplotlib template 'ggplot'.
    """
    # Create variables we wish to plot; API data arrive newest first,
    # so index 0 is the latest value
    cases = df['case_newCases'].to_list()
    date = df['date'].to_list()
    cumulative = df['case_cumulativeCases'].to_list()
    # Find date of highest number of daily cases
    high, arg_high = max(cases), cases.index(max(cases))
    high_date = date[arg_high].strftime('%d %b %Y')
    duration = pan_duration(date=date)
    # Create matplotlib figure and specify size
    fig = plt.figure(figsize=(12, 10))
    plt.style.use('ggplot')
    ax = fig.add_subplot()
    # Plot variables
    ax.plot(date, cases)
    # Style and label plot
    ax.set_xlabel('Date')
    ax.set_ylabel('Cases')
    ax.fill_between(date, cases,
                    alpha=0.3)
    ax.set_title('Number of people who tested positive for Covid-19 (UK)',
                 fontsize=18)
    at = AnchoredText(f"Most recent new cases\n{cases[0]:,.0f}\
                      \nMax new cases\n{high:,.0f}: {high_date}\
                      \nCumulative cases\n{cumulative[0]:,.0f}\
                      \nPandemic duration\n{duration} days",
                      prop=dict(size=16), frameon=True, loc='upper left')
    at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    ax.add_artist(at)
    ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data',
                xy=(0.25, 0.0175), xycoords='figure fraction',
                fontsize=12, color='#555555')
    # Duplicate plt.style.use('ggplot') removed: the style is already
    # applied at module level and again above, before plotting.
    if save:
        # Save before show(): some backends clear the figure on show()
        plt.savefig(f"{date[0].strftime('%Y-%m-%d')}-case_numbers_plot")
    plt.show()
def regional_plot_cases(save=False):
    """Plot regional case numbers on a map of the UK.

    Function collects data using CovidData get_regional_data method.

    Args:
        save (bool, optional): If true will save plot. Defaults to False.

    Raises:
        FileNotFoundError: if the geo_data shapefile cannot be read.

    Returns:
        Plot of regional case numbers on map of UK
    """
    # Collect data
    regions = CovidData().get_regional_data()
    scotland = CovidData(nation='scotland').get_national_data()
    wales = CovidData(nation='wales').get_national_data()
    ni = CovidData(nation='northern ireland').get_national_data()
    # Mirror the column name used by the national frames so all four
    # frames share 'case_newCases'
    regions = regions.assign(case_newCases=regions['cases_newDaily'])
    # Set date to plot (most recent entry in the regional data)
    date_selector = regions['date'][0]
    regions_date = regions.loc[regions['date'] == date_selector]
    scotland_date = \
        scotland.loc[scotland['date'] == date_selector,
                     ['date', 'name', 'case_newCases']]
    wales_date = wales.loc[wales['date'] == date_selector,
                           ['date', 'name', 'case_newCases']]
    ni_date = ni.loc[ni['date'] == date_selector,
                     ['date', 'name', 'case_newCases']]
    # Combine regional data into single dataframe
    final_df = pd.concat([regions_date, scotland_date, wales_date, ni_date],
                         axis=0)
    file_path = my_path() / 'NUTS_Level_1_(January_2018)_Boundaries.shp'
    try:
        # Read shape file
        geo_df = gpd.read_file(file_path)
    except Exception as err:
        # A bare except used to print a hint and fall through into a
        # confusing NameError on geo_df; fail fast with the same hint.
        raise FileNotFoundError(
            'Ensure you have imported geo_data sub-folder') from err
    # Align boundary-file region names with the API's region names
    geo_df['nuts118nm'] = \
        geo_df['nuts118nm'].replace(['North East (England)',
                                     'North West (England)',
                                     'East Midlands (England)',
                                     'West Midlands (England)',
                                     'South East (England)',
                                     'South West (England)'],
                                    ['North East', 'North West',
                                     'East Midlands', 'West Midlands',
                                     'South East', 'South West'])
    merged = geo_df.merge(final_df, how='left', left_on="nuts118nm",
                          right_on="name")
    # Column to plot
    feature = 'case_newCases'
    # Plot range
    feature_min, feature_max = merged['case_newCases'].min(), \
        merged['case_newCases'].max()
    # Create plot
    fig, ax = plt.subplots(1, figsize=(12, 10))
    # Set style and labels
    ax.axis('off')
    ax.set_title(f'Number of new cases per region {date_selector}',
                 fontdict={'fontsize': '18', 'fontweight': '3'})
    ax.annotate('Source: gov.uk'
                + ' https://api.coronavirus.data.gov.uk/v1/data',
                xy=(0.25, .05), xycoords='figure fraction',
                fontsize=12, color='#555555')
    # Create colorbar
    sm = plt.cm.ScalarMappable(cmap='Reds',
                               norm=plt.Normalize(vmin=feature_min,
                                                  vmax=feature_max))
    fig.colorbar(sm)
    # Create map
    merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax,
                edgecolor='0.8')
    if save:
        # Save the existing figure (before show) instead of re-plotting
        # the data onto the same axes a second time.
        fig.savefig(f'{date_selector}-regional_cases_plot')
    plt.show()
def regional_plot_rate(save=False):
    """Plot regional case rate per 100,000 on a map of the UK.

    Function collects data using CovidData get_regional_data method.

    Args:
        save (bool, optional): If true will save plot. Defaults to False.

    Raises:
        FileNotFoundError: if the geo_data shapefile cannot be read.

    Returns:
        Plot of regional case rate on map of UK.
    """
    # Collect data
    regions = CovidData().get_regional_data()
    scotland = CovidData(nation='scotland').get_national_data()
    wales = CovidData(nation='wales').get_national_data()
    ni = CovidData(nation='northern ireland').get_national_data()
    # Set date to plot; index 5 skips the most recent days, where the
    # published rate is presumably still incomplete — TODO confirm
    date_selector = regions['date'][5]
    regions_date = regions.loc[regions['date'] == date_selector]
    scotland_date = scotland.loc[scotland['date'] == date_selector,
                                 ['date', 'name', 'case_rate']]
    wales_date = wales.loc[wales['date'] == date_selector,
                           ['date', 'name', 'case_rate']]
    ni_date = ni.loc[ni['date'] == date_selector,
                     ['date', 'name', 'case_rate']]
    # Combine regional data into single dataframe
    final_df = pd.concat([regions_date, scotland_date, wales_date, ni_date],
                         axis=0)
    file_path = my_path() / 'NUTS_Level_1_(January_2018)_Boundaries.shp'
    try:
        # Read shape file
        geo_df = gpd.read_file(file_path)
    except Exception as err:
        # A bare except used to print a hint and fall through into a
        # confusing NameError on geo_df; fail fast with the same hint.
        raise FileNotFoundError(
            'Ensure you have imported geo_data sub-folder') from err
    # Align boundary-file region names with the API's region names
    geo_df['nuts118nm'] = \
        geo_df['nuts118nm'].replace(['North East (England)',
                                     'North West (England)',
                                     'East Midlands (England)',
                                     'West Midlands (England)',
                                     'South East (England)',
                                     'South West (England)'],
                                    ['North East', 'North West',
                                     'East Midlands', 'West Midlands',
                                     'South East', 'South West'])
    merged = geo_df.merge(final_df, how='left', left_on="nuts118nm",
                          right_on="name")
    # Column to plot
    feature = 'case_rate'
    # Plot range
    feature_min, feature_max = merged['case_rate'].min(),\
        merged['case_rate'].max()
    # Create plot
    fig, ax = plt.subplots(1, figsize=(12, 10))
    # Set style and labels
    ax.axis('off')
    ax.set_title('Regional rate per 100,000 (new cases)',
                 fontdict={'fontsize': '20', 'fontweight': '3'})
    ax.annotate('Source: gov.uk'
                + ' https://api.coronavirus.data.gov.uk/v1/data',
                xy=(0.25, .05), xycoords='figure fraction',
                fontsize=12, color='#555555')
    # Create colorbar
    sm = plt.cm.ScalarMappable(cmap='Reds',
                               norm=plt.Normalize(vmin=feature_min,
                                                  vmax=feature_max))
    fig.colorbar(sm)
    # Create map
    merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax,
                edgecolor='0.8')
    if save:
        # Save the existing figure (before show) instead of re-plotting
        # the data onto the same axes a second time.
        fig.savefig(f'{date_selector}-regional_rate_plot')
    plt.show()
def heatmap_cases(df):
    """Create heatmap of case numbers for duration of pandemic.

    Args:
        df (DataFrame): Covid case data retrieved by calling CovidData
            class method.

    Returns:
        Seaborn heatmap plot of case numbers for each day of the pandemic.
    """
    # Build a small frame of daily counts, indexed by date so the index
    # becomes a DatetimeIndex
    counts = df['case_newCases'].to_list()
    dates = df['date'].to_list()
    heat_df = pd.DataFrame({'date': dates, 'cases': counts}, index=dates)
    # Split the date index into its calendar components
    for part in ('year', 'month', 'day'):
        heat_df[part] = getattr(heat_df.index, part)
    # Pivot to wide format for the heatmap: one row per (year, month),
    # one column per day of month
    df_wide = (heat_df.groupby(['year', 'month', 'day'])['cases']
               .sum()
               .unstack())
    # Plot data
    sns.set(rc={"figure.figsize": (12, 10)})
    # Reversed colormap so that darker cells represent higher numbers
    ax = sns.heatmap(df_wide, cmap=sns.cm.rocket_r)
    ax.set_title('Heatmap of daily cases since start of pandemic',
                 fontsize=20)
    ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data',
                xy=(0.25, 0.01), xycoords='figure fraction',
                fontsize=12, color='#555555')
    plt.show()
def local_rate_plot(save=False):
"""Plot local case rate per 100,000 on a map of the UK.
Function collects data using CovidData get_regional_data method.
Args:
save (bool, optional): If true will save plot. Defaults to False.
Returns:
Plot of local case rate on map of UK
"""
# Find latest data
recent_date = CovidData().get_regional_data()
recent_date = recent_date['date'][5]
# Select latest data from local data
local = CovidData().get_local_data(date=recent_date)
date_selector = recent_date
local_date = local.loc[local['date'] == date_selector,
['date', 'name', 'case_rate']]
file_path = my_path() / "Local_Authority_Districts.shp"
# Check required file exists
try:
# Read shape file
geo_df = gpd.read_file(file_path)
except: # bare except should be changed, will do so in later interation
print('Ensure you have imported geo_data sub-folder')
local_date['name'] = \
local_date['name'].replace(['Cornwall and Isles of Scilly'],
['Cornwall'])
merged = geo_df.merge(local_date, how='outer',
left_on="lad19nm", right_on="name")
# Column to plot
feature = 'case_rate'
# Plot range
vmin, vmax = merged['case_rate'].min(), merged['case_rate'].max()
# Create plot
fig, ax = plt.subplots(1, figsize=(12, 10))
# Set style and labels
ax.axis('off')
ax.set_title(f'Local rate per 100,000 {recent_date}',
fontdict={'fontsize': '20', 'fontweight': '3'})
ax.annotate('Source: gov.uk'
+ ' https://api.coronavirus.data.gov.uk/v1/data',
xy=(0.25, .05), xycoords='figure fraction',
fontsize=12, color='#555555')
# Create colorbar
sm = plt.cm.ScalarMappable(cmap='Reds',
norm=plt.Normalize(vmin=vmin, vmax=vmax))
fig.colorbar(sm)
# Create map
| |
shape (n_a, n_x)
Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
b -- Bias, numpy array of shape (n_a, 1)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
learning_rate -- learning rate for the model.
Returns:
loss -- value of the loss function (cross-entropy)
gradients -- python dictionary containing:
dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x)
dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a)
dWya -- Gradients of hidden-to-output weights, of shape (n_y, n_a)
db -- Gradients of bias vector, of shape (n_a, 1)
dby -- Gradients of output bias vector, of shape (n_y, 1)
a[len(X)-1] -- the last hidden state, of shape (n_a, 1)
"""
### START CODE HERE ###
# Forward propagate through time (≈1 line)
loss, cache = rnn_forward(X,Y,a_prev,parameters)
# Backpropagate through time (≈1 line)
gradients, a = rnn_backward(X,Y,parameters,cache)
# Clip your gradients between -5 (min) and 5 (max) (≈1 line)
gradients = clip(gradients, 5)
# Update parameters (≈1 line)
parameters = update_parameters(parameters,gradients,learning_rate)
### END CODE HERE ###
return loss, gradients, a[len(X)-1]
# In[20]:
# Smoke test for optimize(): seed NumPy for reproducibility, build random
# weights for a 27-character vocabulary and a 100-unit hidden state, run a
# single optimization step on one fixed (X, Y) pair, and print values that
# can be compared against the expected output below.
np.random.seed(1)
vocab_size, n_a = 27, 100
a_prev = np.random.randn(n_a, 1)
Wax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)
b, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}
X = [12,3,5,11,22,3]
Y = [4,14,11,22,25, 26]
loss, gradients, a_last = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)
print("Loss =", loss)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("np.argmax(gradients[\"dWax\"]) =", np.argmax(gradients["dWax"]))
print("gradients[\"dWya\"][1][2] =", gradients["dWya"][1][2])
print("gradients[\"db\"][4] =", gradients["db"][4])
print("gradients[\"dby\"][1] =", gradients["dby"][1])
print("a_last[4] =", a_last[4])
# ** Expected output:**
#
# <table>
#
#
# <tr>
# <td>
# **Loss **
# </td>
# <td>
# 126.503975722
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWaa"][1][2]**
# </td>
# <td>
# 0.194709315347
# </td>
# <tr>
# <td>
# **np.argmax(gradients["dWax"])**
# </td>
# <td> 93
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWya"][1][2]**
# </td>
# <td> -0.007773876032
# </td>
# </tr>
# <tr>
# <td>
# **gradients["db"][4]**
# </td>
# <td> [-0.06809825]
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dby"][1]**
# </td>
# <td>[ 0.01538192]
# </td>
# </tr>
# <tr>
# <td>
# **a_last[4]**
# </td>
# <td> [-1.]
# </td>
# </tr>
#
# </table>
# ### 3.2 - Training the model
# Given the dataset of dinosaur names, we use each line of the dataset (one name) as one training example. Every 100 steps of stochastic gradient descent, you will sample 10 randomly chosen names to see how the algorithm is doing. Remember to shuffle the dataset, so that stochastic gradient descent visits the examples in random order.
#
# **Exercise**: Follow the instructions and implement `model()`. When `examples[index]` contains one dinosaur name (string), to create an example (X, Y), you can use this:
# ```python
# index = j % len(examples)
# X = [None] + [char_to_ix[ch] for ch in examples[index]]
# Y = X[1:] + [char_to_ix["\n"]]
# ```
# Note that we use: `index= j % len(examples)`, where `j = 1....num_iterations`, to make sure that `examples[index]` is always a valid statement (`index` is smaller than `len(examples)`).
# The first entry of `X` being `None` will be interpreted by `rnn_forward()` as setting $x^{\langle 0 \rangle} = \vec{0}$. Further, this ensures that `Y` is equal to `X` but shifted one step to the left, and with an additional "\n" appended to signify the end of the dinosaur name.
# In[24]:
# GRADED FUNCTION: model
def model(data, ix_to_char, char_to_ix, num_iterations = 35000, n_a = 50, dino_names = 7, vocab_size = 27):
    """
    Trains the model and generates dinosaur names.

    Arguments:
    data -- text corpus
    ix_to_char -- dictionary that maps the index to a character
    char_to_ix -- dictionary that maps a character to an index
    num_iterations -- number of iterations to train the model for
    n_a -- number of units of the RNN cell
    dino_names -- number of dinosaur names you want to sample at each iteration.
    vocab_size -- number of unique characters found in the text, size of the vocabulary

    Returns:
    parameters -- learned parameters
    """
    # Retrieve n_x and n_y from vocab_size (one-hot input and softmax output
    # are both over the character vocabulary)
    n_x, n_y = vocab_size, vocab_size
    # Initialize parameters
    parameters = initialize_parameters(n_a, n_x, n_y)
    # Initialize loss (this is required because we want to smooth our loss, don't worry about it)
    loss = get_initial_loss(vocab_size, dino_names)
    # Build list of all dinosaur names (training examples).
    with open("dinos.txt") as f:
        examples = f.readlines()
    examples = [x.lower().strip() for x in examples]
    # Shuffle list of all dinosaur names (fixed seed so results are reproducible)
    np.random.seed(0)
    np.random.shuffle(examples)
    # Initialize the hidden state of the RNN
    a_prev = np.zeros((n_a, 1))
    # Optimization loop
    for j in range(num_iterations):
        ### START CODE HERE ###
        # Use the hint above to define one training example (X,Y) (≈ 2 lines)
        # X starts with None (interpreted by rnn_forward as the zero vector
        # x<0>); Y is X shifted left by one with "\n" appended as the
        # end-of-name token.
        index = j%len(examples)
        X = [None] + [char_to_ix[ch] for ch in examples[index]]
        Y = X[1:] + [char_to_ix["\n"]]
        # Perform one optimization step: Forward-prop -> Backward-prop -> Clip -> Update parameters
        # Choose a learning rate of 0.01
        curr_loss, gradients, a_prev = optimize(X,Y,a_prev,parameters,learning_rate=0.01)
        ### END CODE HERE ###
        # Use a latency trick to keep the loss smooth. It happens here to accelerate the training.
        loss = smooth(loss, curr_loss)
        # Every 2000 iterations, generate "n" characters thanks to sample() to check if the model is learning properly
        if j % 2000 == 0:
            print('Iteration: %d, Loss: %f' % (j, loss) + '\n')
            # The number of dinosaur names to print
            seed = 0
            for name in range(dino_names):
                # Sample indices and print them
                sampled_indices = sample(parameters, char_to_ix, seed)
                print_sample(sampled_indices, ix_to_char)
                seed += 1  # To get the same result for grading purposes, increment the seed by one.
            print('\n')
    return parameters
# Run the following cell, you should observe your model outputting random-looking characters at the first iteration. After a few thousand iterations, your model should learn to generate reasonable-looking names.
# In[25]:
# Train on the dinosaur-name corpus; sampled names are printed every
# 2000 iterations while training runs.
parameters = model(data, ix_to_char, char_to_ix)
# ## Conclusion
#
# You can see that your algorithm has started to generate plausible dinosaur names towards the end of the training. At first, it was generating random characters, but towards the end you could see dinosaur names with cool endings. Feel free to run the algorithm even longer and play with hyperparameters to see if you can get even better results. Our implementation generated some really cool names like `maconucon`, `marloralus` and `macingsersaurus`. Your model hopefully also learned that dinosaur names tend to end in `saurus`, `don`, `aura`, `tor`, etc.
#
# If your model generates some non-cool names, don't blame the model entirely--not all actual dinosaur names sound cool. (For example, `dromaeosauroides` is an actual dinosaur name and is in the training set.) But this model should give you a set of candidates from which you can pick the coolest!
#
# This assignment had used a relatively small dataset, so that you could train an RNN quickly on a CPU. Training a model of the english language requires a much bigger dataset, and usually needs much more computation, and could run for many hours on GPUs. We ran our dinosaur name model for quite some time, and so far our favorite name is the great, undefeatable, and fierce: Mangosaurus!
#
# <img src="images/mangosaurus.jpeg" style="width:250;height:300px;">
# ## 4 - Writing like Shakespeare
#
# The rest of this notebook is optional and is not graded, but we hope you'll do it anyway since it's quite fun and informative.
#
# A similar (but more complicated) task is to generate Shakespeare poems. Instead of learning from a dataset of Dinosaur names you can use a collection of Shakespearian poems. Using LSTM cells, you can learn longer term dependencies that span many characters in the text--e.g., where a character appearing somewhere in a sequence can influence what should be a different character much much later in this sequence. These long term dependencies were less important with dinosaur names, since the names were
bytes]] = None,
fields: typing.Optional[typing.Tuple[typing.Tuple[str, str], ...]] = None,
auth_settings: typing.Optional[typing.List[str]] = None,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
host: typing.Optional[str] = None,
) -> urllib3.HTTPResponse:
# header parameters
headers = headers or {}
headers.update(self.default_headers)
if self.cookie:
headers['Cookie'] = self.cookie
# path parameters
if path_params:
for k, v in path_params.items():
# specified safe chars, encode everything
resource_path = resource_path.replace(
'{%s}' % k,
quote(str(v), safe=self.configuration.safe_chars_for_path_param)
)
# auth setting
self.update_params_for_auth(headers, query_params,
auth_settings, resource_path, method, body)
# request url
if host is None:
url = self.configuration.host + resource_path
else:
# use server/host defined in path or operation instead
url = host + resource_path
# perform request and return response
response = self.request(
method,
url,
query_params=query_params,
headers=headers,
fields=fields,
body=body,
stream=stream,
timeout=timeout,
)
return response
def call_api(
self,
resource_path: str,
method: str,
path_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
query_params: typing.Optional[typing.Tuple[typing.Tuple[str, str], ...]] = None,
headers: typing.Optional[HTTPHeaderDict] = None,
body: typing.Optional[typing.Union[str, bytes]] = None,
fields: typing.Optional[typing.Tuple[typing.Tuple[str, str], ...]] = None,
auth_settings: typing.Optional[typing.List[str]] = None,
async_req: typing.Optional[bool] = None,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
host: typing.Optional[str] = None,
) -> urllib3.HTTPResponse:
"""Makes the HTTP request (synchronous) and returns deserialized data.
To make an async_req request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param headers: Header parameters to be
placed in the request header.
:param body: Request body.
:param fields: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings: Auth Settings names for the request.
:param async_req: execute request asynchronously
:type async_req: bool, optional TODO remove, unused
:param stream: if True, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Also when True, if the openapi spec describes a file download,
the data will be written to a local filesystme file and the BinarySchema
instance will also inherit from FileSchema and FileIO
Default is False.
:type stream: bool, optional
:param timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param host: api endpoint host
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(
resource_path,
method,
path_params,
query_params,
headers,
body,
fields,
auth_settings,
stream,
timeout,
host,
)
return self.pool.apply_async(
self.__call_api,
(
resource_path,
method,
path_params,
query_params,
headers,
body,
json,
fields,
auth_settings,
stream,
timeout,
host,
)
)
def request(
self,
method: str,
url: str,
query_params: typing.Optional[typing.Tuple[typing.Tuple[str, str], ...]] = None,
headers: typing.Optional[HTTPHeaderDict] = None,
fields: typing.Optional[typing.Tuple[typing.Tuple[str, str], ...]] = None,
body: typing.Optional[typing.Union[str, bytes]] = None,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> urllib3.HTTPResponse:
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
stream=stream,
timeout=timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
stream=stream,
timeout=timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
fields=fields,
stream=stream,
timeout=timeout,
body=body)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
fields=fields,
stream=stream,
timeout=timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
fields=fields,
stream=stream,
timeout=timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
fields=fields,
stream=stream,
timeout=timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
stream=stream,
timeout=timeout,
body=body)
else:
raise ApiValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def update_params_for_auth(self, headers, querys, auth_settings,
resource_path, method, body):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
:param resource_path: A string representation of the HTTP request resource path.
:param method: A string representation of the HTTP request method.
:param body: A object representing the body of the HTTP request.
The object type is the return value of _encoder.default().
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if auth_setting['in'] == 'cookie':
headers.add('Cookie', auth_setting['value'])
elif auth_setting['in'] == 'header':
if auth_setting['type'] != 'http-signature':
headers.add(auth_setting['key'], auth_setting['value'])
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ApiValueError(
'Authentication token must be in `query` or `header`'
)
class Api:
    """Base class for generated endpoint collections.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    def __init__(self, api_client: typing.Optional[ApiClient] = None):
        # Fall back to a default-configured client when none is supplied
        self.api_client = ApiClient() if api_client is None else api_client

    @staticmethod
    def _verify_typed_dict_inputs(cls: typing.Type[typing.TypedDict], data: typing.Dict[str, typing.Any]):
        """
        Ensures that:
        - required keys are present
        - additional properties are not input
        - value stored under required keys do not have the value unset

        Note: detailed value checking is done in schema classes
        """
        required = cls.__required_keys__
        # Required keys absent from the input entirely
        missing_required_keys = [key for key in required if key not in data]
        # Required keys present but holding the sentinel `unset` value
        required_keys_with_unset_values = [
            key for key in required if key in data and data[key] is unset
        ]
        if missing_required_keys:
            raise ApiTypeError(
                '{} missing {} required arguments: {}'.format(
                    cls.__name__, len(missing_required_keys), missing_required_keys
                )
            )
        if required_keys_with_unset_values:
            raise ApiValueError(
                '{} contains invalid unset values for {} required keys: {}'.format(
                    cls.__name__, len(required_keys_with_unset_values), required_keys_with_unset_values
                )
            )
        # Keys that are neither required nor optional are rejected
        disallowed_additional_keys = [
            key for key in data
            if key not in required and key not in cls.__optional_keys__
        ]
        if disallowed_additional_keys:
            raise ApiTypeError(
                '{} got {} unexpected keyword arguments: {}'.format(
                    cls.__name__, len(disallowed_additional_keys), disallowed_additional_keys
                )
            )

    def get_host(
        self,
        operation_id: str,
        servers: typing.Tuple[typing.Dict[str, str], ...] = tuple(),
        host_index: typing.Optional[int] = None
    ) -> typing.Optional[str]:
        """Resolve the host URL for an operation, honouring per-operation
        server/variable overrides from the configuration. Returns None when
        no host can be resolved and no explicit servers were given."""
        configuration = self.api_client.configuration
        try:
            index = (
                configuration.server_operation_index.get(
                    operation_id, configuration.server_index
                )
                if host_index is None
                else host_index
            )
            server_variables = configuration.server_operation_variables.get(
                operation_id, configuration.server_variables
            )
            host = configuration.get_host_from_settings(
                index, variables=server_variables, servers=servers
            )
        except IndexError:
            if servers:
                raise ApiValueError(
                    "Invalid host index. Must be 0 <= index < %s" %
                    len(servers)
                )
            host = None
        return host
class SerializedRequestBody(typing.TypedDict, total=False):
    """Result of serializing a request body: a raw payload (``body``)
    and/or multipart/url-encoded ``fields``. total=False because a
    serialized request carries one or the other, not necessarily both."""
    body: typing.Union[str, bytes]
    fields: typing.Tuple[typing.Union[RequestField, tuple[str, str]], ...]
class RequestBody(StyleFormSerializer):
"""
A request body parameter
content: content_type to MediaType Schema info
"""
__json_encoder = JSONEncoder()
def __init__(
self,
content: typing.Dict[str, MediaType],
required: bool = False,
):
self.required = required
if len(content) == 0:
raise ValueError('Invalid value for content, the content dict must have >= 1 entry')
self.content = content
def __serialize_json(
self,
in_data: typing.Any
) -> typing.Dict[str, bytes]:
in_data = self.__json_encoder.default(in_data)
json_str = json.dumps(in_data, separators=(",", ":"), ensure_ascii=False).encode(
"utf-8"
)
return dict(body=json_str)
@staticmethod
def __serialize_text_plain(in_data: typing.Any) -> typing.Dict[str, str]:
if isinstance(in_data, frozendict):
raise ValueError('Unable to serialize type frozendict to text/plain')
elif isinstance(in_data, tuple):
raise ValueError('Unable to serialize type tuple to text/plain')
elif isinstance(in_data, NoneClass):
raise ValueError('Unable to serialize type NoneClass to text/plain')
elif isinstance(in_data, BoolClass):
raise ValueError('Unable to serialize type BoolClass to text/plain')
return dict(body=str(in_data))
def __multipart_json_item(self, key: str, value: Schema) -> RequestField:
json_value = self.__json_encoder.default(value)
return RequestField(name=key, data=json.dumps(json_value), headers={'Content-Type': 'application/json'})
def __multipart_form_item(self, key: str, value: Schema) -> RequestField:
if isinstance(value, str):
return RequestField(name=key, data=str(value), headers={'Content-Type': 'text/plain'})
elif isinstance(value, bytes):
return RequestField(name=key, data=value, headers={'Content-Type': 'application/octet-stream'})
elif isinstance(value, FileIO):
request_field = RequestField(
name=key,
data=value.read(),
filename=os.path.basename(value.name),
headers={'Content-Type': 'application/octet-stream'}
)
value.close()
return request_field
else:
return self.__multipart_json_item(key=key, value=value)
def __serialize_multipart_form_data(
self, in_data: Schema
) -> typing.Dict[str, typing.Tuple[RequestField, ...]]:
if not isinstance(in_data, frozendict):
raise ValueError(f'Unable to serialize {in_data} to multipart/form-data because it is not a dict of data')
"""
In a multipart/form-data request body, each schema property, or each element of a schema array property,
takes a section in the payload with an internal header as defined by RFC7578. The serialization strategy
for each property of a multipart/form-data request body can be specified in an associated Encoding Object.
When passing in multipart types, boundaries MAY be used to separate sections of the content being
transferred – thus, the following default Content-Types are defined for multipart:
If the (object) property is a primitive, or an array of primitive values, the default Content-Type is text/plain
If the property is complex, or an array of complex values, the default Content-Type is application/json
Question: how is the array of primitives encoded?
| |
-> None:
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "en",
"resource_type": "tq-wa",
"resource_code": "col",
},
{
"lang_code": "en",
"resource_type": "tw-wa",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "tq",
"resource_code": "col",
},
{
"lang_code": "sw",
"resource_type": "tw",
"resource_code": "col",
},
],
},
)
finished_document_path = (
"en-tq-wa-col_en-tw-wa-col_sw-tq-col_sw-tw-col_book_language_order.pdf"
)
check_finished_document_with_body_success(response, finished_document_path)
def test_en_tw_wa_col_sw_tw_col_sw_tw_tit_book_language_order() -> None:
    """POST a /documents request for en/sw translation words (Colossians) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": lang, "resource_type": rtype, "resource_code": book}
        for lang, rtype, book in (
            ("en", "tw-wa", "col"),
            ("sw", "tw", "col"),
        )
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_body_success(
            response, "en-tw-wa-col_sw-tw-col_book_language_order.pdf"
        )
def test_en_tn_wa_col_en_tq_wa_col_sw_tn_col_sw_tq_col_sw_tn_tit_sw_tq_tit_book_language_order() -> None:
    """POST a /documents request for en/sw notes and questions (Colossians) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": lang, "resource_type": rtype, "resource_code": book}
        for lang, rtype, book in (
            ("en", "tn-wa", "col"),
            ("en", "tq-wa", "col"),
            ("sw", "tn", "col"),
            ("sw", "tq", "col"),
        )
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_body_success(
            response,
            "en-tn-wa-col_en-tq-wa-col_sw-tn-col_sw-tq-col_book_language_order.pdf",
        )
def test_en_tq_wa_col_sw_tq_col_sw_tq_tit_book_language_order() -> None:
    """POST a /documents request for en/sw translation questions (Colossians) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": lang, "resource_type": rtype, "resource_code": book}
        for lang, rtype, book in (
            ("en", "tq-wa", "col"),
            ("sw", "tq", "col"),
        )
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_body_success(
            response, "en-tq-wa-col_sw-tq-col_book_language_order.pdf"
        )
def test_en_tn_wa_col_sw_tn_col_sw_tn_tit_book_language_order() -> None:
    """POST a /documents request for en/sw translation notes (Colossians, Titus) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": lang, "resource_type": rtype, "resource_code": book}
        for lang, rtype, book in (
            ("en", "tn-wa", "col"),
            ("sw", "tn", "col"),
            ("sw", "tn", "tit"),
        )
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_body_success(
            response, "en-tn-wa-col_sw-tn-col_sw-tn-tit_book_language_order.pdf"
        )
def test_en_ulb_wa_col_sw_ulb_col_sw_ulb_tit_book_language_order() -> None:
    """POST a /documents request for en/sw ULB scripture (Colossians, Titus) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": lang, "resource_type": rtype, "resource_code": book}
        for lang, rtype, book in (
            ("en", "ulb-wa", "col"),
            ("sw", "ulb", "col"),
            ("sw", "ulb", "tit"),
        )
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response, "en-ulb-wa-col_sw-ulb-col_sw-ulb-tit_book_language_order.pdf"
        )
def test_gu_ulb_mrk_gu_tn_mrk_gu_tq_mrk_gu_tw_mrk_gu_udb_mrk_book_language_order() -> None:
    """POST a /documents request for five Gujarati resources (Mark) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": "gu", "resource_type": rtype, "resource_code": "mrk"}
        for rtype in ("ulb", "tn", "tq", "tw", "udb")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response,
            "gu-ulb-mrk_gu-tn-mrk_gu-tq-mrk_gu-tw-mrk_gu-udb-mrk_book_language_order.pdf",
        )
def test_mr_ulb_mrk_mr_tn_mrk_mr_tq_mrk_mr_tw_mrk_mr_udb_mrk_book_language_order() -> None:
    """POST a /documents request for five Marathi resources (Mark) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": "mr", "resource_type": rtype, "resource_code": "mrk"}
        for rtype in ("ulb", "tn", "tq", "tw", "udb")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response,
            "mr-ulb-mrk_mr-tn-mrk_mr-tq-mrk_mr-tw-mrk_mr-udb-mrk_book_language_order.pdf",
        )
def test_mr_ulb_mrk_mr_tn_mrk_mr_tq_mrk_mr_udb_mrk_book_language_order() -> None:
    """POST a /documents request for Marathi ULB/TN/TQ/UDB (Mark) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": "mr", "resource_type": rtype, "resource_code": "mrk"}
        for rtype in ("ulb", "tn", "tq", "udb")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response, "mr-ulb-mrk_mr-tn-mrk_mr-tq-mrk_mr-udb-mrk_book_language_order.pdf"
        )
def test_mr_ulb_mrk_mr_tn_mrk_mr_tw_mrk_mr_udb_mrk_book_language_order() -> None:
    """POST a /documents request for Marathi ULB/TN/TW/UDB (Mark) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": "mr", "resource_type": rtype, "resource_code": "mrk"}
        for rtype in ("ulb", "tn", "tw", "udb")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response, "mr-ulb-mrk_mr-tn-mrk_mr-tw-mrk_mr-udb-mrk_book_language_order.pdf"
        )
def test_mr_ulb_mrk_mr_tn_mrk_mr_udb_mrk_book_language_order() -> None:
    """POST a /documents request for Marathi ULB/TN/UDB (Mark) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": "mr", "resource_type": rtype, "resource_code": "mrk"}
        for rtype in ("ulb", "tn", "udb")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response, "mr-ulb-mrk_mr-tn-mrk_mr-udb-mrk_book_language_order.pdf"
        )
def test_mr_ulb_mrk_mr_tq_mrk_mr_udb_mrk_book_language_order() -> None:
    """POST a /documents request for Marathi ULB/TQ/UDB (Mark) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": "mr", "resource_type": rtype, "resource_code": "mrk"}
        for rtype in ("ulb", "tq", "udb")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response, "mr-ulb-mrk_mr-tq-mrk_mr-udb-mrk_book_language_order.pdf"
        )
@pytest.mark.skip
def test_gu_ulb_mic_gu_tn_mic_gu_tq_mic_gu_tw_mic_gu_ta_mic_book_language_order() -> None:
    """POST a /documents request for five Gujarati resources (Micah), including TA; currently skipped."""
    resource_requests = [
        {"lang_code": "gu", "resource_type": rtype, "resource_code": "mic"}
        for rtype in ("ulb", "tn", "tq", "tw", "ta")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response,
            "gu-ulb-mic_gu-tn-mic_gu-tq-mic_gu-tw-mic_gu-ta-mic_book_language_order.pdf",
        )
def test_tl_ulb_gen_tl_udb_gen_book_language_order() -> None:
    """POST a /documents request for Tagalog ULB/UDB (Genesis) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": "tl", "resource_type": rtype, "resource_code": "gen"}
        for rtype in ("ulb", "udb")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response, "tl-ulb-gen_tl-udb-gen_book_language_order.pdf"
        )
def test_gu_tn_mat_gu_tq_mat_gu_tw_mat_gu_udb_mat_book_language_order() -> None:
    """POST a /documents request for Gujarati TN/TQ/TW/UDB (Matthew) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": "gu", "resource_type": rtype, "resource_code": "mat"}
        for rtype in ("tn", "tq", "tw", "udb")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response, "gu-tn-mat_gu-tq-mat_gu-tw-mat_gu-udb-mat_book_language_order.pdf"
        )
def test_gu_tn_mat_gu_tq_mat_gu_udb_mat_book_language_order() -> None:
    """POST a /documents request for Gujarati TN/TQ/UDB (Matthew) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": "gu", "resource_type": rtype, "resource_code": "mat"}
        for rtype in ("tn", "tq", "udb")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response, "gu-tn-mat_gu-tq-mat_gu-udb-mat_book_language_order.pdf"
        )
def test_tl_tn_gen_tl_tw_gen_tl_udb_gen_book_language_order() -> None:
    """POST a /documents request for Tagalog TN/TW/UDB (Genesis) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": "tl", "resource_type": rtype, "resource_code": "gen"}
        for rtype in ("tn", "tw", "udb")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response, "tl-tn-gen_tl-tw-gen_tl-udb-gen_book_language_order.pdf"
        )
def test_tl_tq_gen_tl_udb_gen_book_language_order() -> None:
    """POST a /documents request for Tagalog TQ/UDB (Genesis) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": "tl", "resource_type": rtype, "resource_code": "gen"}
        for rtype in ("tq", "udb")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response, "tl-tq-gen_tl-udb-gen_book_language_order.pdf"
        )
def test_tl_tw_gen_tl_udb_gen_book_language_order() -> None:
    """POST a /documents request for Tagalog TW/UDB (Genesis) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": "tl", "resource_type": rtype, "resource_code": "gen"}
        for rtype in ("tw", "udb")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response, "tl-tw-gen_tl-udb-gen_book_language_order.pdf"
        )
def test_tl_udb_gen_book_language_order() -> None:
    """POST a /documents request for a single Tagalog UDB (Genesis) and verify the finished PDF."""
    resource_requests = [
        {"lang_code": "tl", "resource_type": "udb", "resource_code": "gen"},
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response, "tl-udb-gen_book_language_order.pdf"
        )
def test_fr_ulb_rev_fr_tn_rev_fr_tq_rev_fr_tw_rev_fr_udb_rev_book_language_order() -> None:
    """Demonstrate listing unfound resources, in this case fr-udb-rev."""
    resource_requests = [
        {"lang_code": "fr", "resource_type": rtype, "resource_code": "rev"}
        for rtype in ("ulb", "tn", "tq", "tw", "udb")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response,
            "fr-ulb-rev_fr-tn-rev_fr-tq-rev_fr-tw-rev_fr-udb-rev_book_language_order.pdf",
        )
def test_fr_ulb_rev_fr_tn_rev_fr_tq_rev_fr_tw_rev_fr_f10_rev_book_language_order() -> None:
    """Demonstrate two USFM resources, French, and use of a special
    USFM resource: f10.
    """
    resource_requests = [
        {"lang_code": "fr", "resource_type": rtype, "resource_code": "rev"}
        for rtype in ("ulb", "tn", "tq", "tw", "f10")
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": resource_requests,
            },
        )
        check_finished_document_with_verses_success(
            response,
            "fr-ulb-rev_fr-tn-rev_fr-tq-rev_fr-tw-rev_fr-f10-rev_book_language_order.pdf",
        )
def test_fr_ulb_rev_fr_tq_rev_fr_tw_rev_fr_f10_rev_book_language_order() -> None:
"""
Demonstrate two USFM resources, French, and use of a special
USFM resource: f10.
"""
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = | |
313, 908, 842, 366, 618, 803, 480, 391, 263,
122, 305, 436, 798, 795, 486, 530, 815, 422, 347, 530,
118, 574, 662, 7, 909, 70, 69, 253, 809, 520, 334,
981, 359, 298, 392, 739, 349, 312, 128, 347, 691, 686,
219, 960, 182, 236, 351, 611, 588, 857, 354, 837, 867,
258, 508, 882, 229, 981, 686, 234, 508, 73, 629, 836,
393, 677, 15, 491, 428, 689, 221, 12, 370, 494, 866,
698, 316, 925, 560, 975, 645, 223, 690, 254, 196, 93,
41, 113, 949, 999, 880, 215, 844, 86, 805, 951, 803,
348, 527, 944, 126, 943, 234, 474, 747, 25, 858, 441,
372, 666, 579, 350, 498, 113, 245, 987, 913, 900, 537,
617, 80, 18, 944, 372, 684, 62, 893, 942, 561, 587,
884, 422, 256, 777, 836, 139, 943, 796, 700, 377, 382,
38, 560, 89, 889, 243, 245, 527, 349, 807, 4, 230,
873, 576, 359, 419, 786, 669, 126, 835, 403, 165, 204,
268, 573, 987])
def test_snail_016(self):
self.assertEqual(snail([[665, 175], [31, 103]]), [665, 175, 103, 31])
def test_snail_017(self):
self.assertEqual(snail([[755]]), [755])
def test_snail_018(self):
self.assertEqual(snail([[126]]), [126])
def test_snail_019(self):
self.assertEqual(snail([[636, 479, 441, 159, 593, 904, 31, 21, 198],
[558, 377, 166, 504, 919, 20, 495, 71, 899],
[955, 466, 168, 459, 223, 535, 369, 881, 709],
[814, 54, 762, 941, 804, 810, 498, 583, 828],
[678, 489, 88, 976, 967, 218, 494, 1000, 550],
[501, 310, 668, 403, 558, 697, 247, 393, 990],
[346, 220, 92, 707, 460, 106, 187, 606, 447],
[589, 900, 867, 818, 647, 180, 878, 809, 191],
[278, 820, 427, 859, 985, 594, 218, 851, 286]]),
[636, 479, 441, 159, 593, 904, 31, 21, 198, 899, 709,
828, 550, 990, 447, 191, 286, 851, 218, 594, 985, 859,
427, 820, 278, 589, 346, 501, 678, 814, 955, 558, 377,
166, 504, 919, 20, 495, 71, 881, 583, 1000, 393, 606,
809, 878, 180, 647, 818, 867, 900, 220, 310, 489, 54,
466, 168, 459, 223, 535, 369, 498, 494, 247, 187, 106,
460, 707, 92, 668, 88, 762, 941, 804, 810, 218, 697,
558, 403, 976, 967])
def test_snail_020(self):
self.assertEqual(snail([[34, 174, 567, 523, 884, 681, 348, 879],
[860, 127, 97, 983, 245, 516, 214, 358],
[812, 405, 787, 630, 856, 384, 973, 803],
[452, 925, 253, 481, 678, 517, 246, 855],
[471, 121, 342, 671, 92, 770, 690, 538],
[706, 207, 63, 874, 366, 336, 848, 708],
[771, 637, 708, 977, 977, 3, 562, 324],
[453, 816, 461, 143, 874, 992, 346, 923]]),
[34, 174, 567, 523, 884, 681, 348, 879, 358, 803, 855,
538, 708, 324, 923, 346, 992, 874, 143, 461, 816, 453,
771, 706, 471, 452, 812, 860, 127, 97, 983, 245, 516,
214, 973, 246, 690, 848, 562, 3, 977, 977, 708, 637,
207, 121, 925, 405, 787, 630, 856, 384, 517, 770, 336,
366, 874, 63, 342, 253, 481, 678, 92, 671])
def test_snail_021(self):
self.assertEqual(snail([[950, 222, 988, 710, 321, 798, 51],
[640, 844, 782, 506, 155, 308, 384],
[703, 52, 197, 723, 690, 468, 962],
[326, 195, 134, 216, 302, 503, 212],
[718, 323, 17, 449, 601, 380, 396],
[985, 698, 502, 864, 257, 804, 942],
[888, 418, 187, 880, 152, 432, 651]]),
[950, 222, 988, 710, 321, 798, 51, 384, 962, 212, 396,
942, 651, 432, 152, 880, 187, 418, 888, 985, 718, 326,
703, 640, 844, 782, 506, 155, 308, 468, 503, 380, 804,
257, 864, 502, 698, 323, 195, 52, 197, 723, 690, 302,
601, 449, 17, 134, 216])
def test_snail_022(self):
self.assertEqual(snail([[188, 383, 11, 265, 829, 552, 184, 587, 149,
839, 640, 638, 292, 990],
[523, 992, 378, 958, 526, 735, 753, 216, 781,
183, 273, 433, 458, 900],
[645, 764, 450, 273, 769, 871, 125, 983, 864,
318, 160, 300, 677, 990],
[245, 169, 676, 300, 81, 19, 481, 549, 922, 13,
798, 37, 785, 831],
[202, 912, 399, 946, 877, 577, 211, 149, 515, 7,
783, 194, 903, 458],
[241, 530, 605, 143, 110, 318, 450, 365, 300,
901, 863, 973, 997, 46],
[217, 471, 358, 537, 270, 529, 512, 306, 402,
11, 275, 228, 737, 751],
[231, 344, 693, 847, 723, 898, 87, 700, 558,
116, 927, 425, 220, 505],
[119, 851, 664, 891, 32, 670, 224, 37, 428, 45,
679, 170, 522, 181],
[506, 264, 274, 87, 567, 324, 203, 715, 628,
288, 836, 353, 367, 458],
[377, 859, 308, 788, 792, 211, 738, 314, 972,
557, 583, 789, 132, 271],
[483, 158, 749, 560, 743, 592, 710, 442, 650,
896, 323, 221, 309, 299],
[858, 549, 118, 588, 674, 975, 799, 910, 465,
453, 139, 448, 537, 680],
[713, 851, 964, 542, 64, 296, 923, 440, 225,
479, 744, 119, 144, 399]]),
[188, 383, 11, 265, 829, 552, 184, 587, 149, 839, 640,
638, 292, 990, 900, 990, 831, 458, 46, 751, 505, 181,
458, 271, 299, 680, 399, 144, 119, 744, 479, 225, 440,
923, 296, 64, 542, 964, 851, 713, 858, 483, 377, 506,
119, 231, 217, 241, 202, 245, 645, 523, 992, 378, 958,
526, 735, 753, 216, 781, 183, 273, 433, 458, 677, 785,
903, 997, 737, 220, 522, 367, 132, 309, 537, 448, 139,
453, 465, 910, 799, 975, 674, 588, 118, 549, 158, 859,
264, 851, 344, 471, 530, 912, 169, 764, 450, 273, 769,
871, 125, 983, 864, 318, 160, 300, 37, 194, 973, 228,
425, 170, 353, 789, 221, 323, 896, 650, 442, 710, 592,
743, 560, 749, 308, 274, 664, 693, 358, 605, 399, 676,
300, 81, 19, 481, 549, 922, 13, 798, 783, 863, 275,
927, 679, 836, 583, 557, 972, 314, 738, 211, 792, 788,
87, 891, 847, 537, 143, 946, 877, 577, 211, 149, 515,
7, 901, 11, 116, 45, 288, 628, 715, 203, 324, 567, 32,
723, 270, 110, 318, 450, 365, 300, 402, 558, 428, 37,
224, 670, 898, 529, 512, 306, 700, 87])
def test_snail_023(self):
self.assertEqual(snail([[903, 852, 365, 142, 106, 848, 913, 461, 732,
281, 800, 952, 711, 122],
[805, 299, 188, 853, 984, 79, 432, 280, 510,
925, 155, 124, 736, 567],
[793, 219, 758, 522, 833, 232, 24, 494, 164,
365, 205, 548, 145, 603],
[711, 113, 979, 976, 706, 457, 185, 895, 310,
106, 142, 270, 209, 577],
[866, 160, 28, 737, 871, 900, 799, 516, 203,
294, 45, 256, 242, 397],
[901, 606, 892, 620, 61, 398, 300, 14, 365, 616,
230, 82, 352, 98],
[441, 320, 684, 572, 254, 331, 401, 375, 970,
223, 65, 26, 167, 858],
[915, 104, 113, 774, 436, 832, 181, 939, 238,
90, 67, 227, 426, 55],
[846, 135, 332, 105, 110, 301, 794, 431, 860,
715, 201, 69, 744, 657],
[341, 691, 666, 61, 827, 814, 82, 276, 274, 888,
738, 387, 429, 69],
[706, 204, 421, 382, 258, 466, 97, 189, 893,
523, 910, 633, 510, 351],
[560, 109, 533, 541, 825, 571, 608, 542, 92,
385, 694, 762, 465, 620],
[369, 509, 928, 286, 860, 142, 4, 926, 657, 697,
743, 858, 430, 638],
[812, 243, 974, 854, 283, 573, 121, 48, 71, 536,
561, 687, 375, 884]]),
[903, 852, 365, 142, 106, 848, 913, 461, 732, 281, 800,
952, 711, 122, 567, 603, 577, 397, 98, 858, 55, 657,
69, 351, 620, 638, 884, 375, 687, 561, 536, 71, 48,
121, 573, 283, 854, 974, 243, 812, 369, 560, 706, 341,
846, 915, 441, 901, 866, 711, 793, 805, 299, 188, 853,
984, 79, 432, 280, 510, 925, 155, 124, 736, 145, 209,
242, 352, 167, 426, 744, 429, 510, 465, 430, 858, 743,
697, 657, 926, 4, 142, 860, 286, 928, 509, 109, 204,
691, 135, 104, 320, 606, 160, 113, 219, 758, 522, 833,
232, 24, 494, | |
def check_hostname(self):
return self._check_hostname
@check_hostname.setter
def check_hostname(self, value):
check_hostname = bool(value)
if check_hostname and lib.SSL_CTX_get_verify_mode(self.ctx) == lib.SSL_VERIFY_NONE:
self._set_verify_mode(CERT_REQUIRED)
self._check_hostname = check_hostname
@property
def _host_flags(self):
return self.hostflags
@_host_flags.setter
def _host_flags(self, arg):
new_flags = int(arg)
param = lib.SSL_CTX_get0_param(self.ctx);
self.hostflags = new_flags;
lib.X509_VERIFY_PARAM_set_hostflags(param, new_flags)
def set_ciphers(self, cipherlist):
cipherlistbuf = _str_to_ffi_buffer(cipherlist)
ret = lib.SSL_CTX_set_cipher_list(self.ctx, cipherlistbuf)
if ret == 0:
# Clearing the error queue is necessary on some OpenSSL
# versions, otherwise the error will be reported again
# when another SSL call is done.
lib.ERR_clear_error()
raise ssl_error("No cipher can be selected.")
def get_ciphers(self):
ssl = lib.SSL_new(self.ctx)
try:
ciphers = lib.SSL_get_ciphers(ssl)
if ciphers == ffi.NULL:
return None
count = lib.sk_SSL_CIPHER_num(ciphers)
res = [None] * count
for i in range(count):
dct = cipher_to_dict(lib.sk_SSL_CIPHER_value(ciphers, i))
res[i] = dct
return res
finally:
lib.SSL_free(ssl)
    def load_cert_chain(self, certfile, keyfile=None, password=None):
        """Load a certificate chain and private key into the context.

        certfile: PEM file holding the certificate chain (and the key,
            when keyfile is omitted).
        keyfile: separate PEM private-key file; defaults to certfile.
        password: str/bytes/bytearray, or a callable returning one, used
            to decrypt an encrypted private key.

        Raises TypeError for a bad password type, OSError for
        file-system failures, and an SSL error when OpenSSL rejects the
        certificate or key.
        """
        if keyfile is None:
            keyfile = certfile
        pw_info = PasswordInfo()
        index = -1
        # Remember the currently installed password callback so it can
        # be restored after this call (it is temporarily replaced below).
        orig_passwd_cb = lib.SSL_CTX_get_default_passwd_cb(self.ctx)
        orig_passwd_userdata = lib.SSL_CTX_get_default_passwd_cb_userdata(self.ctx)
        if password is not None:
            if callable(password):
                pw_info.callable = password
            else:
                if isinstance(password, (str, bytes, bytearray)):
                    pw_info.password = password
                else:
                    raise TypeError("password should be a string or callable")
            pw_info.handle = ffi.new_handle(pw_info)
            # Stored per thread id so concurrent loads do not clobber
            # each other's password info.
            index = _thread.get_ident()
            PWINFO_STORAGE[index] = pw_info
            lib.SSL_CTX_set_default_passwd_cb(self.ctx, Cryptography_pem_password_cb)
            lib.SSL_CTX_set_default_passwd_cb_userdata(self.ctx, pw_info.handle)
        # errno is used below to tell OS-level I/O failures apart from
        # OpenSSL parse errors; save and restore the caller's value.
        prev_errno = ffi.errno
        try:
            ffi.errno = 0
            certfilebuf = _str_to_ffi_buffer(certfile)
            ret = lib.SSL_CTX_use_certificate_chain_file(self.ctx, certfilebuf)
            if ret != 1:
                if pw_info.operationerror:
                    # The Python password callback itself raised; re-raise it.
                    lib.ERR_clear_error()
                    raise pw_info.operationerror
                _errno = ffi.errno
                if _errno:
                    lib.ERR_clear_error()
                    raise OSError(_errno, "Error")
                else:
                    raise ssl_error(None)
            ffi.errno = 0
            buf = _str_to_ffi_buffer(keyfile)
            ret = lib.SSL_CTX_use_PrivateKey_file(self.ctx, buf,
                                                  lib.SSL_FILETYPE_PEM)
            if ret != 1:
                if pw_info.operationerror:
                    lib.ERR_clear_error()
                    raise pw_info.operationerror
                _errno = ffi.errno
                if _errno:
                    lib.ERR_clear_error()
                    raise OSError(_errno, None)
                else:
                    raise ssl_error(None)
            # Verify that the loaded key matches the loaded certificate.
            ret = lib.SSL_CTX_check_private_key(self.ctx)
            if ret != 1:
                raise ssl_error(None)
        finally:
            ffi.errno = prev_errno
            if index >= 0:
                del PWINFO_STORAGE[index]
            # Restore whatever password callback was installed before.
            lib.SSL_CTX_set_default_passwd_cb(self.ctx, orig_passwd_cb)
            lib.SSL_CTX_set_default_passwd_cb_userdata(self.ctx, orig_passwd_userdata)
def _wrap_socket(self, sock, server_side, server_hostname=None, *,
owner=None, session=None):
if server_hostname:
server_hostname = server_hostname.encode('ascii')
return _SSLSocket._new__ssl_socket(self, sock, server_side,
server_hostname, owner, session, None, None)
    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
        """Load CA certificates used for peer verification.

        cafile: path to a PEM bundle; capath: directory of CA certs;
        cadata: ASCII str of PEM certificates or a bytes-like DER blob.
        At least one source must be supplied.

        Raises TypeError for missing/invalid arguments, OSError for
        file-system failures, and an SSL error when OpenSSL rejects the
        data.
        """
        # errno distinguishes OS failures from OpenSSL parse errors;
        # save and restore the caller's value around the call.
        prev_errno = ffi.errno
        try:
            ffi.errno = 0
            if cadata is None:
                ca_file_type = -1
            else:
                if not isinstance(cadata, str):
                    # bytes-like cadata is treated as DER (ASN.1).
                    ca_file_type = lib.SSL_FILETYPE_ASN1
                else:
                    # str cadata must be ASCII PEM text.
                    ca_file_type = lib.SSL_FILETYPE_PEM
                    try:
                        cadata = cadata.encode('ascii')
                    except UnicodeEncodeError:
                        raise TypeError("cadata should be a ASCII string or a bytes-like object")
            if cafile is None and capath is None and cadata is None:
                raise TypeError("cafile and capath cannot be both omitted")
            # load from cadata
            if cadata is not None:
                buf = _str_to_ffi_buffer(cadata)
                self._add_ca_certs(buf, len(buf), ca_file_type)
            # load cafile or capath
            if cafile is not None or capath is not None:
                if cafile is None:
                    cafilebuf = ffi.NULL
                else:
                    cafilebuf = _str_to_ffi_buffer(cafile)
                if capath is None:
                    capathbuf = ffi.NULL
                else:
                    capathbuf = _str_to_ffi_buffer(capath)
                ret = lib.SSL_CTX_load_verify_locations(self.ctx, cafilebuf, capathbuf)
                if ret != 1:
                    _errno = ffi.errno
                    if _errno:
                        # Genuine OS error, e.g. a missing file.
                        lib.ERR_clear_error()
                        raise OSError(_errno, '')
                    else:
                        raise ssl_error(None)
        finally:
            ffi.errno = prev_errno
    def _add_ca_certs(self, data, size, ca_file_type):
        """Parse certificates from an in-memory buffer into the context's store.

        data/size: raw buffer and its length.
        ca_file_type: lib.SSL_FILETYPE_ASN1 (DER, single cert) or
            lib.SSL_FILETYPE_PEM (possibly several concatenated certs).

        Raises an SSL error when nothing could be loaded or a genuine
        parse/store error occurred.
        """
        biobuf = lib.BIO_new_mem_buf(data, size)
        if biobuf == ffi.NULL:
            raise ssl_error("Can't allocate buffer")
        try:
            store = lib.SSL_CTX_get_cert_store(self.ctx)
            loaded = 0
            # Pull certificates off the BIO one at a time until it is
            # exhausted or an error stops the loop.
            while True:
                if ca_file_type == lib.SSL_FILETYPE_ASN1:
                    cert = lib.d2i_X509_bio(biobuf, ffi.NULL)
                else:
                    cert = lib.PEM_read_bio_X509(biobuf, ffi.NULL,
                                                 lib.SSL_CTX_get_default_passwd_cb(self.ctx),
                                                 lib.SSL_CTX_get_default_passwd_cb_userdata(self.ctx),
                                                 )
                if not cert:
                    break
                try:
                    r = lib.X509_STORE_add_cert(store, cert)
                finally:
                    # The store keeps its own reference; drop ours.
                    lib.X509_free(cert)
                if not r:
                    err = lib.ERR_peek_last_error()
                    if (lib.ERR_GET_LIB(err) == lib.ERR_LIB_X509 and
                        lib.ERR_GET_REASON(err) ==
                        lib.X509_R_CERT_ALREADY_IN_HASH_TABLE):
                        # cert already in hash table, not an error
                        lib.ERR_clear_error()
                    else:
                        break
                loaded += 1
            # Decide whether the loop ended at a clean EOF or a real error.
            err = lib.ERR_peek_last_error()
            if loaded == 0:
                if ca_file_type == lib.SSL_FILETYPE_PEM:
                    msg = "no start line: cadata does not contain a certificate"
                else:
                    msg = "not enough data: cadata does not contain a certificate";
                raise ssl_error(msg)
            elif (ca_file_type == lib.SSL_FILETYPE_ASN1 and
                  loaded > 0 and
                  lib.ERR_GET_LIB(err) == lib.ERR_LIB_ASN1 and
                  lib.ERR_GET_REASON(err) == lib.ASN1_R_HEADER_TOO_LONG):
                # EOF ASN1 file, not an error
                lib.ERR_clear_error()
            elif (ca_file_type == lib.SSL_FILETYPE_PEM and
                  lib.ERR_GET_LIB(err) == lib.ERR_LIB_PEM and
                  lib.ERR_GET_REASON(err) == lib.PEM_R_NO_START_LINE):
                # EOF PEM file, not an error
                lib.ERR_clear_error()
            elif err != 0:
                raise ssl_error(None)
        finally:
            lib.BIO_free(biobuf)
@property
def sni_callback(self):
r"""Set a callback that will be called when a server name is
provided by the SSL/TLS client in the SNI extension.
If the argument is None then the callback is disabled. The method
is called with the SSLSocket, the server name as a string, and the
SSLContext object. See RFC 6066 for details of the SNI
extension.
"""
return self._sni_cb
    @sni_callback.setter
    def sni_callback(self, cb):
        # A TLS_CLIENT context never receives SNI, so installing a
        # callback on one is rejected outright.
        if self._protocol == PROTOCOL_TLS_CLIENT:
            raise ValueError('sni_callback cannot be set on TLS_CLIENT context')
        if not HAS_SNI:
            raise NotImplementedError("The TLS extension servername callback, "
                                      "SSL_CTX_set_tlsext_servername_callback, "
                                      "is not in the current OpenSSL library.")
        if cb is None:
            # Disable: clear both the C callback and its argument, and
            # drop our Python references so the old handle can be freed.
            lib.SSL_CTX_set_tlsext_servername_callback(self.ctx, ffi.NULL)
            self._sni_cb = None
            lib.SSL_CTX_set_tlsext_servername_arg(self.ctx, ffi.NULL)
            self._sni_cb_handle = None
            return
        if not callable(cb):
            # Reject non-callables, leaving the context with no callback.
            lib.SSL_CTX_set_tlsext_servername_callback(self.ctx, ffi.NULL)
            raise TypeError("not a callable object")
        self._sni_cb = GenericCallback(cb, self)
        # Keep the ffi handle alive on self: the C side stores only the
        # raw pointer and will not keep the Python object alive.
        self._sni_cb_handle = sni_cb = ffi.new_handle(self._sni_cb)
        lib.SSL_CTX_set_tlsext_servername_callback(self.ctx, _servername_callback)
        lib.SSL_CTX_set_tlsext_servername_arg(self.ctx, sni_cb)
@property
def _msg_callback(self):
return self._msg_cb
@_msg_callback.setter
def _msg_callback(self, arg, userdata=None):
# userdata is unused
if arg is None:
lib.SSL_CTX_set_msg_callback(self.ctx, ffi.NULL)
self._msg_cb = None
if not callable(arg):
lib.SSL_CTX_set_msg_callback(self.ctx, ffi.NULL)
self._msg_cb = None
raise TypeError('not a callable object')
self._msg_cb = arg
lib.SSL_CTX_set_msg_callback(self.ctx, _msg_callback)
def cert_store_stats(self):
    """Tally the objects in this context's X509 certificate store.

    Returns a dict counting certificates ('x509'), CA certificates
    ('x509_ca', a subset of 'x509') and revocation lists ('crl').
    """
    counts = {'x509': 0, 'x509_ca': 0, 'crl': 0}
    store = lib.SSL_CTX_get_cert_store(self.ctx)
    objs = lib.X509_STORE_get0_objects(store)
    for idx in range(lib.sk_X509_OBJECT_num(objs)):
        entry = lib.sk_X509_OBJECT_value(objs, idx)
        kind = lib.X509_OBJECT_get_type(entry)
        if kind == lib.X509_LU_X509:
            counts['x509'] += 1
            if lib.X509_check_ca(lib.X509_OBJECT_get0_X509(entry)):
                counts['x509_ca'] += 1
        elif kind == lib.X509_LU_CRL:
            counts['crl'] += 1
        # Other types (X509_LU_FAIL/RETRY/PKEY) are internal lookup
        # states and never end up stored in a cert store; ignore them.
    return counts
def session_stats(self):
    """Return OpenSSL session statistics as a name -> value dict."""
    return {name: ssl_func(self.ctx) for name, ssl_func in SSL_CTX_STATS}
def set_default_verify_paths(self):
    """Load the system default CA certificates into this context.

    When neither SSL_CERT_FILE nor SSL_CERT_DIR is set and we are not
    on Windows, the compiled-in default locations are loaded explicitly
    via load_verify_locations; otherwise OpenSSL's own default lookup
    (SSL_CTX_set_default_verify_paths) is used.
    """
    if (not os.environ.get('SSL_CERT_FILE') and
            not os.environ.get('SSL_CERT_DIR') and
            sys.platform != 'win32'):  # idiom fix: was "not ... == 'win32'"
        # locations indices 1 and 3 look like the resolved cafile and
        # capath entries of get_default_verify_paths() -- TODO confirm.
        locations = get_default_verify_paths()
        self.load_verify_locations(locations[1], locations[3])
        return
    if not lib.SSL_CTX_set_default_verify_paths(self.ctx):
        raise ssl_error(None)
def load_dh_params(self, filepath):
    # Load Diffie-Hellman parameters from a PEM file and install them on
    # this context.  ffi.errno is saved/restored around the whole call so
    # the BIO/PEM operations cannot leak a stale errno to the caller.
    sys.audit("open", filepath, 'rb', 0)
    prev_errno = ffi.errno
    try:
        ffi.errno = 0
        if filepath is None:
            raise TypeError("filepath must not be None")
        buf = _fs_converter(filepath)
        mode = ffi.new("char[]", b"rb")
        ffi.errno = 0
        bio = lib.BIO_new_file(buf, mode)
        if bio == ffi.NULL:
            # BIO_new_file failed: surface the OS-level cause via errno.
            _errno = ffi.errno
            lib.ERR_clear_error()
            raise OSError(_errno, '')
        try:
            dh = lib.PEM_read_bio_DHparams(bio, ffi.NULL, ffi.NULL, ffi.NULL)
        finally:
            lib.BIO_free(bio)
        if dh == ffi.NULL:
            # Distinguish OS errors (errno set) from OpenSSL parse errors.
            _errno = ffi.errno
            if _errno != 0:
                lib.ERR_clear_error()
                raise OSError(_errno, '')
            else:
                raise ssl_error(None)
        try:
            if lib.SSL_CTX_set_tmp_dh(self.ctx, dh) == 0:
                raise ssl_error(None)
        finally:
            # We always free our DH copy; SSL_CTX_set_tmp_dh is documented
            # to duplicate the parameters -- confirm against OpenSSL docs.
            lib.DH_free(dh)
    finally:
        ffi.errno = prev_errno
def get_ca_certs(self, binary_form=None):
    """Return the CA certificates loaded into this context.

    With a truthy *binary_form* each certificate is returned as DER
    bytes, otherwise as a decoded dict.
    """
    # Pick the converter once instead of branching inside the loop.
    convert = _certificate_to_der if binary_form else _decode_certificate
    store = lib.SSL_CTX_get_cert_store(self.ctx)
    objs = lib.X509_STORE_get0_objects(store)
    certs = []
    for idx in range(lib.sk_X509_OBJECT_num(objs)):
        entry = lib.sk_X509_OBJECT_value(objs, idx)
        if lib.X509_OBJECT_get_type(entry) != lib.X509_LU_X509:
            # not a x509 cert
            continue
        cert = lib.X509_OBJECT_get0_X509(entry)
        # Only CA certificates (CA for any purpose) are reported.
        if lib.X509_check_ca(cert):
            certs.append(convert(cert))
    return certs
def set_ecdh_curve(self, name):
    """Select elliptic curve *name* (an OpenSSL short name) for
    ephemeral ECDH key exchange on this context.

    Raises TypeError for None and ValueError for an unknown curve.
    """
    if name is None:
        # Previously a bare TypeError(); give the caller a message.
        raise TypeError("name must not be None")
    # _fs_converter yields a NUL-terminated byte string for OBJ_sn2nid.
    buf = _fs_converter(name)
    nid = lib.OBJ_sn2nid(buf)
    if nid == 0:
        raise ValueError("unknown elliptic curve name '%s'" % name)
    key = lib.EC_KEY_new_by_curve_name(nid)
    if not key:
        raise ssl_error(None)
    try:
        lib.SSL_CTX_set_tmp_ecdh(self.ctx, key)
    finally:
        lib.EC_KEY_free(key)
def _set_alpn_protocols(self, protos):
    """Register the wire-format ALPN protocol list *protos* for both
    the client role (SSL_CTX_set_alpn_protos) and the server role
    (selection callback).

    Raises NotImplementedError when OpenSSL lacks ALPN support and
    MemoryError when OpenSSL cannot store the protocol list.
    """
    if not HAS_ALPN:
        raise NotImplementedError("The ALPN extension requires OpenSSL 1.0.2 or later.")
    # Keep a reference on self: OpenSSL stores a pointer into this buffer.
    self.alpn_protocols = protocols = ffi.from_buffer(protos)
    length = len(protocols)
    # SSL_CTX_set_alpn_protos returns non-zero on failure.
    if lib.SSL_CTX_set_alpn_protos(self.ctx, ffi.cast("unsigned char*", protocols), length):
        # BUG FIX: this was "return MemoryError()", which handed the
        # exception object back to the caller instead of raising it.
        raise MemoryError()
    self._alpn_protocols_handle = handle = ffi.new_handle(self)
    lib.SSL_CTX_set_alpn_select_cb(self.ctx, select_alpn_callback, handle)
def _set_npn_protocols(self, protos):
    """Advertise and select NPN protocols from the wire-format list
    *protos*."""
    if not HAS_NPN:
        raise NotImplementedError("The NPN extension requires OpenSSL 1.0.1 or later.")
    # OpenSSL keeps raw pointers, so hold references on self.
    self.npn_protocols = ffi.from_buffer(protos)
    handle = ffi.new_handle(self)
    self._npn_protocols_handle = handle  # track a reference to the handle
    lib.SSL_CTX_set_next_protos_advertised_cb(self.ctx, advertise_npn_callback, handle)
    lib.SSL_CTX_set_next_proto_select_cb(self.ctx, select_npn_callback, handle)
def _wrap_bio(self, incoming, outgoing, server_side, server_hostname, *,
              owner=None, session=None):
    """Create an _SSLSocket driven by BIO pairs instead of an OS socket."""
    # server_hostname is either None (or absent) or must encode as ASCII.
    hostname = None if server_hostname is None else server_hostname.encode("ascii")
    return _SSLSocket._new__ssl_socket(
        self, None, server_side, hostname, owner, session, incoming,
        outgoing)
@property
def post_handshake_auth(self):
    # TLS 1.3 post-handshake-auth flag; None when TLS 1.3 is unavailable.
    return bool(self._post_handshake_auth) if HAS_TLSv1_3 else None
@post_handshake_auth.setter
def post_handshake_auth(self, arg):
    """Enable/disable TLS 1.3 post-handshake authentication.

    Assigning None is rejected so the attribute cannot be "deleted".
    """
    if arg is None:
        raise AttributeError("cannot delete attribute")
    # Store as 0/1, mirroring the C implementation this code tracks.
    self._post_handshake_auth = int(bool(arg))
    # (Removed the stray C-style "return 0;" -- a property setter's
    # return value is ignored in Python.)
# cryptography constraint: OPENSSL_NO_TLSEXT will never be set!
if HAS_SNI:
@ffi.callback("int(SSL*,int*,void*)")
def _servername_callback(s, al, arg):
scb | |
# gh_stars: 1-10
# IDLEX EXTENSION
## """
## Copyright(C) 2011 The Board of Trustees of the University of Illinois.
## All rights reserved.
##
## Developed by: <NAME>
## University of Illinois
##
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal with the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
##
## + Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimers.
## + Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimers in the
## documentation and/or other materials provided with the distribution.
## + Neither the names of <NAME>, the University of Illinois, nor
## the names of its contributors may be used to endorse or promote
## products derived from this Software without specific prior written
## permission.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
## OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR
## ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
## CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
## THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
##
##
##
## Tabbed Editor Window Extension - provide tabs in IDLE's editor
##
## About:
##
## This extenion is a gross hack on the object system of IDLE.
## The first EditorWindow instance gets configured as a TabManager
## and subsequent EditorWindow instances use a duck-typed Frame instead
## of a toplevel Tk object.
##
## The tab bar itself works best under Linux. Under MacOSX, the buttons
## are misplaced. Under Windows, the scroll wheel doesn't move the tabs.
##
## """
# Default extension configuration consumed by idlelib's config machinery:
# enabled for editor windows, disabled for the shell; Ctrl-T opens a new tab.
config_extension_def = """
[TabExtension]
enable=1
enable_shell = 0
always_show = False
[TabExtension_cfgBindings]
tab-new-event=<Control-Key-t>
"""
import sys
if sys.version < '3':
from Tkinter import *
import tkMessageBox
else:
from tkinter import *
import tkinter.messagebox as tkMessageBox
import idlelib.EditorWindow as EditorWindow
import idlelib.WindowList as WindowList
from idlelib.ToolTip import ToolTipBase
import idlelib.ToolTip as ToolTip
import idlelib.FileList as FileList
import idlelib.Bindings as Bindings
from idlelib.configHandler import idleConf
from platform import python_version
# Which edge of the window hosts the tab bar.
TAB_BAR_SIDE = 'top'  # 'bottom'
# Warn before a window close that would discard several tabs at once.
WARN_MULTIPLE_TAB_CLOSING = True
def get_cfg(cfg, type="bool", default=True):
    """Read TabExtension option *cfg* from the IDLE user configuration."""
    return idleConf.GetOption(
        "extensions", "TabExtension", cfg, type=type, default=default)
def set_cfg(cfg, b):
    """Persist TabExtension option *cfg* (stored as its string form)."""
    value = '%s' % b
    return idleConf.SetOption("extensions", "TabExtension", cfg, value)
class TabExtension(object):
    """IDLE extension showing each EditorWindow as a tab of one shared
    Toplevel.

    The first instance builds the TabManager and repacks its editor into
    a duck-typed Frame; later instances detect the manager marker on
    editwin.top and simply add a tab button (see module header).
    """

    menudefs = [
        ('options', [
            ('!Always Show Tabs', '<<tab-show-event>>'),
        ]),]

    def __init__(self, editwin):
        # This is called from a unique "EditorWindow" instance, with its
        # own set of menu/text widgets.
        self.editwin = editwin
        # add "New Tab" to file menu
        self.add_menu_entry()
        # monkey-patching the callbacks to get filename updates into tab bar
        editwin.undo.set_saved_change_hook(self.saved_change_hook)
        def updaterecentfileslist(x):
            editwin.update_recent_files_list(x)
            self.saved_change_hook()  # to reflect opened file names in tab bar
        editwin.io.updaterecentfileslist = updaterecentfileslist
        text = self.editwin.text
        text.bind('<<tab-new-event>>', self.tab_new_event)
        text.bind('<<close-window>>', self.close_window_event)
        self.editwin.setvar("<<tab-show-event>>", get_cfg("always_show"))
        if 'TAB_MANAGER' in dir(editwin.top):
            # An existing manager owns this window: clone the tab master
            # pointers and register as a new tab.
            self.TAB_FRAME = editwin.top  # containing widget
            self.tabmanager = editwin.top.TAB_MANAGER
            self.button = self.add_tab_button()
            editwin.top.TAB_MANAGER = None  # break reference, no longer needed
            editwin.top.wakeup = self.wakeup
            self.button.select()
            return
        # INITIALIZE THE FIRST TAB MANAGER
        text.bind('<<tab-show-event>>', self.toggle_show)
        flist = self.editwin.flist
        self.tabmanager = tabmanager = TabManager(top=self.editwin.top, tab=self, flist=flist)
        tabmanager.ACTIVE = self
        # REPACK the EditorWindow widget contents into a Frame
        TOPLEVEL = self.editwin.top
        F = tabmanager.create_frame()
        F.wakeup = self.wakeup
        for elt in TOPLEVEL.pack_slaves():
            p = elt.pack_info()
            p['in'] = F
            elt.pack(**p)
        F.pack(side='top', fill=BOTH, expand=YES)
        F._lower()  # fix Z-order
        # TODO: repack all grid and place widgets
        self.TAB_FRAME = F  # reference to container frame
        editwin.top = F
        self.button = self.add_tab_button()  # populate tab bar
        TOPLEVEL.after_idle(self.editwin.postwindowsmenu)  # need to change menu

    def add_menu_entry(self):
        # patch "New Tab" into the File Menu
        e = self.editwin
        f = e.menudict['file']
        text = e.text
        eventname = '<<tab-new-event>>'
        def command(text=text, eventname=eventname):
            text.event_generate(eventname)
        keydefs = Bindings.default_keydefs
        accelerator = EditorWindow.get_accelerator(keydefs, eventname)
        f.insert_command(2, label="New Tab", command=command,
                         accelerator=accelerator)

    def toggle_show(self, ev=None):
        # Flip and persist the "always show tab bar" option.
        self.always_show = not get_cfg("always_show")
        set_cfg("always_show", self.always_show)
        self.editwin.setvar("<<tab-show-event>>", self.always_show)
        self.tabmanager.visible_bar()

    def wakeup(self):
        # Called when this editor should come to the front.
        return self.button.select()

    def select(self, event=None):
        return self.tabmanager.tab_wakeup(tabframe=self)

    def closetab(self, event=None):
        return self.tabmanager.close_tab(tabframe=self)

    def add_tab_button(self):
        b = self.tabmanager.addtab(tabframe=self)
        # Tooltip text is produced lazily so it always shows the current path.
        self.tooltip = TabToolTip(b, self.get_filepath)
        return b

    def tab_new_event(self, event=None):
        self.tabmanager.newtab()
        return "break"

    def saved_change_hook(self):
        # Keep the tab button text and tooltip in sync with the file state.
        self.editwin.saved_change_hook()
        self.button.set_text(self.get_title())
        self.tooltip.text = self.get_filepath()

    def save_stars(self, txt):
        """Wrap *txt* in asterisks when the window has unsaved changes."""
        if not self.editwin.get_saved():
            txt = "*%s*" % txt
        return txt

    def get_filepath(self, event=None):
        # Full path for the tooltip, starred when unsaved.
        f = self.editwin.long_title()
        if not f:
            f = 'Untitled'
        return self.save_stars(f)

    def get_title(self, event=None):
        short = self.editwin.short_title()
        # Remove the "Python x.y.z: " prefix the shell window adds.
        if short:
            try:
                pyversion = "Python " + python_version() + ": "
                if short.startswith(pyversion):
                    short = short[len(pyversion):]
            except:
                # NOTE(review): bare except kept as-is; python_version()
                # failing here is unexpected.
                pass
        if not short:
            short = "Untitled"
        return self.save_stars(short)

    def close(self):
        # Drop references so editor widgets can be garbage collected.
        self.editwin = None
        self.TAB_FRAME = None
        self.tooltip = None

    def close_window_event(self, event=None):
        """ Redirect to close the current tab """
        self.button.remove()
        return "break"
class TabToolTip(ToolTipBase):
    """Tooltip for a tab button whose text is produced lazily by a
    callback (the tab's full file path), kept within the screen width."""

    def __init__(self, button, text_callback):
        ToolTipBase.__init__(self, button)
        # Invoked at display time so the tooltip shows the current path.
        self.text_callback = text_callback

    def showcontents(self):
        try:
            text = self.text_callback()
        except Exception:
            # BUG FIX: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt.
            text = ''
        ToolTipBase.showcontents(self, text)

    def schedule(self):
        # Re-arm the 500 ms hover delay, cancelling any pending popup.
        self.unschedule()
        self.id = self.button.after(500, self.showtip)

    def showtip(self):
        # make sure tip is on the screen
        ToolTipBase.showtip(self)
        tipwindow = self.tipwindow
        tipwindow.update_idletasks()
        sw = tipwindow.winfo_screenwidth()
        tw = tipwindow.winfo_width()
        tx = tipwindow.winfo_x()
        ty = tipwindow.winfo_y()
        # Pixels by which the tip overflows the right screen edge.
        delta = tw + tx - sw
        if delta > 0:
            # must shift the tipwindow to the left by delta
            tipwindow.wm_geometry('+%d+%d' % (tx - delta, ty))
class TabManagerList(object):  # for window list
    """Registry of every TabManager window; used while monkey-patching
    WindowList so that newly opened editors land in the active manager."""

    def __init__(self):
        self.clients = []   # all live TabManager instances
        self.ACTIVE = None  # manager whose window last had focus
        self.orig_LTL = WindowList.ListedToplevel  # save original

    def get_frame(self):
        # Frame (fake Toplevel) a newly opened editor should be placed in:
        # the active manager's, else the first registered manager's.
        if self.ACTIVE is not None:
            F = self.ACTIVE.create_frame()
        else:
            if self.clients:
                F = self.clients[0].create_frame()
            else:
                F = None  # should not happen
        return F

    def set_active(self, t):
        if t in self.clients:
            self.ACTIVE = t
            self.postwindowsmenu()
        else:
            # Unknown managers are silently ignored.
            pass

    def postwindowsmenu(self, event=None):
        # FIXME: what does this do again?
        # Appears to rebuild each manager's Windows menu via its editor.
        for t in self.clients:
            if t.active_frame.editwin is not None:
                t.active_frame.editwin.postwindowsmenu()
            else:
                print('null editwin:', t, t.active_frame)

    def add(self, m):
        TOPLEVEL = m.TOPLEVEL
        # Early-bind m so each manager's focus handler selects itself.
        def change(event=None, m=m):
            tabmanagerlist.set_active(m)
        TOPLEVEL.bind('<FocusIn>', change, '+')
        self.clients.append(m)

    def change_manager(self, event=None):
        # NOTE(review): passes *self* (this registry) to set_active, which
        # can never be in self.clients, so this looks like a no-op -- confirm.
        self.set_active(self)

    def remove(self, m):
        if m is self.ACTIVE:
            self.ACTIVE = None
        self.clients.remove(m)
# Module-level singleton used by the WindowList monkey patch below.
tabmanagerlist = TabManagerList()  # This is a stand-in object for ListedTopLevel in WindowList
# MONKEY PATCH - temporarily replace the ListedTopLevel with a Frame
# object in the current TabManager window
def patch(func):
    """Wrap *func* so that, while it runs, WindowList.ListedToplevel is
    temporarily replaced by a factory returning the active TabManager's
    frame.  With no active manager, *func* runs untouched.
    """
    def n(*arg, **kw):
        if tabmanagerlist.ACTIVE is not None:  # are there any toplevel windows?
            orig = WindowList.ListedToplevel  # save original
            def open_patch(*arg, **kw):
                return tabmanagerlist.get_frame()
            WindowList.ListedToplevel = open_patch  # patch it
            try:
                return func(*arg, **kw)  # call function
            finally:
                # BUG FIX: restore even when func raises; previously an
                # exception left WindowList permanently patched.
                WindowList.ListedToplevel = orig
        else:
            return func(*arg, **kw)  # call original function
    return n
FileList.FileList.open = patch(FileList.FileList.open)
class TabManager(object): # for handling an instance of ListedTopLevel
def __init__(self, top=None, tab=None, flist=None):
    # top: the real Toplevel; tab: the founding TabExtension; flist:
    # IDLE's FileList, used later to spawn new editors.
    self.flist = flist
    TOPLEVEL = self.TOPLEVEL = top
    self.TABS = []  # TabExtension instances, in tab-bar order
    self.CLOSE_FRAME = None
    self.active_frame = tab
    # Intercept the window-manager close button for multi-tab handling.
    TOPLEVEL.protocol("WM_DELETE_WINDOW", self.closetoplevel)
    TOPLEVEL.bind('<<tab-show-event>>', self.visible_bar)
    # create a tab bar widget
    tab_bar = self.tab_bar = TabWidget(self.TOPLEVEL)
    tab_bar.config(height=7, relief=GROOVE, bd=1)
    tab_bar.bind('<Button-3>', lambda x: self.tabmenu(event=x))
    tabmanagerlist.add(self)
def create_frame(self):
    # make a FRAME for holding the editors,
    # duck-typing to mimic a Toplevel object
    TOPLEVEL = self.TOPLEVEL
    F = Frame(TOPLEVEL)
    F.state = lambda: "normal"
    F.wm_geometry = TOPLEVEL.wm_geometry
    F.protocol = lambda *args, **kwargs: True  # override protocol requests
    F.wakeup = None  # will be overwritten by TabExtension
    F.wm_title = TOPLEVEL.wm_title  # pass-thru
    F.wm_iconname = TOPLEVEL.wm_iconname  # pass-thru
    F.TAB_MANAGER = self  # INDICATOR
    # Keep the real Frame stacking methods under private names while
    # lift/lower act on the whole window, as EditorWindow expects.
    F._lower = F.lower
    F._lift = F.lift
    F.lift = TOPLEVEL.lift
    F.lower = TOPLEVEL.lower
    F.instance_dict = TOPLEVEL.instance_dict
    F.update_windowlist_registry = TOPLEVEL.update_windowlist_registry
    F.iconbitmap = TOPLEVEL.iconbitmap
    return F
def newtab(self):
    # Open a fresh editor; patch() makes it land in this window as a tab.
    patch(self.flist.new)()
def addtab(self, tabframe=None):
    # Create a tab-bar button for *tabframe* (a TabExtension) and wire
    # its select/close callbacks; returns the new button.
    tab_bar = self.tab_bar
    b = tab_bar.add(text=tabframe.get_title(),
                    select_callback=tabframe.select,
                    remove_callback=tabframe.closetab)
    # Early-bind tabframe so each button's context menu targets itself.
    def mb(event=None, tabframe=tabframe):
        self.tabmenu(event=event, tabframe=tabframe)
    b.totalbind('<Button-3>', mb)
    self.TABS.append(tabframe)
    self.visible_bar()
    return b
def tabmenu(self, event=None, tabframe=None):
    # Right-click context menu: per-tab actions plus a jump list of tabs.
    rmenu = Menu(self.TOPLEVEL, tearoff=0)
    if tabframe is not None:
        rmenu.add_command(label='Close tab', command=tabframe.button.remove)
        rmenu.add_separator()
        rmenu.add_command(label='New tab', command=tabframe.tab_new_event)
        rmenu.add_separator()
    for t in self.TABS:
        label = t.get_title()
        rmenu.add_command(label=label, command=t.button.select)
    # NOTE(review): assumes event is not None -- all visible callers
    # pass a real Tk event.
    rmenu.tk_popup(event.x_root, event.y_root)
def visible_bar(self, ev=None):
a = | |
"""
return _casadi.IM_set(self, *args)
# --- SWIG-generated delegating wrappers for casadi's IM (integer matrix)
# --- class: each method forwards unchanged to the matching _casadi.IM_*
# --- C entry point; the docstrings record the overloaded C++ signatures.
def get_nz(self, *args):
    """
    get_nz(self, bool ind1, Slice k) -> IM
    get_nz(self, bool ind1, IM k) -> IM
    """
    return _casadi.IM_get_nz(self, *args)

def set_nz(self, *args):
    """
    set_nz(self, IM m, bool ind1, Slice k)
    set_nz(self, IM m, bool ind1, IM k)
    """
    return _casadi.IM_set_nz(self, *args)

def __pos__(self, *args):
    """
    __pos__(self) -> IM
    """
    return _casadi.IM___pos__(self, *args)

def __neg__(self, *args):
    """
    __neg__(self) -> IM
    """
    return _casadi.IM___neg__(self, *args)

def binary(*args):
    """
    binary(int op, IM x, IM y) -> IM
    """
    return _casadi.IM_binary(*args)
binary = staticmethod(binary)

def unary(*args):
    """
    unary(int op, IM x) -> IM
    """
    return _casadi.IM_unary(*args)
unary = staticmethod(unary)

def scalar_matrix(*args):
    """
    scalar_matrix(int op, IM x, IM y) -> IM
    """
    return _casadi.IM_scalar_matrix(*args)
scalar_matrix = staticmethod(scalar_matrix)

def matrix_scalar(*args):
    """
    matrix_scalar(int op, IM x, IM y) -> IM
    """
    return _casadi.IM_matrix_scalar(*args)
matrix_scalar = staticmethod(matrix_scalar)

def matrix_matrix(*args):
    """
    matrix_matrix(int op, IM x, IM y) -> IM
    """
    return _casadi.IM_matrix_matrix(*args)
matrix_matrix = staticmethod(matrix_matrix)

def printme(self, *args):
    """
    printme(self, IM y) -> IM
    """
    return _casadi.IM_printme(self, *args)

def set_max_depth(*args):
    """
    set_max_depth(int eq_depth)
    """
    return _casadi.IM_set_max_depth(*args)
set_max_depth = staticmethod(set_max_depth)

def get_max_depth(*args):
    """
    get_max_depth() -> int
    """
    return _casadi.IM_get_max_depth(*args)
get_max_depth = staticmethod(get_max_depth)

def get_input(*args):
    """
    get_input(Function f) -> std::vector< casadi::Matrix< long long >,std::allocator< casadi::Matrix< casadi_int > > >
    """
    return _casadi.IM_get_input(*args)
get_input = staticmethod(get_input)

def get_free(*args):
    """
    get_free(Function f) -> std::vector< casadi::Matrix< long long >,std::allocator< casadi::Matrix< casadi_int > > >
    """
    return _casadi.IM_get_free(*args)
get_free = staticmethod(get_free)

def type_name(*args):
    """
    type_name() -> str
    """
    return _casadi.IM_type_name(*args)
type_name = staticmethod(type_name)

def print_split(self, *args):
    """
    print_split(self) -> ([str] OUTPUT, [str] OUTPUT)
    """
    return _casadi.IM_print_split(self, *args)

def disp(self, *args):
    """
    Print a representation of the object.
    disp(self, bool more)
    """
    return _casadi.IM_disp(self, *args)

def str(self, *args):
    """
    Get string representation.
    str(self, bool more) -> str
    """
    return _casadi.IM_str(self, *args)

def print_scalar(self, *args):
    """
    Print scalar.
    print_scalar(self)
    """
    return _casadi.IM_print_scalar(self, *args)

def print_vector(self, *args):
    """
    Print vector-style.
    print_vector(self, bool truncate)
    """
    return _casadi.IM_print_vector(self, *args)

def print_dense(self, *args):
    """
    Print dense matrix-style.
    print_dense(self, bool truncate)
    """
    return _casadi.IM_print_dense(self, *args)

def print_sparse(self, *args):
    """
    Print sparse matrix style.
    print_sparse(self, bool truncate)
    """
    return _casadi.IM_print_sparse(self, *args)

def clear(self, *args):
    """
    clear(self)
    """
    return _casadi.IM_clear(self, *args)

def resize(self, *args):
    """
    resize(self, int nrow, int ncol)
    """
    return _casadi.IM_resize(self, *args)

def reserve(self, *args):
    """
    reserve(self, int nnz)
    reserve(self, int nnz, int ncol)
    """
    return _casadi.IM_reserve(self, *args)

def erase(self, *args):
    """
    Erase a submatrix (leaving structural zeros in its place) Erase elements of
    erase(self, [int] rr, bool ind1)
    erase(self, [int] rr, [int] cc, bool ind1)
    Erase a submatrix (leaving structural zeros in its place) Erase rows and/or
    a matrix.

    > erase(self, [int] rr, bool ind1)
    ------------------------------------------------------------------------
    Erase a submatrix (leaving structural zeros in its place) Erase elements of
    a matrix.

    > erase(self, [int] rr, [int] cc, bool ind1)
    ------------------------------------------------------------------------
    Erase a submatrix (leaving structural zeros in its place) Erase rows and/or
    columns of a matrix.
    """
    return _casadi.IM_erase(self, *args)

def remove(self, *args):
    """
    Remove columns and rows Remove/delete rows and/or columns of a matrix.
    remove(self, [int] rr, [int] cc)
    """
    return _casadi.IM_remove(self, *args)

def enlarge(self, *args):
    """
    Enlarge matrix Make the matrix larger by inserting empty rows and columns,
    enlarge(self, int nrow, int ncol, [int] rr, [int] cc, bool ind1)
    keeping the existing non-zeros.
    """
    return _casadi.IM_enlarge(self, *args)

def sparsity(self, *args):
    """
    Get an owning reference to the sparsity pattern.
    sparsity(self) -> Sparsity
    """
    return _casadi.IM_sparsity(self, *args)

def triplet(*args):
    """
    triplet([int] row, [int] col, IM d) -> IM
    triplet([int] row, [int] col, IM d, (int,int) rc) -> IM
    triplet([int] row, [int] col, IM d, int nrow, int ncol) -> IM
    """
    return _casadi.IM_triplet(*args)
triplet = staticmethod(triplet)

def inf(*args):
    """
    create a matrix with all inf
    inf(int nrow, int ncol) -> IM
    inf((int,int) rc) -> IM
    inf(Sparsity sp) -> IM
    """
    return _casadi.IM_inf(*args)
inf = staticmethod(inf)

def nan(*args):
    """
    create a matrix with all nan
    nan(int nrow, int ncol) -> IM
    nan((int,int) rc) -> IM
    nan(Sparsity sp) -> IM
    """
    return _casadi.IM_nan(*args)
nan = staticmethod(nan)

def eye(*args):
    """
    eye(int ncol) -> IM
    """
    return _casadi.IM_eye(*args)
eye = staticmethod(eye)
# --- SWIG-generated delegating wrappers for casadi's IM, continued:
# --- predicates, element access, printing configuration and factories.
def element_hash(self, *args):
    """
    element_hash(self) -> int
    """
    return _casadi.IM_element_hash(self, *args)

def is_regular(self, *args):
    """
    is_regular(self) -> bool
    """
    return _casadi.IM_is_regular(self, *args)

def is_smooth(self, *args):
    """
    is_smooth(self) -> bool
    """
    return _casadi.IM_is_smooth(self, *args)

def is_leaf(self, *args):
    """
    is_leaf(self) -> bool
    """
    return _casadi.IM_is_leaf(self, *args)

def is_commutative(self, *args):
    """
    is_commutative(self) -> bool
    """
    return _casadi.IM_is_commutative(self, *args)

def is_symbolic(self, *args):
    """
    is_symbolic(self) -> bool
    """
    return _casadi.IM_is_symbolic(self, *args)

def is_valid_input(self, *args):
    """
    is_valid_input(self) -> bool
    """
    return _casadi.IM_is_valid_input(self, *args)

def has_duplicates(self, *args):
    """
    has_duplicates(self) -> bool
    """
    return _casadi.IM_has_duplicates(self, *args)

def reset_input(self, *args):
    """
    reset_input(self)
    """
    return _casadi.IM_reset_input(self, *args)

def is_constant(self, *args):
    """
    Check if the matrix is constant (note that false negative answers are
    is_constant(self) -> bool
    possible)
    """
    return _casadi.IM_is_constant(self, *args)

def is_integer(self, *args):
    """
    Check if the matrix is integer-valued (note that false negative answers are
    is_integer(self) -> bool
    possible)
    """
    return _casadi.IM_is_integer(self, *args)

def is_zero(self, *args):
    """
    check if the matrix is 0 (note that false negative answers are possible)
    is_zero(self) -> bool
    """
    return _casadi.IM_is_zero(self, *args)

def is_one(self, *args):
    """
    check if the matrix is 1 (note that false negative answers are possible)
    is_one(self) -> bool
    """
    return _casadi.IM_is_one(self, *args)

def is_minus_one(self, *args):
    """
    check if the matrix is -1 (note that false negative answers are possible)
    is_minus_one(self) -> bool
    """
    return _casadi.IM_is_minus_one(self, *args)

def is_eye(self, *args):
    """
    check if the matrix is an identity matrix (note that false negative answers
    is_eye(self) -> bool
    are possible)
    """
    return _casadi.IM_is_eye(self, *args)

def op(self, *args):
    """
    op(self) -> int
    """
    return _casadi.IM_op(self, *args)

def is_op(self, *args):
    """
    is_op(self, int op) -> bool
    """
    return _casadi.IM_is_op(self, *args)

def has_zeros(self, *args):
    """
    Check if the matrix has any zero entries which are not structural zeros.
    has_zeros(self) -> bool
    """
    return _casadi.IM_has_zeros(self, *args)

def nonzeros(self, *args):
    """
    Get all nonzeros.
    nonzeros(self) -> [int]
    Implementation of Matrix::get_nonzeros (in public API)
    """
    return _casadi.IM_nonzeros(self, *args)

def elements(self, *args):
    """
    Get all elements.
    elements(self) -> [int]
    """
    return _casadi.IM_elements(self, *args)

def __float__(self, *args):
    """
    __float__(self) -> float
    """
    return _casadi.IM___float__(self, *args)

def __int__(self, *args):
    """
    __int__(self) -> int
    """
    return _casadi.IM___int__(self, *args)

def name(self, *args):
    """
    name(self) -> str
    """
    return _casadi.IM_name(self, *args)

def dep(self, *args):
    """
    dep(self, int ch) -> IM
    """
    return _casadi.IM_dep(self, *args)

def n_dep(self, *args):
    """
    n_dep(self) -> int
    """
    return _casadi.IM_n_dep(self, *args)

def set_precision(*args):
    """
    Set the 'precision, width & scientific' used in printing and serializing to
    set_precision(int precision)
    streams.
    """
    return _casadi.IM_set_precision(*args)
set_precision = staticmethod(set_precision)

def set_width(*args):
    """
    Set the 'precision, width & scientific' used in printing and serializing to
    set_width(int width)
    streams.
    """
    return _casadi.IM_set_width(*args)
set_width = staticmethod(set_width)

def set_scientific(*args):
    """
    Set the 'precision, width & scientific' used in printing and serializing to
    set_scientific(bool scientific)
    streams.
    """
    return _casadi.IM_set_scientific(*args)
set_scientific = staticmethod(set_scientific)

def rng(*args):
    """
    rng(int seed)
    """
    return _casadi.IM_rng(*args)
rng = staticmethod(rng)

def rand(*args):
    """
    Create a matrix with uniformly distributed random numbers.
    rand(int nrow, int ncol) -> IM
    rand((int,int) rc) -> IM
    rand(Sparsity sp) -> IM
    """
    return _casadi.IM_rand(*args)
rand = staticmethod(rand)

def export_code(self, *args):
    """
    Export matrix in specific language.
    export_code(self, str lang, dict options)
    lang: only 'matlab' supported for now
    ::

    * options:
    *   inline: Indicates if you want everything on a single line (default: False)
    *   name: Name of exported variable (default: 'm')
    *   indent_level: Level of indentation (default: 0)
    *   spoof_zero: Replace numerical zero by a 1e-200 (default: false)
    *               might be needed for matlab sparse construct,
    *               which doesn't allow numerical zero
    *
    """
    return _casadi.IM_export_code(self, *args)

def info(self, *args):
    """
    Obtain information about sparsity
    info(self) -> dict
    """
    return _casadi.IM_info(self, *args)

def from_info(*args):
    """
    from_info(dict info) -> IM
    """
    return _casadi.IM_from_info(*args)
from_info = staticmethod(from_info)
def to_file(self, *args):
"""
Export numerical matrix to file
to_file(self, str filename, | |
# Taxonomie_interface.py
from tkinter import *
from tkscrolledframe import ScrolledFrame
import os
import pathlib
import xml.etree.ElementTree as ET
class TAX_Interface():
def __init__(self, bg_color, button_color, label_color, Button_Font, Label_Font):
self.bg_color = bg_color
self.button_color = button_color
self.label_color = label_color
self.Button_Font = Button_Font
self.Label_Font = Label_Font
def open_tax_window(self):
    """Ask the user for an ILIAS question-pool directory and build the
    taxonomy editor window for it: a scrollable question-ID list, the
    node-assignment boxes and the taxonomy-tree controls.
    """
    # BUG FIX: "from tkinter import *" does not bind the filedialog
    # submodule, so the bare name raised NameError under Python 3.
    from tkinter import filedialog
    # Select the question pool folder.
    self.select_taxonomy_file = filedialog.askdirectory(initialdir=pathlib.Path().absolute(), title="Select a File")
    self.folder_name = self.select_taxonomy_file.rsplit('/', 1)[-1]
    # The pool folder name encodes the qti file name:
    # first 15 characters + "qti_" + trailing id after the last underscore.
    self.folder_name_split1 = self.folder_name[:15]
    self.folder_name_split2 = self.folder_name.rsplit('_', 1)[-1]
    self.taxonomy_exportXML_file = os.path.normpath(os.path.join(self.select_taxonomy_file, 'Services', 'Taxonomy', 'set_1', 'export.xml'))
    self.taxonomy_file_write = self.taxonomy_exportXML_file
    self.taxonomy_qtiXML_file = os.path.normpath(os.path.join(self.select_taxonomy_file, self.folder_name_split1 + "qti_" + self.folder_name_split2 + ".xml"))
    self.taxonomy_file_read = os.path.normpath(os.path.join(self.select_taxonomy_file, 'Services', 'Taxonomy', 'set_1', 'export.xml'))
    # Taxonomy window dimensions.
    self.taxonomy_width = 1000
    self.taxonomy_height = 800
    # Create the new "Taxonomie" window.
    # New Window must be "Toplevel" not "Tk()" in order to get Radiobuttons to work properly
    self.taxonomy_window = Toplevel()
    self.taxonomy_window.title("Taxonomie --- " + str(self.select_taxonomy_file))
    # Create a ScrolledFrame widget and the frame inside it.
    self.sf_taxonomy = ScrolledFrame(self.taxonomy_window, width=self.taxonomy_width, height=self.taxonomy_height)
    self.sf_taxonomy.pack(expand=1, fill="both")
    self.taxonomy = self.sf_taxonomy.display_widget(Frame)
    self.taxonomy_frame_labels_scroll = LabelFrame(self.taxonomy, text="Fragen ID's", padx=5, pady=5)
    self.taxonomy_frame_labels_scroll.grid(row=0, column=0, padx=20, pady=10, sticky=NW)
    self.taxonomy_frame_labels2 = ScrolledFrame(self.taxonomy_frame_labels_scroll, height=700, width=500)
    self.taxonomy_frame_labels2.pack(expand=1, fill="both")
    self.taxonomy_frame_labels = self.taxonomy_frame_labels2.display_widget(Frame)
    self.taxonomy_frame_boxes = LabelFrame(self.taxonomy, text="Fragen ID's", padx=5, pady=5)
    self.taxonomy_frame_boxes.grid(row=0, column=1, padx=20, pady=10, sticky=NW)
    self.taxonomy_frame_tree = LabelFrame(self.taxonomy, text="Taxonomie Baum", padx=5, pady=5)
    self.taxonomy_frame_tree.grid(row=0, column=1, padx=20, pady=200, sticky=NW)
    # Labels and entries: question-ID range assigned to a node.
    self.label_starting_id = Label(self.taxonomy_frame_boxes, text="von Fragen ID")
    self.label_starting_id.grid(sticky=W, pady=5, row=0, column=0)
    self.starting_id_var = StringVar()
    self.ending_id_var = StringVar()
    self.taxonomy_name = StringVar()
    self.tax_node_name = StringVar()
    self.tax_node_parent = StringVar()
    self.entry_starting_id = Entry(self.taxonomy_frame_boxes, textvariable=self.starting_id_var, width=10)
    self.entry_starting_id.grid(sticky=W, pady=5, row=1, column=0)
    self.label_ending_id = Label(self.taxonomy_frame_boxes, text="bis Fragen ID")
    self.label_ending_id.grid(sticky=W, padx=10, pady=5, row=0, column=1)
    self.entry_ending_id = Entry(self.taxonomy_frame_boxes, textvariable=self.ending_id_var, width=10)
    self.entry_ending_id.grid(sticky=W, padx=10, pady=5, row=1, column=1)
    self.taxonomy_name_label = Label(self.taxonomy_frame_tree, text="Name für Taxonomie")
    self.taxonomy_name_label.grid(sticky=W, padx=10, pady=5, row=0, column=0)
    self.taxonomy_name_entry = Entry(self.taxonomy_frame_tree, textvariable=self.taxonomy_name, width=20)
    self.taxonomy_name_entry.grid(sticky=W, padx=10, pady=5, row=0, column=1)
    self.tax_node_name_label = Label(self.taxonomy_frame_tree, text="Name für Knoten")
    self.tax_node_name_label.grid(sticky=W, padx=10, pady=5, row=1, column=0)
    self.tax_node_name_entry = Entry(self.taxonomy_frame_tree, textvariable=self.tax_node_name, width=20)
    self.tax_node_name_entry.grid(sticky=W, padx=10, pady=5, row=1, column=1)
    self.tax_node_parent_label = Label(self.taxonomy_frame_tree, text="Vaterknoten")
    self.tax_node_parent_label.grid(sticky=W, padx=10, pady=5, row=2, column=0)
    self.tax_node_parent_entry = Entry(self.taxonomy_frame_tree, textvariable=self.tax_node_parent, width=20)
    self.tax_node_parent_entry.grid(sticky=W, padx=10, pady=5, row=2, column=1)
    # Buttons for node assignment and tree manipulation.
    self.assign_to_node_btn = Button(self.taxonomy_frame_boxes, text="Fragen dem Knoten\nhinzufügen", command=lambda: TAX_Interface.assign_questions_to_node(self))
    self.assign_to_node_btn.grid(row=4, column=0, sticky=W, pady=(20, 0))
    self.remove_from_node_btn = Button(self.taxonomy_frame_boxes, text="Fragen von Knoten\nentfernen", command=lambda: TAX_Interface.remove_question_from_node(self))
    self.remove_from_node_btn.grid(row=4, column=1, sticky=W, padx=5, pady=(20, 0))
    self.tax_add_node_btn = Button(self.taxonomy_frame_tree, text="Knoten hinzufügen", command=lambda: TAX_Interface.add_node_to_tax(self))
    self.tax_add_node_btn.grid(row=6, column=0, sticky=W, padx=5, pady=(20, 0))
    self.update_taxonomy_name_btn = Button(self.taxonomy_frame_tree, text="Taxonomie-Namen\naktualisieren", command=lambda: TAX_Interface.update_taxonomy_name(self))
    self.update_taxonomy_name_btn.grid(row=0, column=2, sticky=E, padx=5, pady=(5, 0))
    self.tax_remove_node_btn = Button(self.taxonomy_frame_tree, text="Knoten entfernen", command=lambda: TAX_Interface.remove_node_from_tax(self))
    self.tax_remove_node_btn.grid(row=6, column=1, sticky=W, padx=5, pady=(20, 0))
    self.tax_reallocate_btn = Button(self.taxonomy_frame_tree, text="Taxonomie-Datei\nneu anordnen", command=lambda: TAX_Interface.tax_reallocate(self))
    self.tax_reallocate_btn.grid(row=5, column=2, sticky=W, padx=5, pady=(20, 0))
def edit_tax_of_existing_ilias_pool_file(self):
    """Open the taxonomy editor for an already exported ILIAS pool file.

    Workflow: open the taxonomy window, normalise the XML namespace
    prefixes of the export file in place, parse the question/taxonomy
    data, then render the taxonomy tree.
    """
    TAX_Interface.open_tax_window(self)
    # Rewrite the export file's namespace prefixes first so the parsers
    # below see the prefixes they expect.
    TAX_Interface.tax_file_refresh(self, self.taxonomy_exportXML_file)
    TAX_Interface.read_taxonomy_file(self)
    TAX_Interface.scan_tax_tree(self)
def read_taxonomy_file(self):
    """Parse the question-pool QTI file and the taxonomy export file.

    First pass: collect the ident/title of every question item in the QTI
    file and create one label row per question in ``taxonomy_frame_labels``.
    Second pass: parse the taxonomy export, locate the root taxonomy,
    collect its nodes and the question-to-node assignments, and show the
    assigned node next to each question label.  Finally refresh the node
    combobox.

    Populates (among others): ``item_id_list``, ``item_title_list``,
    ``item_labels_list``, ``root_node``, ``id_to_node_dict``,
    ``node_to_id_dict``, ``item_in_node``, ``item_tag``, ``item_nr_list``.
    """
    #self.taxonomy_qtiXML_file = taxonomy_qtiXML_file
    print("read")
    print(self.taxonomy_qtiXML_file)
    self.mytree = ET.parse(self.taxonomy_qtiXML_file)
    self.myroot = self.mytree.getroot()
    self.item_id_list = []
    self.item_title_list = []
    self.item_id_var = 0      # grid row counter for the ID column
    self.item_title_var = 0   # grid row counter for the title column
    self.item_labels_list = []
    self.combobox_list = []
    # Collect the ident and title of every question item in the pool.
    for item in self.myroot.iter('item'):
        self.item_id_raw = str(item.get('ident'))
        # The numeric question ID is the last '_'-separated part of the ident.
        self.item_id = self.item_id_raw.rsplit('_', 1)[-1]
        self.item_title = str(item.get('title'))
        self.item_id_list.append(self.item_id)
        self.item_title_list.append(self.item_title)
        # print(len(self.ident))
    # One label row per question: ID (col 0), placeholder (col 1).
    for id_text in self.item_id_list:
        label_id = Label(self.taxonomy_frame_labels, text=id_text)
        label_id.grid(sticky=W, pady=5, row=self.item_id_var, column=0)
        self.item_labels_list.append(str(label_id.cget("text")))
        print("Label ID: " + str(label_id.cget("text")))
        label_placeholder = Label(self.taxonomy_frame_labels, text=" ---- ")
        label_placeholder.grid(sticky=W, pady=5, row=self.item_id_var, column=1)
        self.item_id_var = self.item_id_var + 1
    # Question titles go into column 2, same row order as the IDs.
    for title_text in self.item_title_list:
        label_title = Label(self.taxonomy_frame_labels, text=title_text)
        label_title.grid(sticky=W, pady=5, row=self.item_title_var, column=2)
        self.item_title_var = self.item_title_var + 1
    ##### - Read the taxonomy levels - ####
    print("\n")
    print("---- Taxonomie auslesen")
    self.mytree = ET.parse(self.taxonomy_file_read)
    self.myroot = self.mytree.getroot()
    self.tax_title = []
    self.child_tag = []
    self.node_tag = []
    self.item_in_node = []
    self.item_tag = []
    self.root_node = "000000"
    self.id_to_node_dict = {}
    self.item_nr_list = []
    # Read the root ID.  This ID identifies the "main trunk" of the taxonomy.
    # The root ID is preset to "000000" so we can check whether it got
    # overwritten in the next step.
    for Tax in self.myroot.iter('{http://www.ilias.de/Services/Taxonomy/tax/4_3}Tax'):
        self.root_node = Tax.find('{http://www.ilias.de/Services/Taxonomy/tax/4_3}Id').text
    if self.root_node != "000000":
        print("Root Node found: " + str(self.root_node))
    else:
        print("No Root ID in File!")
    # ---- Find all levels in the document ---- #
    for TaxTree in self.myroot.iter('{http://www.ilias.de/Services/Taxonomy/tax/4_3}TaxTree'):
        if TaxTree.find('{http://www.ilias.de/Services/Taxonomy/tax/4_3}TaxId').text == str(self.root_node):
            self.child_tag.append(TaxTree.find('{http://www.ilias.de/Services/Taxonomy/tax/4_3}Child').text)
            self.node_tag.append(TaxTree.find('{http://www.ilias.de/Services/Taxonomy/tax/4_3}Title').text)
    print("Nodes found: " + str(self.node_tag))
    print("with Child ID: " + str(self.child_tag))
    # convert list "child tag" and list "node_tag" to dictionary
    self.id_to_node_dict = dict(zip(self.child_tag, self.node_tag))
    self.node_to_id_dict = dict(zip(self.node_tag, self.child_tag))
    # print(self.id_to_node_dict)
    print("------------------------------------------------")
    print("\n")
    # print("------- Show Question assignments -------")
    # For every node, find the questions assigned to it.
    for i in range(len(self.child_tag)):
        for tax_node in self.myroot.iter('{http://www.ilias.de/Services/Taxonomy/tax/4_3}TaxNodeAssignment'):
            if tax_node.find('{http://www.ilias.de/Services/Taxonomy/tax/4_3}NodeId').text == str(
                    self.child_tag[i]):  # e.g. the level-1 ID
                self.item_in_node.append(str(self.child_tag[i]))
                self.item_tag.append(
                    tax_node.find('{http://www.ilias.de/Services/Taxonomy/tax/4_3}ItemId').text)
                self.item_nr_list.append(self.item_labels_list.index(
                    tax_node.find('{http://www.ilias.de/Services/Taxonomy/tax/4_3}ItemId').text))
    # Show the assigned node title in column 4 of the question's label row.
    for i in range(len(self.item_nr_list)):
        label_taxnode = Label(self.taxonomy_frame_labels,
                              text=" --- " + str(self.id_to_node_dict.get(self.item_in_node[i])))
        label_taxnode.grid(sticky=W, pady=5, row=self.item_labels_list.index(self.item_tag[i]), column=4)
    # Check whether the questions in the pool are consistent (consecutive IDs).
    self.check_question_id_start = str(self.item_labels_list[0])
    self.check_question_id_end = str(self.item_labels_list[len(self.item_labels_list) - 1])
    self.check_question_id_counter = int(self.check_question_id_start)
    # for i in range(len(self.item_labels_list)):
    #     if int(self.item_labels_list[i]) != int(self.check_question_id_counter):
    #         print("Error in Labels list", self.item_labels_list[i], self.check_question_id_counter)
    #     self.check_question_id_counter = self.check_question_id_counter + 1
    # print("Label-check DONE")
    TAX_Interface.tax_combobox_refresh(self)
def tax_combobox_refresh(self):
    """Rebuild the taxonomy-node combobox from the currently parsed tree.

    Collects the titles of all nodes belonging to the root taxonomy,
    sorts them case-insensitively, and replaces the node combobox in
    ``taxonomy_frame_boxes`` with a fresh one preselecting the first entry.
    """
    ns = '{http://www.ilias.de/Services/Taxonomy/tax/4_3}'
    # ---- Find all levels in the document ---- #
    titles = [
        tree.find(ns + 'Title').text
        for tree in self.myroot.iter(ns + 'TaxTree')
        if tree.find(ns + 'TaxId').text == str(self.root_node)
    ]
    titles.sort(key=str.lower)
    self.node_tag_update = titles
    combo = ttk.Combobox(self.taxonomy_frame_boxes, value=self.node_tag_update, width=30)
    combo.current(0)
    # combo.bind("<<ComboboxSelected>>", selected_var)
    combo.grid(row=1, column=2, sticky=W, padx=10, pady=5)
    self.tax_nodes_myCombo = combo
def tax_file_refresh(self, file_location):
    """Normalise the namespace prefixes of an exported taxonomy XML file.

    ElementTree serialises the ILIAS namespaces with generic ``ns0``/``ns2``/
    ``ns3`` prefixes; ILIAS expects ``exp``/``ds``/default instead.  The file
    is rewritten in place and its path is remembered in ``self.file_location``.

    Parameters
    ----------
    file_location : str
        Path of the taxonomy export XML file to rewrite.
    """
    self.file_location = file_location
    # print("refresh_file_location: " + str(self.file_location))
    # Read/write explicitly as UTF-8: the XML export is UTF-8, and the
    # platform-default encoding (e.g. cp1252 on Windows) would corrupt
    # the German umlauts on rewrite.
    with open(self.file_location, 'r', encoding='utf-8') as xml_file:
        xml_str = xml_file.read()
    # Map the generic ElementTree prefixes back to the ILIAS ones.
    xml_str = xml_str.replace('ns0:', 'exp:')
    xml_str = xml_str.replace('ns2:', 'ds:')
    # Drop the ns3 prefix entirely; tax becomes the default namespace.
    xml_str = xml_str.replace('ns3:', '')
    # Two variants of the root element occur (attribute order differs between
    # exports); replace whichever one is present with the canonical header.
    xml_str = xml_str.replace(
        '<exp:Export xmlns:ns0="http://www.ilias.de/Services/Export/exp/4_1" xmlns:ns2="http://www.ilias.de/Services/DataSet/ds/4_3" xmlns:ns3="http://www.ilias.de/Services/Taxonomy/tax/4_3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" InstallationId="0" InstallationUrl="https://ilias.th-koeln.de" Entity="tax" SchemaVersion="4.3.0" TargetRelease="5.4.0" xsi:schemaLocation="http://www.ilias.de/Services/Export/exp/4_1 https://ilias.th-koeln.de/xml/ilias_export_4_1.xsd http://www.ilias.de/Services/Taxonomy/tax/4_3 https://ilias.th-koeln.de/xml/ilias_tax_4_3.xsd http://www.ilias.de/Services/DataSet/ds/4_3 https://ilias.th-koeln.de/xml/ilias_ds_4_3.xsd">',
        '<exp:Export InstallationId="0" InstallationUrl="https://ilias.th-koeln.de" Entity="tax" SchemaVersion="4.3.0" TargetRelease="5.4.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:exp="http://www.ilias.de/Services/Export/exp/4_1" xsi:schemaLocation="http://www.ilias.de/Services/Export/exp/4_1 https://ilias.th-koeln.de/xml/ilias_export_4_1.xsd http://www.ilias.de/Services/Taxonomy/tax/4_3 https://ilias.th-koeln.de/xml/ilias_tax_4_3.xsd http://www.ilias.de/Services/DataSet/ds/4_3 https://ilias.th-koeln.de/xml/ilias_ds_4_3.xsd" xmlns="http://www.ilias.de/Services/Taxonomy/tax/4_3" xmlns:ds="http://www.ilias.de/Services/DataSet/ds/4_3">')
    xml_str = xml_str.replace(
        '<exp:Export xmlns:ns0="http://www.ilias.de/Services/Export/exp/4_1" xmlns:ns2="http://www.ilias.de/Services/DataSet/ds/4_3" xmlns:ns3="http://www.ilias.de/Services/Taxonomy/tax/4_3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" Entity="tax" InstallationId="0" InstallationUrl="https://ilias.th-koeln.de" SchemaVersion="4.3.0" TargetRelease="5.4.0" xsi:schemaLocation="http://www.ilias.de/Services/Export/exp/4_1 https://ilias.th-koeln.de/xml/ilias_export_4_1.xsd http://www.ilias.de/Services/Taxonomy/tax/4_3 https://ilias.th-koeln.de/xml/ilias_tax_4_3.xsd http://www.ilias.de/Services/DataSet/ds/4_3 https://ilias.th-koeln.de/xml/ilias_ds_4_3.xsd">',
        '<exp:Export InstallationId="0" InstallationUrl="https://ilias.th-koeln.de" Entity="tax" SchemaVersion="4.3.0" TargetRelease="5.4.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:exp="http://www.ilias.de/Services/Export/exp/4_1" xsi:schemaLocation="http://www.ilias.de/Services/Export/exp/4_1 https://ilias.th-koeln.de/xml/ilias_export_4_1.xsd http://www.ilias.de/Services/Taxonomy/tax/4_3 https://ilias.th-koeln.de/xml/ilias_tax_4_3.xsd http://www.ilias.de/Services/DataSet/ds/4_3 https://ilias.th-koeln.de/xml/ilias_ds_4_3.xsd" xmlns="http://www.ilias.de/Services/Taxonomy/tax/4_3" xmlns:ds="http://www.ilias.de/Services/DataSet/ds/4_3">')
    with open(self.file_location, 'w', encoding='utf-8') as replaced_xml_file:
        replaced_xml_file.write(xml_str)
def scan_tax_tree(self):
    """Render the taxonomy tree as an indented list of labels.

    Re-parses the taxonomy export, collects each node's Child/Parent/
    Depth/Title/OrderNr values, and draws one label per node inside a
    scrollable frame.  Labels are prefixed with their ancestor titles
    (joined with "===>"), sorted alphabetically, and indented by depth.
    """
    self.mytree = ET.parse(self.taxonomy_file_read)
    self.myroot = self.mytree.getroot()
    self.taxonomy_frame_tree_picture_scroll = LabelFrame(self.taxonomy, text="Taxonomie Bild", padx=5, pady=5)
    self.taxonomy_frame_tree_picture_scroll.grid(row=0, column=1, padx=20, pady=450, sticky=NW)
    self.taxonomy_frame_tree_picture2 = ScrolledFrame(self.taxonomy_frame_tree_picture_scroll, height=250, width=200)
    self.taxonomy_frame_tree_picture2.pack(expand=1, fill="both")
    ### Bind the arrow keys and scroll wheel
    ### Has no effect, but produces (negligible) errors
    #self.taxonomy_frame_tree_picture2.bind_arrow_keys(app)
    #self.taxonomy_frame_tree_picture2.bind_scroll_wheel(app)
    self.taxonomy_frame_tree_picture = self.taxonomy_frame_tree_picture2.display_widget(Frame)
    self.collect_childs = []
    self.collect_title = []
    self.collect_depth = []
    self.collect_parent = []
    self.collect_order_nr = []
    self.collect_labels_sorted = []
    self.tax_data = []
    self.id_to_depth_dict = {}
    self.parentId_to_title_dict = {}
    self.parentId_from_id_dict = {}
    self.title_to_id_dict = {}
    # Search the taxonomy file for the main level (ID and name).
    for TaxId in self.myroot.iter('{http://www.ilias.de/Services/Taxonomy/tax/4_3}Tax'):
        if TaxId.find('{http://www.ilias.de/Services/Taxonomy/tax/4_3}Id').text == str(self.root_node):
            self.tax_root_id = TaxId.find('{http://www.ilias.de/Services/Taxonomy/tax/4_3}Id').text
            self.tax_root_label = TaxId.find('{http://www.ilias.de/Services/Taxonomy/tax/4_3}Title').text
    #print(self.parentId_to_title_dict)
    # Collect the per-node fields in document order; the lists line up index-wise.
    for child in self.myroot.iter('{http://www.ilias.de/Services/Taxonomy/tax/4_3}Child'):
        self.collect_childs.append(child.text)
    for parent in self.myroot.iter('{http://www.ilias.de/Services/Taxonomy/tax/4_3}Parent'):
        self.collect_parent.append(parent.text)
    for depth in self.myroot.iter('{http://www.ilias.de/Services/Taxonomy/tax/4_3}Depth'):
        self.collect_depth.append(depth.text)
    for title in self.myroot.iter('{http://www.ilias.de/Services/Taxonomy/tax/4_3}Title'):
        self.collect_title.append(title.text)
        #print(title.text)
    for order_nr in self.myroot.iter('{http://www.ilias.de/Services/Taxonomy/tax/4_3}OrderNr'):
        self.collect_order_nr.append(order_nr.text)
    self.tax_data = list(zip( self.collect_childs, self.collect_parent, self.collect_depth, self.collect_title, self.collect_order_nr ))
    # .pop(0) removes the first entry from the list.  The "Title" list holds one
    # entry more than the other lists; that extra entry is the taxonomy name itself.
    self.collect_title.pop(0)
    self.id_to_depth_dict = dict(zip(self.collect_childs, self.collect_depth))
    self.id_to_title_dict = dict(zip(self.collect_childs, self.collect_title))
    self.parentId_from_id_dict = dict(zip(self.collect_childs, self.collect_parent))
    # Build the tree picture out of labels.
    self.tax_depth_0_label = Label(self.taxonomy_frame_tree_picture, text=str(self.tax_root_label))
    self.tax_depth_0_label.grid(sticky=W)
    # collect_title would need "i+1" because slot 0 held the main title; Title[] is one slot larger than Child[].
    for i in range(len(self.collect_childs)):
        #print(self.collect_parent[i], self.collect_childs[i],self.id_to_depth_dict.get(self.collect_childs[i]), self.collect_title[i], self.collect_order_nr[i])
        if self.id_to_depth_dict.get(self.collect_childs[i]) == "2":
            self.tax_depth_1_label = Label(self.taxonomy_frame_tree_picture, text="  " + str(self.collect_title[i]))
            #self.tax_depth_1_label.grid(sticky=W)
            self.collect_labels_sorted.append(self.tax_depth_1_label.cget("text"))
        if self.id_to_depth_dict.get(self.collect_childs[i]) == "3":
            self.tax_depth_2_label = Label(self.taxonomy_frame_tree_picture, text="  " + str(self.id_to_title_dict.get(self.collect_parent[i])) + " ===> " + str(self.collect_title[i]))
            #self.tax_depth_2_label.grid(sticky=W)
            self.collect_labels_sorted.append(self.tax_depth_2_label.cget("text"))
        if self.id_to_depth_dict.get(self.collect_childs[i]) == "4":
            self.tax_depth_3_label = Label(self.taxonomy_frame_tree_picture, text="  " + str(self.id_to_title_dict.get(self.parentId_from_id_dict.get(self.collect_parent[i])))+ " ===> " +str(self.id_to_title_dict.get(self.collect_parent[i]))+ " ===> " + str(self.collect_title[i]))
            #self.tax_depth_3_label.grid(sticky=W)
            self.collect_labels_sorted.append(self.tax_depth_3_label.cget("text"))
    # Strip the indent prefixes so the alphabetical sort ignores them.
    for i in range(len(self.collect_labels_sorted)):
        self.collect_labels_sorted[i] = self.collect_labels_sorted[i].strip()
    self.collect_labels_sorted.sort()
    # Re-indent by counting the "==>" separators (1 per ancestor level).
    for i in range(len(self.collect_labels_sorted)):
        self.depth_count = "0"
        self.depth_count = self.collect_labels_sorted[i].count("==>")
        if self.depth_count == 0:
            self.sorted_labels = Label(self.taxonomy_frame_tree_picture, text="  " + self.collect_labels_sorted[i])
            self.sorted_labels.grid(sticky=W)
        if self.depth_count == 1:
            self.sorted_labels = Label(self.taxonomy_frame_tree_picture, text="    " + self.collect_labels_sorted[i])
            self.sorted_labels.grid(sticky=W)
        if self.depth_count == 2:
            self.sorted_labels = Label(self.taxonomy_frame_tree_picture, text="      " + self.collect_labels_sorted[i])
            self.sorted_labels.grid(sticky=W)
def update_taxonomy_name(self):
self.mytree = ET.parse(self.taxonomy_file_read)
self.myroot = self.mytree.getroot()
if self.taxonomy_name_entry.get != "":
# Auslesen der Root-ID Diese ID gibt den "Hauptstamm" der Taxonomie an
# Root-ID wird vorher auf "000000" gesetzt um zu prüfen ob der Wert im nächsten Schritt überschrieben | |
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
import scipy.optimize as opt # curve_fit, fmin, fmin_tnc
import jams.functions as functions # from jams
from jams.mad import mad # from jams
import warnings
# import pdb
# ----------------------------------------------------------------------
def nee2gpp(dates, nee, t, isday, rg=False, vpd=False, undef=np.nan,
            method='reichstein', shape=False, masked=False, nogppnight=False):
    """
    Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
    Eddy flux data.

    It uses either
      1. a fit of Reco vs. temperature to all nighttime data, or
      2. several fits over the season of Reco vs. temperature as in Reichstein et al. (2005), or
      3. the daytime method of Lasslop et al. (2010),
    in order to calculate Reco and then GPP = Reco - NEE.

    Definition
    ----------
    def nee2gpp(dates, nee, t, isday, rg=False, vpd=False, undef=np.nan,
                method='reichstein', shape=False, masked=False, nogppnight=False):

    Input
    -----
    Inputs are 1D arrays that can be masked or not.
    dates         julian days
    nee           net ecosystem exchange (uptake is <0) [umol m-2 s-1]
    t             temperature [K]

    Optional Input
    --------------
    If method = 'day' | 'lasslop', extra inputs are
    rg            global radiation, i.e. shortwave down [W m-2]
    vpd           vapour pressure deficit [Pa]

    Parameters
    ----------
    undef         undefined values in data (default: np.nan)
                  Input arrays will be masked at undef, keeping the original mask
    method        if 'global' | 'falge':      fit of Reco vs. temperature to all nighttime data
                  if 'local'  | 'reichstein': method of Reichstein et al. (2005)
                  if 'day'    | 'lasslop':    method of Lasslop et al. (2010)
    shape         if False then outputs are 1D arrays;
                  if True, output have the same shape as datain
                  if a shape tuple is given, then this tuple is used to reshape
    masked        if False: outputs are undef where nee and t are masked or undef
                  if True:  return masked arrays where outputs would be undef
    If method = 'night' | 'reichstein', extra parameters are
    nogppnight    if True:  Resp=NEE, GPP=0 at night, GPP always positive
                  if False: Resp=lloyd_taylor, GPP=Resp-NEE at night (default)

    Output
    ------
    GPP, Reco     photosynthesis, ecosystem respiration

    Restrictions
    ------------
    Negative respiration possible at night when gpp is forced to 0 with nogppnight=True

    Literature
    ----------
    Falge et al. (2001)
        Gap filling strategies for defensible annual sums of net ecosystem exchange
        Agricultural and Forest Meteorology 107, 43-69
    Lasslop et al. (2010)
        Separation of net ecosystem exchange into assimilation and respiration using
        a light response curve approach: critical issues and global evaluation
        Global Change Biology 16, 187-208
    Reichstein et al. (2005)
        On the separation of net ecosystem exchange into assimilation and ecosystem
        respiration: review and improved algorithm.
        Global Change Biology 11, 1424-1439

    Examples
    --------
    >>> from jams.fread import fread # from jams
    >>> from jams.date2dec import date2dec # from jams
    >>> dat = fread('test_nee2gpp.csv', skip=2, transpose=True)
    >>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
    >>> NEE = np.squeeze(dat[5,:])
    >>> rg = np.squeeze(dat[6,:])
    >>> tair = np.squeeze(dat[7,:])
    >>> undef = -9999.
    >>> isday = np.where(rg > 10., True, False)
    >>> tt = np.where(tair == undef, undef, tair+273.15)
    >>> # partition
    >>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
    >>> print(GPP[1120:1128])
    [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
     8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
    >>> print(Reco[1120:1128])
    [1.68311981 1.81012431 1.9874173 2.17108871 2.38759152 2.64372415
     2.90076664 3.18592735]
    >>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
    >>> print(GPP[1120:1128])
    [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
     8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
    >>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='global')
    >>> print(GPP[1120:1128])
    [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.33166157e+00
     8.18228013e+00 1.04092252e+01 8.19395317e+00 1.08427448e+01]
    >>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='Reichstein', masked=True)
    >>> print(GPP[1120:1128])
    [-- -- -- 4.406068706013192 8.319421516040766 10.624254150217764
     8.492456637225963 11.238197347837367]
    >>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='reichstein', shape=(np.size(NEE),1))
    >>> print(GPP[1120:1128])
    [[-9.99900000e+03]
     [-9.99900000e+03]
     [-9.99900000e+03]
     [ 4.40606871e+00]
     [ 8.31942152e+00]
     [ 1.06242542e+01]
     [ 8.49245664e+00]
     [ 1.12381973e+01]]
    >>> VPD = np.squeeze(dat[8,:])
    >>> vpd = np.where(VPD == undef, undef, VPD*100.)
    >>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, rg, vpd, undef=undef, method='day')
    >>> print(GPP[1120:1128])
    [-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 2.78457540e+00
     6.63212545e+00 8.88902165e+00 6.74243873e+00 9.51364527e+00]
    >>> print(Reco[1120:1128])
    [0.28786696 0.34594516 0.43893276 0.5495954 0.70029545 0.90849165
     1.15074873 1.46137527]

    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License. The JAMS Python package originates from the former UFZ Python library,
    Department of Computational Hydrosystems, Helmholtz Centre for Environmental
    Research - UFZ, Leipzig, Germany.

    Copyright (c) 2012-2014 <NAME>, <NAME> - mc (at) macu (dot) de

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.

    History
    -------
    Written  MC, Mar 2012
    Modified AP, Mar 2012 - undef=np.nan
             MC, Nov 2012 - wrapper for individual routines nee2gpp_reichstein etc.
             MC, Feb 2013 - ported to Python 3
             MC, May 2013 - replaced cost functions by generel cost function cost_abs if possible
             AP, Aug 2014 - replaced fmin with fmin_tnc to permit params<0,
                            permit gpp<0 at any time if nogppnight=True
    """
    # Normalise the method name once; each alias pair selects one routine.
    meth = method.lower()
    # Global relationship in Reichstein et al. (2005)
    if meth in ('global', 'falge'):
        return nee2gpp_falge(dates, nee, t, isday, undef=undef, shape=shape, masked=masked)
    # Local relationship = Reichstein et al. (2005)
    elif meth in ('local', 'reichstein'):
        return nee2gpp_reichstein(dates, nee, t, isday, undef=undef, shape=shape,
                                  masked=masked, nogppnight=nogppnight)
    # Lasslop et al. (2010) method
    elif meth in ('day', 'lasslop'):
        return nee2gpp_lasslop(dates, nee, t, isday, rg, vpd, undef=undef, shape=shape,
                               masked=masked, nogppnight=nogppnight)
    # Include new methods here
    else:
        raise ValueError('Error nee2gpp: method not implemented yet.')
# ----------------------------------------------------------------------
def nee2gpp_falge(dates, nee, t, isday, undef=np.nan,
shape=False, masked=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data, using a fit of Reco vs. temperature to all nighttime data,
in order to calculate Reco and then GPP = Reco - NEE.
Definition
----------
def nee2gpp_falge(dates, nee, t, isday, undef=np.nan, shape=False, masked=False):
Input
-----
Inputs are 1D arrays that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
Parameters
----------
undef undefined values in data (default: np.nan)
Input arrays will be masked at undef, keeping the original mask
shape if False then outputs are 1D arrays;
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to reshape
masked if False: outputs are undef where nee and t are masked or undef
if True: return masked arrays where outputs would be undef
Ouput
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
None.
Literature
----------
Falge et al. (2001)
Gap filling strategies for defensible annual sums of net ecosystem exchange
Acricultural and Forest Meteorology 107, 43-69
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, transpose=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = np.squeeze(dat[5,:])
>>> rg = np.squeeze(dat[6,:])
>>> tair = np.squeeze(dat[7,:])
>>> undef = -9999.
>>> isday = np.where(rg > 10., True, False)
>>> tt = np.where(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='global')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.33166157e+00
8.18228013e+00 1.04092252e+01 8.19395317e+00 1.08427448e+01]
License
-------
This file | |
Retrieve the information for a specific aggregation account associated with a client. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_aggregation_account_using_get_with_http_info(aggregation_account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str aggregation_account_id: UUID aggregation_account_id (required)
:return: AggregationAccount
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['aggregation_account_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_aggregation_account_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'aggregation_account_id' is set
if self.api_client.client_side_validation and ('aggregation_account_id' not in params or
params['aggregation_account_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `aggregation_account_id` when calling `get_aggregation_account_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'aggregation_account_id' in params:
path_params['aggregation_account_id'] = params['aggregation_account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/aggregation_account/{aggregation_account_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AggregationAccount', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_aggregation_account_balance_using_put(self, aggregation_account_balance, aggregation_account_balance_id, **kwargs):  # noqa: E501
    """Update an aggregation account balance  # noqa: E501

    Update a balance record for an aggregation account.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_aggregation_account_balance_using_put(aggregation_account_balance, aggregation_account_balance_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param object aggregation_account_balance: aggregation_account_balance (required)
    :param str aggregation_account_balance_id: UUID aggregation_account_balance_id (required)
    :return: AggregationAccountBalance
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always ask the low-level call for the deserialized data only.
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and asynchronous paths delegate to the
    # *_with_http_info variant: with async_req set it returns the request
    # thread, otherwise the deserialized AggregationAccountBalance.
    outcome = self.update_aggregation_account_balance_using_put_with_http_info(
        aggregation_account_balance, aggregation_account_balance_id, **kwargs)  # noqa: E501
    return outcome
def update_aggregation_account_balance_using_put_with_http_info(self, aggregation_account_balance, aggregation_account_balance_id, **kwargs):  # noqa: E501
    """Update an aggregation account balance  # noqa: E501

    Update a balance record for an aggregation account.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_aggregation_account_balance_using_put_with_http_info(aggregation_account_balance, aggregation_account_balance_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param object aggregation_account_balance: aggregation_account_balance (required)
    :param str aggregation_account_balance_id: UUID aggregation_account_balance_id (required)
    :return: AggregationAccountBalance
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted argument names; the framework control kwargs are added below.
    all_params = ['aggregation_account_balance', 'aggregation_account_balance_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() deliberately snapshots the positional arguments (plus
    # self, all_params and kwargs) so they can be looked up by name below —
    # the local variable names here are part of the behaviour.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_aggregation_account_balance_using_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'aggregation_account_balance' is set
    if self.api_client.client_side_validation and ('aggregation_account_balance' not in params or
                                                   params['aggregation_account_balance'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `aggregation_account_balance` when calling `update_aggregation_account_balance_using_put`")  # noqa: E501
    # verify the required parameter 'aggregation_account_balance_id' is set
    if self.api_client.client_side_validation and ('aggregation_account_balance_id' not in params or
                                                   params['aggregation_account_balance_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `aggregation_account_balance_id` when calling `update_aggregation_account_balance_using_put`")  # noqa: E501

    collection_formats = {}

    # The balance id is interpolated into the URL path.
    path_params = {}
    if 'aggregation_account_balance_id' in params:
        path_params['aggregation_account_balance_id'] = params['aggregation_account_balance_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The balance object itself is sent as the JSON request body.
    body_params = None
    if 'aggregation_account_balance' in params:
        body_params = params['aggregation_account_balance']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    return self.api_client.call_api(
        '/nucleus/v1/aggregation_account_balance/{aggregation_account_balance_id}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='AggregationAccountBalance',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_aggregation_account_bulk_using_put(self, aggregation_account_list, **kwargs):  # noqa: E501
    """Update a bulk aggregation account  # noqa: E501

    Update a bulk aggregation account under a client.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_aggregation_account_bulk_using_put(aggregation_account_list, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param list[object] aggregation_account_list: aggregationAccountList (required)
    :return: list[AggregationAccount]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always ask the low-level call for the deserialized data only.
    kwargs['_return_http_data_only'] = True
    # Sync and async both delegate to the *_with_http_info variant: with
    # async_req set it returns the request thread, otherwise the
    # deserialized list of AggregationAccount objects.
    outcome = self.update_aggregation_account_bulk_using_put_with_http_info(
        aggregation_account_list, **kwargs)  # noqa: E501
    return outcome
def update_aggregation_account_bulk_using_put_with_http_info(self, aggregation_account_list, **kwargs):  # noqa: E501
    """Update a bulk aggregation account  # noqa: E501

    Update a bulk aggregation account under a client.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_aggregation_account_bulk_using_put_with_http_info(aggregation_account_list, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param list[object] aggregation_account_list: aggregationAccountList (required)
    :return: list[AggregationAccount]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint understands besides the request body.
    allowed_params = ('aggregation_account_list', 'async_req',
                      '_return_http_data_only', '_preload_content',
                      '_request_timeout')
    params = {'aggregation_account_list': aggregation_account_list}
    for key, val in kwargs.items():
        if key not in allowed_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_aggregation_account_bulk_using_put" % key
            )
        params[key] = val
    # verify the required parameter 'aggregation_account_list' is set
    if self.api_client.client_side_validation and params.get('aggregation_account_list') is None:  # noqa: E501
        raise ValueError("Missing the required parameter `aggregation_account_list` when calling `update_aggregation_account_bulk_using_put`")  # noqa: E501
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['*/*']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/nucleus/v1/bulk_aggregation_account', 'PUT',
        {},  # no path parameters for this endpoint
        [],  # no query parameters
        header_params,
        body=params.get('aggregation_account_list'),
        post_params=[],
        files={},
        response_type='list[AggregationAccount]',  # noqa: E501
        auth_settings=['oauth2'],  # Authentication setting  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def update_aggregation_account_holding_bulk_using_put(self, aggregation_account_holding, **kwargs):  # noqa: E501
    """Update a bulk aggregation account holding  # noqa: E501

    Update a bulk holding record for an aggregation account.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_aggregation_account_holding_bulk_using_put(aggregation_account_holding, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param list[object] aggregation_account_holding: aggregationAccountHolding (required)
    :return: list[AggregationAccountHolding]
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always unwraps the response so callers get
    # the deserialized data instead of a (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # When async_req is set, the *_with_http_info variant already returns
    # the request thread, so a single call covers both modes.
    return self.update_aggregation_account_holding_bulk_using_put_with_http_info(
        aggregation_account_holding, **kwargs)  # noqa: E501
def update_aggregation_account_holding_bulk_using_put_with_http_info(self, aggregation_account_holding, **kwargs):  # noqa: E501
    """Update a bulk aggregation account holding  # noqa: E501

    Update a bulk holding record for an aggregation account.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_aggregation_account_holding_bulk_using_put_with_http_info(aggregation_account_holding, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param list[object] aggregation_account_holding: aggregationAccountHolding (required)
    :return: list[AggregationAccountHolding]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint understands besides the request body.
    allowed_params = ('aggregation_account_holding', 'async_req',
                      '_return_http_data_only', '_preload_content',
                      '_request_timeout')
    params = {'aggregation_account_holding': aggregation_account_holding}
    for key, val in kwargs.items():
        if key not in allowed_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_aggregation_account_holding_bulk_using_put" % key
            )
        params[key] = val
    # verify the required parameter 'aggregation_account_holding' is set
    if self.api_client.client_side_validation and params.get('aggregation_account_holding') is None:  # noqa: E501
        raise ValueError("Missing the required parameter `aggregation_account_holding` when calling `update_aggregation_account_holding_bulk_using_put`")  # noqa: E501
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['*/*']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/nucleus/v1/bulk_aggregation_account_holding', 'PUT',
        {},  # no path parameters for this endpoint
        [],  # no query parameters
        header_params,
        body=params.get('aggregation_account_holding'),
        post_params=[],
        files={},
        response_type='list[AggregationAccountHolding]',  # noqa: E501
        auth_settings=['oauth2'],  # Authentication setting  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def update_aggregation_account_holding_using_put(self, aggregation_account_holding, aggregation_account_holding_id, **kwargs): # noqa: E501
"""Update an aggregation account holding | |
<gh_stars>0
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
import unittest
from collections import namedtuple
from test.multiprocess_test_case import (
MultiProcessTestCase,
get_random_test_tensor,
onehot,
)
import crypten
import crypten.gradients as gradients
import torch
import torch.nn.functional as F
from crypten.autograd_cryptensor import AutogradContext, AutogradCrypTensor
from crypten.common.tensor_types import is_float_tensor
from crypten.gradients import AutogradFunction
class TestAutograd(MultiProcessTestCase):
"""
This class tests all autograd-related functionality.
"""
benchmarks_enabled = False
def setUp(self):
    """Prepare the multiprocess test case and initialize crypten."""
    super().setUp()
    # The main process (rank -1) must not initialize the communicator;
    # only the spawned parties (rank >= 0) participate in the protocol.
    if self.rank < 0:
        return
    crypten.init()
def _check(self, encrypted_tensor, reference, msg, tolerance=None):
    """Decrypt `encrypted_tensor` and assert that it matches `reference`.

    Float tensors pass when either the relative error or a tight absolute
    error is within `tolerance`; other dtypes must match exactly.
    """
    tol = tolerance if tolerance is not None else getattr(self, "default_tolerance", 0.05)
    decrypted = encrypted_tensor.get_plain_text()

    # sizes must agree before any element-wise comparison:
    self.assertTrue(decrypted.size() == reference.size(), msg)

    if is_float_tensor(reference):
        abs_err = (decrypted - reference).abs_()
        rel_err = abs_err.div(decrypted.abs() + reference.abs()).abs_()
        # pass if either the relative or the (scaled) absolute test passes:
        ok = (rel_err.le(tol) + abs_err.le(tol * 0.1)).gt(0).all().item() == 1
    else:
        ok = (decrypted == reference).all().item() == 1

    if not ok:
        logging.info(msg)
        logging.info("Result = %s;\nreference = %s" % (decrypted, reference))
    self.assertTrue(ok, msg=msg)
def test_non_differentiable_marking(self):
    """Tests whether marking of non-differentiability works correctly."""
    # generate random inputs:
    inputs = [get_random_test_tensor(is_float=True) for _ in range(5)]
    inputs = [crypten.cryptensor(input) for input in inputs]
    ctx = AutogradContext()

    # repeat test multiple times:
    for _ in range(10):
        # mark non-differentiable inputs as such:
        differentiable = [random.random() > 0.5 for _ in range(len(inputs))]
        for idx, diff in enumerate(differentiable):
            if not diff:
                ctx.mark_non_differentiable(inputs[idx])

        # check that inputs were correctly marked:
        for idx, input in enumerate(inputs):
            self.assertEqual(
                ctx.is_differentiable(input),
                differentiable[idx],
                "marking of differentiability failed",
            )
        # reset so the next round starts from a clean context:
        ctx.reset()

    # test behavior of AutogradCrypTensor:
    input = AutogradCrypTensor(inputs[0])
    # min()/max() values are differentiable; the argmin/argmax returned by
    # the dim-reduction variant is not, hence [value, value, index]:
    reference = [True, True, False]
    for func_name in ["min", "max"]:
        outputs = [None] * 3
        outputs[0] = getattr(input, func_name)()
        outputs[1], outputs[2] = getattr(input, func_name)(dim=0)
        for idx, output in enumerate(outputs):
            self.assertEqual(
                output.requires_grad,
                reference[idx],
                "value of requires_grad is incorrect",
            )

    # behavior of max_pool2d in which indices are returned:
    # (pooled values differentiable, returned indices not)
    input = get_random_test_tensor(size=(1, 3, 8, 8), is_float=True)
    input = AutogradCrypTensor(crypten.cryptensor(input))
    reference = [True, True, False]
    outputs = [None] * 3
    outputs[0] = input.max_pool2d(2, return_indices=False)
    outputs[1], outputs[2] = input.max_pool2d(2, return_indices=True)
    for idx, output in enumerate(outputs):
        self.assertEqual(
            output.requires_grad,
            reference[idx],
            "value of requires_grad is incorrect",
        )
def test_autograd_registation(self):
    """Tests registration of new autograd function."""
    # every registered function must be retrievable by name:
    for func_name, reference_func in gradients.FUNCTION_REGISTRY.items():
        grad_fn = gradients.get_grad_fn(func_name)
        self.assertEqual(grad_fn, reference_func)
        self.assertEqual(grad_fn.name, func_name)

    # unknown names must yield None:
    for bogus_name in ("bfobofb", "djhfhr"):
        self.assertIsNone(gradients.get_grad_fn(bogus_name))

    # newly registered classes must become retrievable:
    for func_name in ("mock_func1", "mock_func2", "mock_func3"):
        mock_cls = type("%sName" % func_name, (AutogradFunction,), {})
        gradients.register_function(func_name)(mock_cls)
        looked_up = gradients.get_grad_fn(func_name)
        self.assertEqual(looked_up, mock_cls)
        self.assertEqual(looked_up.name, func_name)

    # already-registered names may not be overwritten:
    for func_name in ("add", "sub", "view"):
        clashing_cls = type("%sName" % func_name, (AutogradFunction,), {})
        with self.assertRaises(ValueError):
            gradients.register_function(func_name)(clashing_cls)
def test_autograd_functions(self):
    """Tests individual autograd functions without testing autograd.

    Each registered gradient function's forward() is compared against the
    corresponding PyTorch operation, and its backward() against autograd
    gradients computed by PyTorch on the same inputs.
    """
    # input sizes for tests of autograd functions:
    input_size = {
        "t": (2, 4),
        "transpose": (4, 8, 3),
        "flip": (2, 3, 7, 2),
        "view": (8, 6),
        "reshape": (8, 6),
        "flatten": (8, 6),
        "narrow": (10, 7),
        "take": (5, 10, 15),  # NOTE: this only tests the pytorch take
        # functionality. The remaining take functionality
        # is tested separately
        "gather": (2, 2),
        "scatter": (3, 5),
        "roll": (4, 8),
        "squeeze": (12, 1, 6),
        "unsqueeze": (7, 3),
        "__getitem__": (6, 6),
        "neg": (8, 4),
        "relu": (3, 7),
        "tanh": (4, 3),
        "add": (10, 7),
        "sub": (9, 2),
        "mul": (3, 5),
        "matmul": (7, 7),
        "div": (5, 4),
        "pow": (4, 3),
        "square": (8, 5),
        "sqrt": (5, 6),
        "exp": (5, 2),
        "log": (3, 7),
        "dot": (8,),
        "ger": (12,),
        "sin": (5, 4),
        "cos": (9, 3),
        "abs": (8, 5),
        "sign": (8, 5),
        "norm": (3, 2),  # NOTE: Flaky because sqrt only works for values up to 200.
        "sum": (4, 3),
        "cumsum": (13, 7),
        "trace": (4, 4),
        "mean": (2, 9),
        "var": (3, 4),
        "max": (6, 7),
        "min": (4, 5),
        "sigmoid": (4, 7),
        "softmax": (10, 5),
        "pad": (6, 3),
        # "avg_pool2d": (1, 3, 21, 21),    # TODO: Enable once avg_pool2d is
        #                                    fixed in gradients.py.
        "max_pool2d": (1, 3, 21, 21),
        "conv2d": (1, 4, 21, 21),
        "binary_cross_entropy": (8,),
        "cross_entropy": (8, 4),
    }
    # extra positional arguments (beyond the input tensor(s)) per function:
    additional_args = {
        "transpose": [2, 0],
        "flip": [(1, 3, 2)],
        "view": [(4, 12)],
        "reshape": [(4, 12)],
        "narrow": [1, 2, 3],
        "gather": [1, torch.tensor([[0, 0], [1, 0]])],
        "scatter": [
            0,
            torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]]),
            get_random_test_tensor(size=(2, 5), is_float=True),
        ],
        "roll": [(2, -1), (0, 1)],
        "squeeze": [1],
        "unsqueeze": [1],
        "__getitem__": [1],
        "div": [4.0],
        "pow": [2.0],
        "cumsum": [1],
        "softmax": [1],
        "pad": [(1, 2, 3, 4)],
        "avg_pool2d": [5],
        "max_pool2d": [3],
        "conv2d": [get_random_test_tensor(size=(2, 4, 3, 3), is_float=True)],
        "take": [torch.tensor([0, 5, 10])],
        "binary_cross_entropy": [
            get_random_test_tensor(size=(8,), is_float=True).gt(0.0).float()
        ],
        "cross_entropy": [
            onehot(
                get_random_test_tensor(size=(8,), max_value=3).abs(), num_targets=4
            )
        ],
    }
    binary_functions = ["add", "sub", "mul", "dot", "ger", "matmul"]
    positive_only = ["pow", "sqrt", "log", "binary_cross_entropy"]

    # loop over all autograd functions:
    for func_name in input_size.keys():

        # generate inputs:
        inputs = [
            get_random_test_tensor(
                size=input_size[func_name], max_value=1.0, is_float=True
            )
            for _ in range(2 if func_name in binary_functions else 1)
        ]
        if func_name in positive_only:  # some functions do not take negative values
            inputs = [input.abs().add_(0.001) for input in inputs]
        for input in inputs:
            input.requires_grad = True
        encr_inputs = [crypten.cryptensor(input) for input in inputs]
        # remember how many gradients to check after backward:
        number_of_inputs = len(inputs)

        # add additional arguments, encrypting only tensors (if found):
        if func_name in additional_args:
            inputs += additional_args[func_name]
            encr_inputs += additional_args[func_name]
            if func_name == "take":
                # take's trailing `dimension` argument is None here
                # (flat indexing); the dim != None case is tested in
                # test_autograd_func_take below
                encr_inputs += [None]
            elif func_name not in ["gather", "scatter"]:
                encr_inputs = [
                    crypten.cryptensor(t) if torch.is_tensor(t) else t
                    for t in encr_inputs
                ]

        # cross_entropy uses one-hot targets in crypten but not in PyTorch:
        if func_name == "cross_entropy":
            inputs[1] = inputs[1].argmax(1)

        # AutogradFunction.forward() does not accept unpacked inputs:
        if len(encr_inputs) == 1:
            encr_inputs = encr_inputs[0]

        # test forward function:
        if hasattr(inputs[0], func_name):  # torch.function()
            reference = getattr(inputs[0], func_name)(*inputs[1:])
        elif hasattr(F, func_name):  # torch.nn.functional.function()
            reference = getattr(F, func_name)(*inputs)
        elif func_name == "square":
            reference = inputs[0].pow(2.0)
        else:
            raise ValueError("unknown PyTorch function: %s" % func_name)
        ctx = AutogradContext()
        grad_fn = gradients.get_grad_fn(func_name)
        encr_output = grad_fn.forward(ctx, encr_inputs)
        self._check(encr_output, reference, "%s forward failed" % func_name)
        if func_name == "view":
            ctx = AutogradContext()
            # check view() with a list of int to represent size.
            # encr_inputs[0]: input
            # encr_inputs[1]: tuple as torch.Size, to be unpacked.
            view_input, sizes = encr_inputs
            encr_output = grad_fn.forward(
                ctx, [view_input] + [size for size in sizes]
            )
            self._check(encr_output, reference, "%s forward failed" % func_name)

        # run backward functions:
        grad_output = get_random_test_tensor(
            max_value=2, size=reference.size(), is_float=True
        )
        encr_grad_output = encr_output.new(grad_output)
        reference.backward(grad_output)
        encr_grad = grad_fn.backward(ctx, encr_grad_output)

        # test result of running backward function:
        # (only the first `number_of_inputs` gradients correspond to real
        # tensor inputs; extra args carry no gradient)
        if not isinstance(encr_grad, (list, tuple)):
            encr_grad = (encr_grad,)
        for idx in range(number_of_inputs):
            self._check(
                encr_grad[idx], inputs[idx].grad, "%s backward failed" % func_name
            )
def test_autograd_func_take(self):
    """Tests the part of autograd take that does not have a torch equivalent"""
    tensor_size = [5, 5, 5, 5]
    index = torch.tensor([[[1, 2], [3, 4]], [[4, 2], [1, 3]]], dtype=torch.long)

    # Test when dimension!=None
    for dimension in range(0, 4):
        tensor = get_random_test_tensor(size=tensor_size, is_float=True)
        # numpy's take supports a dimension argument, unlike torch.take:
        ref_forward = torch.from_numpy(tensor.numpy().take(index, dimension))
        encrypted_tensor = crypten.cryptensor(tensor)
        encr_inputs = [encrypted_tensor, index, dimension]

        # test forward
        ctx = AutogradContext()
        grad_fn_take = gradients.get_grad_fn("take")
        encr_output = grad_fn_take.forward(ctx, encr_inputs)
        self._check(encr_output, ref_forward, "take forward failed: dimension set")

        # test backward:
        # first, recreate take forward function with only torch operations
        # (indexing with `index` along `dimension` is equivalent to take)
        tensor2 = get_random_test_tensor(size=tensor_size, is_float=True)
        tensor2.requires_grad = True
        all_indices = [slice(0, x) for x in tensor2.size()]
        all_indices[dimension] = index
        ref_forward_torch = tensor2[all_indices]
        grad_output = torch.ones(ref_forward_torch.size())
        ref_forward_torch.backward(grad_output)

        # next, do backward pass on encrypted tensor
        encr_grad_output = encr_output.new(grad_output)
        encr_grad = grad_fn_take.backward(ctx, encr_grad_output)

        # finally, compare values
        self._check(encr_grad, tensor2.grad, "take backward failed: dimension set")
def test_detach(self):
"""Tests that detach() works as expected."""
for func_name in ["detach", "detach_"]:
# get test case:
input_size = (12, 5)
input1 = get_random_test_tensor(size=input_size, is_float=True)
input2 = get_random_test_tensor(size=input_size, is_float=True)
input1 = AutogradCrypTensor(crypten.cryptensor(input1))
input2 = AutogradCrypTensor(crypten.cryptensor(input2))
# perform forward computation with detach in the middle:
intermediate = input1.add(1.0)
intermediate = getattr(intermediate, func_name)()
output = intermediate.add(input2).sum()
# perform backward:
output.backward()
msg = "detach() function does not behave as expected"
| |
import unittest
import datetime
from decimal import Decimal
import pymongo
import gridfs
from mongoengine import *
import mongoengine.connection
from mongoengine.connection import _get_db
from mongoengine.base import _document_registry
# Route all collections created by these tests to the dedicated "test" database.
mongoengine.connection.set_default_db("test")
class FieldTest(unittest.TestCase):
def setUp(self):
    # Connect to the default (test) database and keep a handle on it so
    # individual tests can inspect raw collections if needed.
    connect()
    self.db = _get_db()
def tearDown(self):
    # Each test defines its own Document classes; clear the registry so
    # class definitions do not leak between tests.
    _document_registry.clear()
def test_default_values(self):
    """Ensure that default field values are used when creating a document.
    """
    class Person(Document):
        name = StringField()
        age = IntField(default=30)
        userid = StringField(default=lambda: 'test')

    # Only `name` is supplied; the other fields must fall back to their
    # declared defaults (both plain values and callables).
    person = Person(name='<NAME>')
    for field, expected in (('age', 30), ('userid', 'test')):
        self.assertEqual(person._data[field], expected)
def test_required_values(self):
    """Ensure that required field constraints are enforced.
    """
    class Person(Document):
        name = StringField(required=True)
        age = IntField(required=True)
        userid = StringField()

    # Missing `age` makes the document invalid...
    incomplete = Person(name="<NAME>")
    self.assertRaises(ValidationError, incomplete.validate)
    # ...and so does a missing `name`.
    incomplete = Person(age=30)
    self.assertRaises(ValidationError, incomplete.validate)
def test_object_id_validation(self):
    """Ensure that invalid values cannot be assigned to string fields.
    """
    class Person(Document):
        name = StringField()

    person = Person(name='<NAME>')
    # A fresh, unsaved document has no id yet.
    self.assertEqual(person.id, None)

    # Neither integers nor short strings are valid ObjectIds.
    for bad_id in (47, 'abc'):
        person.id = bad_id
        self.assertRaises(ValidationError, person.validate)

    # A well-formed 24-character hex string is accepted.
    person.id = '497ce96f395f2f052a494fd4'
    person.validate()
def test_string_validation(self):
    """Ensure that invalid values cannot be assigned to string fields.
    """
    class Person(Document):
        name = StringField(max_length=20)
        userid = StringField(r'[0-9a-z_]+$')

    # a non-string value is rejected outright
    p = Person(name=34)
    self.assertRaises(ValidationError, p.validate)

    # the userid regex forbids dots and uppercase characters
    p = Person(userid='test.User')
    self.assertRaises(ValidationError, p.validate)
    p.userid = 'test_user'
    self.assertEqual(p.userid, 'test_user')
    p.validate()

    # names longer than max_length fail; shorter ones pass
    p = Person(name='Name that is more than twenty characters')
    self.assertRaises(ValidationError, p.validate)
    p.name = 'Shorter name'
    p.validate()
def test_url_validation(self):
    """Ensure that URLFields validate urls properly.
    """
    class Link(Document):
        url = URLField()

    doc = Link()
    # a bare word is not a URL
    doc.url = 'google'
    self.assertRaises(ValidationError, doc.validate)
    # a full scheme://host:port URL passes
    doc.url = 'http://www.google.com:8080'
    doc.validate()
def test_int_validation(self):
    """Ensure that invalid values cannot be assigned to int fields.
    """
    class Person(Document):
        age = IntField(min_value=0, max_value=110)

    person = Person()
    # ints, longs and even non-integral floats within [0, 110] validate
    # (this IntField implementation does not reject floats).
    person.age = 50
    person.validate()

    person.age = 50L
    person.validate()

    person.age = 50.6
    person.validate()

    # below min_value
    person.age = -1
    self.assertRaises(ValidationError, person.validate)
    # above max_value
    person.age = 120
    self.assertRaises(ValidationError, person.validate)
    # not a number at all
    person.age = 'ten'
    self.assertRaises(ValidationError, person.validate)
def test_float_validation(self):
    """Ensure that invalid values cannot be assigned to float fields.
    """
    class Person(Document):
        height = FloatField(min_value=0.1, max_value=3.5)

    person = Person()
    # floats, ints and longs within [0.1, 3.5] all validate
    person.height = 1.89
    person.validate()

    person.height = 2
    person.validate()

    person.height = 2L
    person.validate()

    # numeric strings are not coerced by FloatField
    person.height = '2.0'
    self.assertRaises(ValidationError, person.validate)
    # below min_value
    person.height = 0.01
    self.assertRaises(ValidationError, person.validate)
    # above max_value
    person.height = 4.0
    self.assertRaises(ValidationError, person.validate)
def test_decimal_validation(self):
    """Ensure that invalid values cannot be assigned to decimal fields.
    """
    class Person(Document):
        height = DecimalField(min_value=Decimal('0.1'),
                              max_value=Decimal('3.5'))

    Person.drop_collection()

    person = Person()
    person.height = Decimal('1.89')
    person.save()
    person.reload()
    # the Decimal value survives a save/reload round trip unchanged
    self.assertEqual(person.height, Decimal('1.89'))

    # NOTE(review): a numeric string is saved successfully here, while the
    # same string fails for FloatField above — presumably DecimalField
    # coerces strings; confirm this is intended behavior.
    person.height = '2.0'
    person.save()
    # out-of-range values fail, whether float or Decimal
    person.height = 0.01
    self.assertRaises(ValidationError, person.validate)
    person.height = Decimal('0.01')
    self.assertRaises(ValidationError, person.validate)
    person.height = Decimal('4.0')
    self.assertRaises(ValidationError, person.validate)

    Person.drop_collection()
def test_boolean_validation(self):
    """Ensure that invalid values cannot be assigned to boolean fields.
    """
    class Person(Document):
        admin = BooleanField()

    person = Person()
    person.admin = True
    person.validate()

    # truthy-but-not-bool values are rejected outright
    for not_a_bool in (2, 'Yes'):
        person.admin = not_a_bool
        self.assertRaises(ValidationError, person.validate)
def test_datetime_validation(self):
    """Ensure that invalid values cannot be assigned to datetime fields.
    """
    class LogEntry(Document):
        time = DateTimeField()

    log = LogEntry()
    # a real datetime is valid...
    log.time = datetime.datetime.now()
    log.validate()

    # ...but plain dates, numbers and strings are not
    for bad_value in (datetime.date.today(), -1, '1pm'):
        log.time = bad_value
        self.assertRaises(ValidationError, log.validate)
def test_datetime(self):
    """Tests showing pymongo datetime fields handling of microseconds.
    Microseconds are rounded to the nearest millisecond and pre UTC
    handling is wonky.

    See: http://api.mongodb.org/python/current/api/bson/son.html#dt
    """
    class LogEntry(Document):
        date = DateTimeField()

    LogEntry.drop_collection()

    # Post UTC - microseconds are rounded (down) nearest millisecond and dropped
    d1 = datetime.datetime(1970, 01, 01, 00, 00, 01, 999)
    d2 = datetime.datetime(1970, 01, 01, 00, 00, 01)
    log = LogEntry()
    log.date = d1
    log.save()
    log.reload()
    # 999us is below one millisecond, so the microseconds are lost entirely
    self.assertNotEquals(log.date, d1)
    self.assertEquals(log.date, d2)

    # Post UTC - microseconds are rounded (down) nearest millisecond
    d1 = datetime.datetime(1970, 01, 01, 00, 00, 01, 9999)
    d2 = datetime.datetime(1970, 01, 01, 00, 00, 01, 9000)
    log.date = d1
    log.save()
    log.reload()
    self.assertNotEquals(log.date, d1)
    self.assertEquals(log.date, d2)

    # Pre UTC dates microseconds below 1000 are dropped
    d1 = datetime.datetime(1969, 12, 31, 23, 59, 59, 999)
    d2 = datetime.datetime(1969, 12, 31, 23, 59, 59)
    log.date = d1
    log.save()
    log.reload()
    self.assertNotEquals(log.date, d1)
    self.assertEquals(log.date, d2)

    LogEntry.drop_collection()
def test_list_validation(self):
    """Ensure that a list field only accepts lists with valid elements.
    """
    class User(Document):
        pass

    class Comment(EmbeddedDocument):
        content = StringField()

    class BlogPost(Document):
        content = StringField()
        comments = ListField(EmbeddedDocumentField(Comment))
        tags = ListField(StringField())
        authors = ListField(ReferenceField(User))
        generic = ListField(GenericReferenceField())

    post = BlogPost(content='Went for a walk today...')
    post.validate()

    # a bare string is not a list of strings, nor is a list of ints
    post.tags = 'fun'
    self.assertRaises(ValidationError, post.validate)
    post.tags = [1, 2]
    self.assertRaises(ValidationError, post.validate)

    # lists and tuples of strings are both accepted
    post.tags = ['fun', 'leisure']
    post.validate()
    post.tags = ('fun', 'leisure')
    post.validate()

    # comments must be Comment embedded documents
    post.comments = ['a']
    self.assertRaises(ValidationError, post.validate)
    post.comments = 'yay'
    self.assertRaises(ValidationError, post.validate)

    comments = [Comment(content='Good for you'), Comment(content='Yay.')]
    post.comments = comments
    post.validate()

    # authors must be User references, not embedded documents
    post.authors = [Comment()]
    self.assertRaises(ValidationError, post.validate)

    # a saved User is a valid reference target
    user = User()
    user.save()
    post.authors = [user]
    post.validate()

    User.drop_collection()
    BlogPost.drop_collection()
def test_sorted_list_sorting(self):
    """Ensure that a sorted list field properly sorts values.
    """
    class Comment(EmbeddedDocument):
        order = IntField()
        content = StringField()

    class BlogPost(Document):
        content = StringField()
        comments = SortedListField(EmbeddedDocumentField(Comment),
                                   ordering='order')
        tags = SortedListField(StringField())

    post = BlogPost(content='Went for a walk today...')
    post.save()

    # without an explicit `ordering`, items are sorted by natural order
    post.tags = ['leisure', 'fun']
    post.save()
    post.reload()
    self.assertEqual(post.tags, ['fun', 'leisure'])

    # with ordering='order', comments are sorted by that embedded field
    comment1 = Comment(content='Good for you', order=1)
    comment2 = Comment(content='Yay.', order=0)
    comments = [comment1, comment2]
    post.comments = comments
    post.save()
    post.reload()
    self.assertEqual(post.comments[0].content, comment2.content)
    self.assertEqual(post.comments[1].content, comment1.content)

    BlogPost.drop_collection()
def test_list_field(self):
    """Ensure that list types work as expected.
    """
    class BlogPost(Document):
        info = ListField(StringField())

    BlogPost.drop_collection()

    post = BlogPost()
    # scalars and dicts are not lists and must be rejected
    for bad_value in ('my post', {'title': 'test'}):
        post.info = bad_value
        self.assertRaises(ValidationError, post.validate)

    # a proper list of strings saves fine
    post.info = ['test']
    post.save()
    self.assertEquals(BlogPost.count({}), 1)

    BlogPost.drop_collection()
def test_list_field_strict(self):
    """Ensure that list field handles validation if provided a strict field type."""
    class Simple(Document):
        mapping = ListField(field=IntField())

    Simple.drop_collection()

    doc = Simple()
    doc.mapping = [1]
    doc.save()

    # storing strings in an IntField list must fail when saved
    doc.mapping = ["abc"]
    self.assertRaises(ValidationError, doc.save)

    Simple.drop_collection()
def test_dict_field(self):
    """Ensure that dict types work as expected.
    """
    class BlogPost(Document):
        info = DictField()

    BlogPost.drop_collection()

    post = BlogPost()
    # non-dict values are rejected
    post.info = 'my post'
    self.assertRaises(ValidationError, post.validate)

    post.info = ['test', 'test']
    self.assertRaises(ValidationError, post.validate)

    # keys may not start with '$' (reserved for Mongo operators)...
    post.info = {'$title': 'test'}
    self.assertRaises(ValidationError, post.validate)

    # ...nor contain '.' (used for nested-field paths)
    post.info = {'the.title': 'test'}
    self.assertRaises(ValidationError, post.validate)

    post.info = {'title': 'test'}
    post.save()

    # nested dicts with arbitrary value types are allowed
    post = BlogPost()
    post.info = {'details': {'test': 'test'}}
    post.save()

    post = BlogPost()
    post.info = {'details': {'test': 3}}
    post.save()

    self.assertEquals(BlogPost.count({}), 3)
    self.assertEquals(BlogPost.objects.filter(info__title__exact='test').count(), 1)
    BlogPost.drop_collection()
def test_embedded_document_validation(self):
    """Ensure that invalid embedded documents cannot be assigned to
    embedded document fields.
    """
    class Comment(EmbeddedDocument):
        content = StringField()

    class PersonPreferences(EmbeddedDocument):
        food = StringField(required=True)
        number = IntField()

    class Person(Document):
        name = StringField()
        preferences = EmbeddedDocumentField(PersonPreferences)

    person = Person(name='Test User')
    # a plain string, a wrong embedded-document type, and the right type
    # missing its required field are all invalid
    for bad_prefs in ('My Preferences',
                      Comment(content='Nice blog post...'),
                      PersonPreferences()):
        person.preferences = bad_prefs
        self.assertRaises(ValidationError, person.validate)

    # a fully populated preferences document validates
    person.preferences = PersonPreferences(food='Cheese', number=47)
    self.assertEqual(person.preferences.food, 'Cheese')
    person.validate()
def test_embedded_document_inheritance(self):
    """Ensure that subclasses of embedded documents may be provided to
    EmbeddedDocumentFields of the superclass' type.
    """
    class User(EmbeddedDocument):
        name = StringField()

    class PowerUser(User):
        power = IntField()

    class BlogPost(Document):
        content = StringField()
        author = EmbeddedDocumentField(User)

    entry = BlogPost(content='What I did today...')
    # both the declared type and its subclass are assignable
    entry.author = User(name='Test User')
    entry.author = PowerUser(name='Test User', power=47)
def test_reference_validation(self):
    """Ensure that invalid document objects cannot be assigned to reference
    fields.
    """
    class User(Document):
        name = StringField()

    class BlogPost(Document):
        content = StringField()
        author = ReferenceField(User)

    User.drop_collection()
    BlogPost.drop_collection()

    # a ReferenceField may not point at embedded documents
    self.assertRaises(ValidationError, ReferenceField, EmbeddedDocument)

    user = User(name='Test User')

    # Ensure that the referenced object must have been saved
    post1 = BlogPost(content='Chips and gravy taste good.')
    post1.author = user
    self.assertRaises(ValidationError, post1.save)

    # Check that an invalid object type cannot be used
    post2 = BlogPost(content='Chips and chilli taste good.')
    post1.author = post2
    self.assertRaises(ValidationError, post1.validate)

    # once saved, the user is a valid reference target...
    user.save()
    post1.author = user
    post1.save()

    # ...but a BlogPost still is not, even after being saved
    post2.save()
    post1.author = post2
    self.assertRaises(ValidationError, post1.validate)

    User.drop_collection()
    BlogPost.drop_collection()
def test_list_item_dereference(self):
    """Ensure that DBRef items in ListFields are dereferenced.
    """
    class User(Document):
        name = StringField()

    class Group(Document):
        members = ListField(ReferenceField(User))

    User.drop_collection()
    Group.drop_collection()

    # create two users and a group referencing both
    users = [User(name=username) for username in ('user1', 'user2')]
    for user in users:
        user.save()
    group = Group(members=users)
    group.save()

    # fetching the group must dereference the stored DBRefs back to Users
    fetched = Group.objects.first()
    self.assertEqual(fetched.members[0].name, users[0].name)
    self.assertEqual(fetched.members[1].name, users[1].name)

    User.drop_collection()
    Group.drop_collection()
def test_recursive_reference(self):
    """Ensure that ReferenceFields can reference their own documents.
    """
    class Employee(Document):
        name = StringField()
        boss = ReferenceField('self')
        friends = ListField(ReferenceField('self'))

    bill = Employee(name='<NAME>')
    bill.save()

    michael = Employee(name='<NAME>')
    michael.save()

    samir = Employee(name='<NAME>')
    samir.save()

    # both the single and the list-valued self-references must resolve
    # back to the saved Employee documents on fetch
    friends = [michael, samir]
    peter = Employee(name='<NAME>', boss=bill, friends=friends)
    peter.save()

    peter = Employee.objects.with_id(peter.id)
    self.assertEqual(peter.boss, bill)
    self.assertEqual(peter.friends, friends)
def test_recursive_embedding(self):
"""Ensure that EmbeddedDocumentFields can contain their own documents.
"""
class Tree(Document):
name = StringField()
children = ListField(EmbeddedDocumentField('TreeNode'))
class TreeNode(EmbeddedDocument):
name = StringField()
children = ListField(EmbeddedDocumentField('self'))
Tree.drop_collection()
tree = Tree(name="Tree")
first_child = TreeNode(name="Child 1")
tree.children.append(first_child)
second_child = TreeNode(name="Child 2")
first_child.children.append(second_child)
tree.save()
tree = Tree.objects.first()
self.assertEqual(len(tree.children), 1)
| |
rv.blit(top, (0, 0), focus=True, main=True)
renpy.display.render.redraw(self, 0)
return rv
class ImageDissolve(Transition):
    """
    :doc: transition function
    :args: (image, time, ramplen=8, reverse=False, alpha=True, time_warp=None)
    :name: ImageDissolve

    Returns a transition that dissolves the old scene into the new scene, using
    an image to control the dissolve process. This means that white pixels will
    dissolve in first, and black pixels will dissolve in last.

    `image`
        A control image to use. This must be either an image file or
        image manipulator. The control image should be the size of
        the scenes being dissolved.

    `time`
        The time the dissolve will take.

    `ramplen`
        The length of the ramp to use. This must be an integer power
        of 2. When this is the default value of 8, when a white pixel
        is fully dissolved, a pixel 8 shades of gray darker will have
        completed one step of dissolving in.

    `reverse`
        If True, black pixels will dissolve in before white pixels.

    `alpha`
        Ignored.

    `time_warp`
        A function that adjusts the timeline. If not None, this should be a
        function that takes a fractional time between 0.0 and 1.0, and returns
        a number in the same range.

    ::

        define circirisout = ImageDissolve("circiris.png", 1.0)
        define circirisin = ImageDissolve("circiris.png", 1.0, reverse=True)
        define circiristbigramp = ImageDissolve("circiris.png", 1.0, ramplen=256)
    """

    __version__ = 1

    def after_upgrade(self, version):
        # Instances pickled before version 1 predate the 'alpha' attribute.
        if version < 1:
            self.alpha = False

    # Class-level default; overwritten per-instance in __init__.
    time_warp = None

    def __init__(
            self,
            image,
            time,
            ramplen=8,
            ramptype='linear',
            ramp=None,
            reverse=False,
            alpha=False,
            old_widget=None,
            new_widget=None,
            time_warp=None,
            **properties):

        # ramptype and ramp are now unused, but are kept for compatibility with
        # older code.

        super(ImageDissolve, self).__init__(time, **properties)

        self.old_widget = old_widget
        self.new_widget = new_widget
        self.events = False
        self.alpha = alpha
        self.time_warp = time_warp

        if not reverse:
            # Copies red -> alpha
            matrix = renpy.display.im.matrix(
                0, 0, 0, 0, 1,
                0, 0, 0, 0, 1,
                0, 0, 0, 0, 1,
                1, 0, 0, 0, 0)
        else:
            # Copies 1-red -> alpha
            matrix = renpy.display.im.matrix(
                0, 0, 0, 0, 1,
                0, 0, 0, 0, 1,
                0, 0, 0, 0, 1,
                -1, 0, 0, 0, 1)

        # The control image, with its red channel moved into alpha.
        self.image = renpy.display.im.MatrixColor(image, matrix)

        # An explicit ramp overrides ramplen (only its length matters now).
        if ramp is not None:
            ramplen = len(ramp)

        # The length of the ramp.
        self.ramplen = max(ramplen, 1)

    def visit(self):
        # Include the control image so it is predicted/loaded with the rest.
        return super(ImageDissolve, self).visit() + [ self.image ]

    def render(self, width, height, st, at):

        if renpy.game.less_updates or renpy.display.less_imagedissolve:
            return null_render(self, width, height, st, at)

        # Transition finished: deliver events and show only the new scene.
        if st >= self.delay:
            self.events = True
            return render(self.new_widget, width, height, st, at)

        image = render(self.image, width, height, st, at)
        bottom = render(self.old_widget, width, height, st, at)
        top = render(self.new_widget, width, height, st, at)

        # Clip the operation to the smallest of the three renders.
        width = min(bottom.width, top.width, image.width)
        height = min(bottom.height, top.height, image.height)

        rv = renpy.display.render.Render(width, height, opaque=not (self.alpha or renpy.config.dissolve_force_alpha))

        # Fraction of the dissolve completed, optionally remapped by time_warp.
        complete = st / self.delay

        if self.time_warp is not None:
            complete = self.time_warp(complete)

        rv.operation = renpy.display.render.IMAGEDISSOLVE
        rv.operation_alpha = self.alpha or renpy.config.dissolve_force_alpha
        rv.operation_complete = complete
        rv.operation_parameter = self.ramplen

        if renpy.display.render.models:
            # Model-based renderer: crop all renders to a common size and
            # drive the imagedissolve shader with offset/multiplier uniforms.
            target = rv.get_size()

            if image.get_size() != target:
                image = image.subsurface((0, 0, width, height))

            if top.get_size() != target:
                top = top.subsurface((0, 0, width, height))

            if bottom.get_size() != target:
                bottom = bottom.subsurface((0, 0, width, height))

            ramp = self.ramplen

            # Prevent a DBZ if the user gives us a 0 ramp.
            if ramp < 1:
                ramp = 1

            # Compute the offset to apply to the alpha.
            start = -1.0
            end = ramp / 256.0
            offset = start + (end - start) * complete

            rv.mesh = True
            rv.add_shader("renpy.imagedissolve",)
            rv.add_uniform("u_renpy_dissolve_offset", offset)
            rv.add_uniform("u_renpy_dissolve_multiplier", 256.0 / ramp)
            rv.add_property("mipmap", renpy.config.mipmap_dissolves if (self.style.mipmap is None) else self.style.mipmap)

        # Blit order matters: control image first, then old scene, then new.
        rv.blit(image, (0, 0), focus=False, main=False)
        rv.blit(bottom, (0, 0), focus=False, main=False)
        rv.blit(top, (0, 0), focus=True, main=True)

        # Request a redraw so the dissolve animates every frame.
        renpy.display.render.redraw(self, 0)

        return rv
class AlphaDissolve(Transition):
    """
    :doc: transition function
    :args: (control, delay=0.0, alpha=False, reverse=False)

    Returns a transition that uses a control displayable (almost always some
    sort of animated transform) to transition from one screen to another. The
    transform is evaluated. The new screen is used where the transform is
    opaque, and the old image is used when it is transparent.

    `control`
        The control transform.

    `delay`
        The time the transition takes, before ending.

    `alpha`
        Ignored.

    `reverse`
        If true, the alpha channel is reversed. Opaque areas are taken
        from the old image, while transparent areas are taken from the
        new image.
    """

    # Class-level default for instances pickled before this attribute existed.
    mipmap = None

    def __init__(
            self,
            control,
            delay=0.0,
            old_widget=None,
            new_widget=None,
            alpha=False,
            reverse=False,
            **properties):

        super(AlphaDissolve, self).__init__(delay, **properties)

        # Wrap the control displayable in a Fixed container.
        self.control = renpy.display.layout.Fixed()
        self.control.add(control)

        self.old_widget = renpy.easy.displayable(old_widget)
        self.new_widget = renpy.easy.displayable(new_widget)
        self.events = False

        self.alpha = alpha
        self.reverse = reverse

    def visit(self):
        # Include the control displayable for prediction/loading.
        return super(AlphaDissolve, self).visit() + [ self.control ]

    def render(self, width, height, st, at):

        if renpy.game.less_updates or renpy.display.less_imagedissolve:
            return null_render(self, width, height, st, at)

        # Once the delay has elapsed, start delivering events to the new scene.
        if st >= self.delay:
            self.events = True

        bottom = render(self.old_widget, width, height, st, at)
        top = render(self.new_widget, width, height, st, at)

        # Clip to the smaller of the two scene renders.
        width = min(bottom.width, top.width)
        height = min(bottom.height, top.height)

        control = render(self.control, width, height, st, at)

        rv = renpy.display.render.Render(width, height, opaque=not self.alpha)

        rv.operation = renpy.display.render.IMAGEDISSOLVE
        rv.operation_alpha = self.alpha or renpy.config.dissolve_force_alpha
        # 0.5 — the control image's alpha is used directly (full-range ramp).
        rv.operation_complete = 256.0 / (256.0 + 256.0)
        rv.operation_parameter = 256

        if renpy.display.render.models:
            rv.mesh = True
            rv.add_shader("renpy.imagedissolve",)
            rv.add_uniform("u_renpy_dissolve_offset", 0)
            rv.add_uniform("u_renpy_dissolve_multiplier", 1.0)
            rv.add_property("mipmap", renpy.config.mipmap_dissolves if (self.style.mipmap is None) else self.style.mipmap)

        rv.blit(control, (0, 0), focus=False, main=False)

        # 'reverse' swaps which scene the control's opaque areas select.
        if not self.reverse:
            rv.blit(bottom, (0, 0), focus=False, main=False)
            rv.blit(top, (0, 0), focus=True, main=True)
        else:
            rv.blit(top, (0, 0), focus=True, main=True)
            rv.blit(bottom, (0, 0), focus=False, main=False)

        return rv
class CropMove(Transition):
"""
:doc: transition function
:args: (time, mode="slideright", startcrop=(0.0, 0.0, 0.0, 1.0), startpos=(0.0, 0.0), endcrop=(0.0, 0.0, 1.0, 1.0), endpos=(0.0, 0.0), topnew=True)
:name: CropMove
Returns a transition that works by cropping a scene and positioning it on the
screen. This can be used to implement a variety of effects, all of which
involve changing rectangular slices of scenes.
`time`
The time the transition takes.
`mode`
The name of the mode of the transition. There are three groups
of modes: wipes, slides, and other. This can also be "custom",
to allow a custom mode to be defined.
In a wipe, the image stays fixed, and more of it is revealed as
the transition progresses. For example, in "wiperight", a wipe from left to right, first the left edge of the image is
revealed at the left edge of the screen, then the center of the image,
and finally the right side of the image at the right of the screen.
Other supported wipes are "wipeleft", "wipedown", and "wipeup".
In a slide, the image moves. So in a "slideright", the right edge of the
image starts at the left edge of the screen, and moves to the right
as the transition progresses. Other slides are "slideleft", "slidedown",
and "slideup".
There are also slideaways, in which the old image moves on top of
the new image. Slideaways include "slideawayright", "slideawayleft",
"slideawayup", and "slideawaydown".
We also support a rectangular iris in with "irisin" and a
rectangular iris out with "irisout".
The following parameters are only respected if the mode is "custom". Positions
are relative to the size of the screen, while the crops are relative to the
size of the image. So a crop of (0.25, 0.0, 0.5, 1.0) takes the middle
half of an image.
`startcrop`
The starting rectangle that is cropped out of the
top image. A 4-element tuple containing x, y, width, and height.
`startpos`
The starting place that the top image is drawn
to the screen at, a 2-element tuple containing x and y.
`endcrop`
The ending rectangle that is cropped out of the
top image. A 4-element tuple containing x, y, width, and height.
`endpos`
The ending place that the top image is drawn
to the screen at, a 2-element tuple containing x and y.
`topnew`
If true, the scene that is cropped and moved (and is on top of
the other scene) is the new scene. If false, it is the old scene.
::
define wiperight = CropMove(1.0, "wiperight")
define wipeleft = CropMove(1.0, "wipeleft")
define wipeup = CropMove(1.0, "wipeup")
define wipedown = CropMove(1.0, "wipedown")
define slideright = CropMove(1.0, "slideright")
define slideleft = CropMove(1.0, "slideleft")
define slideup = CropMove(1.0, "slideup")
define | |
self.tr("Read Ligand"), '',
# self.tr("PDBQT Files (*.pdbqt);; All files (*)"))
# return filename.encode('ascii', 'replace')
#def getReceptorMapsFilename(self):
# filename, selfilter = QtGui.QFileDialog().getOpenFileName(
# self, self.tr("Read Receptor Maps"), '',
# self.tr("zip Files (*.zip);; All files (*)"))
# return filename.encode('ascii', 'replace')
def getLigand(self, filename):
    """Load a ligand PDBQT file and install it in the viewer.

    Highlights the filename entry widget when the path does not exist;
    builds the scorer once docking maps have also been loaded.
    """
    if os.path.exists(filename):
        # Clear any previous error highlight on the entry widget.
        self.ligandEntryWidget.setStyleSheet("background-color: None")
        mol = Read(filename.encode('ascii', 'replace'))
        self.setLigand(mol)
        self.checkReady()
        if self.unzippedMapsFolder is not None:
            self.makeScorer()
    else:
        # Red highlight signals a bad path.
        self.ligandEntryWidget.setStyleSheet("background-color: #F14D81")
def setLigand(self, mol):
    """Replace the currently docked ligand with *mol* and show it in the viewer."""
    if self.dockedLigand:
        # Remove the previous ligand from the PMV viewer.
        self.pmvViewer.pmv.deleteMolecule(self.dockedLigand)
    self.dockedLigand = mol
    atoms = mol.select()
    d1 = getAtomIndicesPerType(atoms)
    # RMSD calculator that matches atoms of the same type (Hungarian matching).
    self.rmsdCalc = HungarianMatchingRMSD_prody(atoms.getCoords(), d1, d1)
    if self.pmvViewer:
        pmv = self.pmvViewer.pmv
        pmv.addMolecule(mol)
        # Color carbon atoms cyan in the line representation.
        pmv.customColor(mol.select('element C'), [(0.,1.,1.)], geomsToColor=['lines'])
        #pmv.displaySticksAndBalls(mol)
        if len(pmv.Mols)==1:
            # First molecule loaded: frame the camera on it.
            self.pmvViewer.Reset_cb()
            self.pmvViewer.Normalize_cb()
            self.pmvViewer.Center_cb()
def setGridVisible(self, value):
    """Show or hide the docking box outline.

    *value* may be a Qt checkbox state (0 unchecked, 2 checked) or a bool.
    """
    # value is 0 for unchecked and 2 for checked for checkbox
    # not(value==0) make it work for 0, 1, 2, False, True
    self.boxGeom.master.Set(visible = not(value==0))
    for c in self.boxGeom.master.children:
        if c.name=='faces':
            # The box faces always stay hidden; only the outline toggles.
            c.Set(visible = 0)
        else:
            c.Set(visible = not(value==0))
def getMaps(self, filename):
    """Load a zipped AutoDock maps file ('target') and set up the scene.

    Unzips the maps, reads the receptor structure, displays it in the
    viewer, and draws the docking box outline at the grid center/size.
    """
    if os.path.exists(filename):
        from ADFR.utils.maps import MapsFile
        self.mf = mf = MapsFile(filename)
        mf.unzipMaps()
        self.unzippedMapsFolder = unzippedMapsFolder = mf.getMapsFolder()
        receptorFilename = os.path.join(mf.getMapsFolder(),
                                        mf.getReceptorFilename())
        flexRes = mf.getFlexResStr()
        #flexResStr = mf.getFlexResStr()
        #from ADFR.utils.maps import flexResStr2flexRes
        #flexRes = flexResStr2flexRes(flexResStr)
        covalentRec = mf.getCovalentBond()
        if covalentRec is not None:
            # Prepend the torsion-atom serial number parsed from the
            # covalent bond description string -- TODO confirm format.
            covalentRec.insert(
                0, int(mf.getCovalentBondTorsionAtom().split()[1][1:-1]))
        self.mapsFilename = filename
        self.checkReady()
        if self.receptor and self.pmvViewer:
            # Replace any previously loaded receptor.
            self.pmvViewer.pmv.deleteMolecule([self.receptor])
        self.receptor = Read(receptorFilename)
        #if self.dockedLigand is not None:
        #    self.makeScorer(flexRes=flexRes)
        if self.pmvViewer:
            self.pmvViewer.pmv.addMolecule(self.receptor)
            # Draw the docking box outline.
            from DejaVu2.Box import NiceBox
            b = self.boxGeom = NiceBox('gridOutline')
            b.setCenter(*mf.getBoxCenter())
            b.setSides(*mf.getBoxSize())
            self.boxGeom.addToViewer(self.pmvViewer)
            self.setGridVisible(True)
            ## from DejaVu2.Points import Points
            ## self.TPoints = Points(
            ##     'tpoints', visible=1, inheritMaterial=False,
            ##     materials=[(1,0,0)], inheritPointWidth=False,
            ##     pointWidth=4.)
            ## self.pmvViewer.AddObject(self.TPoints)
            #from DejaVu2.Spheres import Spheres
            #self.anchorAtomGeom = Spheres('rootAtom', visible=0, inheritMaterial=False,
            #                              materials=[(1,0,1)], inheritFrontPolyMode=False,
            #                              frontPolyMode='line', quality=2,
            #                              inheritLineWidth=0, lineWidth=1)
            #self.pmvViewer.AddObject(self.anchorAtomGeom)
def setOutput(self, text):
    """Store the output filename typed by the user (ascii-encoded)."""
    self.outputFilename = text.encode('ascii', 'replace')
    self.checkReady()
def gaStart_cb(self, jobNum, logFile):
    """Callback: mark GA run *jobNum* as started (status 1) and refresh the run map."""
    #print 'in main Start', jobNum, logFile, percent
    self._jobStatus[jobNum] = 1
    self.gaRunsMap.setJobs(self._jobStatus)
    self.gaRunsMap.update()
def getPoseData(self, logFile):
    """Parse the tail of a GA run log file.

    Returns (score, rmsdRef, energies) where score and rmsdRef come from
    the third-to-last line and the energy terms from the second-to-last
    line. Assumes the fixed column layout written by the docking run,
    with comma-terminated values -- TODO confirm against the log writer.
    """
    f = open(logFile)
    lines = f.readlines()
    f.close()
    w1 = lines[-3].split()
    w2 = lines[-2].split()
    # w2 alternates labels and values; [:-1] strips each trailing comma.
    return float(w1[2]), float(w1[4]),{
        'RRL': float(w2[1][:-1]), 'FRFR': float(w2[3][:-1]),
        'RRFR': float(w2[5][:-1]), 'wRR': float(w2[7][:-1]),
        'LL': float(w2[9][:-1]), 'FRL': float(w2[11][:-1])}
def updateBestLabels(self, jobNum, score, rmsdRef, energies):
    """Refresh the 'best solution' score/energy/RMSD labels for job *jobNum*."""
    self.bestScoreLabel.setText('job: %d score: %.3f'%(jobNum+1, score))
    # Only show flexible-receptor terms when they are non-zero.
    if energies['FRFR'] != 0.0:
        lab = "LL: %.3f, RL: %.3f, 'FRL: %.3f, FRFR: %.3f, RRFR: %.3f"%(energies['LL'], energies['RRL'], energies['FRL'], energies['FRFR'], energies['RRFR'])
    else:
        lab = "LL: %.3f, RL: %.3f"%(energies['LL'], energies['RRL'])
    self.bestScoreEnergyLabel.setText(lab)
    # RMSD of this pose relative to the current best-scoring pose.
    self.rmsdCalc.setRefCoords(self.dockedLigand._ag._coords[self.best_score_jobnum])
    rmsdBest = self.rmsdCalc.computeRMSD(self.dockedLigand._ag._coords[jobNum])
    self.rmsdsLabel.setText('ref: %.3f solution: %.3f'%(rmsdRef, rmsdBest))
def gaDone_cb(self, jobNum, logFile, percent, status, error):
    """Callback invoked when a GA run finishes or fails.

    On success, records the run's score/RMSD/energies, loads the pose
    coordinates and genes, tracks the best solution, then re-clusters all
    completed solutions. When *percent* reaches 1.0 all runs are done.
    """
    #print 'in main end', jobNum, logFile, percent, status, error
    if status=='OK':
        self._jobStatus[jobNum] = 2
        self.gaRunsMap.setJobs(self._jobStatus)
        self.gaRunsMap.update()
        score, rmsdRef, energies = self.getPoseData(logFile)
        self._scores[jobNum] = score
        self._rmsdsRef[jobNum] = rmsdRef
        self._energies[jobNum] = energies
        # get pose coordinates
        ligandFilename = '%s_%04d_lig.pdbqt'%(self.outputNameWidget.text(), jobNum)
        lig = Read(ligandFilename)
        ag = self.dockedLigand._ag
        # Store the pose in coordinate set jobNum.
        ag.setACSIndex(jobNum)
        ag.setCoords(lig._ag.getCoords())
        # get ligand genes
        f = open(ligandFilename)
        lines = f.readlines()
        f.close()
        ln = 3
        words = lines[ln].split()
        if words[1]=='GENES':
            nbGenesLines = int(words[2])
            genes = []
            for i in range(nbGenesLines):
                # Gene values follow the '|==|' separator on each line.
                words = lines[ln+1+i].split('|==|')
                genes.extend([float(x) for x in words[1].split()])
            self._genes[jobNum] = genes
        else:
            print 'ERROR: GENES not found', lines[0]
        if score < self.best_score:
            # New best solution: display it and remember its data.
            if self.pmvViewer:
                self.dockedLigand.geomContainer.allCoords[:] = lig._ag.getCoords()
                self.pmvViewer.pmv.displayLines(self.dockedLigand)
            self.best_score = score
            self.best_score_jobnum = jobNum
            self.best_score_rmsdRef = rmsdRef
            self.best_score_energies = energies
            self.updateBestLabels(jobNum, score, rmsdRef, energies)
    elif status=='FAILED':
        #b.setStyleSheet("background-color: red")
        self._jobStatus[jobNum] = 3
        self.gaRunsMap.setJobs(self._jobStatus)
        self.gaRunsMap.update()
        print 'ERROR', error
    ## cluster solutions
    #order = numpy.argsort([x for x in self._scores if x is not None])
    order = []
    scores = [] # list of scores from the self._scores for jobs that have completed
    #build scores list and list of indices of solutions to be clustered
    for i, sc in enumerate(self._scores):
        if sc is not None:
            order.append(i) # because solution coords start at self.dockedLigand._ag._coords[1]
            scores.append(sc)
    # make sure the 'order' list is sorted by score
    oorder = numpy.argsort(scores)
    order = numpy.array(order)[oorder]
    if len(order)>1:
        # cluster all solutions
        #print 'ORDER', order
        #print 'scores', self._scores
        self.clusters = clusterPoses(self.dockedLigand._ag._coords, order,
                                     self.rmsdCalc, cutOff=2.0)
        #print 'clusters', self.clusters
        #for i, c in enumerate(self.clusters):
        #    print i, c, [self._scores[j] for j in c]
        self.gaRunsMap.setJobs(self._jobStatus)
        # bin scores in each cluster
        ## eBinWidth = 0.5
        ## minE = min(scores)
        ## maxE = max(scores)
        ## nBins = int(ceil((maxE-minE)/eBinWidth))
        ## #print 'NBINS', nBins, maxE, minE, eBinWidth
        ## #print 'energies', min(self._scores), max(self._scores)
        ## histo = [None]* len(self.clusters)
        ## for cnum, cl in enumerate(self.clusters):
        ##     count = [0]*nBins
        ##     for solInd in cl:
        ##         count[int((self._scores[solInd]-minE)/eBinWidth)] += 1
        ##     histo[cnum] = count
        #print 'HISTO', histo
        self.clustersWidget.setClusters(self.clusters, self._scores)
        self.clustersWidget.update()
    if percent==1.0:
        # All runs finished: restore the dock button and show the best pose.
        self.dockButton.setText('dock')
        if len(order)>1:
            self.setNbCusterButtons(len(self.clusters))
        self.setPose(self.best_score_jobnum)
def setPose(self, i):
    """Display docking pose *i* in the viewer and refresh labels and details."""
    if self.dockButton.text() == 'stop': return  # docking still running: ignore
    if self.pmvViewer:
        self.dockedLigand.geomContainer.allCoords[:] = self.dockedLigand._ag._coords[i]
        self.pmvViewer.pmv.displayLines(self.dockedLigand)
    self.updateBestLabels(i, self._scores[i], self._rmsdsRef[i], self._energies[i])
    # Rebuild the GA individual from its genes to recompute per-term scores.
    self._ind.setGenes(self._genes[i])
    _score = self._ind.score()
    #print 'POSE', i, _score,
    from ADFR.utils.analyze import getHBPairs, addHBlines
    atoms = self._adfr.ligandFT.mol.select()
    #hbPairs, hbEne = getHBPairs(self._ind, atoms, cutOffEne=-0.001)
    #if len(hbPairs):
    #    geoms = addHBlines(self.pmvViewer, hbPairs, hbEne, atoms.getCoords())
    #import pdb; pdb.set_trace()
    self.detailsWidget.fillTable(self._ind, self._adfr, self.dockedLigand._ag._coords[i])
def setNbGA(self, num):
    """Resize the job-status bookkeeping when the 'GA runs' spinbox changes."""
    self._jobStatus = [0]*num
    self.gaRunsMap.setJobs(self._jobStatus)
    self.gaRunsMap.update()
    # No clusters yet for the new run count.
    self.setNbCusterButtons(0)
    self.clustersWidget.setMaxBarHeight(num)
    self.clustersWidget.setClusters(None, None)
    self.clustersWidget.update()
def setNbCusterButtons(self, num):
    """Rebuild the per-cluster pose buttons.

    Each button is labelled with the cluster number and the RMSD of the
    cluster's best pose relative to the overall best pose; clicking it
    displays that pose.
    """
    # Remove and destroy the existing buttons first.
    for b in self.clButtons:
        self.clButtonsLayout.removeWidget(b)
        b.setParent(None)
        b.deleteLater()
    self.clButtons = []
    n = 0
    nbPerRow = 3  # grid layout, 3 buttons per row
    for i in range(num):
        self.rmsdCalc.setRefCoords(self.dockedLigand._ag._coords[self.best_score_jobnum])
        rmsdBest = self.rmsdCalc.computeRMSD(self.dockedLigand._ag._coords[self.clusters[i][0]])
        w = QtGui.QPushButton("%d (%.2f)"%(n+1,rmsdBest))
        w.setFixedSize(QtCore.QSize(50, 15))
        self.clButtons.append(w)
        # Clicking shows the cluster's best-scoring pose.
        cb = CallbackFunction(self.setPose, self.clusters[i][0])
        w.clicked.connect(cb)
        # NOTE: Python 2 integer division computes the row/column position.
        self.clButtonsLayout.addWidget(w, n/nbPerRow, n-nbPerRow*(n/nbPerRow))
        n += 1
    self.clButtonsLayout.update()
    #import pdb; pdb.set_trace()
def runDocking(self, inThread=True):
    """Launch the GA docking runs, optionally in a background thread.

    Resets best-solution bookkeeping, makes sure the ligand has enough
    coordinate sets to hold one pose per run, builds the adfr command
    line from the UI widgets, and starts a runGAThread whose start/end
    signals drive gaStart_cb/gaDone_cb.
    """
    # reset buttons to default color
    #for b in self.buttons:
    #    b.setStyleSheet("background-color: None")
    # delete the cluster buttons
    self.setNbCusterButtons(0)
    # Reset best-solution bookkeeping.
    self.best_score = 9999999999.
    self.best_score_jobnum = -1
    self.best_score_rmsd = -1
    self.best_score_energies = {}
    nbGA = self.gaNbWidget.value()
    self._scores = [None]*nbGA
    self._genes = [None]*nbGA
    self._rmsdsRef = [None]*nbGA
    self._energies = [None]*nbGA
    # makes sure we have enough coord sets to store poses
    # first job is ni coordinate set 1 NOT 0
    ag = self.dockedLigand._ag
    coords = ag.getCoords()
    if ag.numCoordsets() < nbGA:
        for i in range(ag.numCoordsets(), nbGA):
            self.dockedLigand._ag.addCoordset(coords, 'pose %d'%(i))
    self.bestScoreLabel.setText('job: %d score: %.3f'%(-1, 0.))
    self.bestScoreEnergyLabel.setText("")
    self.rmsdsLabel.setText('ref: %.3f solution: %.3f'%(-1, -1))
    # Build the adfr command line from the UI widgets.
    args = [None, self.ligandEntryWidget.text().encode('ascii', 'replace'),
            '--target', '"%s"'%self.mapsEntryWidget.text().encode('ascii', 'replace'),
            '--jobName', '"%s"'%self.outputNameWidget.text(),
            '--maxCores', str(self.coreNbWidget.value()),
            '-o', '"%s"'%self.outputFilename,
            '-O',
            '--nbRuns', str(nbGA),
            '--maxEvals', str(self.maxEvalsWidget.value()),
            ] # first agument is ignored
    refLig = self.refLigWidget.text()
    if refLig:
        args.append('-r')
        args.append(refLig)
    #print args
    #print ' '.join(args[1:])
    # The dock button doubles as a stop button while running.
    self.dockButton.setText('stop')
    #self.dockButton.setDisabled(True)
    nga = self.gaNbWidget.value()
    self._jobStatus = [0]*nga
    self.gaRunsMap.setJobs(self._jobStatus)
    self.gaRunsMap.update()
    #self.clustersWidget.setMaxBarHeight(nga)
    self.clustersWidget.setMaxBarHeight(1)
    self.clustersWidget.setClusters(None, None)
    self.clustersWidget.update()
    # Wire the per-run start/end callbacks through queued Qt signals so
    # they run on the GUI thread.
    gaThread = runGAThread(nga)
    gaThread.startGASignal.connect(self.gaStart_cb,
                                   QtCore.Qt.QueuedConnection)
    gaThread.endGASignal.connect(self.gaDone_cb,
                                 QtCore.Qt.QueuedConnection)
    if inThread:
        thread.start_new_thread( gaThread.run, (args,) )
    else:
        gaThread.run(args)
def buildUI(self):
    """Create the widget layout: input group, parameters group, run group."""
    layout = QtGui.QVBoxLayout()

    # --- input group: ligand / target maps / reference ligand entries ---
    grp1 = QtGui.QGroupBox("input")
    formLayout = QtGui.QFormLayout()
    w = self.ligandEntryWidget = MyQLineEdit("Read Ligand", "PDBQT Files (*.pdbqt);; All files (*)")
    #w.textChanged.connect(self.checkReady)
    w.textChanged.connect(self.getLigand)
    formLayout.addRow(self.tr("ligand:"), self.ligandEntryWidget)
    w = self.mapsEntryWidget = QtGui.QLineEdit()
    #w.textChanged.connect(self.checkReady)
    w.textChanged.connect(self.getMaps)
    formLayout.addRow(self.tr("target:"), self.mapsEntryWidget)
    w = self.refLigWidget = QtGui.QLineEdit()
    formLayout.addRow(self.tr("reference ligand:"), self.refLigWidget)
    grp1.setLayout(formLayout)
    sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
    grp1.setSizePolicy(sizePolicy)
    layout.addWidget(grp1)
    #ret = layout.setStretch(grp1, 1)
    #print 'FUGU', layout.stretch(0)
    #print 'FUGU1', layout.stretch(1)

    # --- parameters group: output name, cores, GA runs, max evals ---
    grp2 = QtGui.QGroupBox("parameters")
    sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
    grp2.setSizePolicy(sizePolicy)
    formLayout = QtGui.QFormLayout()
    self.outputNameWidget = QtGui.QLineEdit()
    self.outputNameWidget.textChanged.connect(self.setOutput)
    formLayout.addRow(self.tr("output name:"), self.outputNameWidget)
    import multiprocessing
    ncpu = multiprocessing.cpu_count()
    w = self.coreNbWidget = QtGui.QSpinBox()
    # Default to all-but-one core.
    w.setValue(ncpu-1)
    w.setRange(1, ncpu)
    formLayout.addRow(self.tr("cores:"), self.coreNbWidget)
    w = self.gaNbWidget = QtGui.QSpinBox()
    w.setValue(50)
    w.setMinimum(1)
    w.setMaximum(999999)
    w.valueChanged.connect(self.setNbGA)
    formLayout.addRow(self.tr("GA runs:"), self.gaNbWidget)
    grp2.setLayout(formLayout)
    w = self.maxEvalsWidget = QtGui.QSpinBox()
    w.setMinimum(1)
    w.setMaximum(99999999)
    w.setValue(5000000)
    formLayout.addRow(self.tr("max. evals.:"), self.maxEvalsWidget)
    grp2.setLayout(formLayout)
    layout.addWidget(grp2)

    w = self.minimizeButton = QtGui.QPushButton('minimize')
    w.clicked.connect(self.minimize)
    layout.addWidget(w)

    # --- run group: dock button, run map, labels, cluster histogram ---
    grp3 = QtGui.QGroupBox("run")
    gLayout = QtGui.QVBoxLayout()
    w = self.dockButton = QtGui.QPushButton('dock')
    w.setDisabled(True)  # enabled once ligand and maps are loaded
    gLayout.addWidget(w)
    self.dockButton.clicked.connect(self.runDocking)
    w = self.gaRunsMap = GARunsMap(self.dockButton)
    gLayout.addWidget(w)
    bestForm = QtGui.QFormLayout()
    self.bestScoreLabel = QtGui.QLabel('None')
    bestForm.addRow(self.tr("solution:"), self.bestScoreLabel)
    self.bestScoreEnergyLabel = QtGui.QLabel('-1')
    bestForm.addRow(self.tr("energies:"), self.bestScoreEnergyLabel)
    self.rmsdsLabel = QtGui.QLabel('None')
    bestForm.addRow(self.tr("RMSD:"), self.rmsdsLabel)
    gLayout.addLayout(bestForm)
    # add cluster histogram
    self.clustersWidget = w = DockingClustersStackedHistograms()
    # add color legend
    hlayout = QtGui.QHBoxLayout()
    colors = self.clustersWidget.colorsRGB
    for i in range(len(colors)):
        # The last legend entry is open-ended (">NKcal").
        if i==len(colors)-1:
            l1 = QtGui.QLabel(">%dKcal"%(i+1))
        else:
            l1 = QtGui.QLabel("%dKcal"%(i+1))
        l1.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)
        l1.setFrameStyle(QtGui.QFrame.StyledPanel | QtGui.QFrame.Plain)
        qcol = QtGui.QColor(*colors[i])
        l1.setStyleSheet("background-color: %s"%qcol.name())
        l1.setMinimumSize(QtCore.QSize(30, 15))
        hlayout.addWidget(l1)
    gLayout.addLayout(hlayout)
    gLayout.addWidget(w)
    self.clButtons = []
    self.clButtonsLayout = QtGui.QGridLayout()
    gLayout.addLayout(self.clButtonsLayout)
    grp3.setLayout(gLayout)
    layout.addWidget(grp3)
    self.setLayout(layout)
class SingleDockingDetailsWidget(QtGui.QWidget):
def __init__(self, PmvViewer, parent=None):
    """Details panel for a single docking.

    Builds the UI and adds a translucent spheres geometry to the viewer
    for highlighting ligand-receptor grid interactions.
    """
    super(SingleDockingDetailsWidget, self).__init__(parent)
    self.PmvViewer = PmvViewer
    self.buildUI(parent)
    from DejaVu2.Spheres import Spheres
    # Hidden by default; shown when an interaction row is selected.
    self.LRSpheres = Spheres('Ligand-Receptor grid interactions', visible=False,
                             inheritMaterial=False, transparent=True, opacity=0.4)
    PmvViewer.AddObject(self.LRSpheres)
def buildUI(self, parent):
self.tabWidget = QtGui.QTabWidget(parent)
w = self.interactionsTableWidget = QtGui.QTableWidget(parent)
w.setColumnCount(6)
w.setHorizontalHeaderLabels(
["name", "element", "energy", "x", "y", "z"])
w.itemSelectionChanged.connect(self.onSelectLR)
self.recTableWidget = QtGui.QTableWidget(parent)
w = self.ligTableWidget = QtGui.QTableWidget(parent)
w.setColumnCount(8)
w.setHorizontalHeaderLabels(
["non-bond", | |
<filename>soco_cli/utils.py<gh_stars>10-100
"""Common utilities used across multiple modules."""
import datetime
import functools
import logging
import os
import pickle
import signal
import sys
from collections.abc import Sequence
from platform import python_version
from time import sleep

try:
    import readline
except ImportError:
    pass

import soco  # type: ignore

from soco_cli.__init__ import __version__  # type: ignore
from soco_cli.match_speaker_names import speaker_name_matches
from soco_cli.speakers import Speakers
def event_unsubscribe(sub):
    """Best-effort unsubscribe from an event subscription.

    A short sleep is taken first to yield the thread, and any failure
    raised by the unsubscribe call is swallowed (but logged).
    """
    logging.info("Unsubscribing '{}'".format(sub))
    try:
        sleep(0.2)
        sub.unsubscribe()
    except Exception as exc:
        logging.info("Failed to unsubscribe: {}".format(exc))
    logging.info("Unsubscribed")
# Global mode flags, set once at startup by the setters below and read by
# error_report() and sig_handler().
INTERACTIVE = False  # True when running as an interactive shell
API = False  # True when driven through the API wrapper
SINGLE_KEYSTROKE = False  # True when single-keystroke input mode is active


def set_interactive():
    """Flag that SoCo-CLI is running as an interactive shell."""
    global INTERACTIVE
    INTERACTIVE = True


def set_api():
    """Flag that SoCo-CLI is being driven through its API."""
    global API
    API = True


def set_single_keystroke(sk):
    """Enable or disable single-keystroke input mode."""
    global SINGLE_KEYSTROKE
    SINGLE_KEYSTROKE = sk
# Error handling
def error_report(msg):
    """Print an error message to stderr and, outside interactive/API
    modes, terminate the process immediately.
    """
    # Print to stderr
    print("Error:", msg, file=sys.stderr, flush=True)
    # Use os._exit() to avoid the catch-all 'except'
    if not (INTERACTIVE or API):
        logging.info("Exiting program using os._exit(1)")
        os._exit(1)
def parameter_type_error(action, required_params):
    """Report that *action* was given parameter(s) of the wrong type."""
    msg = "Action '{}' takes parameter(s): {}".format(action, required_params)
    error_report(msg)
def parameter_number_error(action, parameter_number):
    """Report that *action* was given the wrong number of parameters."""
    msg = "Action '{}' takes {} parameter(s)".format(action, parameter_number)
    error_report(msg)
# Parameter count checking
def zero_parameters(f):
    """Decorator: require the action's parameter list (args[2]) to be empty.

    On a violation, reports a parameter-count error and returns False
    instead of calling the wrapped action.
    """
    @functools.wraps(f)  # preserve f's __name__ for logging (%(funcName)s)
    def wrapper(*args, **kwargs):
        if len(args[2]) != 0:
            parameter_number_error(args[1], "no")
            return False
        return f(*args, **kwargs)

    return wrapper
def one_parameter(f):
    """Decorator: require exactly one parameter in the action's args[2].

    On a violation, reports a parameter-count error and returns False
    instead of calling the wrapped action.
    """
    @functools.wraps(f)  # preserve f's __name__ for logging (%(funcName)s)
    def wrapper(*args, **kwargs):
        if len(args[2]) != 1:
            parameter_number_error(args[1], "1")
            return False
        return f(*args, **kwargs)

    return wrapper
def zero_or_one_parameter(f):
    """Decorator: require zero or one parameter in the action's args[2].

    On a violation, reports a parameter-count error and returns False
    instead of calling the wrapped action.
    """
    @functools.wraps(f)  # preserve f's __name__ for logging (%(funcName)s)
    def wrapper(*args, **kwargs):
        if len(args[2]) not in [0, 1]:
            parameter_number_error(args[1], "0 or 1")
            return False
        return f(*args, **kwargs)

    return wrapper
def one_or_two_parameters(f):
    """Decorator: require one or two parameters in the action's args[2].

    On a violation, reports a parameter-count error and returns False
    instead of calling the wrapped action.
    """
    @functools.wraps(f)  # preserve f's __name__ for logging (%(funcName)s)
    def wrapper(*args, **kwargs):
        if len(args[2]) not in [1, 2]:
            parameter_number_error(args[1], "1 or 2")
            return False
        return f(*args, **kwargs)

    return wrapper
def two_parameters(f):
    """Decorator: require exactly two parameters in the action's args[2].

    On a violation, reports a parameter-count error and returns False
    instead of calling the wrapped action.
    """
    @functools.wraps(f)  # preserve f's __name__ for logging (%(funcName)s)
    def wrapper(*args, **kwargs):
        if len(args[2]) != 2:
            parameter_number_error(args[1], "2")
            return False
        return f(*args, **kwargs)

    return wrapper
def zero_one_or_two_parameters(f):
    """Decorator: allow at most two parameters in the action's args[2].

    On a violation, reports a parameter-count error and returns False
    instead of calling the wrapped action.
    """
    @functools.wraps(f)  # preserve f's __name__ for logging (%(funcName)s)
    def wrapper(*args, **kwargs):
        if len(args[2]) > 2:
            parameter_number_error(args[1], "zero, one or two")
            return False
        return f(*args, **kwargs)

    return wrapper
def one_or_more_parameters(f):
    """Decorator: require at least one parameter in the action's args[2].

    On a violation, reports a parameter-count error and returns False
    instead of calling the wrapped action.
    """
    @functools.wraps(f)  # preserve f's __name__ for logging (%(funcName)s)
    def wrapper(*args, **kwargs):
        if len(args[2]) < 1:
            parameter_number_error(args[1], "1 or more")
            return False
        return f(*args, **kwargs)

    return wrapper
# Time manipulation
def seconds_until(time_str):
    """Return the number of seconds from now until the HH:MM(:SS) time.

    Times at or before the current moment are interpreted as tomorrow.

    :raises ValueError: if *time_str* is not a valid HH:MM(:SS) string.
    """
    # target_time = datetime.time.fromisoformat(time_str)
    target_time = create_time_from_str(time_str)
    now_time = datetime.datetime.now().time()
    # Convert both times-of-day into timedeltas so they can be subtracted.
    delta_target = datetime.timedelta(
        hours=target_time.hour, minutes=target_time.minute, seconds=target_time.second
    )
    delta_now = datetime.timedelta(
        hours=now_time.hour, minutes=now_time.minute, seconds=now_time.second
    )
    diff = int((delta_target - delta_now).total_seconds())
    # Ensure 'past' times are treated as future times by adding 24hr
    return diff if diff > 0 else diff + 24 * 60 * 60
def create_time_from_str(time_str):
    """Parse a time in HH:MM(:SS) format and return a datetime.time.

    Accepts times from 00:00(:00) to 23:59(:59).

    :raises ValueError: if the string is malformed or out of range
        (previously raised with no message; messages added for easier
        diagnosis, exception type unchanged).
    """
    if ":" not in time_str:
        raise ValueError("'{}' does not contain ':'".format(time_str))
    parts = time_str.split(":")
    if len(parts) not in [2, 3]:
        raise ValueError("'{}' is not in HH:MM(:SS) format".format(time_str))
    hours = int(parts[0])
    minutes = int(parts[1])
    seconds = int(parts[2]) if len(parts) == 3 else 0
    # Accept time strings from 00:00:00 to 23:59:59
    if 0 <= hours <= 23 and 0 <= minutes <= 59 and 0 <= seconds <= 59:
        return datetime.time(hour=hours, minute=minutes, second=seconds)
    raise ValueError("'{}' is out of range".format(time_str))
def convert_to_seconds(time_str):
    """Convert a time string to a number of seconds.

    time_str can be one of Nh, Nm or Ns, or of the form HH:MM(:SS).

    :raises ValueError: if the string cannot be parsed.
    """
    logging.info("Converting '{}' to a number of seconds".format(time_str))
    time_str = time_str.lower()
    try:
        if ":" in time_str:  # Assume form is HH:MM:SS or HH:MM
            parts = time_str.split(":")
            if len(parts) == 3:  # HH:MM:SS
                td = datetime.timedelta(
                    hours=int(parts[0]), minutes=int(parts[1]), seconds=int(parts[2])
                )
            else:  # HH:MM
                td = datetime.timedelta(hours=int(parts[0]), minutes=int(parts[1]))
            # Use total_seconds(): 'td.seconds' discards whole days, so
            # durations of 24h or more were previously truncated.
            return int(td.total_seconds())
        if time_str.endswith("s"):  # Seconds (explicit)
            return float(time_str[:-1])
        if time_str.endswith("m"):  # Minutes
            return float(time_str[:-1]) * 60
        if time_str.endswith("h"):  # Hours
            return float(time_str[:-1]) * 60 * 60
        return float(time_str)  # Seconds (default)
    except ValueError:
        # Narrowed from the original bare 'except', which would also have
        # masked unrelated errors (including KeyboardInterrupt).
        raise ValueError("Invalid time string: '{}'".format(time_str))
# Miscellaneous
def convert_true_false(true_or_false, conversion="YesOrNo"):
    """Map a boolean to a display string.

    conversion="YesOrNo" yields "Yes"/"No"; conversion="onoroff" yields
    "on"/"off"; any other conversion yields None. Only the exact value
    True selects the positive string.
    """
    words = {"YesOrNo": ("Yes", "No"), "onoroff": ("on", "off")}.get(conversion)
    if words is None:
        return None
    positive, negative = words
    return positive if true_or_false is True else negative
def version():
    """Print the SoCo-CLI, SoCo and Python version numbers."""
    for package, ver in (
        ("soco-cli", __version__),
        ("soco", soco.__version__),
        ("python", python_version()),
    ):
        print("{} version: {}".format(package, ver), flush=True)
def docs():
    """Print the URL of the README matching this release."""
    version = "v{}".format(__version__)
    # A trailing '+' marks a development build: point at the next_version branch.
    branch_or_tag = "next_version" if __version__.endswith("+") else version
    url = "https://github.com/avantrec/soco-cli/blob/{}/README.md".format(branch_or_tag)
    print("Online documentation for {}: {}".format(version, url), flush=True)
def logo():
    """Print the URL of the SoCo-CLI logo image matching this release."""
    version = "v{}".format(__version__)
    # A trailing '+' marks a development build: point at the next_version branch.
    branch_or_tag = "next_version" if __version__.endswith("+") else version
    url = (
        "https://raw.githubusercontent.com/avantrec/soco-cli/"
        "{}/assets/soco-cli-logo-01-large.png".format(branch_or_tag)
    )
    print("SoCo-CLI Logo: {}".format(url), flush=True)
# Suspend signal handling processing for 'exec' in interactive shell
suspend_sighandling = False  # checked by sig_handler() before acting on a signal


def set_suspend_sighandling(suspend=True):
    """Enable or disable temporary suspension of signal handling."""
    global suspend_sighandling
    logging.info("Setting 'suspend_sighandling' to '{}'".format(suspend))
    suspend_sighandling = suspend
# Stop a stream if playing a local file
speaker_playing_local_file = None  # the speaker currently streaming a local file, if any


def set_speaker_playing_local_file(speaker):
    """Record (or clear, with None) the speaker playing a local file.

    sig_handler() uses this to stop the stream before exiting.
    """
    global speaker_playing_local_file
    if speaker:
        logging.info(
            "Setting speaker playing local file to '{}'".format(speaker.player_name)
        )
    else:
        logging.info("No speaker playing local file")
    speaker_playing_local_file = speaker
def sig_handler(signal_received, frame):
    """Process-wide handler for SIGINT/SIGTERM.

    Ignores signals while 'suspend_sighandling' is set; blocks CTRL-C exit
    in interactive and single-keystroke modes (an abrupt readline exit can
    leave a terminal broken); otherwise stops any local-file playback,
    unsubscribes event notifications, and exits via os._exit(0).
    """
    logging.info("Caught signal: {}".format(signal_received))
    if suspend_sighandling:
        logging.info("Signal handling suspended ... ignoring")
        return
    # Restore stdout and stderr ... these have been redirected if
    # api.run_command() was used
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
    # Prevent SIGINT (CTRL-C) exit: untidy exit from readline can leave
    # some terminals in a broken state
    if signal_received == signal.SIGINT:
        if SINGLE_KEYSTROKE:
            logging.info("SINGLE_KEYSTROKE set ... preventing exit")
            print("\nPlease use 'x' to exit >> ", end="", flush=True)
            return
        if INTERACTIVE:
            logging.info("INTERACTIVE set ... preventing exit")
            print("\nPlease use 'exit' to terminate the shell > ", end="", flush=True)
            if os.name == "nt":
                # Emit an extra newline after the prompt on Windows.
                print(flush=True)
            return
    # Allow SIGTERM termination, but issue warning if interactive
    if signal_received == signal.SIGTERM and INTERACTIVE:
        print("\nSoCo-CLI process terminating ...", flush=True)
        print(
            "This can leave some terminals in a misconfigured state.",
            flush=True,
        )
    if speaker_playing_local_file:
        # Stop the speaker that is streaming a local file, if any.
        logging.info(
            "Speaker '{}': 'play_file' active ... stopping".format(
                speaker_playing_local_file.player_name
            )
        )
        speaker_playing_local_file.stop()
    logging.info("Unsubscribing from event notifications")
    # NOTE(review): unsub_all_remembered_event_subs() is defined elsewhere
    # in this module.
    unsub_all_remembered_event_subs()
    logging.info("Exiting program using 'os._exit(0)'")
    print("", flush=True)
    os._exit(0)
class RewindableList(Sequence):
    """A just-enough list implementation that can be rewound during iteration.

    Iterating the object always restarts from the beginning (``__iter__``
    rewinds the cursor); ``rewind_to``, ``insert`` and ``pop_next`` manipulate
    the internal cursor so that iteration continues from the same logical item.
    """

    def __init__(self, items=None):
        # FIX: the original default 'items=[]' is a mutable default argument,
        # shared by every instance constructed without arguments -- inserting
        # into one such instance would mutate all of them. Create a fresh
        # list instead. A caller-supplied list is still held by reference
        # (not copied), as before.
        self._items = [] if items is None else items
        self._index = 0

    def __iter__(self):
        # Iteration always restarts from the first element.
        self.rewind()
        return self

    def __getitem__(self, item):
        return self._items[item]

    def __len__(self):
        return len(self._items)

    def __next__(self):
        if self._index < len(self._items):
            item = self._items[self._index]
            self._index += 1
            return item
        raise StopIteration

    def rewind(self):
        """Reset the iteration cursor to the start of the list."""
        self._index = 0

    def rewind_to(self, index):
        """Move the iteration cursor to 'index'.

        Raises IndexError if 'index' is out of range; index 0 is always
        accepted for an empty list.
        """
        if len(self._items) == 0 and index == 0:
            self._index = 0
        elif 0 <= index < len(self._items):
            self._index = index
        else:
            raise IndexError

    def __str__(self):
        return str(self._items)

    def index(self):
        """Return the current iteration cursor position."""
        return self._index

    def insert(self, index, element):
        """Insert 'element' at 'index', keeping the cursor on the same item."""
        self._items.insert(index, element)
        if index <= self._index:
            self._index += 1

    def pop_next(self):
        """Remove and return the first element, keeping the cursor on the
        same item. Raises IndexError when the list is empty."""
        item = self._items.pop(0)
        if self._index != 0:
            self._index -= 1
        return item
# Set up logging
def configure_logging(log_level: str) -> None:
    """Configure root logging from a (case-insensitive) level name.

    "none" disables logging entirely; unrecognised names are reported via
    error_report().
    """
    level_name = log_level.lower()
    if level_name == "none":
        # Disables all logging (i.e., CRITICAL and below)
        logging.disable(logging.CRITICAL)
        return
    log_format = (
        "%(asctime)s %(filename)s:%(lineno)s - %(funcName)s() - %(message)s"
    )
    level_map = {
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "warn": logging.WARNING,
        "warning": logging.WARNING,
        "error": logging.ERROR,
        "critical": logging.CRITICAL,
    }
    if level_name in level_map:
        logging.basicConfig(format=log_format, level=level_map[level_name])
    else:
        error_report(
            "--log takes one of: NONE, DEBUG, INFO, WARN(ING), ERROR, CRITICAL"
        )
# Local speaker list operations
speaker_list = None


def set_speaker_list(s):
    """Store the local speaker list object for later lookups."""
    global speaker_list
    speaker_list = s
class SpeakerCache:
    def __init__(self, max_threads: int = 256, scan_timeout: float = 0.1, min_netmask: int = 24):
        """Create an empty speaker cache.

        The parameters are forwarded to the soco discovery/scan calls.
        """
        # _cache contains (soco_instance, speaker_name) tuples
        self._cache = set()
        # Whether a full network scan has completed (and found speakers)
        self._scan_done = False
        # Whether standard discovery has been attempted at least once
        self._discovery_done = False
        self._max_threads = max_threads
        self._scan_timeout = scan_timeout
        self._min_netmask = min_netmask
    @property
    def exists(self):
        """True if at least one speaker has been cached."""
        return bool(self._cache)
def cache_speakers(self, speakers):
logging.info("Adding speakers to cache: {}".format(speakers))
for speaker in speakers:
self._cache.add((speaker, speaker.player_name))
    def discover(self, reset=False):
        """Populate the cache using standard SoCo discovery.

        A no-op if discovery has already been attempted, unless 'reset' is
        True (which also clears any existing cache entries).
        """
        if not self._discovery_done or reset:
            # Clear the current cache
            self._cache = set()
            speakers = soco.discovery.discover(
                allow_network_scan=True,
                max_threads=self._max_threads,
                scan_timeout=self._scan_timeout,
                min_netmask=self._min_netmask,
            )
            if speakers:
                self.cache_speakers(speakers)
            else:
                logging.info("No speakers found to cache")
            # Discovery is marked done even when nothing was found
            self._discovery_done = True
    def scan(self, reset=False, scan_timeout_override=None):
        """Populate the cache using a full network scan.

        A no-op if a scan has already succeeded, unless 'reset' is True.
        'scan_timeout_override' replaces the instance scan timeout for this
        call only.
        """
        if not self._scan_done or reset:
            # Clear the current cache
            self._cache = set()
            scan_timeout = (
                scan_timeout_override if scan_timeout_override else self._scan_timeout
            )
            logging.info(
                "Performing full discovery scan with timeout = {}s".format(scan_timeout)
            )
            speakers = soco.discovery.scan_network(
                multi_household=True,
                max_threads=self._max_threads,
                scan_timeout=scan_timeout,
                min_netmask=self._min_netmask,
            )
            if speakers:
                self.cache_speakers(speakers)
                # NOTE(review): _scan_done is only set when speakers were
                # found, so an empty scan is retried on the next call --
                # unlike discover(), which marks itself done regardless.
                # Presumed intentional; confirm.
                self._scan_done = True
            else:
                logging.info("No speakers found to cache")
        else:
            logging.info("Full discovery scan already done, and reset not requested")
    def add(self, speaker):
        """Add a single SoCo speaker instance to the cache."""
        logging.info("Adding speaker to cache")
        self._cache.add((speaker, speaker.player_name))
    def find_indirect(self, name):
        """Search the visible zones of cached speakers for 'name'.

        Returns the matching SoCo instance, or None when there is no match
        or the name is ambiguous (ambiguity is reported via error_report).
        """
        speakers_found = set()
        speakers_found_names = set()
        for cached, _ in self._cache:
            # Look at every zone visible to each cached speaker
            for speaker in cached.visible_zones:
                match, exact = speaker_name_matches(name, speaker.player_name)
                if match and exact:
                    # An exact match wins immediately
                    return speaker
                if match and not exact:
                    speakers_found.add(speaker)
                    speakers_found_names.add(speaker.player_name)
        if len(speakers_found) == 1:
            return speakers_found.pop()
        if len(speakers_found) > 1:
            error_report("'{}' is ambiguous: {}".format(name, speakers_found_names))
        return None
def find(self, name):
speakers_found = set()
speakers_found_names = set()
for speaker, speaker_name in self._cache:
match, exact = speaker_name_matches(name, speaker_name)
if match and exact:
return speaker
if match and not exact:
speakers_found.add(speaker)
speakers_found_names.add(speaker_name)
if len(speakers_found) == 1:
return speakers_found.pop()
if | |
# pytket/pytket/qasm/qasm.py
# Copyright 2019-2021 Cambridge Quantum Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Output custom gates
# TODO: Figure out nice way to make these class methods of Circuit
import io
import os
from typing import (
Any,
Callable,
Dict,
List,
Optional,
TextIO,
Tuple,
Type,
TypeVar,
Union,
)
from itertools import groupby
from sympy import sympify, pi # type: ignore
from pytket import Circuit, OpType, Qubit, Bit
from pytket.circuit import ( # type: ignore
CustomGateDef,
UnitID,
BitRegister,
QubitRegister,
Op,
)
# QASM gate name -> tket OpType, for gates that take no angle parameters.
NOPARAM_COMMANDS = {
    "CX": OpType.CX,  # built-in gate equivalent to "cx"
    "cx": OpType.CX,
    "x": OpType.X,
    "y": OpType.Y,
    "z": OpType.Z,
    "h": OpType.H,
    "s": OpType.S,
    "sdg": OpType.Sdg,
    "t": OpType.T,
    "tdg": OpType.Tdg,
    "sx": OpType.SX,
    "sxdg": OpType.SXdg,
    "cz": OpType.CZ,
    "cy": OpType.CY,
    "ch": OpType.CH,
    "csx": OpType.CSX,
    "ccx": OpType.CCX,
    "ZZ": OpType.ZZMax,
    "measure": OpType.Measure,
    "reset": OpType.Reset,
    "id": OpType.noop,
    "barrier": OpType.Barrier,
    "swap": OpType.SWAP,
    "cswap": OpType.CSWAP,
    "ecr": OpType.ECR,
}
# QASM gate name -> tket OpType, for gates that take angle parameters.
PARAM_COMMANDS = {
    "p": OpType.U1,  # alias. https://github.com/Qiskit/qiskit-terra/pull/4765
    "u": OpType.U3,  # alias. https://github.com/Qiskit/qiskit-terra/pull/4765
    "U": OpType.U3,  # built-in gate equivalent to "u3"
    "u3": OpType.U3,
    "u2": OpType.U2,
    "u1": OpType.U1,
    "rx": OpType.Rx,
    "ry": OpType.Ry,
    "rz": OpType.Rz,
    "Rz": OpType.Rz,
    "U1q": OpType.PhasedX,
    "crz": OpType.CRz,
    "crx": OpType.CRx,
    "cry": OpType.CRy,
    "cu1": OpType.CU1,
    "cu3": OpType.CU3,
}
# Gate names made available by each supported include header.
included_gates = {
    "qelib1": set(
        (
            "CX",
            "cx",
            "x",
            "y",
            "z",
            "h",
            "s",
            "sdg",
            "t",
            "tdg",
            "sx",
            "sxdg",
            "cz",
            "cy",
            "ch",
            "csx",
            "ccx",
            "measure",
            "reset",
            "id",
            "barrier",
            "p",
            "U",
            "u",
            "u3",
            "u2",
            "u1",
            "rx",
            "ry",
            "rz",
            "crz",
            "crx",
            "cry",
            "cu1",
            "cu3",
            "swap",
            "cswap",
            "ecr",
        )
    ),
    "oqc": set(
        (
            "sx",
            "rz",
            "ecr",
            "barrier",
            "measure",
        )
    ),
}
# hqslib1 is qelib1 plus Quantinuum/Honeywell extras, minus some gates.
included_gates["hqslib1"] = included_gates["qelib1"].copy()
included_gates["hqslib1"].update(("U1q", "rz", "ZZ"))
included_gates["hqslib1"].difference_update(
    ("crx", "cry", "sx", "sxdg", "csx", "swap", "cswap")
)
# Reverse maps (OpType -> QASM name), used when writing QASM out.
_tk_to_qasm_noparams = dict(((item[1], item[0]) for item in NOPARAM_COMMANDS.items()))
_tk_to_qasm_noparams[OpType.CX] = "cx"  # prefer "cx" to "CX"
_tk_to_qasm_params = dict(((item[1], item[0]) for item in PARAM_COMMANDS.items()))
_tk_to_qasm_params[OpType.U3] = "u3"  # prefer "u3" to "U"
_tk_to_qasm_params[OpType.Rz] = "rz"  # prefer "rz" to "Rz"
# Operator symbols used when rendering classical expressions.
_classical_gatestr_map = {"AND": "&", "OR": "|", "XOR": "^"}
class QASMUnsupportedError(Exception):
    """Raised when a circuit contains constructs that cannot be expressed in QASM."""

    pass
class QASMParseError(Exception):
    """Raised when the input text is not valid (supported) OpenQASM 2.0."""

    pass
class QASMParser(object):
    """Class for parsing OpenQASM files into CQC tket Circuits.

    Parsing is purely string-based: statements are split on semicolons,
    custom gate bodies are cut out of the text first, and each remaining
    statement is appended to ``self.circuit`` by parse_instruction().
    """

    def __init__(self) -> None:
        self.circuit = Circuit()
        # Custom gate definitions parsed from the file, keyed by gate name.
        self.gate_dict: Dict[str, CustomGateDef] = dict()
        # Maps a register name to its list of units (qubits or bits).
        self.reg_map: Dict[str, UnitID] = dict()
        # Name of the include header ("qelib1" or "hqslib1"); set in parse_qasm.
        self.include = ""

    def parse_qasm(self, qasm: str) -> Circuit:
        """Parse an OpenQASM 2.0 program string and return the tket Circuit."""
        lines = qasm.splitlines()
        rows = []
        # first, get rid of comments and whitespace lines
        for l in lines:
            i = l.find("//")
            if i != -1:
                s = l[0:i].strip()
            else:
                s = l.strip()
            if s:
                rows.append(s)
        # now, throw away OPENQASM descriptor etc.
        if not (
            rows[0].startswith("OPENQASM 2.0")
            and rows[1].startswith('include "')
            and rows[1].endswith('.inc";')
        ):
            raise QASMParseError("File must declare OPENQASM version and its includes.")
        # Extract the header name, e.g. 'include "qelib1.inc";' -> "qelib1"
        # (the two slice lengths are equal, so the trailing '.inc";' is cut).
        self.include = rows[1][len('include "') : -len('".inc;')]
        if self.include not in ("qelib1", "hqslib1"):
            raise QASMParseError("Header {}.inc not recognised".format(self.include))
        data = "\n".join(rows[2:])
        # now, separate out the custom gates to deal with elsewhere
        while True:
            i = data.find("gate ")
            if i == -1:
                break
            j = data.find("}", i)
            if j == -1:
                raise QASMParseError("Custom gate definition is invalid.")
            self.parse_custom_gate(data[i : j + 1])  # TODO: deal with custom gate
            data = data[:i] + data[j + 1 :]
        # now, parse the regular instructions
        instructions: List[str] = [s.strip() for s in data.split(";") if s.strip()]
        for instr in instructions:
            self.parse_instruction(instr, self.circuit, self.reg_map)
        return self.circuit

    def parse_custom_gate(self, data: str) -> None:
        """Parse one 'gate name(params) args { body }' text block and store
        the resulting definition in self.gate_dict."""
        signature, rest = data.split("{", 1)
        _, signature = signature.split(" ", 1)  # ignore "gate"
        if signature.find("(") != -1:
            gatename, other = signature.split("(")
            symbol_list, arg_list = other.split(")")
        else:
            gatename, arg_list = signature.split(" ", 1)
            symbol_list = ""
        gatename = gatename.strip()
        symbols = [sympify(s.strip()) for s in symbol_list.split(",")]
        args = [a.strip() for a in arg_list.split(",")]
        rename_map = {}
        qb_map = {}
        circ = Circuit()
        # Build the definition circuit on named qubits, then rename them to
        # the indexed qubits expected by CustomGateDef.
        for i, a in enumerate(args):
            circ.add_qubit(Qubit(a))
            rename_map.update({Qubit(a): Qubit(i)})
            qb_map[a] = [Qubit(a)]
        command_block, _ = rest.split("}", 1)
        commands = [c.strip() for c in command_block.split(";") if c.strip()]
        for com in commands:
            self.parse_instruction(com, circ, qb_map)
        circ.rename_units(rename_map)
        symbol_map = {sym: sym * pi for sym in symbols}
        circ.symbol_substitution(symbol_map)  # qasm arguments are given in radians
        self.gate_dict[gatename] = CustomGateDef.define(gatename, circ, symbols)

    def parse_instruction(
        self, instruction: str, circuit: Circuit, reg_map: Dict[str, List[UnitID]]
    ) -> None:
        """Parse a single (semicolon-free) QASM statement and append the
        corresponding operation(s) to 'circuit', resolving register names
        through 'reg_map'."""
        gate_kwargs: Dict[str, Any] = {}
        if instruction.find("if") == 0:
            ###parse condition
            if_phrase, rest = instruction.split("(", 1)
            if if_phrase.strip() != "if":
                raise QASMParseError(
                    'Error in parsing: cannot match "{}" against "if"'.format(if_phrase)
                )
            condition, rest = rest.split(")", 1)
            creg, eq_value = condition.split("==", 1)
            gate_kwargs.update({"condition_bits": reg_map[creg.strip()]})
            value = int(eq_value.strip())
            gate_kwargs.update({"condition_value": value})
            instruction = rest.strip()
        if instruction.find("->") != -1:
            ###handle measure gates
            ###currently assumes that there is just 1 qb being read to 1 bit
            name_and_qbs, bits = instruction.split("->", 1)
            if name_and_qbs.find("measure") == -1:
                raise QASMParseError(
                    "Error in parsing: cannot accept a non-Measure gate writing to "
                    "classical register"
                )
            name_and_qbs = name_and_qbs.replace("measure", "")
            name_and_qbs = name_and_qbs.replace(" ", "")
            # NOTE(review): the result of strip() is discarded -- harmless
            # here, since all spaces were already removed above.
            name_and_qbs.strip()
            qubits_list: List[Qubit]  # annotation corrected from List[Bit]
            if "[" in name_and_qbs:
                # Single indexed qubit, e.g. "q[0]"
                qregname, qbindex = name_and_qbs.split("[")
                qbindex, _ = qbindex.split("]")
                qubits_list = [Qubit(qregname, int(qbindex))]
            else:
                # Whole-register measurement
                qubits_list = reg_map[name_and_qbs]
            bits = bits.replace(" ", "")
            bits_list: List[Bit]
            if "[" in bits:
                bitreg, bitindex = bits.split("[")
                bitindex, _ = bitindex.split("]")
                bits_list = [Bit(bitreg, int(bitindex))]
            else:
                bits_list = reg_map[bits]
            # zip() pairs qubit i with bit i; any excess units on either
            # side are silently dropped.
            for q, b in zip(qubits_list, bits_list):
                circuit.Measure(q, b, **gate_kwargs)
            return
        # Split "name args" at the first separator outside any brackets.
        index = _find_respecting_brackets(instruction, " ")
        name = instruction[:index]
        rest = instruction[index + 1 :]
        args = [s.strip() for s in rest.split(",") if s.strip()]
        # deal with qubit register declarations
        if name == "qreg" or name == "creg":
            regname, rest = args[0].split("[", 1)
            # NOTE(review): the result of strip() is discarded, so a register
            # name with surrounding spaces would not be normalised here.
            regname.strip()
            size = int(rest[:-1])
            if name == "qreg":
                dict_map = circuit.add_q_register(regname, size)
            else:
                dict_map = circuit.add_c_register(regname, size)
            reg_map[regname] = [dict_map[i] for i in range(size)]
            return
        # get qubits to append operation to
        qubits = []
        for a in args:
            if "[" in a:
                regname, rest = a.split("[", 1)
                val = int(rest[:-1])
                qubits.append([Qubit(regname, val)])
            else:
                qubits.append(reg_map[a])
        # if the gate is parameterised, get these parameters
        if name.find("(") != -1:
            name, params = name.split("(", 1)
            params = params[:-1]  # cut off final close bracket
            angle_start = 0
            angle_end = _find_respecting_brackets(params, ",")
            angles = []
            while angle_end != -1:
                angles.append(params[angle_start:angle_end].strip())
                angle_start = angle_end + 1
                angle_end = _find_respecting_brackets(params, ",", angle_start)
            angles.append(params[angle_start:].strip())
            halfturn_angles = []
            for ang in angles:
                # QASM angles are in radians; tket uses half-turns.
                try:
                    halfturns = sympify(ang) / pi
                    halfturn_angles.append(halfturns)
                # NOTE(review): bare 'except' also swallows KeyboardInterrupt
                # and SystemExit; 'except Exception' would be safer.
                except:
                    raise QASMParseError("Cannot parse angle: {}".format(ang))
            if name in PARAM_COMMANDS:
                # Reject hqslib1-only gates when parsing under qelib1.
                if (
                    self.include != "hqslib1"
                    and name in included_gates["hqslib1"]
                    and name not in included_gates["qelib1"]
                ):
                    raise QASMParseError(
                        "Gate of type {} is not defined in header {}.inc".format(
                            name, self.include
                        )
                    )
                # zip(*qubits) broadcasts the gate over whole registers.
                for qbs in zip(*qubits):
                    circuit.add_gate(
                        PARAM_COMMANDS[name], halfturn_angles, list(qbs), **gate_kwargs
                    )
            elif name in self.gate_dict:
                for qbs in zip(*qubits):
                    circuit.add_custom_gate(
                        self.gate_dict[name], halfturn_angles, list(qbs), **gate_kwargs
                    )
            else:
                raise QASMParseError("Cannot parse gate of type: {}".format(name))
        else:
            if name == "barrier":
                # Barrier applies across all units in the argument list.
                circuit.add_barrier([q for qbs in qubits for q in qbs])
            elif name in NOPARAM_COMMANDS:
                # Reject hqslib1-only gates when parsing under qelib1.
                if (
                    self.include != "hqslib1"
                    and name in included_gates["hqslib1"]
                    and name not in included_gates["qelib1"]
                ):
                    raise QASMParseError(
                        "Gate of type {} is not defined in header {}.inc".format(
                            name, self.include
                        )
                    )
                for qbs in zip(*qubits):
                    circuit.add_gate(
                        NOPARAM_COMMANDS[name], [], list(qbs), **gate_kwargs
                    )
            elif name in self.gate_dict:
                for qbs in zip(*qubits):
                    circuit.add_custom_gate(
                        self.gate_dict[name], [], list(qbs), **gate_kwargs
                    )
            else:
                raise QASMParseError("Cannot parse gate of type: {}".format(name))
def circuit_from_qasm(input_file: Union[str, "os.PathLike[Any]"]) -> Circuit:
    """A method to generate a tket Circuit from a qasm file"""
    if os.path.splitext(input_file)[-1] != ".qasm":
        raise TypeError("Can only convert .qasm files")
    with open(input_file, "r") as f:
        return circuit_from_qasm_io(f)
def circuit_from_qasm_str(qasm_str: str) -> Circuit:
    """A method to generate a tket Circuit from a qasm str"""
    return QASMParser().parse_qasm(qasm_str)
def circuit_from_qasm_io(stream_in: TextIO) -> Circuit:
    """A method to generate a tket Circuit from a qasm text stream"""
    qasm_text = stream_in.read()
    return circuit_from_qasm_str(qasm_text)
def circuit_to_qasm(circ: Circuit, output_file: str, header: str = "qelib1") -> None:
"""A method to generate a qasm file from a tket Circuit"""
with open(output_file, "w") as out:
| |
**red**, **green** and **blue** separately.
"""
),
}
    def __init__(self, std: List[Float]):
        super().__init__()
        # Per-channel noise standard deviation; may contain expressions that
        # are resolved against the ExpressionContext on every call.
        self.std = std
    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        # Resolve the three per-channel std values for this call.
        std = torch.Tensor(context(self.std)).to(image.device)
        # Full-resolution gaussian noise, scaled per channel via (3, 1, 1)
        # broadcasting against the (3, H, W) image.
        noise = torch.randn(image.shape).to(image.device)
        return image + noise * std.reshape(3, 1, 1)
class BlackWhiteNoise(TransformBase):
    """
    Adds gray-scale noise to the image.

    The noise has a scalable normal distribution around zero.
    """
    NAME = "bwnoise"
    IS_RANDOM = True
    PARAMS = {
        "std": Parameter(
            float, default=None,
            doc="""
            Specifies the standard deviation of the noise distribution.
            """
        ),
    }

    def __init__(self, std: Float):
        super().__init__()
        # Noise standard deviation; may be an expression resolved per call.
        self.std = std

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        std = context(self.std)
        # Single-channel (H, W) noise field, repeated across the 3 colour
        # channels so every channel receives the same gray perturbation.
        noise = torch.randn(image.shape[-2:]).to(image.device) * std
        return image + noise.unsqueeze(0).repeat(3, 1, 1)
class ScaledNoise(TransformBase):
    """
    Adds noise with a different resolution to the image.

    The noise has a scalable normal distribution around zero.
    """
    NAME = "rnoise"
    IS_RANDOM = True
    PARAMS = {
        "std": SequenceParameter(
            float, length=3, default=None,
            doc="""
            Specifies the standard deviation of the noise distribution.
            One value or three values to specify **red**, **green** and **blue** separately.
            """
        ),
        "resolution": SequenceParameter(
            int, length=2, default=None,
            doc="""
            The resolution of the noise image. It will be
            resized to the processed image.
            """
        ),
    }

    def __init__(self, std: List[Float], resolution: List[Int]):
        super().__init__()
        # Per-channel std and the (width, height) of the low-res noise field.
        self.std = std
        self.resolution = resolution

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        std = torch.Tensor(context(self.std)).to(image.device)
        # 'resolution' is (width, height); tensors are laid out (C, H, W).
        size = context(self.resolution)
        noise = torch.randn([3, size[1], size[0]]).to(image.device)
        # Rescale the noise field to the image resolution before mixing.
        noise = VF.resize(noise, image.shape[-2:])
        return image + noise * std.reshape(3, 1, 1)
class FNoise(TransformBase):
    """
    Adds noise to the image's fourier space.

    It's just a bit different than the normal [noise](reference.md#targetstransformsnoise).
    The noise has a scalable normal distribution around zero.
    """
    NAME = "fnoise"
    IS_RANDOM = True
    PARAMS = {
        "std": SequenceParameter(
            float, length=3, default=None,
            doc="""
            Specifies the standard deviation of the noise distribution.
            The actual value is multiplied by `15.0` to give a visually
            similar distribution as the normal [noise](reference.md#targetstransformsnoise).
            One value or three values to specify **red**, **green** and **blue** separately.
            """
        ),
    }

    def __init__(self, std: List[Float]):
        super().__init__()
        # Per-channel std; the 15x scaling is applied at call time.
        self.std = std

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        # 15x scaling gives a perceptually similar strength to spatial noise.
        std = 15. * torch.Tensor(context(self.std)).to(image.device).reshape(3, 1)
        # Per-channel 1D FFT over the flattened pixels; perturb both the
        # real and imaginary components independently.
        space = fft.rfft(image.reshape(3, -1))
        space.real = space.real + torch.randn(space.shape).to(image.device) * std
        space.imag = space.imag + torch.randn(space.shape).to(image.device) * std
        # NOTE(review): irfft defaults to an even output length; if H*W is
        # odd the reshape back to image.shape would fail -- confirm inputs.
        return fft.irfft(space).reshape(*image.shape)
class Edge(TransformBase):
    """
    This removes everything except edges and generally has a bad effect on image
    quality. It might be useful, however.

    A gaussian blur is used to detect the edges:

        edge = amount * abs(image - blur(image))
    """
    NAME = "edge"
    PARAMS = {
        "kernel_size": SequenceParameter(
            int, length=2, default=[3, 3],
            doc="""
            The size of the pixel window used for gaussian blur.
            Must be an **odd**, **positive** integer.
            Two numbers define **width** and **height** separately.
            """
        ),
        "sigma": SequenceParameter(
            float, length=2, null=True, default=None,
            doc="""
            Gaussian kernel standard deviation. The larger, the more *blurry*.
            If not specified it will default to `0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8`.
            Two numbers define sigma for **x** and **y** separately.
            """
        ),
        "amount": SequenceParameter(
            float, length=3, default=[1., 1., 1.],
            doc="""
            A multiplier for the edge value. Three numbers to specify
            **red**, **green** and **blue** separately.
            """
        ),
    }

    def __init__(self, kernel_size: List[Int], sigma: List[float], amount: List[float]):
        super().__init__()
        self.kernel_size = kernel_size
        self.sigma = sigma
        self.amount = amount

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        kernel_size = context(self.kernel_size)
        if self.sigma is None:
            sigma = None
        else:
            sigma = context(self.sigma)
        amount = torch.Tensor(context(self.amount)).to(image.device)
        edge = VF.gaussian_blur(image, kernel_size, sigma)
        # FIX: broadcast the per-channel 'amount' over the (3, H, W) image.
        # The previous bare (3,) tensor broadcast along the *width* axis and
        # raised a size-mismatch error unless W == 3; (3, 1, 1) matches the
        # std.reshape(3, 1, 1) convention of the noise transforms above.
        # NOTE(review): unlike the docstring formula, the difference is not
        # abs()-ed -- negative responses are clipped to 0 by the clamp.
        edge = torch.clamp((image - edge) * amount.reshape(3, 1, 1), 0, 1)
        return edge
class Rotate(TransformBase):
    """
    Rotates the image.

    The resolution is not changed and areas outside of the image
    are filled with black (zero).
    """
    NAME = "rotate"
    PARAMS = {
        "degree": Parameter(
            float, default=None,
            doc="""
            The counter-clockwise angle of ration in degrees (`[0, 360]`).
            """
        ),
        "center": SequenceParameter(
            float, length=2, default=[0.5, 0.5],
            doc="""
            The center of rotation in the range `[0, 1]`.
            Two numbers to specify **x** and **y** separately.
            """
        ),
    }

    def __init__(self, degree: Float, center: List[Float]):
        super().__init__()
        self.degree = degree
        self.center = center

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        degree = context(self.degree)
        center = context(self.center)
        # Convert the fractional center (0..1) into pixel coordinates (x, y).
        center_pix = [
            int(center[0] * image.shape[-1]),
            int(center[1] * image.shape[-2]),
        ]
        return VF.rotate(image, degree, center=center_pix)
class RandomRotate(TransformBase):
    """
    Randomly rotates the image.

    Degree and center of rotation are chosen randomly between in the range
    of the specified values.

    The resolution is not changed and areas outside of the image
    are filled with black (zero).
    """
    NAME = "random_rotate"
    IS_RANDOM = True
    PARAMS = {
        "degree": SequenceParameter(
            float, length=2, default=None,
            doc="""
            The minimum and maximum counter-clockwise angle of ration in degrees.
            """
        ),
        "center": SequenceParameter(
            float, length=2, default=[0.5, 0.5],
            doc="""
            The minimum and maximum center of rotation (for x and y) in the range `[0, 1]`.
            """
        ),
    }

    def __init__(self, degree: List[Float], center: List[Float]):
        super().__init__()
        self.degree = degree
        self.center = center

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        angle_min, angle_max = context(self.degree)
        center_min, center_max = context(self.center)
        angle = random.uniform(angle_min, angle_max)
        # The same scalar range is used to sample the x and y center
        # coordinates independently.
        center_x = random.uniform(center_min, center_max)
        center_y = random.uniform(center_min, center_max)
        # Convert fractional centers into pixel coordinates (x, y).
        center_pix = [
            int(center_x * image.shape[-1]),
            int(center_y * image.shape[-2]),
        ]
        return VF.rotate(image, angle, center=center_pix)
class RandomScale(TransformBase):
    """
    Randomly scales an image in the range specified.

    See [torchvision RandomAffine](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.RandomAffine).

    The resolution does not change, only contents are scaled.
    Areas outside of the image are filled with black (zero).
    """
    NAME = "random_scale"
    PARAMS = {
        "scale": SequenceParameter(
            float, length=2, default=None,
            doc="""
            Minimum and maximum scale, where `0.5` means half and `2.0` means double.
            """
        ),
    }

    def __init__(self, scale: List[Float]):
        super().__init__()
        self.scale = scale

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        scale = context(self.scale)
        # NOTE(review): 'fillcolor' was renamed to 'fill' in newer
        # torchvision releases -- confirm the pinned torchvision version.
        return VT.RandomAffine(degrees=0, scale=scale, fillcolor=None)(image)
class RandomTranslate(TransformBase):
    """
    Randomly translates an image in the specified range.

    The resolution does not change.
    Areas outside of the image are filled with black (zero).

    See [torchvision RandomAffine](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.RandomAffine).
    """
    NAME = "random_translate"
    PARAMS = {
        "offset": SequenceParameter(
            float, length=2, default=None,
            doc="""
            Maximum absolute fraction for horizontal and vertical translations.
            For example: `random_translate: a, b`, then horizontal shift is randomly sampled in
            the range `-img_width * a < dx < img_width * a` and vertical shift is randomly sampled in the range
            `-img_height * b < dy < img_height * b`.
            """
        ),
    }

    def __init__(self, offset: List[Float]):
        super().__init__()
        self.offset = offset

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        offset = context(self.offset)
        # NOTE(review): 'fillcolor' was renamed to 'fill' in newer
        # torchvision releases -- confirm the pinned torchvision version.
        return VT.RandomAffine(degrees=0, translate=offset, fillcolor=None)(image)
class Shift(TransformBase):
    """
    This translates the image while wrapping the edges around.

    Pixels that are moved outside get attached on the other side.
    """
    NAME = "shift"
    PARAMS = {
        "offset": SequenceParameter(
            float, length=2, default=None,
            doc="""
            A number **larger 1** or **smaller -1** translates by the actual pixels.
            A number **between -1 and 1** translates by the fraction of the image resolution.
            E.g., `shift: .5` would move the center of the image to the previous bottom-right
            corner.

            A single number specifies translation on both **x** and **y** axes while
            two numbers specify them separately.
            """
        ),
    }

    def __init__(self, offset: List[Float]):
        super().__init__()
        self.offset = offset

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        x, y = context(self.offset)
        return self._shift(image, x, y)

    def _shift(self, image: torch.Tensor, x: Union[int, float], y: Union[int, float]) -> torch.Tensor:
        # Magnitudes below 1 are interpreted as fractions of the resolution.
        if abs(x) < 1:
            x = x * image.shape[-1]
        if abs(y) < 1:
            y = y * image.shape[-2]
        # Wrap into [0, size): shifting by a full width/height is a no-op.
        x = int(x) % image.shape[-1]
        y = int(y) % image.shape[-2]
        # Rotate columns/rows by concatenating the tail slice before the head.
        if x != 0:
            image = torch.cat([image[:, :, -x:], image[:, :, :-x]], -1)
        if y != 0:
            image = torch.cat([image[:, -y:, :], image[:, :-y, :]], -2)
        return image
class RandomShift(Shift):
"""
This randomly translates the pixels of the image.
Pixels that are moved outside get attached on the other side.
"""
NAME = "random_shift"
PARAMS = {
"offset": SequenceParameter(
float, length=[2, 4], default=None,
doc="""
Specifies the random range of translation.
A number **larger | |
xm = xm - m[n_yi, :][:, np.newaxis]
c[n_yi, :, :] = np.dot(xm, xm.T) / float(ntrl_y[n_yi] - 1)
chc[n_yi, :, :] = np.linalg.cholesky(c[n_yi, :, :])
hcond[n_yi] = np.sum(np.log(np.diagonal(chc[n_yi, :, :]))) + cc * nvarx
# class weights
w = ntrl_y / float(ntrl)
# mixture entropy via unscented transform
# See:
# Huber, Bailey, Durrant-Whyte and Hanebeck
# "On entropy approximation for Gaussian mixture random vectors"
# http://dx.doi.org/10.1109/MFI.2008.4648062
# Goldberger, Gordon, Greenspan
# "An efficient image similarity measure based on approximations of
# KL-divergence between two Gaussian mixtures"
# http://dx.doi.org/10.1109/ICCV.2003.1238387
d = nvarx
ds = np.sqrt(nvarx)
hmix = 0.0
for yi in range(len(ym)):
ps = ds * chc[yi, :, :].T
thsm = m[yi, :, np.newaxis]
# unscented points for this class
usc = np.hstack([thsm + ps, thsm - ps])
# class log-likelihoods at unscented points
log_lik = np.zeros((len(ym), 2 * nvarx))
for mi in range(len(ym)):
# demean points
dx = usc - m[mi, :, np.newaxis]
# gaussian likelihood
log_lik[mi, :] = _norm_innerv(
dx, chc[mi, :, :]) - hcond[mi] + .5 * nvarx
# log mixture likelihood for these unscented points
# sum over classes, axis=0
# logmixlik = sp.misc.logsumexp(log_lik, axis=0, b=w[:, np.newaxis])
logmixlik = np.log(np.sum(w[:, np.newaxis] * np.exp(log_lik)))
# add to entropy estimate (sum over unscented points for this class)
hmix = hmix + w[yi] * logmixlik.sum()
hmix = -hmix / (2 * d)
# no bias correct
i = (hmix - np.sum(w * hcond)) / np.log(2.)
return i
def _norm_innerv(x, chc):
"""Normalised innervations."""
m = np.linalg.solve(chc, x)
w = -0.5 * (m * m).sum(axis=0)
return w
def gcmi_mixture_1d_cd(x, y):
    """Gaussian-Copula MI between a continuous and a discrete variable.

    This method evaluate MI from a Gaussian mixture.
    The Gaussian mixture is fit using robust measures of location (median) and
    scale (median absolute deviation) for each class.

    I = gcmi_mixture_cd(x,y) returns the MI between the (possibly
    multidimensional).

    Parameters
    ----------
    x, y : array_like
        Continuous arrays of shape (n_epochs,) or (n_dimensions, n_epochs). y
        must be an array of integers

    Returns
    -------
    i : float
        Information shared by x and y (in bits)
    """
    x, y = np.atleast_2d(x), np.squeeze(y)
    if x.ndim > 2:
        raise ValueError("x must be at most 2d")
    if y.ndim > 1:
        raise ValueError("only univariate discrete variables supported")
    if not np.issubdtype(y.dtype, np.integer):
        raise ValueError("y should be an integer array")
    nvarx, ntrl = x.shape
    ym = np.unique(y)
    if y.size != ntrl:
        raise ValueError("number of trials do not match")
    # copula normalise each class
    # shift and rescale to match loc and scale of raw data
    # this provides a robust way to fit the gaussian mixture
    classdat = []
    ydat = []
    for yi in ym:
        # class conditional data
        idx = y == yi
        xm = x[:, idx]
        cxm = copnorm_nd(xm, axis=1)
        xmmed = np.median(xm, axis=1)[:, np.newaxis]
        # robust measure of s.d. under Gaussian assumption from median
        # absolute deviation (1.4826... = 1/Phi^-1(0.75))
        xmmad = np.median(np.abs(xm - xmmed), axis=1)[:, np.newaxis]
        cxmscaled = cxm * (1.482602218505602 * xmmad)
        # robust measure of loc from median
        cxmscaled = cxmscaled + xmmed
        classdat.append(cxmscaled)
        # FIX: 'np.int' was removed in NumPy 1.24 (deprecated since 1.20);
        # it was always just an alias of the builtin 'int'.
        ydat.append(yi * np.ones(xm.shape[1], dtype=int))
    cx = np.concatenate(classdat, axis=1)
    newy = np.concatenate(ydat)
    return mi_mixture_1d_gd(cx, newy)
def cmi_1d_ggg(x, y, z, biascorrect=True, demeaned=False):
    """Conditional MI between two Gaussian variables conditioned on a third.

    I = cmi_ggg(x,y,z) returns the CMI between two (possibly multidimensional)
    Gaussian variables, x and y, conditioned on a third, z, with bias
    correction.

    Parameters
    ----------
    x, y, z : array_like
        Gaussians arrays of shape (n_epochs,) or (n_dimensions, n_epochs).
    biascorrect : bool | True
        Specifies whether bias correction should be applied to the estimated MI
    demeaned : bool | False
        Specifies whether the input data already has zero mean (true if it has
        been copula-normalized)

    Returns
    -------
    i : float
        Information shared by x and y conditioned by z (in bits)
    """
    x, y, z = np.atleast_2d(x), np.atleast_2d(y), np.atleast_2d(z)
    if x.ndim > 2 or y.ndim > 2 or z.ndim > 2:
        raise ValueError("x, y and z must be at most 2d")
    ntrl = x.shape[1]
    nvarx = x.shape[0]
    nvary = y.shape[0]
    nvarz = z.shape[0]
    nvaryz = nvary + nvarz
    nvarxy = nvarx + nvary
    nvarxz = nvarx + nvarz
    nvarxyz = nvarx + nvaryz
    if y.shape[1] != ntrl or z.shape[1] != ntrl:
        raise ValueError("number of trials do not match")
    # joint variable
    xyz = np.vstack((x, y, z))
    if not demeaned:
        xyz = xyz - xyz.mean(axis=1)[:, np.newaxis]
    cxyz = np.dot(xyz, xyz.T) / float(ntrl - 1)
    # submatrices of joint covariance
    cz = cxyz[nvarxy:, nvarxy:]
    cyz = cxyz[nvarx:, nvarx:]
    # cov(x, z) is assembled from the four corners of the joint covariance,
    # skipping the y rows/columns
    cxz = np.zeros((nvarxz, nvarxz))
    cxz[:nvarx, :nvarx] = cxyz[:nvarx, :nvarx]
    cxz[:nvarx, nvarx:] = cxyz[:nvarx, nvarxy:]
    cxz[nvarx:, :nvarx] = cxyz[nvarxy:, :nvarx]
    cxz[nvarx:, nvarx:] = cxyz[nvarxy:, nvarxy:]
    chcz = np.linalg.cholesky(cz)
    chcxz = np.linalg.cholesky(cxz)
    chcyz = np.linalg.cholesky(cyz)
    chcxyz = np.linalg.cholesky(cxyz)
    # entropies in nats
    # normalizations cancel for cmi
    hz = np.sum(np.log(np.diagonal(chcz)))
    hxz = np.sum(np.log(np.diagonal(chcxz)))
    hyz = np.sum(np.log(np.diagonal(chcyz)))
    hxyz = np.sum(np.log(np.diagonal(chcxyz)))
    ln2 = np.log(2)
    if biascorrect:
        # FIX: 'np.float' was removed in NumPy 1.24 (deprecated since 1.20);
        # it was always just an alias of the builtin 'float'.
        psiterms = sp.special.psi(
            (ntrl - np.arange(1, nvarxyz + 1)).astype(float) / 2.) / 2.
        dterm = (ln2 - np.log(ntrl - 1.)) / 2.
        hz = hz - nvarz * dterm - psiterms[:nvarz].sum()
        hxz = hxz - nvarxz * dterm - psiterms[:nvarxz].sum()
        hyz = hyz - nvaryz * dterm - psiterms[:nvaryz].sum()
        hxyz = hxyz - nvarxyz * dterm - psiterms[:nvarxyz].sum()
    # MI in bits
    i = (hxz + hyz - hxyz - hz) / ln2
    return i
def gccmi_1d_ccc(x, y, z, biascorrect=True):
    """Gaussian-Copula CMI between three continuous variables.

    I = gccmi_1d_ccc(x,y,z) returns the CMI between two (possibly
    multidimensional) continuous variables, x and y, conditioned on a third, z,
    estimated via a Gaussian copula.

    Parameters
    ----------
    x, y, z : array_like
        Continuous arrays of shape (n_epochs,) or (n_dimensions, n_epochs).
    biascorrect : bool | True
        Specifies whether bias correction should be applied to the estimated MI

    Returns
    -------
    i : float
        Information shared by x and y conditioned by z (in bits)
    """
    x, y, z = np.atleast_2d(x), np.atleast_2d(y), np.atleast_2d(z)
    if x.ndim > 2 or y.ndim > 2 or z.ndim > 2:
        raise ValueError("x, y and z must be at most 2d")
    nvarx, ntrl = x.shape
    if y.shape[1] != ntrl or z.shape[1] != ntrl:
        raise ValueError("number of trials do not match")
    # copula normalization
    cx = copnorm_nd(x, axis=1)
    cy = copnorm_nd(y, axis=1)
    cz = copnorm_nd(z, axis=1)
    # parametric Gaussian CMI
    # FIX: forward the caller's 'biascorrect' flag instead of hard-coding
    # True, which silently ignored biascorrect=False.
    return cmi_1d_ggg(cx, cy, cz, biascorrect=biascorrect, demeaned=True)
def cmi_1d_ggd(x, y, z, biascorrect=True, demeaned=False):
    """MI between 2 continuous variables conditioned on a discrete variable.

    Computes the conditional mutual information between two (possibly
    multidimensional) continuous variables x and y given a univariate
    discrete variable z, as the probability-weighted average of the
    Gaussian-copula MI estimated within each level of z.

    Parameters
    ----------
    x, y : array_like
        Continuous arrays of shape (n_epochs,) or (n_dimensions, n_epochs).
    z : array_like
        Discrete integer array of shape (n_epochs,).
    biascorrect : bool
        Apply the analytic bias correction in the per-level MI estimates.
    demeaned : bool
        Whether the inputs are already zero-mean.

    Returns
    -------
    cmi : float
        Conditional Mutual Information shared by x and y conditioned by z
        (in bits)
    """
    x = np.atleast_2d(x)
    y = np.atleast_2d(y)
    if x.ndim > 2 or y.ndim > 2:
        raise ValueError("x and y must be at most 2d")
    if z.ndim > 1:
        raise ValueError("only univariate discrete variables supported")
    if not np.issubdtype(z.dtype, np.integer):
        raise ValueError("z should be an integer array")
    nvarx, ntrl = x.shape
    u_z = np.unique(z)
    if y.shape[1] != ntrl or z.size != ntrl:
        raise ValueError("number of trials do not match")
    # Per-level MI estimates and the empirical probability of each z level.
    level_probs = []
    level_infos = []
    for zi in u_z:
        mask = z == zi
        level_probs.append(mask.sum() / float(ntrl))
        level_infos.append(mi_1d_gg(x[:, mask], y[:, mask],
                                    biascorrect=biascorrect,
                                    demeaned=demeaned))
    # CMI is the expectation of the within-level MI under p(z).
    return np.sum(np.asarray(level_probs) * np.asarray(level_infos))
def gccmi_1d_ccd(x, y, z, biascorrect=True, demeaned=False):
"""GCCMI between 2 continuous variables conditioned on a discrete variable.
I = gccmi_ccd(x,y,z) returns the CMI between two (possibly
multidimensional) continuous variables, x and y, conditioned on a third
discrete variable z, estimated via a Gaussian copula.
Parameters
----------
x, y : array_like
Continuous arrays of shape (n_epochs,) or (n_dimensions, n_epochs).
z : array_like
        Discrete array of shape (n_epochs,)
Returns
-------
cmi : float
Conditional Mutual Information shared by x | |
# test/api/test_cwl_conformance_required_v1_0.py
"""Test CWL conformance for version $version."""
from .test_workflows_cwl import BaseCwlWorklfowTestCase
class CwlConformanceTestCase(BaseCwlWorklfowTestCase):
"""Test case mapping to CWL conformance tests for version $version."""
def test_conformance_v1_0_cl_basic_generation(self):
"""General test of command line generation
Generated from::
job: v1.0/bwa-mem-job.json
label: cl_basic_generation
output:
args:
- bwa
- mem
- -t
- '2'
- -I
- 1,2,3,4
- -m
- '3'
- chr20.fa
- example_human_Illumina.pe_1.fastq
- example_human_Illumina.pe_2.fastq
tags:
- required
- command_line_tool
tool: v1.0/bwa-mem-tool.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """General test of command line generation""")
def test_conformance_v1_0_nested_prefixes_arrays(self):
"""Test nested prefixes with arrays
Generated from::
job: v1.0/bwa-mem-job.json
label: nested_prefixes_arrays
output:
args:
- bwa
- mem
- chr20.fa
- -XXX
- -YYY
- example_human_Illumina.pe_1.fastq
- -YYY
- example_human_Illumina.pe_2.fastq
tags:
- required
- command_line_tool
tool: v1.0/binding-test.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test nested prefixes with arrays""")
def test_conformance_v1_0_cl_optional_inputs_missing(self):
"""Test command line with optional input (missing)
Generated from::
job: v1.0/cat-job.json
label: cl_optional_inputs_missing
output:
args:
- cat
- hello.txt
tags:
- required
- command_line_tool
tool: v1.0/cat1-testcli.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test command line with optional input (missing)""")
def test_conformance_v1_0_cl_optional_bindings_provided(self):
"""Test command line with optional input (provided)
Generated from::
job: v1.0/cat-n-job.json
label: cl_optional_bindings_provided
output:
args:
- cat
- -n
- hello.txt
tags:
- required
- command_line_tool
tool: v1.0/cat1-testcli.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test command line with optional input (provided)""")
def test_conformance_v1_0_stdout_redirect_docker(self):
"""Test command execution in Docker with stdout redirection
Generated from::
job: v1.0/cat-job.json
label: stdout_redirect_docker
output:
output_file:
checksum: sha1$47a013e660d408619d894b20806b1d5086aab03b
class: File
location: output.txt
size: 13
tags:
- required
- command_line_tool
tool: v1.0/cat3-tool.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test command execution in Docker with stdout redirection""")
def test_conformance_v1_0_stdout_redirect_shortcut_docker(self):
"""Test command execution in Docker with shortcut stdout redirection
Generated from::
job: v1.0/cat-job.json
label: stdout_redirect_shortcut_docker
output:
output_file:
checksum: sha1$47a013e660d408619d894b20806b1d5086aab03b
class: File
location: Any
size: 13
tags:
- required
- command_line_tool
tool: v1.0/cat3-tool-shortcut.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test command execution in Docker with shortcut stdout redirection""")
def test_conformance_v1_0_stdout_redirect_mediumcut_docker(self):
"""Test command execution in Docker with mediumcut stdout redirection
Generated from::
job: v1.0/cat-job.json
label: stdout_redirect_mediumcut_docker
output:
output_file:
checksum: sha1$47a013e660d408619d894b20806b1d5086aab03b
class: File
location: cat-out
size: 13
tags:
- required
- command_line_tool
tool: v1.0/cat3-tool-mediumcut.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test command execution in Docker with mediumcut stdout redirection""")
def test_conformance_v1_0_stdinout_redirect_docker(self):
"""Test command execution in Docker with stdin and stdout redirection
Generated from::
job: v1.0/cat-job.json
label: stdinout_redirect_docker
output:
output_txt:
checksum: sha1$47a013e660d408619d894b20806b1d5086aab03b
class: File
location: output.txt
size: 13
tags:
- required
- command_line_tool
tool: v1.0/cat4-tool.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test command execution in Docker with stdin and stdout redirection""")
def test_conformance_v1_0_any_outputSource_compatibility(self):
"""Testing Any type compatibility in outputSource
Generated from::
job: v1.0/any-type-job.json
label: any_outputSource_compatibility
output:
output1:
- hello
- world
output2:
- foo
- bar
output3: hello
tags:
- required
- workflow
tool: v1.0/any-type-compat.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Testing Any type compatibility in outputSource""")
def test_conformance_v1_0_stdinout_redirect(self):
"""Test command execution in with stdin and stdout redirection
Generated from::
job: v1.0/cat-job.json
label: stdinout_redirect
output:
output:
checksum: sha1$47a013e660d408619d894b20806b1d5086aab03b
class: File
location: output
size: 13
tags:
- required
- command_line_tool
tool: v1.0/cat-tool.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test command execution in with stdin and stdout redirection""")
def test_conformance_v1_0_wf_default_tool_default(self):
"""Test that workflow defaults override tool defaults
Generated from::
job: v1.0/empty.json
label: wf_default_tool_default
output:
default_output: workflow_default
tags:
- required
- workflow
tool: v1.0/echo-wf-default.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test that workflow defaults override tool defaults""")
def test_conformance_v1_0_any_input_param(self):
"""Test Any type input parameter
Generated from::
job: v1.0/env-job.json
label: any_input_param
output:
out: 'hello test env
'
tags:
- required
- command_line_tool
tool: v1.0/echo-tool.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test Any type input parameter""")
def test_conformance_v1_0_wf_simple(self):
"""Test simple workflow
Generated from::
job: v1.0/revsort-job.json
label: wf_simple
output:
output:
checksum: sha1$b9214658cc453331b62c2282b772a5c063dbd284
class: File
location: output.txt
size: 1111
tags:
- required
- workflow
tool: v1.0/revsort.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test simple workflow""")
def test_conformance_v1_0_hints_unknown_ignored(self):
"""Test unknown hints are ignored.
Generated from::
job: v1.0/cat-job.json
label: hints_unknown_ignored
output:
output_file:
checksum: sha1$47a013e660d408619d894b20806b1d5086aab03b
class: File
location: output.txt
size: 13
tags:
- required
- command_line_tool
tool: v1.0/cat5-tool.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test unknown hints are ignored.""")
    def test_conformance_v1_0_param_evaluation_noexpr(self):
        """Test parameter evaluation, no support for JS expressions.

        Auto-generated conformance test ``param_evaluation_noexpr`` (tool:
        v1.0/params.cwl, job: v1.0/empty.json; tags: required,
        command_line_tool).  Expects the full set of t1-t28 parameter
        reference evaluation outputs.
        """
        self.cwl_populator.run_conformance_test("""v1.0""", """Test parameter evaluation, no support for JS expressions
""")
def test_conformance_v1_0_metadata(self):
"""Test metadata
Generated from::
job: v1.0/cat-job.json
label: metadata
output: {}
tags:
- required
tool: v1.0/metadata.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test metadata""")
    def test_conformance_v1_0_format_checking(self):
        """Test simple format checking.

        Auto-generated conformance test ``format_checking`` (tool:
        v1.0/formattest.cwl, job: v1.0/formattest-job.json; tags: required,
        command_line_tool).  Expects output.txt with format
        http://edamontology.org/format_2330, size 1111.
        """
        self.cwl_populator.run_conformance_test("""v1.0""", """Test simple format checking.
""")
    def test_conformance_v1_0_format_checking_subclass(self):
        """Test format checking against ontology using subclassOf.

        Auto-generated conformance test ``format_checking_subclass`` (tool:
        v1.0/formattest2.cwl, job: v1.0/formattest2-job.json; tags: required,
        command_line_tool).  Expects output.txt with format
        http://edamontology.org/format_1929, size 12010.
        """
        self.cwl_populator.run_conformance_test("""v1.0""", """Test format checking against ontology using subclassOf.
""")
    def test_conformance_v1_0_format_checking_equivalentclass(self):
        """Test format checking against ontology using equivalentClass.

        Auto-generated conformance test ``format_checking_equivalentclass``
        (tool: v1.0/formattest3.cwl, job: v1.0/formattest2-job.json; tags:
        required, command_line_tool).  Expects output.txt with format
        http://edamontology.org/format_1929, size 12010.
        """
        self.cwl_populator.run_conformance_test("""v1.0""", """Test format checking against ontology using equivalentClass.
""")
def test_conformance_v1_0_multiple_glob_expr_list(self):
"""Test support for returning multiple glob patterns from expression
Generated from::
job: v1.0/abc.json
label: multiple_glob_expr_list
output:
files:
- checksum: sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709
class: File
location: a
size: 0
- checksum: sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709
class: File
location: b
size: 0
- checksum: sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709
class: File
location: c
size: 0
tags:
- required
- command_line_tool
tool: v1.0/glob-expr-list.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test support for returning multiple glob patterns from expression""")
def test_conformance_v1_0_wf_two_inputfiles_namecollision(self):
"""Test workflow two input files with same name.
Generated from::
job: v1.0/conflict-job.json
label: wf_two_inputfiles_namecollision
output:
fileout:
checksum: sha1$a2d8d6e7b28295dc9977dc3bdb652ddd480995f0
class: File
location: out.txt
size: 25
tags:
- required
- workflow
tool: v1.0/conflict-wf.cwl#collision
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test workflow two input files with same name.""")
def test_conformance_v1_0_directory_input_docker(self):
"""Test directory input in Docker
Generated from::
job: v1.0/dir-job.yml
label: directory_input_docker
output:
outlist:
checksum: sha1$13cda8661796ae241da3a18668fb552161a72592
class: File
location: output.txt
size: 20
tags:
- required
- command_line_tool
tool: v1.0/dir2.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test directory input in Docker""")
def test_conformance_v1_0_directory_output(self):
"""Test directory output
Generated from::
job: v1.0/dir3-job.yml
label: directory_output
output:
outdir:
class: Directory
listing:
- checksum: sha1$dd0a4c4c49ba43004d6611771972b6cf969c1c01
class: File
location: goodbye.txt
size: 24
- checksum: sha1$47a013e660d408619d894b20806b1d5086aab03b
class: File
location: hello.txt
size: 13
tags:
- required
- command_line_tool
tool: v1.0/dir3.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test directory output""")
def test_conformance_v1_0_input_file_literal(self):
"""Test file literal as input
Generated from::
job: v1.0/file-literal.yml
label: input_file_literal
output:
output_file:
checksum: sha1$d0e04ff6c413c7d57f9a0ca0a33cd3ab52e2dd9c
class: File
location: output.txt
size: 18
tags:
- required
- command_line_tool
tool: v1.0/cat3-tool.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test file literal as input""")
def test_conformance_v1_0_nameroot_nameext_stdout_expr(self):
"""Test nameroot/nameext expression in arguments, stdout
Generated from::
job: v1.0/wc-job.json
label: nameroot_nameext_stdout_expr
output:
b:
checksum: sha1$c4cfd130e7578714e3eef91c1d6d90e0e0b9db3e
class: File
location: whale.xtx
size: 21
tags:
- required
- command_line_tool
tool: v1.0/nameroot.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test nameroot/nameext expression in arguments, stdout""")
def test_conformance_v1_0_cl_gen_arrayofarrays(self):
"""Test command line generation of array-of-arrays
Generated from::
job: v1.0/nested-array-job.yml
label: cl_gen_arrayofarrays
output:
echo:
checksum: sha1$3f786850e387550fdab836ed7e6dc881de23001b
class: File
location: echo.txt
size: 2
tags:
- required
- command_line_tool
tool: v1.0/nested-array.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test command line generation of array-of-arrays""")
def test_conformance_v1_0_hints_import(self):
"""Test hints with $import
Generated from::
job: v1.0/empty.json
label: hints_import
output:
out:
checksum: sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519
class: File
location: out
size: 15
tags:
- required
- command_line_tool
tool: v1.0/imported-hint.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test hints with $import""")
def test_conformance_v1_0_default_path_notfound_warning(self):
"""Test warning instead of error when default path is not found
Generated from::
job: v1.0/default_path_job.yml
label: default_path_notfound_warning
output: {}
tags:
- required
- command_line_tool
tool: v1.0/default_path.cwl
"""
self.cwl_populator.run_conformance_test("""v1.0""", """Test warning instead of error when default | |
# From repository: MarcMoylan/BWModProjects
import argparse
import csv
import io
import os
import string
# Parse arguments (CSV of paths, BW version (stock, fix, pro)).
parser = argparse.ArgumentParser()
parser.add_argument('--csv', help='CSV File containing the paths of the .uc files to parameterize', required=True)
parser.add_argument('--ver', type=int, help='BW version to parse from. 0: stock, 1: fix, 2: pro, 3: real, 4: horde', default=0)
parser.add_argument('--nodef', help='Exclude default values', action="store_true")
# BUG FIX: --mult's help text was a copy-paste of --nodef's ('Exclude default
# values').  NOTE(review): new wording inferred from the createMultFiles
# variable below - confirm the flag's actual intent.
parser.add_argument('--mult', help='Create multiple output files', action="store_true")
args = vars(parser.parse_args())
# Unpack parsed arguments into module-level settings.
csvFile = args['csv']            # path to CSV listing the .uc files to process
version = args['ver']            # 0 stock, 1 fix, 2 pro, 3 real, 4 horde
noDefaults = args['nodef']       # when True, omit properties equal to the defaults
createMultFiles = args['mult']
gametypeString = ""              # prefix used when naming generated param objects
masterOutput = ""                # accumulated output text across all inputs
#set default dicts
# Each dict maps an UnrealScript property name to its default value *string*
# (values are kept verbatim as UScript literals, not parsed Python numbers).
# With --nodef, properties whose parsed value equals the default are omitted
# from the generated output.

# Defaults for InstantEffectParams (hitscan fire).
defaultInstantDict = {
    'TraceRange': '(Min=5000.000000,Max=5000.000000)',
    'WaterTraceRange':'128.0',
    # NOTE(review): '0f' here vs '0.0' in the other dicts - confirm both
    # spellings compare equal against the parsed source values.
    'DecayRange':'(Min=0f,Max=0f)',
    'RangeAtten':'1.0',
    'Damage':'0',
    'HeadMult':'1.25',
    'LimbMult':'0.7',
    'DamageType':'None',
    'DamageTypeHead':'None',
    'DamageTypeArm':'None',
    # NOTE(review): unlike bPenetrate this bool has no 'b' prefix -
    # presumably matches the UScript property name; confirm.
    'UseRunningDamage':'False',
    'RunningSpeedThreshold':'300',
    'PenetrationEnergy':'0',
    'PenetrateForce':'0',
    'bPenetrate':'False',
    'PDamageFactor':'0.75',
    'WallPDamageFactor':'0.95',
    'MomentumTransfer':'0',
    'HookStopFactor':'0.0',
    'HookPullForce':'0.0',
    'SpreadMode':'FSM_Circle',
    'MuzzleFlashClass':'None',
    'FlashScaleFactor':'1.0',
    'FireSound':'(Volume=1.000000,Radius=512.000000,Pitch=1.000000,bNoOverride=True)',
    'Recoil':'0',
    'Chaos':'0',
    'PushbackForce':'0',
    'Inaccuracy':'(X=0,Y=0)',
    'SplashDamage':'False',
    'RecommendSplashDamage':'False',
    'BotRefireRate':'0.95',
    'WarnTargetPct':'0.0'
}
# Defaults for ProjectileEffectParams (projectile / grenade fire).
defaultProjectileDict = {
    'ERadiusFallOffType':'RFO_Linear',
    'ProjectileClass':'None',
    'SpawnOffset':'(X=0,Y=0,Z=0)',
    'Speed':'0',
    'MaxSpeed':'0.000000',
    'AccelSpeed':'0',
    'Damage':'0',
    'DamageRadius':'0.000000',
    'MomentumTransfer':'0',
    'HeadMult':'1.500000',
    'LimbMult':'0.700000',
    'MaxDamageGainFactor':'0',
    'DamageGainStartTime':'0',
    'DamageGainEndTime':'0',
    'SpreadMode':'FSM_Circle',
    'MuzzleFlashClass':'None',
    'FlashScaleFactor':'1.0',
    'FireSound':'(Volume=1.000000,Radius=512.000000,Pitch=1.000000,bNoOverride=True)',
    'Recoil':'0',
    'Chaos':'0',
    'PushbackForce':'0',
    'Inaccuracy':'(X=0,Y=0)',
    'SplashDamage':'False',
    'RecommendSplashDamage':'False',
    'BotRefireRate':'0.95',
    'WarnTargetPct':'0.0'
}
# Defaults for ShotgunEffectParams (multi-trace scatter fire).
# BUG FIX: the dict previously listed 'bPenetrate' twice (both 'False');
# the duplicate literal key has been removed.
defaultShotgunDict = {
    'TracerChance':'0.500000',
    'TraceRange':'(Min=500.000000,Max=2000.000000)',
    'WaterTraceRange':'128.0',
    'DecayRange':'(Min=0.0,Max=0.0)',
    'RangeAtten':'1.0',
    'TraceCount':'0',
    'TracerClass':'None',
    'ImpactManager':'None',
    'bDoWaterSplash':'false',
    'MaxHits':'0',
    'Damage':'0',
    'HeadMult':'1.4',
    'LimbMult':'0.7',
    'DamageType':'None',
    'DamageTypeHead':'None',
    'DamageTypeArm':'None',
    'PenetrationEnergy':'0',
    'PenetrateForce':'0',
    'bPenetrate':'False',
    'PDamageFactor':'0.75',
    'WallPDamageFactor':'0.95',
    'UseRunningDamage':'False',
    'RunningSpeedThreshold':'300',
    'HookStopFactor':'0.0',
    'HookPullForce':'0.0',
    'SpreadMode':'FSM_Scatter',
    'ShotTypeString':'shots',
    'MuzzleFlashClass':'None',
    'FlashScaleFactor':'1.0',
    'FireSound':'(Volume=1.000000,Radius=512.000000,Pitch=1.000000,bNoOverride=True)',
    'Recoil':'0',
    'Chaos':'0',
    'PushbackForce':'0',
    'MomentumTransfer':'0',
    'Inaccuracy':'(X=0,Y=0)',
    'SplashDamage':'False',
    'RecommendSplashDamage':'False',
    'BotRefireRate':'0.95',
    'WarnTargetPct':'0.0'
}
# Defaults for MeleeEffectParams (melee fire).
defaultMeleeDict = {
    'TraceRange':'(Min=145.000000,Max=145.000000)',
    'WaterTraceRange':'128.0',
    'DecayRange':'(Min=0.0,Max=0.0)',
    'RangeAtten':'1.0',
    'Damage':'50.000000',
    'HeadMult':'1.0',
    'LimbMult':'1.0',
    'DamageType':'None',
    'DamageTypeHead':'None',
    'DamageTypeArm':'None',
    'ChargeDamageBonusFactor':'1.0',
    'FlankDamageMult':'1.15',
    'BackDamageMult':'1.3',
    'PenetrationEnergy':'0',
    'PDamageFactor':'0.500000',
    'RunningSpeedThreshold':'1000.000000',
    'HookStopFactor':'0.0',
    'HookPullForce':'0.0',
    'SpreadMode':'FSM_Circle',
    'MuzzleFlashClass':'None',
    'FlashScaleFactor':'1.0',
    'FireSound':'(Volume=1.000000,Radius=512.000000,Pitch=1.000000,bNoOverride=True)',
    'Recoil':'0',
    'Chaos':'0',
    'PushbackForce':'0',
    'MomentumTransfer':'0',
    'Inaccuracy':'(X=0,Y=0)',
    'SplashDamage':'False',
    'RecommendSplashDamage':'False',
    'BotRefireRate':'0.95',
    'WarnTargetPct':'0.0'
}
# Defaults for FireParams (timing / animation data shared by all fire modes).
defaultFireDataDict = {
    'FireInterval':'0.5',
    'AmmoPerFire':'1',
    'PreFireTime':'0.0',
    'MaxHoldTime':'0',
    'TargetState':'',
    'ModeName':'',
    'MaxFireCount':'0',
    'BurstFireRateFactor':'0.66',
    'bCockAfterFire':'False',
    'PreFireAnim':'"PreFire"',
    'FireAnim':'"Fire"',
    'FireLoopAnim':'"FireLoop"',
    'FireEndAnim':'"FireEnd"',
    'AimedFireAnim':'',
    'PreFireAnimRate':'1.0',
    'FireAnimRate':'1.0',
    'FireLoopAnimRate':'1.0',
    'FireEndAnimRate':'1.0'
}
# Defaults for the generic FireEffectParams ("other" fire modes).
defaultFireEffectDict = {
    'SpreadMode':'FSM_Rectangle',
    'MuzzleFlashClass':'None',
    'FlashScaleFactor':'1.0',
    'FireSound':'(Volume=1.000000,Radius=512.000000,Pitch=1.000000,bNoOverride=True)',
    'Recoil':'0',
    'Chaos':'0',
    'PushbackForce':'0',
    'Inaccuracy':'(X=0,Y=0)',
    'SplashDamage':'False',
    'RecommendSplashDamage':'False',
    'BotRefireRate':'0.95',
    'WarnTargetPct':'0.0'
}
# Defaults for RecoilParams.
defaultRecoilDict = {
    'XCurve': '(Points=(,(InVal=1.000000)))',
    'YCurve': '(Points=(,(InVal=1.000000,OutVal=1.000000)))',
    'PitchFactor': '1.000000',
    'YawFactor': '1.000000',
    'XRandFactor': '0.000000',
    'YRandFactor': '0.000000',
    'MaxRecoil': '4096.000000',
    'DeclineTime': '2.000000',
    'DeclineDelay': '0.300000',
    'ViewBindFactor': '1.000000',
    'ADSViewBindFactor': '1.000000',
    'HipMultiplier': '1.600000',
    'CrouchMultiplier': '0.750000',
    'bViewDecline': 'False'
}
# Defaults for AimParams.
defaultAimDict = {
    'AimSpread': '(Min=16,Max=128)',
    'AimAdjustTime': '0.500000',
    'OffsetAdjustTime': '0.300000',
    'CrouchMultiplier': '0.800000',
    'ADSMultiplier': '1.000000',
    'ViewBindFactor': '0.000000',
    'SprintChaos': '0.100000',
    'AimDamageThreshold': '100',
    'ChaosDeclineTime': '0.640000',
    'ChaosDeclineDelay': '0.000000',
    'ChaosSpeedThreshold': '500.000000'
}
# Defaults for the top-level WeaponParams.
defaultWeaponDict = {
    'PlayerSpeedFactor': '1.000000',
    'PlayerJumpFactor': '1.000000',
    'InventorySize': '12',
    'SightMoveSpeedFactor': '0.900000',
    'SightingTime': '0.350000',
    'DisplaceDurationMult': '1.000000',
    'MagAmmo': '30',
    'SightOffset': '(X=0,Y=0,Z=0)',
    'SightPivot':'(Pitch=0,Yaw=0,Roll=0)',
    'ZoomType': 'ZT_Irons'
}
def createOutputString(paramsDict):
    """Render the RecoilParams, AimParams and WeaponParams defaultproperties
    blocks for one weapon as UnrealScript text.

    paramsDict maps property names to value strings parsed from the source
    .uc file.  Module globals used: gametypeString (object-name prefix) and
    noDefaults (suppress properties equal to the defaults).  The embedded
    triple-quoted templates are emitted verbatim, so their internal
    whitespace is significant - do not reformat them.
    """
    # --- RecoilParams object ---
    outputStringRecoil = '''
    //=================================================================
    // RECOIL
    //=================================================================
    Begin Object Class=RecoilParams Name='''+gametypeString+'''RecoilParams'''
    for property in defaultRecoilDict:
        # Emit unless --nodef is set and the parsed value equals the default.
        if not noDefaults or defaultRecoilDict.get(property) != paramsDict.get(property):
            outputStringRecoil += '\n\t\t' + property + '=' + str(paramsDict.get(property))
    outputStringRecoil +='\n\tEnd Object'
    # --- AimParams object ---
    outputStringAim = '''
    //=================================================================
    // AIM
    //=================================================================
    Begin Object Class=AimParams Name='''+gametypeString+'''AimParams'''
    if 'ViewBindFactor2' in paramsDict:
        # Workaround: 'ViewBindFactor' appears in both the recoil and aim
        # sections, so the aim-section copy is parsed as 'ViewBindFactor2'
        # and restored here before emission.
        paramsDict['ViewBindFactor'] = paramsDict.get('ViewBindFactor2') #duplicate workaround
    for property in defaultAimDict:
        if not noDefaults or defaultAimDict.get(property) != paramsDict.get(property):
            outputStringAim += '\n\t\t' + property + '=' + str(paramsDict.get(property))
    outputStringAim +='\n\tEnd Object'
    # --- WeaponParams object, wiring the sub-objects together ---
    outputStringBasic = '''
    //=================================================================
    // BASIC PARAMS
    //=================================================================
    Begin Object Class=WeaponParams Name='''+gametypeString+'''Params'''
    for property in defaultWeaponDict:
        if not noDefaults or defaultWeaponDict.get(property) != paramsDict.get(property):
            outputStringBasic += '\n\t\t' + property + '=' + str(paramsDict.get(property))
    outputStringBasic += '''
        RecoilParams(0)=RecoilParams\''''+gametypeString+'''RecoilParams'
        AimParams(0)=AimParams\''''+gametypeString+'''AimParams'
        FireParams(0)=FireParams\''''+gametypeString+'''PrimaryFireParams'
        AltFireParams(0)=FireParams\''''+gametypeString+'''SecondaryFireParams'
    End Object
    Layouts(0)=WeaponParams\''''+gametypeString+'''Params\'\n\n'''
    return outputStringRecoil + "\n" + outputStringAim + "\n" + outputStringBasic
def createFiremodeOutputString(paramsDict, fireModeNum):
    """Render one fire mode's EffectParams + FireParams defaultproperties
    blocks as UnrealScript text.

    paramsDict holds the parsed property/value strings for the fire mode and
    a 'firemodeType' discriminator (0 instant, 1/4 projectile or grenade,
    2 shotgun, 3 melee, 5 other).  fireModeNum selects the 'Primary' (0) or
    'Secondary' (non-zero) naming.  Returns '' when paramsDict is empty.
    Module globals used: gametypeString and noDefaults.  The embedded
    triple-quoted templates are emitted verbatim - do not reformat them.
    """
    firemodeNumberString = ''
    if not paramsDict:
        # No data parsed for this fire mode (e.g. weapon has no alt fire).
        return ''
    firemodeType = paramsDict.get("firemodeType")
    if fireModeNum == 0:
        firemodeNumberString = 'Primary'
    else:
        firemodeNumberString = 'Secondary'
    effectString = '''
    //=================================================================
    // '''+firemodeNumberString.upper()+''' FIRE
    //=================================================================
    '''
    if firemodeType == 0: #Instant fire
        effectString += '''
    Begin Object Class=InstantEffectParams Name='''+gametypeString+firemodeNumberString+'''EffectParams'''
        for property in defaultInstantDict:
            # Emit unless --nodef is set and the value equals the default.
            if not noDefaults or defaultInstantDict.get(property) != paramsDict.get(property):
                effectString += '\n\t\t\t' + property + '=' + str(paramsDict.get(property))
        effectString += '''
        End Object
        Begin Object Class=FireParams Name='''+gametypeString+firemodeNumberString+'''FireParams'''
        for property in defaultFireDataDict:
            if not noDefaults or defaultFireDataDict.get(property) != paramsDict.get(property):
                effectString += '\n\t\t\t' + property + '=' + str(paramsDict.get(property))
        effectString += '''
            FireEffectParams(0)=InstantEffectParams\''''+gametypeString+firemodeNumberString+'''EffectParams\'
        End Object
    '''
    elif firemodeType == 1 or firemodeType == 4: #Projectile fire and grenade fire
        effectString += '''
    Begin Object Class=ProjectileEffectParams Name='''+gametypeString+firemodeNumberString+'''EffectParams'''
        for property in defaultProjectileDict:
            if not noDefaults or defaultProjectileDict.get(property) != paramsDict.get(property):
                effectString += '\n\t\t\t' + property + '=' + str(paramsDict.get(property))
        effectString += '''
        End Object
        Begin Object Class=FireParams Name='''+gametypeString+firemodeNumberString+'''FireParams'''
        for property in defaultFireDataDict:
            if not noDefaults or defaultFireDataDict.get(property) != paramsDict.get(property):
                effectString += '\n\t\t\t' + property + '=' + str(paramsDict.get(property))
        effectString += '''
            FireEffectParams(0)=ProjectileEffectParams\''''+gametypeString+firemodeNumberString+'''EffectParams\'
        End Object
    '''
    elif firemodeType == 2: #Shotgun fire
        effectString += '''
    Begin Object Class=ShotgunEffectParams Name='''+gametypeString+firemodeNumberString+'''EffectParams'''
        for property in defaultShotgunDict:
            if not noDefaults or defaultShotgunDict.get(property) != paramsDict.get(property):
                effectString += '\n\t\t\t' + property + '=' + str(paramsDict.get(property))
        effectString += '''
        End Object
        Begin Object Class=FireParams Name='''+gametypeString+firemodeNumberString+'''FireParams'''
        for property in defaultFireDataDict:
            if not noDefaults or defaultFireDataDict.get(property) != paramsDict.get(property):
                effectString += '\n\t\t\t' + property + '=' + str(paramsDict.get(property))
        effectString += '''
            FireEffectParams(0)=ShotgunEffectParams\''''+gametypeString+firemodeNumberString+'''EffectParams\'
        End Object
    '''
    elif firemodeType == 3: #Melee fire
        effectString += '''
    Begin Object Class=MeleeEffectParams Name='''+gametypeString+firemodeNumberString+'''EffectParams'''
        for property in defaultMeleeDict:
            if not noDefaults or defaultMeleeDict.get(property) != paramsDict.get(property):
                effectString += '\n\t\t\t' + property + '=' + str(paramsDict.get(property))
        effectString += '''
        End Object
        Begin Object Class=FireParams Name='''+gametypeString+firemodeNumberString+'''FireParams'''
        for property in defaultFireDataDict:
            if not noDefaults or defaultFireDataDict.get(property) != paramsDict.get(property):
                effectString += '\n\t\t\t' + property + '=' + str(paramsDict.get(property))
        effectString += '''
            FireEffectParams(0)=MeleeEffectParams\''''+gametypeString+firemodeNumberString+'''EffectParams\'
        End Object
    '''
    elif firemodeType == 5: #Other fire
        effectString += '''
    Begin Object Class=FireEffectParams Name='''+gametypeString+firemodeNumberString+'''EffectParams'''
        for property in defaultFireEffectDict:
            if not noDefaults or defaultFireEffectDict.get(property) != paramsDict.get(property):
                effectString += '\n\t\t\t' + property + '=' + str(paramsDict.get(property))
        effectString += '''
        End Object
        Begin Object Class=FireParams Name='''+gametypeString+firemodeNumberString+'''FireParams'''
        for property in defaultFireDataDict:
            if not noDefaults or defaultFireDataDict.get(property) != paramsDict.get(property):
                effectString += '\n\t\t\t' + property + '=' + str(paramsDict.get(property))
        effectString += '''
            FireEffectParams(0)=FireEffectParams\''''+gametypeString+firemodeNumberString+'''EffectParams\'
        End Object
    '''
    return effectString
def setDefaultParams():
    """Return a dict of default weapon/recoil/aim parameter value strings for
    the selected BW version.

    Reads the module-level ``version`` flag (0: stock, 1: fix, 2: pro,
    3: real, 4: horde).  Stock/fix defaults are spelled out explicitly
    because their property names differ from the pro-era dicts; pro defaults
    are merged from the module-level default dicts.
    NOTE(review): versions 3 and 4 fall through and return an empty dict -
    confirm that is intended.
    """
    paramsDict = {}
    if version == 0 or version == 1:
        #recoil params
        paramsDict['RecoilXCurve'] = '(Points=(,(InVal=1.000000,OutVal=1.000000)))'
        paramsDict['RecoilYCurve'] = '(Points=(,(InVal=1.000000,OutVal=1.000000)))'
        paramsDict['RecoilPitchFactor'] = '1.000000'
        paramsDict['RecoilYawFactor'] = '1.000000'
        paramsDict['RecoilXFactor'] = '0.500000'
        paramsDict['RecoilYFactor'] = '0.500000'
        paramsDict['RecoilMax'] = '2048.000000'
        paramsDict['RecoilDeclineTime'] = '2.000000'
        paramsDict['RecoilDeclineDelay'] = '0.300000'
        paramsDict['ViewRecoilFactor'] = '0.500000'
        # (CrouchAimFactor is re-assigned below in the aim section with the
        # same value; kept to mirror the original ordering.)
        paramsDict['CrouchAimFactor'] = '0.700000'
        paramsDict['HipMultiplier'] = '1.000000'
        paramsDict['bViewDecline'] = 'True'
        #aim params
        paramsDict['AimSpread'] = '(X=(Min=-16.000000,Max=16.000000),Y=(Min=-16.000000,Max=16.000000))'
        paramsDict['ChaosAimSpread'] = '(X=(Min=-2560.000000,Max=2560.000000),Y=(Min=-2560.000000,Max=2560.000000))'
        paramsDict['AimAdjustTime'] = '0.500000'
        paramsDict['CrouchAimFactor'] = '0.700000'
        paramsDict['SightAimFactor'] = '0.700000'
        paramsDict['ViewAimFactor'] = '0.500000'
        paramsDict['SprintChaos'] = '0.400000'
        paramsDict['AimDamageThreshold'] = '100'
        paramsDict['ChaosDeclineTime'] = '2.000000'
        paramsDict['ChaosSpeedThreshold'] = '500.000000'
        paramsDict['ChaosDeclineDelay'] = '0.000000'
        paramsDict['OffsetAdjustTime'] = '0.300000'
        #basic params
        paramsDict['MagAmmo'] = '30'
        paramsDict['InventorySize'] = '35'
        paramsDict['SightOffset'] = '(X=0,Y=0,Z=0)'
        paramsDict['SightPivot'] = '(Pitch=0,Yaw=0,Roll=0)'
        paramsDict['PlayerSpeedFactor'] = '1.000000'
        paramsDict['PlayerJumpFactor'] = '1.000000'
        paramsDict['DisplaceDurationMult'] = '1.000000'
        paramsDict['SightingTime'] = '0.350000'
        paramsDict['SightMoveSpeedFactor'] = '0.500000'
        paramsDict['ZoomType'] = 'ZT_Irons'
    elif version == 2:
        # Merge recoil, aim and basic weapon defaults in one step.  The
        # dict-unpacking merge already builds a fresh dict, so the per-dict
        # .copy() calls the original chained with '|' were redundant.
        paramsDict = {**defaultRecoilDict, **defaultAimDict, **defaultWeaponDict}
    return paramsDict
def setDefaultFiremodeParams(firemodeType):
    """Return a dict of default parameters for one weapon firemode.

    firemodeType codes (see extractFileFiremodeType): 0 instant,
    1 projectile, 2 shotgun, 3 melee, 4 grenade; any other code gets only
    the shared defaults. Relies on the module-level `version`
    (0/1 = stock/fix, 2 = pro) and, for version 2, on the module-level
    default parameter dicts.
    """
    paramsDict = {}
    #stock/fix
    if version in (0, 1):
        # fire effect params
        paramsDict['FireSpreadMode'] = 'FSM_Rectangle'
        paramsDict['MuzzleFlashClass'] = 'None'
        paramsDict['FlashScaleFactor'] = '1.0'
        paramsDict['BallisticFireSound'] = '(Volume=1.000000,Radius=255.000000,Pitch=1.000000,bNoOverride=True)'
        paramsDict['RecoilPerShot'] = '0.0'
        paramsDict['FireChaos'] = '-1.0'
        paramsDict['PushbackForce'] = '0'
        paramsDict['XInaccuracy'] = '0.0'
        paramsDict['YInaccuracy'] = '0.0'
        paramsDict['bSplashDamage'] = 'False'
        paramsDict['bRecommendSplashDamage'] = 'False'
        paramsDict['BotRefireRate'] = '0.95'
        paramsDict['WarnTargetPct'] = '0.0'
        # fire data params
        paramsDict['FireRate'] = '0.5'
        paramsDict['AmmoPerFire'] = '1'
        paramsDict['PreFireTime'] = '0.0'
        paramsDict['MaxHoldTime'] = '0'
        paramsDict['TargetState'] = ''
        paramsDict['ModeName'] = ''
        paramsDict['MaxFireCount'] = '0'
        paramsDict['BurstFireRateFactor'] = '1.00'
        paramsDict['bCockAfterFire'] = 'False'
        paramsDict['PreFireAnim'] = '"PreFire"'
        paramsDict['FireAnim'] = '"Fire"'
        paramsDict['FireLoopAnim'] = '"FireLoop"'
        paramsDict['FireEndAnim'] = '"FireEnd"'
        paramsDict['AimedFireAnim'] = ''
        paramsDict['PreFireAnimRate'] = '1.0'
        paramsDict['FireAnimRate'] = '1.0'
        paramsDict['FireLoopAnimRate'] = '1.0'
        paramsDict['FireEndAnimRate'] = '1.0'
        # NOTE: the blocks below are deliberately cumulative — shotgun (2)
        # and melee (3) first receive the instant-fire defaults and then
        # override some of them.
        if firemodeType in (0, 2, 3):
            #instant fire params
            paramsDict['TraceRange'] = '(Min=5000.000000,Max=5000.000000)'
            paramsDict['WaterTraceRange'] = '5000.0'
            paramsDict['DecayRange'] = '(Min=0.0,Max=0.0)'
            paramsDict['RangeAtten'] = '1.0'
            paramsDict['Damage'] = '0'
            paramsDict['HeadMult'] = '2.0'
            paramsDict['LimbMult'] = '0.5'
            paramsDict['DamageType'] = 'None'
            paramsDict['DamageTypeHead'] = 'None'
            paramsDict['DamageTypeArm'] = 'None'
            paramsDict['UseRunningDamage'] = 'False'
            paramsDict['RunningSpeedThreshold'] = '300'
            paramsDict['MaxWallSize'] = '0'
            paramsDict['PenetrateForce'] = '0'
            paramsDict['bPenetrate'] = 'False'
            paramsDict['PDamageFactor'] = '0.6'
            paramsDict['WallPDamageFactor'] = '0.4'
            paramsDict['MomentumTransfer'] = '0'
            paramsDict['HookStopFactor'] = '0.0'
            paramsDict['HookPullForce'] = '0.0'
        if firemodeType == 1:
            #projectile fire params
            paramsDict['ERadiusFallOffType'] = 'RFO_Linear'
            paramsDict['ProjectileClass'] = 'None'
            paramsDict['SpawnOffset'] = '(X=0,Y=0,Z=0)'
            paramsDict['Speed'] = '0'
            paramsDict['MaxSpeed'] = '0.000000'
            paramsDict['AccelSpeed'] = '0'
            paramsDict['Damage'] = '0'
            paramsDict['DamageHead'] = '0'
            paramsDict['DamageLimb'] = '0'
            paramsDict['DamageRadius'] = '0.000000'
            paramsDict['MomentumTransfer'] = '0'
            paramsDict['HeadMult'] = '2.000000'
            paramsDict['LimbMult'] = '0.500000'
            paramsDict['MaxDamageGainFactor'] = '0'
            paramsDict['DamageGainStartTime'] = '0'
            paramsDict['DamageGainEndTime'] = '0'
            paramsDict['WarnTargetPct'] = '0.500000'
        if firemodeType == 2:
            #shotgun fire params (override some instant-fire defaults)
            paramsDict['TracerChance'] = '0.500000'
            paramsDict['TraceRange'] = '(Min=500.000000,Max=2000.000000)'
            paramsDict['TraceCount'] = '0'
            paramsDict['TracerClass'] = 'None'
            paramsDict['ImpactManager'] = 'None'
            paramsDict['bDoWaterSplash'] = 'false'
            paramsDict['MaxHits'] = '0'
            paramsDict['HeadMult'] = '1.8'
            paramsDict['LimbMult'] = '0.24'
            paramsDict['MaxWallSize'] = '16.000000'
            paramsDict['MaxWalls'] = '2'
            paramsDict['bPenetrate'] = 'False'
            paramsDict['FireSpreadMode'] = 'FSM_Scatter'
            paramsDict['ShotTypeString'] = 'shots'
        if firemodeType == 3:
            #melee fire params (override some instant-fire defaults)
            paramsDict['TraceRange'] = '(Min=128.000000,Max=128.000000)'
            paramsDict['Damage'] = '50.000000'
            paramsDict['HeadMult'] = '1.0'
            paramsDict['LimbMult'] = '1.0'
            paramsDict['RangeAtten'] = '1.0'
            paramsDict['ChargeDamageBonusFactor'] = '1'
            paramsDict['FlankDamageMult'] = '1.15'
            paramsDict['BackDamageMult'] = '1.3'
            paramsDict['MaxWallSize'] = '0.000000'
            paramsDict['PDamageFactor'] = '0.500000'
            paramsDict['RunningSpeedThreshold'] = '1000.000000'
        if firemodeType == 4:
            #grenade fire params
            paramsDict['ERadiusFallOffType'] = 'RFO_Linear'
            paramsDict['ProjectileClass'] = 'None'
            paramsDict['SpawnOffset'] = '(X=0,Y=0,Z=0)'
            paramsDict['Speed'] = '1000'
            paramsDict['MaxSpeed'] = '0.000000'
            paramsDict['AccelSpeed'] = '0'
            paramsDict['Damage'] = '70'
            paramsDict['DamageHead'] = '0'
            paramsDict['DamageLimb'] = '0'
            paramsDict['DamageRadius'] = '240.000000'
            paramsDict['MomentumTransfer'] = '75000'
            paramsDict['HeadMult'] = '2.000000'
            paramsDict['LimbMult'] = '0.500000'
            paramsDict['MaxDamageGainFactor'] = '0'
            paramsDict['DamageGainStartTime'] = '0'
            paramsDict['DamageGainEndTime'] = '0'
            paramsDict['WarnTargetPct'] = '0.500000'
    #pro
    elif version == 2:
        # fire data params
        paramsDict = defaultFireDataDict.copy()
        # `update` merges in place with right-hand priority — same result as
        # the previous `paramsDict.copy() | otherDict.copy()` without the
        # two needless intermediate copies per merge.
        #instant fire params
        if firemodeType in (0, 2, 3):
            paramsDict.update(defaultInstantDict)
        #projectile fire params
        if firemodeType in (1, 4):
            paramsDict.update(defaultProjectileDict)
        #shotgun fire params
        if firemodeType == 2:
            paramsDict.update(defaultShotgunDict)
        #melee fire params
        if firemodeType == 3:
            paramsDict.update(defaultMeleeDict)
    return paramsDict
# Convert firemode parameter names/values from stock/fix format to pro format.
def updateFiremodeVariableData(firemodeDict, firemodeType):
    """Rename/convert one firemode's parameters in place; return the dict.

    firemodeType follows extractFileFiremodeType's codes (0 instant,
    1 projectile, 2 shotgun, 3 melee, 4 grenade, 5 unknown). Relies on the
    module-level `version` (0/1 = stock/fix input, 2 = pro input).
    """
    # pro input only needs a handful of straight renames
    if version == 2:
        if "FireRecoil" in firemodeDict:
            firemodeDict['Recoil'] = firemodeDict.get("FireRecoil")
        if "FireChaos" in firemodeDict:
            firemodeDict['Chaos'] = firemodeDict.get("FireChaos")
        if "FireRate" in firemodeDict:
            firemodeDict['FireInterval'] = firemodeDict.get("FireRate")
        if 'FirePushbackForce' in firemodeDict:
            firemodeDict['PushbackForce'] = firemodeDict.get("FirePushbackForce")
        if 'Damage' in firemodeDict:
            firemodeDict['Damage'] = int(float(firemodeDict.get("Damage")))
        if "BallisticFireSound" in firemodeDict:
            firemodeDict['FireSound'] = firemodeDict.get("BallisticFireSound")
        if "bSplashDamage" in firemodeDict:
            firemodeDict['SplashDamage'] = firemodeDict.get("bSplashDamage")
        if "bRecommendSplashDamage" in firemodeDict:
            firemodeDict['RecommendSplashDamage'] = firemodeDict.get("bRecommendSplashDamage")
        return firemodeDict
    # fix and stock variables are converted below
    if firemodeType != 5:  # convert damage and penetration for regular fire types only
        scanString = "DamageRange" if "DamageRange" in firemodeDict else "Damage"
        # Damage given as a "(Min=..,Max=..)" range collapses to the floored average.
        # str() also protects against a missing key / wrong-version input.
        if 'Max=' in str(firemodeDict.get(scanString)):
            damageString = str(firemodeDict.get(scanString))
            damageMin = damageString[damageString.find('Min=')+4:damageString.find(',')].strip()
            damageMax = damageString[damageString.find('Max=')+4:damageString.find(')')].strip()
            try:
                convertedDamage = (float(damageMin)+float(damageMax))//2
                firemodeDict['Damage'] = convertedDamage
            except ValueError:
                # BUG fix: convertedDamage was previously left unbound here,
                # so the head/limb computations below raised NameError.
                convertedDamage = 0.0
                firemodeDict['Damage'] = 0
            damageString = str(firemodeDict.get(scanString+"Head"))
            damageMin = damageString[damageString.find('Min=')+4:damageString.find(',')].strip()
            damageMax = damageString[damageString.find('Max=')+4:damageString.find(')')].strip()
            try:
                convertedDamageHead = (float(damageMin)+float(damageMax))//2
                headMultiplier = str(convertedDamageHead/convertedDamage)[:8]
            except (ValueError, ZeroDivisionError):
                # BUG fix: a zero base damage previously raised an uncaught
                # ZeroDivisionError.
                headMultiplier = 1.0
            firemodeDict['HeadMult'] = headMultiplier
            damageString = str(firemodeDict.get(scanString+"Limb"))
            damageMin = damageString[damageString.find('Min=')+4:damageString.find(',')].strip()
            damageMax = damageString[damageString.find('Max=')+4:damageString.find(')')].strip()
            try:
                convertedDamageLimb = (float(damageMin)+float(damageMax))//2
                limbMultiplier = str(convertedDamageLimb/convertedDamage)[:8]
            except (ValueError, ZeroDivisionError):
                limbMultiplier = 1.0
            firemodeDict['LimbMult'] = limbMultiplier
        # robustness: guard against a missing 'Damage' key before float()
        elif firemodeDict.get('Damage') is not None and float(firemodeDict.get('Damage')) != 0:
            if float(firemodeDict.get('DamageHead')) != 0:
                firemodeDict['HeadMult'] = str(float(firemodeDict.get("DamageHead"))/float(firemodeDict.get('Damage')))[:8]
            else:
                firemodeDict['HeadMult'] = 1.0
            if float(firemodeDict.get('DamageLimb')) != 0:
                firemodeDict['LimbMult'] = str(float(firemodeDict.get("DamageLimb"))/float(firemodeDict.get('Damage')))[:8]
            else:
                firemodeDict['LimbMult'] = 1.0
        if firemodeType != 1 and firemodeType != 4:
            # water trace range and wall penetration
            traceRangeString = str(firemodeDict.get("TraceRange"))
            traceRangeMax = traceRangeString[traceRangeString.find('Max=')+4:traceRangeString.find(')')].strip()
            if firemodeDict.get("WaterRangeFactor") is not None:
                waterTraceRange = float(traceRangeMax)*float(firemodeDict.get("WaterRangeFactor"))
                firemodeDict['WaterTraceRange'] = waterTraceRange
            firemodeDict['PenetrationEnergy'] = firemodeDict.get("MaxWallSize")
    # straightforward renames
    if "RecoilPerShot" in firemodeDict:
        firemodeDict['Recoil'] = firemodeDict.get("RecoilPerShot")
    if "VelocityRecoil" in firemodeDict:
        firemodeDict['PushbackForce'] = firemodeDict.get("VelocityRecoil")
    if "FireChaos" in firemodeDict:
        firemodeDict['Chaos'] = firemodeDict.get("FireChaos")
    if "FireSpreadMode" in firemodeDict:
        firemodeDict['SpreadMode'] = firemodeDict.get("FireSpreadMode")
    if "BallisticFireSound" in firemodeDict:
        firemodeDict['FireSound'] = firemodeDict.get("BallisticFireSound")
    if "bSplashDamage" in firemodeDict:
        firemodeDict['SplashDamage'] = firemodeDict.get("bSplashDamage")
    if "bRecommendSplashDamage" in firemodeDict:
        firemodeDict['RecommendSplashDamage'] = firemodeDict.get("bRecommendSplashDamage")
    if "FireRate" in firemodeDict:
        firemodeDict['FireInterval'] = firemodeDict.get("FireRate")
    try:
        firemodeDict['Inaccuracy'] = '(X={},Y={})'.format(int(float(firemodeDict.get("XInaccuracy"))),int(float(firemodeDict.get("YInaccuracy"))))
    except (ValueError, TypeError):
        # BUG fix: TypeError covers a missing X/YInaccuracy key (get() -> None),
        # which previously escaped the ValueError-only handler.
        firemodeDict['Inaccuracy'] = '(X=0,Y=0)'
    return firemodeDict
# Convert weapon-level (recoil/aim) parameter names from stock/fix to pro format.
def updateVariableData(paramsDict):
    """Convert weapon-level parameters from stock/fix naming to pro naming.

    Mutates paramsDict in place (returns None); version 2 input needs no
    conversion. Relies on the module-level `version`.
    """
    if version == 1 or version == 0:
        # aimspread: collapse the two "(Min=..,Max=..)" ranges into a single
        # "(Min=<aim max>,Max=<chaos-aim max>)" value
        aimString = str(paramsDict.get("AimSpread"))
        chaosAimString = str(paramsDict.get("ChaosAimSpread"))
        aimMax = aimString[aimString.find('Max=')+4:aimString.find(')')].strip()
        chaosAimMax = chaosAimString[chaosAimString.find('Max=')+4:chaosAimString.find(')')].strip()
        paramsDict['AimSpread'] = '(Min={},Max={})'.format(int(float(aimMax)), int(float(chaosAimMax)))
        # straightforward renames
        paramsDict['XCurve'] = paramsDict.get("RecoilXCurve")
        paramsDict['YCurve'] = paramsDict.get("RecoilYCurve")
        paramsDict['PitchFactor'] = paramsDict.get("RecoilPitchFactor")
        paramsDict['YawFactor'] = paramsDict.get("RecoilYawFactor")
        paramsDict['XRandFactor'] = paramsDict.get("RecoilXFactor")
        paramsDict['YRandFactor'] = paramsDict.get("RecoilYFactor")
        paramsDict['MaxRecoil'] = paramsDict.get("RecoilMax")
        paramsDict['DeclineTime'] = paramsDict.get("RecoilDeclineTime")
        paramsDict['DeclineDelay'] = paramsDict.get("RecoilDeclineDelay")
        paramsDict['ViewBindFactor'] = paramsDict.get("ViewRecoilFactor")
        paramsDict['ADSViewBindFactor'] = paramsDict.get("ViewRecoilFactor")
        paramsDict['CrouchMultiplier'] = paramsDict.get("CrouchAimFactor")
        paramsDict['ADSMultiplier'] = paramsDict.get("SightAimFactor")
        # Parameter values in this converter are stored as strings, so accept
        # both the bool and its string form.
        if 'bNoMeshInScope' in paramsDict and 'ZoomType' not in paramsDict and paramsDict.get("bNoMeshInScope") in (True, 'True'):
            # BUG fix: the old code did paramsDict.get("ZT_Smooth"), i.e. it
            # looked up the enum literal as a key (always None). The intent —
            # matching 'ZT_Irons' used elsewhere — is to assign the literal.
            paramsDict['ZoomType'] = 'ZT_Smooth'
        if "ViewAimFactor" in paramsDict:
            paramsDict['ViewBindFactor2'] = paramsDict.get("ViewAimFactor")  # a dict key can't hold two values, hence the "2" suffix
# Check the firemode supertype referenced by a weapon source file.
def extractFileFiremodeType(data):
    """Classify the firemode supertype mentioned in `data` (source text).

    Returns an int code: 0 instant, 1 projectile, 2 shotgun, 3 melee,
    4 grenade, 5 unknown (e.g. scope fires). The first matching class name
    wins, in the order below.
    """
    if 'InstantFire' in data or 'RangeAttenFire' in data or 'BallisticRailgunFire' in data:
        return 0
    if 'ProjectileFire' in data:
        return 1
    if 'ShotgunFire' in data:
        return 2
    if 'MeleeFire' in data:
        return 3
    if 'GrenadeFire' in data:
        return 4
    return 5  # unknown supertype (e.g. scope fires)
#get the params after defaultproperties, return as a | |
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import os
import pickle
from os import listdir
from os.path import isfile, join
from shapely.ops import cascaded_union
import numpy as np
from datetime import datetime,timedelta
from pytz import timezone
#import folium
from shapely.ops import cascaded_union
# Expected totals from earlier runs of this script:
#   sectors 4302, polygons 93697, null sector id 4303
# with the QUOTECUT altitude threshold applied:
#   sectors 4302, polygons 74250, null sector id 4303
NPoints = 500000  # NOTE(review): appears unused in this part of the script
QUOTECUT = 250  # minimum flight level for a trajectory point / polygon to be kept
QUOTECUT_sector = 280  # minimum flight level for assigning a point to a sector
current_path = '.'
# Simulation time window (strings later interpreted as GMT)
start_datetime = "2017-09-01 00:00:00"
end_datetime = "2017-09-01 23:59:59"
confpath = "./config/"
# Optional inputs — an empty string disables the corresponding branch below
delay_map = ""#./input/tw.txt"
file_conteggi_per_capacity = ""#"./input/conteggi_settori_nuovo_13_350.txt"
file_capacity_tw = "capacita_secondi.out"  # per-time-window sector capacities
filename_poligoni_settori = "settori_trieste_apertura_chiusura_secondi.out"  # sector polygons with opening/closing times
so6_folder = "./"
#filename_so6_input = "multidelay_"+os.getcwd().split("/")[-1].replace("v","")+".so6"#TriesteM1.so6"
#filename_so6_input = "multidelay_"+os.getcwd().split("/")[-1].replace("v","")+".so6"
filename_so6_input = "20170901_m1.so6"  # SO6 flight-trajectory input
print(filename_so6_input)
century = '20' # for 2017 etc.. use '19' for 1997 and other years
bound_file = current_path+"/config/boundary/"
temp_nvp = current_path+"/config/sectors_temp_nvp.dat"
shock_tmp = current_path+"/config/temp_nvp.dat"
capacity_file = current_path+"/config/sector_capacities.dat"
#delay_file = current_path+"/config/delay.dat"
# Bounding box in degrees — NOTE(review): appears unused in the visible code
lat_max = 82.0
lat_min = 19.0
lon_max = 46.625
lon_min = -30.0
# Make sure the config output directories exist
if not os.path.exists(confpath):
    os.makedirs(confpath)
if not os.path.exists(confpath+"boundary/"):
    os.makedirs(confpath+"boundary/")
# Optional per-flight delay map: one "<flight_id> <delay>" pair per line.
ritardi = dict()
if delay_map!="":
    with open(delay_map) as fin:
        for line in fin:
            ll = line.replace("\n","").replace("\t","").split(" ")
            fid = int(ll[0])
            delay = int(ll[1])
            if not fid in ritardi:
                ritardi[fid]=delay  # keep only the first delay per flight id
npoligons = 0
# capacity[sector] is either a plain int (counts file) or a nested dict
# {"capacity": {(start, stop): cap}, "bounds": {...}} (time-window file).
capacity = dict()
if file_conteggi_per_capacity!='':
    # Capacity from traffic counts: keep the maximum count seen per sector.
    with open(file_conteggi_per_capacity) as fin:
        for line in fin:
            ll = line.replace("\n","").split(" ")
            sector = ll[2]
            cap = int(ll[3])
            if not sector in capacity:
                capacity[sector] = cap
            else:
                if cap> capacity[sector]:
                    capacity[sector] = cap
else:
    if file_capacity_tw!='':
        # Capacity per (start, stop) time window, tab-separated:
        # sector, n_polygons, start_offset_s, stop_offset_s, capacity
        with open(file_capacity_tw) as fin:
            for line in fin:
                if line[0]=='#':
                    continue  # skip header/comment lines
                ll = line.replace("\n","").split("\t")
                sector = ll[0]
                npoligons += int(ll[1])
                start = int(ll[2])
                #start_datetime = "2017-09-01 00:00:00"
                # Convert seconds-from-start_datetime offsets to GMT epoch timestamps
                # (the int offset value read above is immediately overwritten).
                start = int(datetime.timestamp((datetime.strptime(start_datetime, '%Y-%m-%d %H:%M:%S') + timedelta(seconds=int(ll[2]))).replace(tzinfo=timezone('GMT'))))
                stop = int(ll[3])
                stop = int(datetime.timestamp((datetime.strptime(start_datetime, '%Y-%m-%d %H:%M:%S') + timedelta(seconds=int(ll[3]))).replace(tzinfo=timezone('GMT'))))
                cap = int(ll[4])
                if not sector in capacity:
                    capacity[sector] = {"capacity":dict(),"bounds":dict()}
                if not (start,stop) in capacity[sector]['capacity']:
                    capacity[sector]['capacity'][(start,stop)] = cap
    else:
        # No capacity source available — abort ("the capacity file is missing").
        print("Manca il file con le capacità")
        exit(0)
# Read the sector polygons, attach them to `capacity`, and union everything
# into `ecac`, the overall area of interest.
ecac = None
with open(filename_poligoni_settori) as fin:
    for line in fin:
        if line[0]=='#':
            continue
        # Tab-separated: sector, "start, stop" offsets, sub-id, low FL, high FL, WKT polygon
        ll = line.replace("\n","").split("\t")
        #print(ll)
        sector = ll[0]
        #[start,stop] = [int(x) for x in ll[1].split(", ")]
        #print(ll[1])
        # Opening/closing offsets (seconds from start_datetime) -> GMT epoch timestamps
        [start, stop] = [int(datetime.timestamp((datetime.strptime(start_datetime, '%Y-%m-%d %H:%M:%S') + timedelta(seconds=int(x))).replace(tzinfo=timezone('GMT')))) for x in ll[1].split(", ")]
        #print([start,stop])
        sub = ll[2]  # sub-polygon identifier within the sector
        low,high = int(ll[3]),int(ll[4])  # flight-level band of this polygon
        if high >= QUOTECUT:
            # Strip the WKT wrapper to get a flat list of coordinate tokens.
            # NOTE(review): pairs are consumed as (lat, lon) below, while WKT
            # is conventionally "lon lat" — confirm how the file was written.
            punti = ll[5].replace("POLYGON ((","").replace(")","").replace(",","").split(" ")
            if sector in capacity:
                if not (low,high) in capacity[sector]["bounds"]:
                    capacity[sector]["bounds"][(low,high)] = dict()
                if not sub in capacity[sector]["bounds"][(low,high)]:
                    capacity[sector]["bounds"][(low,high)][sub] = {"points":[],"Polygon":None}
                # Consume tokens two at a time as (lat, lon) pairs.
                i=0
                while(i<len(punti)):
                    lat = float(punti[i])
                    lon = float(punti[i+1])
                    capacity[sector]["bounds"][(low,high)][sub]["points"].append((lat,lon))
                    i += 2
                capacity[sector]["bounds"][(low,high)][sub]["Polygon"] = Polygon(capacity[sector]["bounds"][(low,high)][sub]["points"])
                # Grow the overall ECAC footprint with each polygon.
                # NOTE(review): `is None` is the idiomatic test; cascaded_union
                # is deprecated in shapely 2.x in favour of unary_union.
                if ecac==None:
                    ecac = capacity[sector]["bounds"][(low,high)][sub]["Polygon"]
                else:
                    ecac = cascaded_union([ecac,capacity[sector]["bounds"][(low,high)][sub]["Polygon"]])
            else:
                # A polygon references a sector absent from the capacity file
                # ("Error!!! check the sector files").
                print("Errore!!! controllare i file dei settori")
                exit(0)
#m = folium.Map(location=[45.5236, -122.6750])
#folium.PolyLine(ecac.exterior.coords, color="green", weight=2.5, opacity=1).add_to(m)
#folium.Circle([40.2000000000000028422,31.8230000000000003979],color="red").add_to(m)
#print(ecac.contains(Point(40.2000000000000028422,31.8230000000000003979)))
#m.save("./ecac.html")
# Dump the outer ECAC boundary, one "lat<TAB>lon" pair per line.
with open("./config/bound_latlon.dat","w") as fout:
    for p in ecac.exterior.coords:
        fout.write(str(p[0])+"\t"+str(p[1])+"\n")
# Assign a numeric id to every (sector, start, stop, capacity) combination
# that has at least one polygon, and write one boundary file per id.
mappa_settore_numero = dict()  # (sector, start, stop, cap) -> numeric id
mappa_numero_settore = dict()  # numeric id -> (sector, start, stop, cap)
s = 0
import os  # NOTE(review): duplicate import; os is already imported at the top
npoly_tot = 0
for sector in capacity:
    #print(sector)
    #print(capacity[sector])
    for (start,stop) in capacity[sector]['capacity']:
        # Count this sector's sub-polygons across all flight-level bands.
        npoly = 0
        for (l,h) in capacity[sector]["bounds"]:
            for sub in capacity[sector]["bounds"][(l,h)]:
                npoly += 1
        if npoly>0:
            cap = capacity[sector]['capacity'][(start,stop)]
            if not (sector,start,stop,cap) in mappa_settore_numero:
                s += 1
                mappa_settore_numero[(sector,start,stop,cap)] = s
                mappa_numero_settore[s] = (sector,start,stop,cap)
            # First write ("w") truncates and stores the polygon count,
            # then the file is reopened in append mode for the polygons.
            with open(confpath+"boundary/"+str(s)+"_bound_latlon.dat","w") as fout:
                fout.write(str(npoly)+"\n")
            #print("poligoni",npoly)
            with open(confpath+"boundary/"+str(s)+"_bound_latlon.dat","a") as fout:
                for (l,h) in capacity[sector]["bounds"]:
                    for sub in capacity[sector]["bounds"][(l,h)]:
                        # One line per polygon: low, high, start, stop,
                        # n_points, then tab-separated "lat,lon" pairs.
                        ppp = ""
                        for p in capacity[sector]["bounds"][(l,h)][sub]['points']:
                            ppp +=str(p[0])+","+str(p[1])+"\t"
                        ppp = ppp[:-1]+"\n"
                        fout.write(str(l)+"\t"+str(h)+"\t"+str(start)+"\t"+str(stop)+"\t"+str(len(capacity[sector]["bounds"][(l,h)][sub]['points']))+"\t"+ppp)
                        npoly_tot += 1
#print(mappa_settore_numero)
#print(mappa_numero_settore)
#input("?")
# Per-sector capacity table keyed by the numeric id.
with open(capacity_file,"w") as fout:
    fout.write("#Sector\tCapacity\n")
    for ss in sorted(mappa_numero_settore):
        #mappa_settore_numero[(sector,start,stop,cap)] = s
        fout.write(str(ss)+"\t"+str(mappa_numero_settore[ss][3])+"\n")
# Reserve one extra id as the catch-all "null" sector covering the whole ECAC.
settore_nullo = s + 1
mappa_numero_settore[settore_nullo] = ("ECAC",0,0,9999)
pickle.dump(mappa_numero_settore,open("mappa_numero_settore.pp","wb"))
pickle.dump(mappa_settore_numero,open("mappa_settore_numero.pp","wb"))
print("numero settori",s)
print("numero poligoni",npoly_tot)
print("settore nullo",settore_nullo)
# Parse the SO6 trajectory file into `voli`:
#   voli[new_fid][seq] = one trajectory point (segment endpoint).
# A fresh sequential id (new_fid) is assigned per flight and the
# original->new id mapping is written to the ".ids" side file.
voli = dict()
new_fid = -1
with open(filename_so6_input.replace(".so6",".ids"),"w") as fout:
    fout.write("#so6 abm\n")
    with open(so6_folder+filename_so6_input) as f:
        old_fid = None
        for row in f:
            # SO6 segment record: space-separated fields, fixed positions.
            campi = row.strip("\n").strip("\r").split(" ")
            #segment = campi[0]
            segorigin = campi[0].split("_")[0]
            segdest = campi[0].split("_")[1]
            origin = campi[1]
            dest = campi[2]
            aircraft = campi[3]
            begintime = campi[4]
            endtime = campi[5]
            flbegin = float(campi[6])
            flend = float(campi[7])
            status = campi[8]
            callsign = campi[9]
            datestart = campi[10]
            datestop = campi[11]
            # SO6 coordinates are in minutes; convert to degrees rounded to 3 decimals.
            latitudebegin = "%.3f" % (float(campi[12])/60.)
            latitudebegin = float(latitudebegin)
            longitudebegin = "%.3f" % (float(campi[13])/60.)
            longitudebegin = float(longitudebegin)
            latitudeend = "%.3f" % (float(campi[14])/60.)
            latitudeend = float(latitudeend)
            longitudeend = "%.3f" % (float(campi[15])/60.)
            longitudeend = float(longitudeend)
            fid = campi[16]
            seq = int(campi[17])
            seg_len = campi[18]
            parity = campi[19]
            p1 = (latitudebegin,longitudebegin)
            p2 = (latitudeend,longitudeend)
            # Rows are grouped by flight: a change of fid starts a new flight.
            if old_fid!=fid:
                new_fid += 1
                old_fid = fid
            if not new_fid in voli:
                voli[new_fid] = {"fid":fid}
                fout.write(str(fid)+" "+str(new_fid)+"\n")
            if not seq in voli[new_fid]:
                if seq==1:
                    # The first segment also contributes its begin point as seq 0.
                    #{"Origin":segorigin,"Aircraft":aircraft,
                    voli[new_fid][0] = {"Time":begintime,"Date":datestart,"latitude":float(latitudebegin),"longitude":float(longitudebegin),"quota":flbegin}
                voli[new_fid][seq] = {"Time":endtime,"Date":datestop,"latitude":float(latitudeend),"longitude":float(longitudeend),"quota":flend}
# "<n> flights loaded!"
print(len(voli),"voli caricati!")
#with open(confpath+"bound_latlon.dat","w") as fout:
#    for point in ecac:
#        fout.write(str(point[0])+"\t"+str(point[1])+"\n")
# Write the ABM input (".abm"): one line per flight with its filtered
# trajectory points, each annotated with the numeric sector id.
usati = dict()        # flight ids actually written to the output
punti_usati = dict()  # distinct (lat, lon) points seen across all flights
Nflight = 0
print("so6")
with open(filename_so6_input+".abm","w") as fout:
    lines = ''
    for fid in voli:
        del voli[fid]['fid']
        sequences = list(sorted(voli[fid].keys()))
        line = ''
        pp = 0              # number of points kept for this flight
        lat_lon = dict()    # per-flight duplicate-point guard
        for seq in sorted(sequences):
            # Keep only points above the altitude threshold.
            if voli[fid][seq]["quota"]>QUOTECUT:
                #print(voli[fid][seq])
                #check for points inside ecac area (segment level)
                # A point outside ECAC is kept only if an adjacent point
                # (next, else previous) is inside, so border segments survive.
                if not ecac.contains(Point(voli[fid][seq]['latitude'],voli[fid][seq]['longitude'])):
                    if seq+1 in sequences:
                        if not ecac.contains(Point(voli[fid][seq+1]['latitude'],voli[fid][seq+1]['longitude'])):
                            continue
                    else:
                        if seq-1 in sequences:
                            if not ecac.contains(Point(voli[fid][seq-1]['latitude'],voli[fid][seq-1]['longitude'])):
                                continue
                        else:
                            continue
                # here the segment is inside ecac
                # check for duplicate points on the route
                if (voli[fid][seq]['latitude'],voli[fid][seq]['longitude']) in lat_lon:
                    continue
                lat_lon[(voli[fid][seq]['latitude'],voli[fid][seq]['longitude'])] = None
                # updating "punti usati" list to avoid using duplicates point on random navps
                if not (voli[fid][seq]['latitude'],voli[fid][seq]['longitude']) in punti_usati:
                    punti_usati[(voli[fid][seq]['latitude'],voli[fid][seq]['longitude'])] = None
                #Add line to input: "lat,lon,quota,YYYY-MM-DD HH:MM:SS,sector<TAB>"
                pp +=1
                line += str(voli[fid][seq]['latitude'])+","+str(voli[fid][seq]['longitude'])
                line += ","+str(float(voli[fid][seq]['quota']))+","
                line += century+voli[fid][seq]['Date'][:2]+"-"+voli[fid][seq]['Date'][2:4]+"-"+voli[fid][seq]['Date'][4:]
                line += " "+voli[fid][seq]['Time'][:2]+":"+voli[fid][seq]["Time"][2:4]+":"+voli[fid][seq]["Time"][4:]
                point = Point(float(voli[fid][seq]['latitude']),float(voli[fid][seq]['longitude']))
                quota = voli[fid][seq]['quota']
                st = settore_nullo
                #print(voli[fid][seq]["Date"],voli[fid][seq]["Time"])
                # Points outside the simulated day get sector 0.
                if not voli[fid][seq]["Date"][4:] == start_datetime[8:10]:
                    st = 0
                else:
                    if ecac.contains(point):
                        #secondi = int(voli[fid][seq]["Time"][:2])*3600+int(voli[fid][seq]["Time"][2:4])*60+int(voli[fid][seq]["Time"][4:])
                        secondi = int(datetime.timestamp(datetime.strptime(voli[fid][seq]["Date"]+" "+voli[fid][seq]["Time"],"%y%m%d %H%M%S").replace(tzinfo=timezone('GMT'))))
                        if voli[fid][seq]['quota']>=QUOTECUT_sector:
                            # Find the first sector whose time window, flight-level
                            # band and polygon all contain this point; the
                            # sentinel checks break out once a match is found.
                            for sector in capacity:
                                if st != settore_nullo:
                                    break
                                for (start,stop) in capacity[sector]['capacity']:
                                    if st != settore_nullo:
                                        break
                                    if start <= secondi and secondi <stop:
                                        for (l,h) in capacity[sector]['bounds']:
                                            if st !=settore_nullo:
                                                break
                                            if l <=quota and quota < h:
                                                for sub in capacity[sector]['bounds'][(l,h)]:
                                                    if capacity[sector]['bounds'][(l,h)][sub]["Polygon"].contains(point):
                                                        st = mappa_settore_numero[sector,start,stop,capacity[sector]['capacity'][(start,stop)]]
                                                        #print(st)
                                                        break
                        else:
                            st=0  # below the sector-assignment threshold
                    else:
                        st = 0  # inside the day but outside ECAC
                line += ","+str(st)+"\t"
                #print(line)
        #print(line)
        #input("?")
        line = str(fid)+"\t"+str(pp)+"\t"+line+"\n"
        #print(line)
        # Keep only flights with at least two usable points.
        if (pp>1):
            #fout.write(line)
            Nflight += 1
            lines += line
            if not fid in usati:
                usati[fid] = None
            #new_fid += 1
    # Header first (total flight count), then all the buffered flight lines.
    fout.write(str(Nflight)+"\tNflight\n")
    fout.write(lines)
#pickle.dump(voli,open("voli_nuovo.pp","wb"))
# --- ABM configuration parameters (each is documented again when written
# --- to config.cfg further below) ---
nsim = 50  # number of simulations performed by the ABM
max_ang = 0.2745  # maximum rerouting deviation angle (rad)
extr_ang = 0.4745  # extreme deviation angle (rad)
direct_thr = 0.21275862069  # probability of granting a direct
x_capacity = 0.672413793103  # multiplicative factor for capacity
rer_active = 1  # rerouting module on/off (boolean)
ls=1200  # minimum improvement of a direct (meters)
as_=1.  # sensitivity angle for a direct (deg); trailing "_" avoids the `as` keyword
max_t = 1200  # maximum reroute time for a direct
xdelay =0  # maximum departure delay (sec)
pdelay = 0  # fraction of flights receiving xdelay
use_delay = 0  # use external delay file: 1 yes, 0 no
t_w = 45  # elementary time increments per time-step
t_d = 90  # elementary time increments for a direct
t_i = 10  # size of a time increment (sec)
t_r = 0.4  # fraction of t_w after which the algorithm is updated
shortest_path = 1  # 1 shortest path, 0 minimum deviation (rerouting)
d_thr = 10000  # safety-distance threshold between aircraft (meters)
noise_d_thr = 10000  # safety-event threshold at 15m (meters)
geom = 1  # 1 Peter-Gall projection, 2 spheric geometry
sig_V = 0  # width of velocity-noise distribution, must be in (-1, 1)
laplacian_vel = 0  # laplacian velocity variation on/off
Nm_shock = 0  # average shocks per time-step per flight level (unstable)
radius = 18500  # shock radius (meters) (unstable)
shock_f_lvl_min = 240  # minimum flight level for shocks (unstable)
shock_f_lvl_max = 300  # maximum flight level for shocks (unstable)
lifetime = 3  # average shock lifetime (t_w*t_r*t_i units) (unstable)
tmp_from_file = 1  # 1 = read temporary navpoints from disk (should stay 1)
# Resolve a relative working directory if it was left empty.
if current_path == '':
    current_path = os.getcwd()
def old_delay():
    """Write the external per-flight delay file consumed by the ABM.

    Emits one "<flight_id>\\t<delay>" line for every entry of the global
    `ritardi` map whose flight id appears in the global `usati` set
    (checking both the string and int forms of the id).

    NOTE(review): `delay_file` is commented out near the top of the script,
    so calling this function as-is raises NameError — re-enable that
    definition before use.
    """
    print("flight id usati",len(usati))
    with open(delay_file,"w") as fout:
        fout.write("#FlightID\tDelay\n")
        for fid in ritardi:
            if str(fid) in usati:
                # BUG fix: delays are ints — str() them before concatenating
                # (the old code concatenated an int, raising TypeError, and
                # also looked the delay up under a stringified key that the
                # int-keyed `ritardi` map cannot contain).
                fout.write(str(fid)+"\t"+str(ritardi[fid])+"\n")
                continue
            if int(fid) in usati:
                fout.write(str(fid)+"\t"+str(ritardi[int(fid)])+"\n")
with open("./config/config.cfg","w") as fout:
fout.write("# Configuration File. Attention each value has to be followed by the sharp with the label of variable\n\n")
fout.write("# Number of simulation performed by the ABM\n")
fout.write(str(nsim)+"\t#nsim\n\n")
fout.write("# Maximum Angle of deviation from original trajectory in rerouting (rad)\n")
fout.write("# and Extreame angle for deviation (rad)\n")
fout.write(str(max_ang)+"\t#max_ang\n")
fout.write(str(extr_ang)+"\t#extr_ang\n\n")
fout.write("# Percentage of possibility to have a direct\n")
fout.write(str(direct_thr)+"\t#direct_thr\n\n")
fout.write("#A moltiplicative factor for capacity\n")
fout.write(str(x_capacity)+"\t#x_capacity\n\n")
fout.write("#To Activate the rerouting module (Boolean)\n")
fout.write(str(rer_active)+"\t#rer_active\n\n")
fout.write("# Minimum Improvement of a direct (meters)\n")
fout.write(str(ls)+"\t#ls\n\n")
fout.write("# Sensitivity Angle for direct (deg)\n")
fout.write(str(as_)+"\t#as\n\n")
fout.write("# Maximum reroute time for direct\n")
fout.write(str(max_t)+"\t#max_t\n\n")
fout.write("# Maximum amount of delay on departure (sec)\n")
fout.write(str(xdelay)+"\t#xdelay\n\n")
fout.write("# Percentage of flight with xdelay\n")
fout.write(str(pdelay)+"\t#pdelay\n\n")
fout.write("# Use external delay file: 1 Yes, 0 No\n")
fout.write(str(use_delay)+"\t#use_delay\n\n")
fout.write("# Number of elementary time increment in a time-step\n")
fout.write(str(t_w)+"\t#t_w\n\n")
fout.write("# Number of elementary time increment for direct\n")
fout.write(str(t_d)+"\t#t_d\n\n")
fout.write("# Size of a time incremet (sec)\n")
fout.write(str(t_i)+"\t#t_i\n\n")
fout.write("# Fraction of t_w after which the alghorithm is updated\n")
fout.write(str(t_r)+"\t#t_r\n\n")
fout.write("#Boolean 1) shortest path 0) minimum deviation (rerouting)\n")
fout.write(str(shortest_path)+"\t#shortest_path\n\n")
fout.write("#threshold value of the safety distance between aircraft (meters)\n")
fout.write(str(d_thr)+"\t#d_thr\n\n")
fout.write("#threshold value of the safety event at 15m (meters)\n")
fout.write(str(noise_d_thr)+"\t#noise_d_thr\n\n")
fout.write("#Boolean 1) Peter-Gall projection 2) Spheric Geometry\n")
fout.write(str(geom)+"\t#geom\n\n")
fout.write("#Width of distribution of noise on velocity. Needs to be between -1 and 1 (not included).\n")
fout.write(str(sig_V)+"\t#sig_V\n\n")
fout.write("# Boolean to have a laplacian variation of velocity\n")
fout.write(str(laplacian_vel)+"\t#laplacian_vel\n\n")
fout.write("# Average number of shock per time-step per flight level; (Unstable)\n")
fout.write(str(Nm_shock)+"\t#Nm_shock\n\n")
fout.write("# Radius of the shock (meters); (Unstable)\n")
fout.write(str(radius)+"\t#radius\n\n")
fout.write("# Minimum and Maximum flight level for shocks; (Unstable)\n")
fout.write(str(shock_f_lvl_min)+"\t#shock_f_lvl_min\n")
fout.write(str(shock_f_lvl_max)+"\t#shock_f_lvl_max\n\n")
fout.write("# Average lifetime of a shock ( t_w*t_r*t_i unity ); (Unstable)\n")
fout.write(str(lifetime)+"\t#lifetime\n\n")
fout.write("# Boolean. If 1, new temporary navpoints are read from the disk. Otherwise they are generated. Remark: should always be set to 1! TODO: remove this.\n")
fout.write(str(tmp_from_file)+"\t#tmp_from_file\n\n")
fout.write("# Stating and Ending Datetime of the Simulation Year-Mounth-Day Hour:minute:second\n")
fout.write(start_datetime+"\t#start_datetime\n")
fout.write(end_datetime+"\t#end_datetime\n")
fout.write("# Directories | |
x.str.lower())
A 1
b 2
C 3
d 4
dtype: int64
"""
return super().sort_index(
axis,
level,
ascending,
inplace,
kind,
na_position,
sort_remaining,
ignore_index,
key,
)
def argsort(self, axis=0, kind="quicksort", order=None) -> Series:
"""
Return the integer indices that would sort the Series values.
Override ndarray.argsort. Argsorts the value, omitting NA/null values,
and places the result in the same locations as the non-NA values.
Parameters
----------
axis : {0 or "index"}
Has no effect but is accepted for compatibility with numpy.
kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See :func:`numpy.sort` for more
information. 'mergesort' and 'stable' are the only stable algorithms.
order : None
Has no effect but is accepted for compatibility with numpy.
Returns
-------
Series[np.intp]
Positions of values within the sort order with -1 indicating
nan values.
See Also
--------
numpy.ndarray.argsort : Returns the indices that would sort this array.
"""
values = self._values
mask = isna(values)
if mask.any():
result = Series(-1, index=self.index, name=self.name, dtype="int64")
notmask = ~mask
result[notmask] = np.argsort(values[notmask], kind=kind)
return self._constructor(result, index=self.index).__finalize__(
self, method="argsort"
)
else:
return self._constructor(
np.argsort(values, kind=kind), index=self.index, dtype="int64"
).__finalize__(self, method="argsort")
def nlargest(self, n=5, keep="first") -> Series:
"""
Return the largest `n` elements.
Parameters
----------
n : int, default 5
Return this many descending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Malta": 434000, "Maldives": 434000,
... "Brunei": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Malta 434000
Maldives 434000
Brunei 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3``. Default `keep` value is 'first'
so Malta will be kept.
>>> s.nlargest(3)
France 65000000
Italy 59000000
Malta 434000
dtype: int64
The `n` largest elements where ``n=3`` and keeping the last duplicates.
Brunei will be kept since it is the last with value 434000 based on
the index order.
>>> s.nlargest(3, keep='last')
France 65000000
Italy 59000000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has five elements due to the three duplicates.
>>> s.nlargest(3, keep='all')
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
def nsmallest(self, n: int = 5, keep: str = "first") -> Series:
"""
Return the smallest `n` elements.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
See Also
--------
Series.nlargest: Get the `n` largest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Brunei": 434000, "Malta": 434000,
... "Maldives": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Brunei 434000
Malta 434000
Maldives 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
The `n` smallest elements where ``n=5`` by default.
>>> s.nsmallest()
Montserrat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
Iceland 337000
dtype: int64
The `n` smallest elements where ``n=3``. Default `keep` value is
'first' so Nauru and Tuvalu will be kept.
>>> s.nsmallest(3)
Montserrat 5200
Nauru 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` and keeping the last
duplicates. Anguilla and Tuvalu will be kept since they are the last
with value 11300 based on the index order.
>>> s.nsmallest(3, keep='last')
Montserrat 5200
Anguilla 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has four elements due to the three duplicates.
>>> s.nsmallest(3, keep='all')
Montserrat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
dtype: int64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
def swaplevel(self, i=-2, j=-1, copy=True) -> Series:
    """
    Swap levels i and j in a :class:`MultiIndex`.

    Default is to swap the two innermost levels of the index.

    Parameters
    ----------
    i, j : int, str
        Level of the indices to be swapped. Can pass level name as string.
    copy : bool, default True
        Whether to copy underlying data.

    Returns
    -------
    Series
        Series with levels swapped in MultiIndex.
    """
    assert isinstance(self.index, MultiIndex)
    swapped_index = self.index.swaplevel(i, j)
    swapped = self._constructor(self._values, index=swapped_index, copy=copy)
    # Propagate metadata (e.g. name/attrs) from the source object.
    return swapped.__finalize__(self, method="swaplevel")
def reorder_levels(self, order) -> Series:
"""
Rearrange index levels using input order.
May not drop or duplicate levels.
Parameters
----------
order : list of int representing new level order
Reference level by number or key.
Returns
-------
type of caller (new object)
"""
if not isinstance(self.index, MultiIndex): # pragma: no cover
raise Exception("Can only reorder levels on a hierarchical axis.")
result = self.copy()
assert isinstance(result.index, MultiIndex)
result.index = result.index.reorder_levels(order)
return result
def explode(self, ignore_index: bool = False) -> Series:
    """
    Transform each element of a list-like to a row.

    .. versionadded:: 0.25.0

    Parameters
    ----------
    ignore_index : bool, default False
        If True, the resulting index will be labeled 0, 1, …, n - 1.

        .. versionadded:: 1.1.0

    Returns
    -------
    Series
        Exploded lists to rows; index will be duplicated for these rows.

    See Also
    --------
    Series.str.split : Split string values on specified separator.
    Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
        to produce DataFrame.
    DataFrame.melt : Unpivot a DataFrame from wide format to long format.
    DataFrame.explode : Explode a DataFrame from list-like
        columns to long format.

    Notes
    -----
    This routine will explode list-likes including lists, tuples, sets,
    Series, and np.ndarray. The result dtype of the subset rows will
    be object. Scalars will be returned unchanged, and empty list-likes will
    result in a np.nan for that row. In addition, the ordering of elements in
    the output will be non-deterministic when exploding sets.

    Examples
    --------
    >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]])
    >>> s.explode()
    0      1
    0      2
    0      3
    1    foo
    2    NaN
    3      3
    3      4
    dtype: object
    """
    # Fast path: nothing to explode for empty or non-object Series.
    if not len(self) or not is_object_dtype(self):
        out = self.copy()
        if ignore_index:
            out = out.reset_index(drop=True)
        return out

    exploded_values, repeat_counts = reshape.explode(np.asarray(self._values))

    if ignore_index:
        new_index = ibase.default_index(len(exploded_values))
    else:
        # Each original label is repeated once per exploded element.
        new_index = self.index.repeat(repeat_counts)

    return self._constructor(exploded_values, index=new_index, name=self.name)
def unstack(self, level=-1, fill_value=None) -> DataFrame:
"""
Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.
Parameters
----------
level : int, str, or list of these, default last level
Level(s) to unstack, can pass level name.
fill_value : scalar value, default None
Value to use when replacing NaN | |
# <gh_stars>1-10  (repo-scraper artifact; converted to a comment so the module parses)
from __future__ import division, print_function, absolute_import
### THIS SCRIPT IS BASED ON PyTran, WHICH IS PART OF THE COURSEWARE IN
### Pierrehumbert, 2010, Principles of Planetary Climate
###
### MODIFIED BY dkoll
#--------------------------------------------------------
#Description:
# [...]
#
#Note that a limitation of PyTran is that it uses a cutoff
#Lorentz line shape to synthesize absorption from line data.
#This is mainly to keep things simple and easy to understand,
#and it covers most cases of interest adequately well. However,
#a "professional strength" code should use the Voigt line shape
#instead, to allow for the dominance of Doppler broadening near
#line centers when pressure is low. In addition, to get the line
#overlap right in high pressure situations (including Early Mars)
#one ought to consider a more careful treatment of the non-Lorentz
#far tails of collisionally broadened lines. The student who wishes
#to explore these effects (the latter of which is leading-edge research)
#will find it easy to modify the code to accomodate different assumptions
#on line shape.
#
#As currently written, Pytran loads in the lines for the dominant
#isotopologue for each molecule (based on abundance in Earth's
#atmosphere). If you want to modify the code to look at minor
#isotopologues, it is important to note that the HITRAN database
#downweights the line strengths for each isotopologue according
#to relative abundance in Earth's atmosphere.
#--------------------------------------------------------
#
#Change Log
# 3/13/2012: Corrected algebraic prefactor in temperature
# scaling of line strength, and put in a more general
# line-dependent scaling formula for the exponential factor
# In this release, a generic power-law dependence for the
# partition function Q(T) is used, but in the next release
# I will implement the exact Q(T) for selected molecules,
# based on routines provided as part of the HITRAN distribution.
#
# 2/22/2018: DKOLL - adapt PyTran to a newer database, like HITRAN2016
#
# Dec 2021: DKOLL - clean up old functions;
# replace math with numpy;
# include Voigt line shape
# a) based on canned routine/scipy Faddeeva fn: https://scipython.com/book/chapter-8-scipy/examples/the-voigt-profile/
# -> this implementation is 3-4x slower than the Lorentz approx!
#
# NOTES:
# - potential alternatives for voigt implementation, https://atmos.eoc.dlr.de/tools/lbl4IR/
# - The 'relative' line cutoff option causes issues at low pressures!!
# First is physical: kappa in line center blows up as p->0. Not a bug, in that it's consistent with Lorentz line approx.
# Second is numerical: see "nsum = int(numWidths*gam/dWave)"
# the line gets narrower & narrower, until it falls below the numerical wave spacing, numWidths*gam < dWave,
# at which point numpy.arange(i1-iw,i2-iw)=numpy.arange(iw,iw) produces an empty array.
# So first kappa gets large, but once p-broadened linewidths drop below grid spacing kappa=0.
# - The 'absolute' line cutoff just blows up as p->0, for lorentz lines...
#---------------------------------------------------------
#import string,math
import numpy as np
from .ClimateUtilities import *
from . import phys
import os
from scipy.special import wofz # DKOLL -- for voigt profile: accurate but slower than lorentz
# Locations of the bundled HITRAN datasets, resolved relative to this file
# (two directory levels above this module).
_package_root = '/'.join(os.path.abspath(__file__).split('/')[:-2])
datapath = _package_root + '/DATA/HITRAN_DATA/'
# By-molecule HITRAN2016 line lists (thermal IR only, main isotopes only)
hitranPath = datapath + 'HITRAN2016/ThermalOnly_0-5000cm.MainIsotopesOnly/'
#------------Constants and file data------------
#
#Hitran field locations
# Fixed-width field layout of a HITRAN record: fieldLengths[k] is the width
# of field k, fieldStart[k] its starting column (cumulative sum of widths).
fieldLengths = [2,1,12,10,10,5,5,10,4,8,15,15,15,15,6,12,1,7,7]
fieldStart = [0]
for width in fieldLengths:
    fieldStart.append(fieldStart[-1] + width)
Sum = fieldStart[-1]  # total record width; name kept for backwards compatibility
# Indices of the fields used below (see HITRAN format documentation)
iso = 1
waveNum = 2
lineStrength = 3
airWidth = 5
selfWidth = 6
Elow = 7
TExp = 8
#
#
#Total internal partition functions (or generic approximations).
#These are used in the temperature scaling of line strength.
#The generic partition functions are OK up to about 500K
#(somewhat less for CO2)
def QGenericLin(T):
    """Generic partition function approximation for linear molecules (e.g. CO2): Q ~ T."""
    return T

def QGenericNonLin(T):
    """Generic partition function approximation for nonlinear molecules (e.g. H2O): Q ~ T**1.5."""
    return T**1.5

#**ToDo: Provide actual partition functions for CO2, H2O and CH4
#Molecule numbers and molecular weights
#Add more entries here if you want to do other
#molecules in the HITRAN database. These entries are
#for the major isotopologues, but by using different
#molecule numbers you can do other isotopologues.
#The dictionary below maps the molecule name to the HITRAN
#molecule number (see documentation) and corresponding molecular
#weight.
#
#**ToDo:*Correct this to allow for minor isotopomers.
#       *Improve structure of the molecule dictionary,
#        e.g. use objects instead of arrays, allow for isotopologues
#       *Add in entries for the rest of the molecules
# Each entry is [HITRAN molecule number, molecular weight, partition fn]
molecules = {
    'H2O':  [1, 18., QGenericNonLin],
    'CO2':  [2, 44., QGenericLin],
    'O3':   [3, 48., QGenericNonLin],
    'N2O':  [4, 44., QGenericLin],
    'CH4':  [6, 16., QGenericNonLin],
    'NH3':  [11, 17., QGenericNonLin],   # linear structure?
    'HCN':  [23, 27., QGenericNonLin],
    'C2H6': [27, 30., QGenericNonLin],
    'SF6':  [30, 146., QGenericNonLin],  # careful: old file!
}
#-----------------------------------------------
# DKOLL: line shape functions
# based on https://scipython.com/book/chapter-8-scipy/examples/the-voigt-profile/
""" Return Gaussian line shape at x with HWHM alpha """
def lineshape_G(x, alpha):
return np.sqrt(np.log(2) / np.pi)/alpha * np.exp(-(x/alpha)**2 * np.log(2))
""" Return Lorentzian line shape at x with HWHM gamma """
def lineshape_L(x, gamma):
return gamma / (np.pi* (x**2 + gamma**2))
"""
Return the Voigt line shape at x with Lorentzian component HWHM gamma
and Gaussian component HWHM alpha. """
def lineshape_V(x, alpha, gamma):
sigma = alpha / np.sqrt(2 * np.log(2))
return np.real(wofz((x + 1j*gamma)/(sigma*np.sqrt(2)))) / (sigma*np.sqrt(2*np.pi))
#-----------------------------------------------
#Gets the fieldNum'th data item from a Hitran2004 record
def get(line, fieldNum):
    """Slice the fieldNum'th fixed-width field out of a HITRAN record string."""
    start = fieldStart[fieldNum]
    return line[start:start + fieldLengths[fieldNum]]
#Computes the absorption spectrum on a wave grid, by summing up
#contributions from each line. numWidths is the number
#of line widths after which the line contribution is cut off.
#Typically we use 100-1000 for Earth troposphere, but in low pressure
#(like Mars or upper strat) values as high as 10000 might be needed.
#The validity of the Lorentz shape at such large cutoffs is dubious.
#The cutoff can affect the absorption significantly in the
#water vapor or CO2 window, or "continuum" regions
def computeAbsorption(waveGrid, getGamma, p, T, dWave, numWidths=1000.):
    """Sum Voigt-line absorption from every loaded line onto waveGrid.

    Args:
        waveGrid: 1D array of wavenumbers [cm^-1] with uniform spacing dWave,
            spanning the module globals waveStart..waveEnd.
        getGamma: callable, i -> pressure-scaled halfwidth of line i [cm^-1].
        p: pressure. NOTE(review): unused here -- pressure scaling appears to
            be folded into getGamma (see gam below); confirm with callers.
        T: temperature [K].
        dWave: grid spacing [cm^-1].
        numWidths: cutoff distance from the line center, in halfwidths.

    Returns:
        Array of absorption coefficients, same length as waveGrid.

    Relies on module-level globals loaded elsewhere: waveList, sList,
    ElowList, TExpList, molName, Q, waveStart, waveEnd.
    """
    N_grid = len(waveGrid)
    # BUG FIX: the original used numpy.zeros(N_grid, numpy.Float) and
    # numpy.arange(...). This module only imports numpy as np, and
    # numpy.Float is a relic of the pre-numpy Numeric package that modern
    # numpy does not provide, so both calls fail at runtime.
    absGrid = np.zeros(N_grid)
    # Doppler HWHM per unit wavenumber: alpha = n * alpha_factor.
    # 1e-3 converts the molecular weight from g to kg.
    alpha_factor = 1./phys.c * np.sqrt(phys.N_avogadro*phys.k*T*np.log(2.)/(molecules[molName][1]*1e-3))
    # Loop-invariant: 100*h*c/k converts cm^-1 energies to Kelvin.
    hck100 = 100.*(phys.h*phys.c/phys.k)
    for i in range(len(waveList)):
        n = waveList[i]  # Wavenumber of the line
        gam = getGamma(i)*(296./T)**TExpList[i]  # getGamma includes p-scaling
        # Temperature scaling of line strength (Boltzmann factor on E_lower)
        Tfact = np.exp(-hck100*ElowList[i]*(1/T - 1/296.))
        # Stimulated-emission factor: usually close to unity for lines near
        # the Planck peak, but important on the low-frequency side.
        Tfact1 = (1. - np.exp(-hck100*n/T))/ \
                 (1. - np.exp(-hck100*n/296.))
        # Corrected strength scaling (see PyTran Errata, 6/10/2013);
        # checked against the LMD kspectrum code for the Lorentz case.
        S = sList[i]*(Q(296.)/Q(T))*Tfact*Tfact1
        # Grid index of the line center and the cutoff window around it
        iw = int(N_grid*(n - waveStart)/(waveEnd - waveStart))
        nsum = int(numWidths*gam/dWave)
        i1 = max(0, iw - nsum)
        i2 = min(N_grid - 1, iw + nsum)
        if i2 > 0:
            dn = np.arange(i1 - iw, i2 - iw)*dWave  # offsets from line center
            alpha = n*alpha_factor  # Doppler HWHM, units of [n] = cm^-1
            # Voigt line shape (3-4x slower than the Lorentz approximation);
            # renamed from 'abs', which shadowed the builtin.
            kappa = S*lineshape_V(dn, alpha, gam)
            absGrid[i1:i2] += kappa
    return absGrid
### DKOLL: add option to have a fixed cutoff.
### i.e., truncate line at N cm^-1 away from center instead of N halfwidths
### For example, MT_CKD continuum is defined as everything beyond 25cm^-1.
###
### DKOLL: also allow for option to remove the Lorenz line 'plinth',
## cf. MTCKD continuum references
def computeAbsorption_fixedCutoff(waveGrid,getGamma,p,T,dWave,numWidths=25.,remove_plinth=False):
N_grid = len(waveGrid)
absGrid = numpy.zeros(N_grid,numpy.Float)
# DKOLL ..
alpha_factor = 1./phys.c * np.sqrt(phys.N_avogadro*phys.k*T*np.log(2.)/(molecules[molName][1]*1e-3)) # [unitless]; 1e-3 from g->kg
for i in range(len(waveList)):
n = waveList[i] # Wavenumber of the line
gam = getGamma(i)*(296./T)**TExpList[i] # DKOLL: new. getGamma includes p-scaling
#Temperature scaling of line strength
Tfact = np.exp(-100.*(phys.h*phys.c/phys.k)*ElowList[i]*(1/T-1/296.))
#The following factor is usually pretty close to unity
#for lines that aren't far from the peak of the Planck spectrum
#for temperature T, but it can become important on the low frequency
#side, and is easy to incorporate.
Tfact1 = (1.- np.exp(-100.*(phys.h*phys.c/phys.k)*n/T))/ \
(1.- np.exp(-100.*(phys.h*phys.c/phys.k)*n/296.))
#The following has the incorrect algebraic prefactor used in
# the original version of PyTran (see Errata/Updates document)
#S = sList[i]*(T/296.)**TExpList[i]*Tfact
#The following is the corrected version, including also the
# low frequency factor Tfact1
#S = sList[i]*(Q(296.)/Q(T))*TExpList[i]*Tfact*Tfact1
#Preceding line didn't delete "*TExpList" factor. Results now
#checked against LMD kspectrum code, for Lorentz line case
#-->Corrected on 6/10/2013
S = sList[i]*(Q(296.)/Q(T))*Tfact*Tfact1
#
iw = int(N_grid*(n-waveStart)/(waveEnd-waveStart))
#nsum = int(numWidths*gam/dWave) # DKOLL: old
nsum = int( numWidths/dWave ) # DKOLL: new
i1 = max(0,iw-nsum)
i2 = min(N_grid-1,iw+nsum)
# DKOLL:
if (i2>0) | |
outputs["platform_elem_t"][:nelem] = elem_t
outputs["platform_elem_A"][:nelem] = elem_A
outputs["platform_elem_Asx"][:nelem] = elem_Asx
outputs["platform_elem_Asy"][:nelem] = elem_Asy
outputs["platform_elem_Ixx"][:nelem] = elem_Ixx
outputs["platform_elem_Iyy"][:nelem] = elem_Iyy
outputs["platform_elem_Izz"][:nelem] = elem_Izz
outputs["platform_elem_rho"][:nelem] = elem_rho
outputs["platform_elem_E"][:nelem] = elem_E
outputs["platform_elem_G"][:nelem] = elem_G
outputs["platform_elem_sigma_y"][:nelem] = elem_sigy
outputs["platform_elem_Px1"][:nelem] = elem_Px1
outputs["platform_elem_Px2"][:nelem] = elem_Px2
outputs["platform_elem_Py1"][:nelem] = elem_Py1
outputs["platform_elem_Py2"][:nelem] = elem_Py2
outputs["platform_elem_Pz1"][:nelem] = elem_Pz1
outputs["platform_elem_Pz2"][:nelem] = elem_Pz2
outputs["platform_elem_qdyn"][:nelem] = elem_qdyn
discrete_outputs["platform_elem_memid"] = elem_memid
outputs["platform_mass"] = mass
outputs["platform_ballast_mass"] = m_ball
outputs["platform_hull_mass"] = mass - m_ball
outputs["platform_cost"] = cost
outputs["platform_displacement"] = volume
outputs["platform_hull_center_of_mass"] = cg_plat
outputs["platform_center_of_buoyancy"] = cb_plat
outputs["platform_I_hull"] = util.unassembleI(I_hull)
outputs["platform_Awater"] = Awater
outputs["platform_Iwater"] = Iwater
outputs["platform_added_mass"] = m_added
outputs["platform_variable_capacity"] = variable_capacity
class TowerPreMember(om.ExplicitComponent):
    """Locate the tower-top node by offsetting the transition node vertically
    by the tower height."""

    def setup(self):
        self.add_input("transition_node", np.zeros(3), units="m")
        self.add_input("tower_height", 0.0, units="m")
        self.add_output("tower_top_node", np.zeros(3), units="m")

    def compute(self, inputs, outputs):
        # Add onto a fresh array instead of mutating the transition_node
        # input in place (in-place edits previously corrupted the original).
        top_node = inputs["transition_node"] + 0
        top_node[2] += float(inputs["tower_height"])
        outputs["tower_top_node"] = top_node
class PlatformTowerFrame(om.ExplicitComponent):
def initialize(self):
    # Single option: the full modeling-options dictionary; setup() reads
    # floating-member and mooring-attachment counts from it to size I/O.
    self.options.declare("options")
def setup(self):
    """Declare all inputs/outputs for combining platform and tower models.

    Fixed-size arrays are padded with the NULL sentinel; compute() finds the
    number of valid entries by locating the first NULL.
    """
    opt = self.options["options"]
    n_member = opt["floating"]["members"]["n_members"]
    n_attach = opt["mooring"]["n_attach"]

    # --- Platform (substructure) nodes, elements, and section properties ---
    self.add_input("platform_nodes", NULL * np.ones((NNODES_MAX, 3)), units="m")
    self.add_input("platform_Fnode", NULL * np.ones((NNODES_MAX, 3)), units="N")
    self.add_input("platform_Rnode", NULL * np.ones(NNODES_MAX), units="m")
    self.add_input("platform_elem_n1", NULL * np.ones(NELEM_MAX, dtype=np.int_))
    self.add_input("platform_elem_n2", NULL * np.ones(NELEM_MAX, dtype=np.int_))
    self.add_input("platform_elem_D", NULL * np.ones(NELEM_MAX), units="m")
    self.add_input("platform_elem_t", NULL * np.ones(NELEM_MAX), units="m")
    self.add_input("platform_elem_A", NULL * np.ones(NELEM_MAX), units="m**2")
    self.add_input("platform_elem_Asx", NULL * np.ones(NELEM_MAX), units="m**2")
    self.add_input("platform_elem_Asy", NULL * np.ones(NELEM_MAX), units="m**2")
    self.add_input("platform_elem_Ixx", NULL * np.ones(NELEM_MAX), units="kg*m**2")
    self.add_input("platform_elem_Iyy", NULL * np.ones(NELEM_MAX), units="kg*m**2")
    self.add_input("platform_elem_Izz", NULL * np.ones(NELEM_MAX), units="kg*m**2")
    self.add_input("platform_elem_rho", NULL * np.ones(NELEM_MAX), units="kg/m**3")
    self.add_input("platform_elem_E", NULL * np.ones(NELEM_MAX), units="Pa")
    self.add_input("platform_elem_G", NULL * np.ones(NELEM_MAX), units="Pa")
    self.add_input("platform_elem_sigma_y", NULL * np.ones(NELEM_MAX), units="Pa")
    # Distributed loads at element ends (1 = start node, 2 = end node)
    self.add_input("platform_elem_Px1", NULL * np.ones(NELEM_MAX), units="N/m")
    self.add_input("platform_elem_Px2", NULL * np.ones(NELEM_MAX), units="N/m")
    self.add_input("platform_elem_Py1", NULL * np.ones(NELEM_MAX), units="N/m")
    self.add_input("platform_elem_Py2", NULL * np.ones(NELEM_MAX), units="N/m")
    self.add_input("platform_elem_Pz1", NULL * np.ones(NELEM_MAX), units="N/m")
    self.add_input("platform_elem_Pz2", NULL * np.ones(NELEM_MAX), units="N/m")
    self.add_input("platform_elem_qdyn", NULL * np.ones(NELEM_MAX), units="Pa")
    # Platform mass/inertia summary
    self.add_input("platform_hull_center_of_mass", np.zeros(3), units="m")
    self.add_input("platform_mass", 0.0, units="kg")
    self.add_input("platform_I_hull", np.zeros(6), units="kg*m**2")
    self.add_input("platform_displacement", 0.0, units="m**3")

    # --- Tower nodes, elements, and section properties ---
    self.add_input("tower_nodes", NULL * np.ones((MEMMAX, 3)), units="m")
    self.add_output("tower_Fnode", copy_shape="tower_nodes", units="N")
    self.add_input("tower_Rnode", NULL * np.ones(MEMMAX), units="m")
    # Element connectivity/length are computed in compute()
    self.add_output("tower_elem_n1", copy_shape="tower_elem_A")
    self.add_output("tower_elem_n2", copy_shape="tower_elem_A")
    self.add_output("tower_elem_L", copy_shape="tower_elem_A", units="m")
    self.add_input("tower_elem_D", NULL * np.ones(MEMMAX), units="m")
    self.add_input("tower_elem_t", NULL * np.ones(MEMMAX), units="m")
    self.add_input("tower_elem_A", NULL * np.ones(MEMMAX), units="m**2")
    self.add_input("tower_elem_Asx", NULL * np.ones(MEMMAX), units="m**2")
    self.add_input("tower_elem_Asy", NULL * np.ones(MEMMAX), units="m**2")
    self.add_input("tower_elem_Ixx", NULL * np.ones(MEMMAX), units="kg*m**2")
    self.add_input("tower_elem_Iyy", NULL * np.ones(MEMMAX), units="kg*m**2")
    self.add_input("tower_elem_Izz", NULL * np.ones(MEMMAX), units="kg*m**2")
    self.add_input("tower_elem_rho", NULL * np.ones(MEMMAX), units="kg/m**3")
    self.add_input("tower_elem_E", NULL * np.ones(MEMMAX), units="Pa")
    self.add_input("tower_elem_G", NULL * np.ones(MEMMAX), units="Pa")
    self.add_input("tower_elem_sigma_y", NULL * np.ones(MEMMAX), units="Pa")
    # Tower loads come in per-node (Px/Py/Pz) and are split into
    # element end values (P*1/P*2) in compute()
    self.add_input("tower_elem_Px", NULL * np.ones(MEMMAX), units="N/m")
    self.add_output("tower_elem_Px1", NULL * np.ones(MEMMAX), units="N/m")
    self.add_output("tower_elem_Px2", NULL * np.ones(MEMMAX), units="N/m")
    self.add_input("tower_elem_Py", NULL * np.ones(MEMMAX), units="N/m")
    self.add_output("tower_elem_Py1", NULL * np.ones(MEMMAX), units="N/m")
    self.add_output("tower_elem_Py2", NULL * np.ones(MEMMAX), units="N/m")
    self.add_input("tower_elem_Pz", NULL * np.ones(MEMMAX), units="N/m")
    self.add_output("tower_elem_Pz1", NULL * np.ones(MEMMAX), units="N/m")
    self.add_output("tower_elem_Pz2", NULL * np.ones(MEMMAX), units="N/m")
    self.add_input("tower_elem_qdyn", NULL * np.ones(MEMMAX), units="Pa")
    self.add_input("tower_center_of_mass", np.zeros(3), units="m")
    self.add_input("tower_mass", 0.0, units="kg")

    # --- Environment, RNA, mooring, and ballast inputs ---
    self.add_input("rho_water", 0.0, units="kg/m**3")
    self.add_input("tower_top_node", np.zeros(3), units="m")
    self.add_input("transition_node", np.zeros(3), units="m")
    self.add_input("rna_mass", 0.0, units="kg")
    self.add_input("rna_cg", np.zeros(3), units="m")
    self.add_input("mooring_neutral_load", np.zeros((n_attach, 3)), units="N")
    self.add_input("platform_variable_capacity", np.zeros(n_member), units="m**3")

    # Per-member geometry and variable-ballast filling curves
    for k in range(n_member):
        self.add_input(f"member{k}:nodes_xyz", NULL * np.ones((MEMMAX, 3)), units="m")
        self.add_input(f"member{k}:variable_ballast_Vpts", val=np.zeros(10), units="m**3")
        self.add_input(f"member{k}:variable_ballast_spts", val=np.zeros(10))

    # --- Combined platform+tower ("system") frame outputs ---
    self.add_output("system_nodes", NULL * np.ones((NNODES_MAX, 3)), units="m")
    self.add_output("system_Fnode", NULL * np.ones((NNODES_MAX, 3)), units="N")
    self.add_output("system_Rnode", NULL * np.ones(NNODES_MAX), units="m")
    self.add_output("system_elem_n1", NULL * np.ones(NELEM_MAX, dtype=np.int_))
    self.add_output("system_elem_n2", NULL * np.ones(NELEM_MAX, dtype=np.int_))
    self.add_output("system_elem_L", NULL * np.ones(NELEM_MAX), units="m")
    self.add_output("system_elem_D", NULL * np.ones(NELEM_MAX), units="m")
    self.add_output("system_elem_t", NULL * np.ones(NELEM_MAX), units="m")
    self.add_output("system_elem_A", NULL * np.ones(NELEM_MAX), units="m**2")
    self.add_output("system_elem_Asx", NULL * np.ones(NELEM_MAX), units="m**2")
    self.add_output("system_elem_Asy", NULL * np.ones(NELEM_MAX), units="m**2")
    self.add_output("system_elem_Ixx", NULL * np.ones(NELEM_MAX), units="kg*m**2")
    self.add_output("system_elem_Iyy", NULL * np.ones(NELEM_MAX), units="kg*m**2")
    self.add_output("system_elem_Izz", NULL * np.ones(NELEM_MAX), units="kg*m**2")
    self.add_output("system_elem_rho", NULL * np.ones(NELEM_MAX), units="kg/m**3")
    self.add_output("system_elem_E", NULL * np.ones(NELEM_MAX), units="Pa")
    self.add_output("system_elem_G", NULL * np.ones(NELEM_MAX), units="Pa")
    self.add_output("system_elem_sigma_y", NULL * np.ones(NELEM_MAX), units="Pa")
    self.add_output("system_elem_Px1", NULL * np.ones(NELEM_MAX), units="N/m")
    self.add_output("system_elem_Px2", NULL * np.ones(NELEM_MAX), units="N/m")
    self.add_output("system_elem_Py1", NULL * np.ones(NELEM_MAX), units="N/m")
    self.add_output("system_elem_Py2", NULL * np.ones(NELEM_MAX), units="N/m")
    self.add_output("system_elem_Pz1", NULL * np.ones(NELEM_MAX), units="N/m")
    self.add_output("system_elem_Pz2", NULL * np.ones(NELEM_MAX), units="N/m")
    self.add_output("system_elem_qdyn", NULL * np.ones(NELEM_MAX), units="Pa")

    # --- Mass / ballast summary outputs ---
    self.add_output("system_structural_center_of_mass", np.zeros(3), units="m")
    self.add_output("system_structural_mass", 0.0, units="kg")
    self.add_output("system_center_of_mass", np.zeros(3), units="m")
    self.add_output("system_mass", 0.0, units="kg")
    self.add_output("variable_ballast_mass", 0.0, units="kg")
    self.add_output("variable_center_of_mass", val=np.zeros(3), units="m")
    self.add_output("constr_variable_margin", val=0.0)
    self.add_output("member_variable_volume", val=np.zeros(n_member), units="m**3")
    self.add_output("member_variable_height", val=np.zeros(n_member))
    self.add_output("platform_total_center_of_mass", np.zeros(3), units="m")
    self.add_output("platform_I_total", np.zeros(6), units="kg*m**2")
def compute(self, inputs, outputs):
# Combine nodes
node_platform = inputs["platform_nodes"]
node_tower = inputs["tower_nodes"]
nnode_platform = np.where(node_platform[:, 0] == NULL)[0][0]
nnode_tower = np.where(node_tower[:, 0] == NULL)[0][0]
nnode_system = nnode_platform + np.maximum(1, nnode_tower) - 1
nelem_platform = np.where(inputs["platform_elem_A"] == NULL)[0][0]
nelem_tower = np.where(inputs["tower_elem_A"] == NULL)[0][0]
nelem_system = nelem_platform + nelem_tower
# Combine elements indices and have tower base node point to platform transition node
outputs["tower_Fnode"] = np.zeros(node_tower.shape)
outputs["tower_elem_n1"] = NULL * np.ones(MEMMAX, dtype=np.int_)
outputs["tower_elem_n2"] = NULL * np.ones(MEMMAX, dtype=np.int_)
outputs["tower_elem_L"] = NULL * np.ones(MEMMAX)
tower_n1 = np.arange(nelem_tower, dtype=np.int_)
tower_n2 = np.arange(nelem_tower, dtype=np.int_) + 1
outputs["tower_elem_n1"][:nelem_tower] = idx1 = tower_n1.copy()
outputs["tower_elem_n2"][:nelem_tower] = idx2 = tower_n2.copy()
itrans_platform = util.closest_node(node_platform[:nnode_platform, :], inputs["transition_node"])
tower_n1 += nnode_platform - 1
tower_n2 += nnode_platform - 1
tower_n1[0] = itrans_platform
outputs["tower_elem_L"][:nelem_tower] = np.sqrt(
np.sum((node_tower[idx2, :] - node_tower[idx1, :]) ** 2, axis=1)
)
# Store all outputs
outputs["system_nodes"] = NULL * np.ones((NNODES_MAX, 3))
outputs["system_Fnode"] = NULL * np.ones((NNODES_MAX, 3))
outputs["system_Rnode"] = NULL * np.ones(NNODES_MAX)
outputs["system_elem_n1"] = NULL * np.ones(NELEM_MAX, dtype=np.int_)
outputs["system_elem_n2"] = NULL * np.ones(NELEM_MAX, dtype=np.int_)
outputs["system_elem_L"] = NULL * np.ones(NELEM_MAX)
outputs["system_nodes"][:nnode_system, :] = sysnode = np.vstack(
(node_platform[:nnode_platform, :], node_tower[1:nnode_tower, :])
)
outputs["system_Fnode"][:nnode_system, :] = np.vstack(
(inputs["platform_Fnode"][:nnode_platform, :], outputs["tower_Fnode"][1:nnode_tower, :])
)
outputs["system_Rnode"][:nnode_system] = np.r_[
inputs["platform_Rnode"][:nnode_platform], inputs["tower_Rnode"][1:nnode_tower]
]
outputs["system_elem_n1"][:nelem_system] = idx1 = np.r_[
inputs["platform_elem_n1"][:nelem_platform],
tower_n1,
]
outputs["system_elem_n2"][:nelem_system] = idx2 = np.r_[
inputs["platform_elem_n2"][:nelem_platform],
tower_n2,
]
outputs["system_elem_L"][:nelem_system] = np.sqrt(
np.sum((sysnode[np.int_(idx2), :] - sysnode[np.int_(idx1), :]) ** 2, axis=1)
)
for var in [
"elem_D",
"elem_t",
"elem_A",
"elem_Asx",
"elem_Asy",
"elem_Ixx",
"elem_Iyy",
"elem_Izz",
"elem_rho",
"elem_E",
"elem_G",
"elem_sigma_y",
"elem_qdyn",
]:
outputs["system_" + var] = NULL * np.ones(NELEM_MAX)
outputs["system_" + var][:nelem_system] = np.r_[
inputs["platform_" + var][:nelem_platform], inputs["tower_" + var][:nelem_tower]
]
# Have to divide up tower member loads to beginning and end points
for var in ["elem_Px1", "elem_Py1", "elem_Pz1", "elem_Px2", "elem_Py2", "elem_Pz2"]:
outputs["system_" + var] = NULL * np.ones(NELEM_MAX)
outputs["tower_" + var] = NULL * np.ones(MEMMAX)
tower_P = inputs["tower_" + var[:-1]]
outputs["tower_" + var][:nelem_tower] = (
tower_P[:nelem_tower] if var[-1] == "1" else tower_P[1 : (nelem_tower + 1)]
)
outputs["system_" + var][:nelem_system] = np.r_[
inputs["platform_" + var][:nelem_platform], outputs["tower_" + var][:nelem_tower]
]
# Mass summaries
m_platform = inputs["platform_mass"]
cg_platform = inputs["platform_hull_center_of_mass"]
I_platform = util.assembleI(inputs["platform_I_hull"])
m_tower = inputs["tower_mass"]
m_rna = inputs["rna_mass"]
m_sys = m_platform + m_tower + m_rna
outputs["system_structural_mass"] = m_sys
outputs["system_structural_center_of_mass"] = (
m_platform * cg_platform
+ m_tower * inputs["tower_center_of_mass"]
+ m_rna * (inputs["rna_cg"] + inputs["tower_top_node"])
) / m_sys
# Balance out variable ballast
mooringFz = inputs["mooring_neutral_load"][:, 2].sum()
capacity = inputs["platform_variable_capacity"]
capacity_sum = capacity.sum() + EPS # Avoid divide by zeros
rho_water = inputs["rho_water"]
m_variable = inputs["platform_displacement"] * rho_water - m_sys + mooringFz / gravity
V_variable = m_variable / rho_water
outputs["variable_ballast_mass"] = m_variable
outputs["constr_variable_margin"] = V_variable / capacity_sum
V_variable_member = V_variable * capacity / capacity_sum
outputs["member_variable_volume"] = V_variable_member
m_variable_member = V_variable_member * rho_water
# Now find the CG of the variable mass assigned to each member
n_member = capacity.size
outputs["member_variable_height"] = np.zeros(n_member)
cg_variable_member = np.zeros((n_member, 3))
for k in range(n_member):
if V_variable_member[k] == 0.0:
continue
xyz = inputs[f"member{k}:nodes_xyz"]
inodes = np.where(xyz[:, 0] == NULL)[0][0]
xyz = xyz[:inodes, :]
dxyz = xyz[-1, :] - xyz[0, :]
spts = inputs[f"member{k}:variable_ballast_spts"]
Vpts = inputs[f"member{k}:variable_ballast_Vpts"]
s_cg = np.interp(0.5 * V_variable_member[k], Vpts, spts)
cg_variable_member[k, :] = xyz[0, :] + s_cg * dxyz
s_end = np.interp(V_variable_member[k], Vpts, spts)
outputs["member_variable_height"][k] = s_end - spts[0]
cg_variable = np.dot(V_variable_member, cg_variable_member) / V_variable
outputs["variable_center_of_mass"] = cg_variable
# Now find total system mass
outputs["system_mass"] = m_sys + m_variable
outputs["system_center_of_mass"] = (
m_sys * outputs["system_structural_center_of_mass"] + m_variable * cg_variable
) / (m_sys + m_variable)
# Compute the total cg for the platform and the variable ballast together using a weighted sum approach
cg_plat_total = (m_variable * cg_variable + m_platform * cg_platform) / (m_variable + m_platform)
outputs["platform_total_center_of_mass"] = cg_plat_total
# Now loop again to compute variable I
unit_z = np.array([0.0, 0.0, 1.0])
I_variable = np.zeros((3, 3))
for k in range(n_member):
if V_variable_member[k] == 0.0:
continue
xyz = inputs[f"member{k}:nodes_xyz"]
inodes = np.where(xyz[:, 0] == NULL)[0][0]
xyz = xyz[:inodes, :]
vec_k = xyz[-1, :] - xyz[0, :]
ds = outputs["member_variable_height"][k]
# Compute I aligned with member
h_k = ds * np.sqrt(np.sum(vec_k ** 2))
if h_k == 0.0:
continue
r_k = | |
# <reponame>sosuperic/sketching-with-language  (repo-scraper artifact; converted to a comment so the module parses)
# segmentation.py
"""
Currently uses trained StrokesToInstruction model to segment unseen sequences.
Usage:
CUDA_VISIBLE_DEVICES=6 PYTHONPATH=. python src/models/segmentation.py -ds progressionpair
"""
import argparse
import copy
from datetime import datetime
import numpy as np
from PIL import Image
import os
from pprint import pprint
from uuid import uuid4
import spacy
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from config import SEGMENTATIONS_PATH, LABELED_PROGRESSION_PAIRS_TOKEN2IDX_PATH, \
BEST_STROKES_TO_INSTRUCTION_PATH, BEST_INSTRUCTION_TO_STROKES_PATH
from src import utils
from src.data_manager.quickdraw import final_categories, create_progression_image_from_ndjson_seq
from src.models.base.stroke_models import NdjsonStrokeDataset
from src.models.base.instruction_models import ProgressionPairDataset, map_sentence_to_index, \
DrawingsAsImagesAnnotatedDataset, prune_seg_tree
from src.models.core import experiments, nn_utils
from src.models.instruction_to_strokes import InstructionToStrokesModel
from src.models.strokes_to_instruction import HParams as s2i_default_hparams
from src.models.strokes_to_instruction import StrokesToInstructionModel, EOS_ID
##############################################################################
#
# Hyperparameters
#
##############################################################################
class HParams():
    """Hyperparameters controlling segmentation scoring and data selection."""
    def __init__(self):
        # --- Scoring configuration ---
        self.split_scorer = 'strokes_to_instruction'  # alternative: 'instruction_to_strokes'
        self.score_parent_child_text_sim = False  # similarity b/n parent text and concatenated children text
        self.score_exponentiate = 1.0  # seg1_score ** alpha * seg2_score ** alpha
        self.score_childinst_parstroke = False  # P(parent_strokes | [child_inst1, child_inst2])

        # --- Pretrained model checkpoints ---
        self.strokes_to_instruction_dir = BEST_STROKES_TO_INSTRUCTION_PATH
        self.instruction_to_strokes_dir = BEST_INSTRUCTION_TO_STROKES_PATH
        self.notes = ''

        # --- Dataset settings (for the larger ndjson dataset) ---
        self.categories = 'all'
        self.max_per_category = 2750
##############################################################################
#
# Utils
#
##############################################################################
def remove_stopwords(nlp, text):
    """Lowercase *text*, tokenize it with *nlp*, and drop spaCy stop words.

    Args:
        nlp (spacy model): tokenizer providing Defaults.stop_words
        text (str): input text

    Returns:
        str: space-joined non-stopword tokens
    """
    stops = nlp.Defaults.stop_words
    kept = (token.text for token in nlp(text.lower()) if token.text not in stops)
    return ' '.join(kept)
##############################################################################
#
# Model
#
##############################################################################
class SegmentationModel(object):
def __init__(self, hp, save_dir):
    """Load the pretrained scoring models onto the GPU.

    Args:
        hp: HParams object (segmentation hyperparameters, above)
        save_dir: str or Path; root directory segmentations are written to

    Side effects:
        Loads the StrokesToInstruction model (always), the
        InstructionToStrokes model (only if needed by the scoring config),
        and optionally a spaCy model; all torch models are moved to CUDA.
    """
    self.hp = hp
    self.save_dir = save_dir
    # Load hp used to train model
    self.s2i_hp = experiments.load_hp(copy.deepcopy(hp), hp.strokes_to_instruction_dir)
    default_s2i_hp = s2i_default_hparams()
    # For backwards compatibility:
    # hparams may have been added since model was trained; add them to s2i_hp
    for k, v in vars(default_s2i_hp).items():
        if not hasattr(self.s2i_hp, k):
            setattr(self.s2i_hp, k, v)
    self.s2i_hp.drawing_type = 'stroke' # TODO: this should be image if we switch to the images model
    self.strokes_to_instruction = StrokesToInstructionModel(self.s2i_hp, save_dir=None) # save_dir=None means inference mode
    self.strokes_to_instruction.load_model(hp.strokes_to_instruction_dir)
    self.strokes_to_instruction.cuda()
    # The instruction->strokes model is only needed for some scoring modes.
    if (hp.split_scorer == 'instruction_to_strokes') or (hp.score_childinst_parstroke):
        self.i2s_hp = experiments.load_hp(copy.deepcopy(hp), hp.instruction_to_strokes_dir)
        # TODO: should do same backwards compatibility as above
        self.instruction_to_strokes = InstructionToStrokesModel(self.i2s_hp, save_dir=None)
        self.instruction_to_strokes.load_model(hp.instruction_to_strokes_dir) # TODO: change param for load_model
        self.instruction_to_strokes.cuda()
    # spaCy is only needed for the parent/child text-similarity score.
    if hp.score_parent_child_text_sim:
        spacy.prefer_gpu()
        self.nlp = spacy.load('en_core_web_md')
    # TODO: this should be probably be contained in some model...
    self.token2idx = utils.load_file(LABELED_PROGRESSION_PAIRS_TOKEN2IDX_PATH)
def segment_all_progressionpair_data(self):
    """
    Segment all samples in the ProgressionPairDataset.

    For each split, iterates the dataset one drawing at a time, calls
    ``segment_sample`` (defined elsewhere in this class — not visible
    here) and writes ``<category>_<id>.json`` plus a placeholder
    ``<category>_<id>.jpg`` into ``self.save_dir / split``.

    Per-sample exceptions are caught and printed so one bad drawing does
    not abort the whole run (best-effort batch job).
    """
    for split in ['train', 'valid', 'test']:
        print(split)
        # Dataset/collate depend on whether the s2i model consumes raw
        # strokes or rendered images.
        if self.s2i_hp.drawing_type == 'stroke':
            self.ds = ProgressionPairDataset(split, use_full_drawings=True)
            loader = DataLoader(self.ds, batch_size=1, shuffle=False, collate_fn=ProgressionPairDataset.collate_fn)
        elif self.s2i_hp.drawing_type == 'image':
            self.ds = DrawingsAsImagesAnnotatedDataset(split, images=self.s2i_hp.images, data_aug_on_text=False)
            loader = DataLoader(self.ds, batch_size=1, shuffle=False, collate_fn=DrawingsAsImagesAnnotatedDataset.collate_fn)
        for i, sample in enumerate(loader):
            try:
                # batch_size=1, so loader.dataset.data[i] is the raw record
                # for this batch.
                id, category = loader.dataset.data[i]['id'], loader.dataset.data[i]['category']
                out_dir = self.save_dir / split
                if self.s2i_hp.drawing_type == 'image':
                    sample = loader.dataset.data[i] # contains the fp, n_segments data we need
                # save segmentations
                segmented = self.segment_sample(sample, dataset='progressionpair')
                # TODO: save sample / strokes as well so that we have all the data in one place?
                out_fp = out_dir / f'{category}_{id}.json'
                utils.save_file(segmented, out_fp)
                # save original image too for comparisons
                # TODO: image dataset doesn't have ndjson_strokes
                # ndjson_strokes = loader.dataset.data[i]['ndjson_strokes']
                # img = create_progression_image_from_ndjson_seq(ndjson_strokes)
                out_fp = out_dir / f'{category}_{id}.jpg'
                # NOTE: only touches an empty .jpg placeholder; the actual
                # image rendering above is currently commented out.
                open(out_fp, 'a').close()
                # img.save(out_fp)
            except Exception as e:
                # best-effort: log and move on to the next sample
                print(e)
                continue
def segment_all_ndjson_data(self):
    """
    Segment all samples in the NdjsonStrokeDataset.

    Writes ``<id>.json`` (segmentation tree) and ``<id>.jpg`` (rendered
    drawing) into ``self.save_dir / category``. Output is keyed by
    category only, NOT by split (see the note in the loop body).
    Already-segmented drawings (existing .json) are skipped, so the job
    is resumable. Per-sample exceptions are printed and skipped.
    """
    for split in ['train', 'valid', 'test']:
        for category in final_categories():
            # Skip if not in hparam's categories list
            if (self.hp.categories != 'all') and (category not in self.hp.categories):
                continue
            print(f'{split}: {category}')
            # ds = NdjsonStrokeDataset(category, split)
            ds = NdjsonStrokeDataset(category, split, max_per_category=self.hp.max_per_category)
            loader = DataLoader(ds, batch_size=1, shuffle=False)
            n_segd = 0  # segmented so far for this (split, category)
            for i, sample in enumerate(loader):
                try:
                    id, category = loader.dataset.data[i]['id'], loader.dataset.data[i]['category']
                    out_dir = self.save_dir / category
                    out_fp = out_dir / f'{id}.json'
                    # resumable: skip drawings segmented on a previous run
                    if os.path.exists(out_fp):
                        continue
                    # note: we are NOT saving it into separate split categories in the case that
                    # we want to train on 30 categories and then do test on 5 held out categories.
                    # (i.e. keep it flexible to splitting within categories vs. across categories, which
                    # can be specified in that Dataset)
                    # TODO: should we do the same for ProgressionPair?
                    # save segmentations
                    segmented = self.segment_sample(sample, dataset='ndjson')
                    # TODO: save sample / strokes as well so that we have all the data in one place?
                    utils.save_file(segmented, out_fp)
                    # save original image too for comparisons
                    ndjson_strokes = loader.dataset.data[i]['ndjson_strokes']
                    img = create_progression_image_from_ndjson_seq(ndjson_strokes)
                    out_fp = out_dir / f'{id}.jpg'
                    img.save(out_fp)
                    n_segd += 1
                    if n_segd == self.hp.max_per_category:
                        break
                except Exception as e:
                    # best-effort: log and move on to the next sample
                    print(e)
                    continue
def construct_batch_of_segments_from_one_sample_image(self, sample):
    """
    Build one batch containing every contiguous segment of a drawing,
    rendered as images.

    See construct_batch_of_segments_from_one_sample_stroke for more details
    (same enumeration of (left, right) penup index pairs).

    Args:
        sample (dict): one data point from DrawingAsImage...Dataset
            contains fp's and n_segments

    Returns:
        batch: [C, n_segs, H, W] cuda FloatTensor of rendered segments
        n_penups: int, number of penups (strokes) in the drawing
        seg_lens: list of 1s (dummy; lengths are meaningless for images)
        seg_idx_map: dict mapping (left_penup, right_penup) -> batch index
    """
    # The number of penups is encoded in the filename's "<start>-<end>" part.
    fn = os.path.basename(sample['post_seg_fp']) # data/quickdraw/precurrentpost/data/pig/5598031527280640/7-10.jpg
    start, end = fn.strip('.jpg').split('-')
    end = int(end)
    n_penups = end
    seg_idx = 0
    seg_idx_map = {} # maps tuple of (left_idx, right_idx) in terms of penups to seg_idx in batch
    batch = []
    # Enumerate every contiguous penup range [i, j).
    for i in range(n_penups): # i is left index
        for j in range(i+1, n_penups + 1): # j is right index
            img = self.ds._construct_rank_image(i, j, n_penups, sample)
            batch.append(img)
            seg_idx_map[(i,j)] = seg_idx
            seg_idx += 1
    seg_lens = [1 for _ in range(len(batch))] # dummy lengths (not used)
    batch = np.stack(batch) # [n_segs, C, H, W]
    batch = torch.Tensor(batch)
    batch = batch.transpose(0,1) # [C, n_segs, H, W]
    batch = nn_utils.move_to_cuda(batch)
    return batch, n_penups, seg_lens, seg_idx_map
def construct_batch_of_segments_from_one_sample_stroke(self, strokes):
    """
    Build one zero-padded batch containing every contiguous segment of a
    stroke-5 drawing, where segment boundaries are penup points.

    Args:
        strokes: [len, 5] tensor in stroke-5 format (4th column = penup flag)

    Returns:
        batch: [n_pts (seq_len), n_segs, 5] FloatTensor
        n_penups: int
        seg_lens: list of ints, length n_segs
        seg_idx_map: dict
            Maps penup_idx tuples to seg_idx
            Example with 5 penups
                {(0, 1): 0,
                 (0, 2): 1,
                 (0, 3): 2,
                 (0, 4): 3,
                 (0, 5): 4,
                 (1, 2): 5,
                 (1, 3): 6,
                 (1, 4): 7,
                 (1, 5): 8,
                 (2, 3): 9,
                 (2, 4): 10,
                 (2, 5): 11,
                 (3, 4): 12,
                 (3, 5): 13,
                 (4, 5): 14}
    """
    # get locations of segments using penup (4th point in stroke5 format)
    n_pts = strokes.size(0)
    strokes = strokes.cpu().numpy()
    pen_up = (np.where(strokes[:, 3] == 1)[0]).tolist()
    n_penups = len(pen_up)
    # number of contiguous (i, j) ranges: n + (n-1) + ... + 1
    n_segs = int(n_penups * (n_penups + 1) / 2)
    # construct tensor of segments (zero-padded along the time axis)
    batch = np.zeros((n_segs, n_pts, 5))
    seg_lens = []
    seg_idx = 0
    seg_idx_map = {} # maps tuple of (left_idx, right_idx) in terms of penups to seg_idx in batch
    pen_up = [0] + pen_up # insert dummy so index 0 marks the drawing start
    for i in range(len(pen_up) - 1): # i is left index
        for j in range(i+1, len(pen_up)): # j is right index
            start_stroke_idx = pen_up[i]
            end_stroke_idx = pen_up[j]
            # NOTE: slice is inclusive of the right penup point; successive
            # segments therefore share their boundary point — presumably
            # intentional for stroke-5 continuity (TODO confirm).
            seg = strokes[start_stroke_idx:end_stroke_idx + 1]
            seg_len = len(seg)
            batch[seg_idx, :seg_len, :] = seg
            seg_lens.append(seg_len)
            seg_idx_map[(i,j)] = seg_idx
            seg_idx += 1
    batch = torch.Tensor(batch)
    batch = batch.transpose(0,1) # [n_pts, n_segs, 5]
    batch = nn_utils.move_to_cuda(batch)
    return batch, n_penups, seg_lens, seg_idx_map
def _calc_instruction_to_strokes_score(self, batch_of_segs, seg_lens, texts, cats_idx):
    """
    P(S|I). Note that it's the prob, not the loss (NLL) returned by the model.

    Args:
        batch_of_segs: [n_pts (seq_len), n_segs, 5] CudaFloatTensor
        seg_lens: list of ints, length n_segs
        texts (list): n_segs list of strings (instructions, one per segment)
        cats_idx: list of the same int, length n_segs

    Returns:
        scores: (n_segs) np array of probabilities exp(-NLL)
    """
    # Map each instruction string to a sequence of vocabulary indices.
    text_indices_list = [map_sentence_to_index(text, self.token2idx) for text in texts]
    # Construct inputs to instruction_to_strokes model
    bsz = batch_of_segs.size(1)
    text_lens = [len(t) for t in text_indices_list]
    max_len = max(text_lens)
    # Zero-padded [max_len, bsz] index matrix, one column per segment.
    text_indices = np.zeros((max_len, bsz))
    for i, indices in enumerate(text_indices_list):
        text_indices[:len(indices), i] = indices
    text_indices = nn_utils.move_to_cuda(torch.LongTensor(text_indices))
    cats = ['' for _ in range(bsz)] # dummy
    urls = ['' for _ in range(bsz)] # dummy
    # Batch layout must match what one_forward_pass expects.
    batch = (batch_of_segs, seg_lens, texts, text_lens, text_indices, cats, cats_idx, urls)
    with torch.no_grad():
        result = self.instruction_to_strokes.one_forward_pass(batch, average_loss=False) # [n_segs]?
    scores = result['loss'].cpu().numpy().astype(np.float64) # float32 doesn't serialize to json for some reason
    scores = np.exp(-scores) # map losses (NLL) to probs
    return scores
def calculate_seg_scores(self, batch_of_segs, seg_lens, cats_idx, seg_idx_map):
"""
Calculate
Calculate the (log) probability of each segment
(To be used as a error/goodness of fit for each segment)
Args:
batch_of_segs: [n_pts (seq_len), n_segs, 5] CudaFloatTensor (n_segs is the "batch")
seg_lens: list of ints, length n_segs
cats_idx: list of the same int, length n_segs
seg_idx_map: dict
Maps penup_idx tuples to seg_idx
Returns:
scores ([n_segs] np array)
texts (list): n_segs list of strings
parchild_scores: [n_par_segs] np arrray, indexed by paridx; n_par_segs != n_segs
leftrightsegidx_to_paridx: tuple (left_seg_id, right_seg_idx) to int
paridx indexes into parchild_scores
left_seg_idx, right_seg_idx index into batch_of_segs and seg_lens
(note: seg_idx_map | |
<reponame>BruceW91/cogdl<filename>cogdl/layers/gcc_module.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.model_zoo.chem.gnn import GATLayer
from dgl.nn.pytorch import NNConv, Set2Set
from dgl.nn.pytorch.conv import GINConv
from dgl.nn.pytorch.glob import AvgPooling, MaxPooling, SumPooling
class SELayer(nn.Module):
    """Squeeze-and-excitation gating over feature channels.

    Squeezes the input to a global (mean over dim 0) descriptor, pushes it
    through a small bottleneck MLP ending in a sigmoid, and rescales the
    input channel-wise by the resulting gate.
    """

    def __init__(self, in_channels, se_channels):
        super(SELayer, self).__init__()
        self.in_channels = in_channels
        self.se_channels = se_channels
        # Bottleneck encoder/decoder producing per-channel gates in (0, 1).
        bottleneck = [
            nn.Linear(in_channels, se_channels),
            nn.ELU(),
            nn.Linear(se_channels, in_channels),
            nn.Sigmoid(),
        ]
        self.encoder_decoder = nn.Sequential(*bottleneck)

    def forward(self, x):
        """Return x rescaled by its squeeze-and-excitation gate."""
        # Squeeze: aggregate over the node/batch dimension.
        squeezed = x.mean(dim=0)
        # Excite: compute the reweighting vector and apply it.
        gate = self.encoder_decoder(squeezed)
        return x * gate
class ApplyNodeFunc(nn.Module):
    """Node-update function for GIN: MLP, then SE-layer/BatchNorm, then ReLU."""

    def __init__(self, mlp, use_selayer):
        super(ApplyNodeFunc, self).__init__()
        self.mlp = mlp
        out_dim = self.mlp.output_dim
        # Normalization step: squeeze-and-excitation when requested,
        # otherwise plain batch norm over the MLP output features.
        if use_selayer:
            self.bn = SELayer(out_dim, int(np.sqrt(out_dim)))
        else:
            self.bn = nn.BatchNorm1d(out_dim)

    def forward(self, h):
        """Apply MLP -> normalization -> ReLU to node features h."""
        return F.relu(self.bn(self.mlp(h)))
class MLP(nn.Module):
    """Multi-layer perceptron with a linear output layer.

    A single-layer MLP degenerates to one ``nn.Linear``; otherwise hidden
    layers are Linear -> norm -> ReLU, followed by a final Linear.
    """

    def __init__(self, num_layers, input_dim, hidden_dim, output_dim, use_selayer):
        """Construct the MLP layers.

        Parameters
        ----------
        num_layers: int
            The number of linear layers (must be >= 1).
        input_dim: int
            The dimensionality of input features.
        hidden_dim: int
            The dimensionality of hidden units at ALL hidden layers.
        output_dim: int
            The number of classes for prediction.
        use_selayer: bool
            Use SELayer instead of BatchNorm1d between hidden layers.
        """
        super(MLP, self).__init__()
        if num_layers < 1:
            raise ValueError("number of layers should be positive!")
        self.num_layers = num_layers
        self.output_dim = output_dim
        self.linear_or_not = num_layers == 1
        if self.linear_or_not:
            # Degenerate case: plain linear model.
            self.linear = nn.Linear(input_dim, output_dim)
            return
        # Multi-layer model: input->hidden, (num_layers-2) hidden->hidden,
        # hidden->output. Construction order matches layer order.
        self.linears = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()
        dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
        for i in range(num_layers):
            self.linears.append(nn.Linear(dims[i], dims[i + 1]))
        # One normalization per hidden layer (none after the output layer).
        for _ in range(num_layers - 1):
            self.batch_norms.append(
                SELayer(hidden_dim, int(np.sqrt(hidden_dim)))
                if use_selayer
                else nn.BatchNorm1d(hidden_dim)
            )

    def forward(self, x):
        if self.linear_or_not:
            # If linear model
            return self.linear(x)
        # Hidden layers with normalization + ReLU, then the linear head.
        h = x
        for linear, norm in zip(self.linears[:-1], self.batch_norms):
            h = F.relu(norm(linear(h)))
        return self.linears[-1](h)
class UnsupervisedGAT(nn.Module):
    """Stack of DGL GAT layers with flattened multi-head outputs.

    Each layer outputs node_hidden_dim features (num_heads heads of
    node_hidden_dim // num_heads each, concatenated). LeakyReLU is applied
    after every layer except the last.
    """

    def __init__(
        self, node_input_dim, node_hidden_dim, edge_input_dim, num_layers, num_heads
    ):
        super(UnsupervisedGAT, self).__init__()
        # Heads concatenate back to node_hidden_dim, so it must divide evenly.
        assert node_hidden_dim % num_heads == 0
        layers = []
        for i in range(num_layers):
            last = i + 1 == num_layers
            layers.append(
                GATLayer(
                    in_feats=node_input_dim if i == 0 else node_hidden_dim,
                    out_feats=node_hidden_dim // num_heads,
                    num_heads=num_heads,
                    feat_drop=0.0,
                    attn_drop=0.0,
                    alpha=0.2,
                    residual=False,
                    agg_mode="flatten",
                    activation=None if last else F.leaky_relu,
                )
            )
        self.layers = nn.ModuleList(layers)

    def forward(self, g, n_feat, e_feat):
        """Run the GAT stack; e_feat is ignored (GAT uses node features only)."""
        for layer in self.layers:
            n_feat = layer(g, n_feat)
        return n_feat
class UnsupervisedMPNN(nn.Module):
    """
    MPNN from
    `Neural Message Passing for Quantum Chemistry <https://arxiv.org/abs/1704.01212>`__

    Parameters
    ----------
    node_input_dim : int
        Dimension of input node feature, default to be 15.
    edge_input_dim : int
        Dimension of input edge feature, default to be 15.
    output_dim : int
        Dimension of prediction, default to be 12.
    node_hidden_dim : int
        Dimension of node feature in hidden layers, default to be 64.
    edge_hidden_dim : int
        Dimension of edge feature in hidden layers, default to be 128.
    num_step_message_passing : int
        Number of message passing steps, default to be 6.
    num_step_set2set : int
        Number of set2set steps
    num_layer_set2set : int
        Number of set2set layers
    """
    def __init__(
        self,
        output_dim=32,
        node_input_dim=32,
        node_hidden_dim=32,
        edge_input_dim=32,
        edge_hidden_dim=32,
        num_step_message_passing=6,
        lstm_as_gate=False,
    ):
        super(UnsupervisedMPNN, self).__init__()
        self.num_step_message_passing = num_step_message_passing
        # Input projection into the hidden node-feature space.
        self.lin0 = nn.Linear(node_input_dim, node_hidden_dim)
        # Edge network: maps each edge feature to a [hidden x hidden] matrix
        # used by NNConv to transform neighbor messages.
        edge_network = nn.Sequential(
            nn.Linear(edge_input_dim, edge_hidden_dim),
            nn.ReLU(),
            nn.Linear(edge_hidden_dim, node_hidden_dim * node_hidden_dim),
        )
        self.conv = NNConv(
            in_feats=node_hidden_dim,
            out_feats=node_hidden_dim,
            edge_func=edge_network,
            aggregator_type="sum",
        )
        # State-update gate shared across message-passing steps: LSTM or GRU.
        self.lstm_as_gate = lstm_as_gate
        if lstm_as_gate:
            self.lstm = nn.LSTM(node_hidden_dim, node_hidden_dim)
        else:
            self.gru = nn.GRU(node_hidden_dim, node_hidden_dim)
    def forward(self, g, n_feat, e_feat):
        """Run message passing and return updated node embeddings.

        Parameters
        ----------
        g : DGLGraph
            Input DGLGraph for molecule(s)
        n_feat : tensor of dtype float32 and shape (B1, D1)
            Node features. B1 for number of nodes and D1 for
            the node feature size.
        e_feat : tensor of dtype float32 and shape (B2, D2)
            Edge features. B2 for number of edges and D2 for
            the edge feature size.

        Returns
        -------
        out : (B1, node_hidden_dim) tensor of node embeddings
        """
        out = F.relu(self.lin0(n_feat)) # (B1, H1)
        # Recurrent state: treat node set as a length-1 sequence batch.
        h = out.unsqueeze(0) # (1, B1, H1)
        c = torch.zeros_like(h)  # LSTM cell state (unused for GRU)
        for i in range(self.num_step_message_passing):
            # Aggregate edge-conditioned messages, then gate the update.
            m = F.relu(self.conv(g, out, e_feat)) # (B1, H1)
            if self.lstm_as_gate:
                out, (h, c) = self.lstm(m.unsqueeze(0), (h, c))
            else:
                out, h = self.gru(m.unsqueeze(0), h)
            out = out.squeeze(0)
        return out
class UnsupervisedGIN(nn.Module):
    """GIN model (Graph Isomorphism Network) with jumping-knowledge readout."""
    def __init__(
        self,
        num_layers,
        num_mlp_layers,
        input_dim,
        hidden_dim,
        output_dim,
        final_dropout,
        learn_eps,
        graph_pooling_type,
        neighbor_pooling_type,
        use_selayer,
    ):
        """model parameters setting

        Parameters
        ---------
        num_layers: int
            The number of linear layers in the neural network
        num_mlp_layers: int
            The number of linear layers in mlps
        input_dim: int
            The dimensionality of input features
        hidden_dim: int
            The dimensionality of hidden units at ALL layers
        output_dim: int
            The number of classes for prediction
        final_dropout: float
            dropout ratio on the final linear layer
        learn_eps: boolean
            If True, learn epsilon to distinguish center nodes from neighbors
            If False, aggregate neighbors and center nodes altogether.
        neighbor_pooling_type: str
            how to aggregate neighbors (sum, mean, or max)
        graph_pooling_type: str
            how to aggregate entire nodes in a graph (sum, mean or max)
        """
        super(UnsupervisedGIN, self).__init__()
        self.num_layers = num_layers
        self.learn_eps = learn_eps
        # List of MLPs: one GIN conv + norm per layer (num_layers - 1 convs).
        self.ginlayers = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()
        for layer in range(self.num_layers - 1):
            if layer == 0:
                mlp = MLP(
                    num_mlp_layers, input_dim, hidden_dim, hidden_dim, use_selayer
                )
            else:
                mlp = MLP(
                    num_mlp_layers, hidden_dim, hidden_dim, hidden_dim, use_selayer
                )
            self.ginlayers.append(
                GINConv(
                    ApplyNodeFunc(mlp, use_selayer),
                    neighbor_pooling_type,
                    0,  # init_eps
                    self.learn_eps,
                )
            )
            self.batch_norms.append(
                SELayer(hidden_dim, int(np.sqrt(hidden_dim)))
                if use_selayer
                else nn.BatchNorm1d(hidden_dim)
            )
        # Linear function for graph poolings of output of each layer
        # which maps the output of different layers into a prediction score
        # (jumping-knowledge style: one head per layer, including the input).
        self.linears_prediction = torch.nn.ModuleList()
        for layer in range(num_layers):
            if layer == 0:
                self.linears_prediction.append(nn.Linear(input_dim, output_dim))
            else:
                self.linears_prediction.append(nn.Linear(hidden_dim, output_dim))
        self.drop = nn.Dropout(final_dropout)
        if graph_pooling_type == "sum":
            self.pool = SumPooling()
        elif graph_pooling_type == "mean":
            self.pool = AvgPooling()
        elif graph_pooling_type == "max":
            self.pool = MaxPooling()
        else:
            raise NotImplementedError
    def forward(self, g, h, efeat):
        """Return (summed per-layer prediction scores, pooled hidden layers).

        efeat is accepted for interface parity but unused by GIN.
        """
        # list of hidden representation at each layer (including input)
        hidden_rep = [h]
        for i in range(self.num_layers - 1):
            h = self.ginlayers[i](g, h)
            h = self.batch_norms[i](h)
            h = F.relu(h)
            hidden_rep.append(h)
        score_over_layer = 0
        # perform pooling over all nodes in each graph in every layer
        all_outputs = []
        for i, h in list(enumerate(hidden_rep)):
            pooled_h = self.pool(g, h)
            all_outputs.append(pooled_h)
            score_over_layer += self.drop(self.linears_prediction[i](pooled_h))
        # all_outputs[0] is the pooled raw input; only hidden layers returned.
        return score_over_layer, all_outputs[1:]
class GraphEncoder(nn.Module):
"""
MPNN from
`Neural Message Passing for Quantum Chemistry <https://arxiv.org/abs/1704.01212>`__
Parameters
----------
node_input_dim : int
Dimension of input node feature, default to be 15.
edge_input_dim : int
Dimension of input edge feature, default to be 15.
output_dim : int
Dimension of prediction, default to be 12.
node_hidden_dim : int
Dimension of node feature in hidden layers, default to be 64.
edge_hidden_dim : int
Dimension of edge feature in hidden layers, default to be 128.
num_step_message_passing : int
Number of message passing steps, default to be 6.
num_step_set2set : int
Number of set2set steps
num_layer_set2set : int
Number of set2set layers
"""
def __init__(
    self,
    positional_embedding_size=32,
    max_node_freq=8,
    max_edge_freq=8,
    max_degree=128,
    freq_embedding_size=32,
    degree_embedding_size=32,
    output_dim=32,
    node_hidden_dim=32,
    edge_hidden_dim=32,
    num_layers=6,
    num_heads=4,
    num_step_set2set=6,
    num_layer_set2set=3,
    norm=False,
    gnn_model="mpnn",
    degree_input=False,
    lstm_as_gate=False,
):
    """Build the GNN backbone (mpnn/gat/gin) plus a Set2Set readout head.

    Node input = positional embedding [+ degree embedding] + seed flag;
    edge input = frequency embedding + 1.
    NOTE(review): an unrecognized `gnn_model` leaves `self.gnn` unset and
    only fails later at forward time — confirm callers always pass one of
    'mpnn'/'gat'/'gin'.
    """
    super(GraphEncoder, self).__init__()
    # Input feature width depends on whether degree embeddings are used;
    # the +1 slot is the binary "seed" indicator appended in forward().
    if degree_input:
        node_input_dim = positional_embedding_size + degree_embedding_size + 1
    else:
        node_input_dim = positional_embedding_size + 1
    edge_input_dim = freq_embedding_size + 1
    if gnn_model == "mpnn":
        self.gnn = UnsupervisedMPNN(
            output_dim=output_dim,
            node_input_dim=node_input_dim,
            node_hidden_dim=node_hidden_dim,
            edge_input_dim=edge_input_dim,
            edge_hidden_dim=edge_hidden_dim,
            num_step_message_passing=num_layers,
            lstm_as_gate=lstm_as_gate,
        )
    elif gnn_model == "gat":
        self.gnn = UnsupervisedGAT(
            node_input_dim=node_input_dim,
            node_hidden_dim=node_hidden_dim,
            edge_input_dim=edge_input_dim,
            num_layers=num_layers,
            num_heads=num_heads,
        )
    elif gnn_model == "gin":
        self.gnn = UnsupervisedGIN(
            num_layers=num_layers,
            num_mlp_layers=2,
            input_dim=node_input_dim,
            hidden_dim=node_hidden_dim,
            output_dim=output_dim,
            final_dropout=0.5,
            learn_eps=False,
            graph_pooling_type="sum",
            neighbor_pooling_type="sum",
            use_selayer=False,
        )
    self.gnn_model = gnn_model
    self.max_node_freq = max_node_freq
    self.max_edge_freq = max_edge_freq
    self.max_degree = max_degree
    self.degree_input = degree_input
    # Degrees are clamped to max_degree before lookup (see forward()).
    if degree_input:
        self.degree_embedding = nn.Embedding(
            num_embeddings=max_degree + 1, embedding_dim=degree_embedding_size
        )
    # Graph-level readout: Set2Set then a 2-layer projection to output_dim.
    self.set2set = Set2Set(node_hidden_dim, num_step_set2set, num_layer_set2set)
    self.lin_readout = nn.Sequential(
        nn.Linear(2 * node_hidden_dim, node_hidden_dim),
        nn.ReLU(),
        nn.Linear(node_hidden_dim, output_dim),
    )
    self.norm = norm
def forward(self, g, return_all_outputs=False):
"""Predict molecule labels
Parameters
----------
g : DGLGraph
Input DGLGraph for molecule(s)
n_feat : tensor of dtype float32 and shape (B1, D1)
Node features. B1 for number of nodes and D1 for
the node feature size.
e_feat : tensor of dtype float32 and shape (B2, D2)
Edge features. B2 for number of edges and D2 for
the edge feature size.
Returns
-------
res : Predicted labels
"""
if self.degree_input:
device = g.ndata["seed"].device
degrees = g.in_degrees()
if device != torch.device("cpu"):
degrees = degrees.cuda(device)
n_feat = torch.cat(
(
g.ndata["pos_undirected"],
self.degree_embedding(degrees.clamp(0, self.max_degree)),
g.ndata["seed"].unsqueeze(1).float(),
),
dim=-1,
)
else:
n_feat = torch.cat(
(g.ndata["pos_undirected"], g.ndata["seed"].unsqueeze(1).float()),
| |
if value in visited_sheets:
final = True
else:
final = False
visited_sheets.append(value)
C.add_edge(node,succ)
C.node[succ]['value'] = value
C.node[succ]['label'] = "$%d$"%(value)
C.node[succ]['final'] = final
C.node[succ]['level'] = level+1
C.node[succ]['nrots'] = idx if idx <= n/2 else idx-n
ctr += 1
# we are done adding succesors to all endpoints at this
# level. level up!
level += 1
return C
def final_edges(C):
    """Returns a list of final edges from the homology graph.

    The final edges are those that define the c-cycles on the Riemann
    surface. Note that the edges returned are such that the nodes of the edge
    are _both_ final nodes.

    The final edges are ordered such that the sheet number appears first in the
    edge.

    Input:

    - homology graph

    Output:

    - list of (ordered) tuples representing the final edges

    NOTE(review): uses the networkx 1.x API (``C.node[...]`` attribute dict,
    ``C.neighbors(...)`` returning a list) — confirm the pinned networkx
    version before upgrading.
    """
    final_nodes = [n for n in C.nodes() if C.node[n]['final']]
    edges = []
    while len(final_nodes) > 0:
        node = final_nodes.pop()
        # Each final node's single neighbor is its predecessor in the tree.
        pred = C.neighbors(node)[0]
        pred_val = C.node[pred]['value']
        # Find the partner final node: same value as this node's predecessor,
        # and whose own predecessor has this node's value.
        other = [n for n in final_nodes if C.node[n]['value'] == pred_val and
                 C.node[C.neighbors(n)[0]]['value'] == C.node[node]['value']]
        other = other[0]
        final_nodes.remove(other)
        # order is important: the nodes with final vertices "don't
        # actually exist" in the homology graph. they're only there to
        # help determine replative ordering of cycles. We choose final
        # edges such that the predecessors of the nodes give the correct
        # ordering
        if isinstance(C.node[node]['value'],tuple):
            edges.append((other,node))
        else:
            edges.append((node,other))
    return edges
def intersection_matrix(final_edges, g):
"""Returns the intersection matrix from a list of final edges.
Compute the intersection matrix of the c-cycles from the Tretkoff graph and
final edge data output by `tretkoff_graph()`.
Input:
- C: (networkx.Graph) Tretkoff graph
- final_edges: each edge corresponds to a c-cycle on the Riemann surface
- g: the expected genus of the riemann surface as given by
singularities.genus()
"""
def intersection_number(ei,ej):
"""Returns the intersection number of two edges of the Tretkoff graph.
Note: Python is smart and uses lexicographical ordering on lists which
is exactly what we need.
"""
ei_start,ei_end = ei
ej_start,ej_end = ej
# the intersection number changes sign when a single edge is
# reversed. normalize the edges such that the starting node of
# each edge occurs before the ending node and that ei's starting
# node occurs before ej's. (intersection is anti-symmetic)
if ei_start > ei_end:
return (-1)*intersection_number((ei[1],ei[0]),ej)
elif ej_start > ej_end:
return (-1)*intersection_number(ei,(ej[1],ej[0]))
elif ei_start > ej_start:
return (-1)*intersection_number(ej,ei)
# after the above transformations, there is only one
# configuration resulting in a non-zero intersection number. (24
# total intersection possibilities / 2**3 = 3, because of three
# binary transformations)
if ej_start < ei_end < ej_end:
return 1
else:
return 0
raise ValueError('Unable to determine intersection index of ' + \
'edge %s with edge %s'%(ei,ej))
# the intersection matrix is anti-symmetric, so we only determine the
# intersection numbers of the upper triangle
num_final_edges = len(final_edges)
K = numpy.zeros((num_final_edges, num_final_edges), dtype=numpy.int)
for i in range(num_final_edges):
ei = final_edges[i]
for j in range(i+1,num_final_edges):
ej = final_edges[j]
K[i,j] = intersection_number(ei,ej)
# obtain the intersection numbers below the diagonal
K = K - K.T
# sanity_check: make sure the intersection matrix predicts the same genus
# that the genus formula otuputs
rank = numpy.linalg.matrix_rank(K)
if rank/2 != g:
raise ValueError("Found inconsistent genus in homolgy " + \
"intersection matrix.")
return K
def compute_c_cycles(tretkoff_graph, final_edges):
    """Returns the c-cycles of the Riemann surface.

    Input:

    - tretkoff_graph: the Tretkoff graph (networkx 1.x style graph)

    - final_edges: a list of the final edges of the Tretkoff graph

    Output:

    A list of the form

        [s_0, (b_{i_0}, n_{i_0}), s_1, (b_{i_1}, n_{i_1}), ...]

    where "s_k" is a sheet number, "b_{i_k}" is the {i_k}'th branch
    point, and "n_{i_k}" is the number of times and direction to go
    about branch point "b_{i_k}".
    """
    root = tuple([0])
    C = tretkoff_graph
    c_cycles = []
    # recall that the edges have a direction: edge[0] is the starting
    # node and edge[1] is the ending node. This determines the direction
    # of the c-cycle.
    for final_edge in final_edges:
        # obtain the vertices on the Tretkoff graph starting from the
        # base place, going through the edge, and then back to the
        # base_place
        #
        # see the comment in homology:final_edges() for an explanation
        # on the ordering / direction of the cycle.
        #
        # NOTE: list comprehensions instead of bare map(): on Python 3
        # map() returns an iterator, so the indexing (edge[0]) and item
        # assignment (path_values[n] = ...) below would fail.
        edge = [C.neighbors(n)[0] for n in final_edge]
        path_to_edge = nx.shortest_path(C,root,edge[0])
        path_from_edge = nx.shortest_path(C,edge[1],root)
        path = path_to_edge + path_from_edge
        path_values = [C.node[n]['value'] for n in path]
        # convert branch places (branch point, permutation) to
        # point-rotation pairs (branch point, number and direction of
        # rotations): the rotation count is the offset between the
        # neighboring sheets within the branch point's permutation,
        # wrapped to the shorter direction around the cycle.
        for n in range(1,len(path),2):
            bi,pi = path_values[n]
            prev_sheet = C.node[path[n-1]]['value']
            next_sheet = C.node[path[n+1]]['value']
            nrots = pi.index(next_sheet) - pi.index(prev_sheet)
            if nrots > len(pi)/2: nrots -= len(pi)
            path_values[n] = (bi, nrots)
        c_cycles.append(path_values)
    return c_cycles
def reverse_cycle(cycle):
    """Return the cycle traversed in the opposite direction.

    Sheet entries (even indices) are kept as-is; branch-point entries
    (odd indices, ``(branch_point, nrots)`` tuples) get their rotation
    numbers negated so the rotations are performed in the opposite sense.
    """
    flipped = cycle[::-1]
    return [
        (item[0], -item[1]) if idx % 2 == 1 else item
        for idx, item in enumerate(flipped)
    ]
def compress_cycle(cycle, tretkoff_graph):
    """Shorten a cycle, in place, to an equivalent cycle.

    Two passes: (1) merge consecutive rotations about the same branch
    point, (2) drop branch points with zero net rotations. The input list
    is mutated and also returned. ``tretkoff_graph`` is unused but kept
    for interface compatibility.
    """
    # Pass 1: merge consecutive entries for the same branch point by
    # summing their rotation counts and dropping the intermediate sheet.
    idx = 1
    remaining = len(cycle)
    while idx < remaining - 2:
        here = cycle[idx]
        there = cycle[idx + 2]
        if here[0] == there[0]:
            cycle[idx] = (here[0], here[1] + there[1])
            del cycle[idx + 1:idx + 3]
            remaining -= 2
        else:
            idx += 2

    # Pass 2: a branch point with zero rotations is a no-op; remove it
    # together with the sheet entry preceding it.
    idx = 0
    remaining = len(cycle)
    while idx < remaining - 1:
        if cycle[idx + 1][1] == 0:
            del cycle[idx:idx + 2]
            remaining -= 2
        else:
            idx += 2
    return cycle
def compute_ab_cycles(c_cycles, linear_combinations, g, tretkoff_graph):
    """
    Returns the a- and b-cycles of the Riemann surface given the
    intermediate 'c-cycles' and linear combinations matrix.

    Input:

    - c_cycles: list of c-cycles (see compute_c_cycles)

    - linear_combinations: output of the Frobenius transform; rows 0..g-1
      give the a-cycle coefficients, rows g..2g-1 the b-cycle coefficients

    - g: genus (number of a-/b-cycles to build)

    - tretkoff_graph: passed through to compress_cycle

    Output:

    - (a_cycles, b_cycles): two lists of g compressed cycles each
    """
    lincomb = linear_combinations
    M,N = lincomb.shape
    a_cycles = []
    b_cycles = []
    for i in range(g):
        a = []
        b = []
        for j in range(N):
            # A negative coefficient means the c-cycle is traversed in
            # reverse; |cij| is the number of times it is repeated.
            # c[:-1] drops the trailing base sheet so repeats concatenate
            # cleanly (the final closing element is appended below).
            cij = lincomb[i,j]
            c = c_cycles[j] if cij >= 0 else reverse_cycle(c_cycles[j])
            a.extend(abs(cij)*c[:-1])
            cij = lincomb[i+g,j]
            c = c_cycles[j] if cij >= 0 else reverse_cycle(c_cycles[j])
            b.extend(abs(cij)*c[:-1])
        # close each cycle back at the base sheet (sheet 0)
        a = a + [0]
        b = b + [0]
        a = compress_cycle(a, tretkoff_graph)
        b = compress_cycle(b, tretkoff_graph)
        a_cycles.append(a)
        b_cycles.append(b)
    return a_cycles, b_cycles
class YPathFactory(object):
"""Defines the basic y-path structure of the Riemann surface.
In particular, this class offers methods for determining which
*y-paths*, given by a list of branch points in the complex x-plane
and rotation numbers, to take order to define homology basis cycles
as well as sheet switching paths.
.. note::
This class is a light wrapper around legacy code. This legacy
code should eventually be made part of this class. What's
implemented here is a temporary hack.
Attributes
----------
RS : Riemann Surface
C : networkx.Graph
A graph encoding the y-skeleton of the Riemann surface.
Methods
-------
a_cycles
b_cycles
c_cycles
y_path_sheet_swap
"""
def __init__(self, RS, monodromy_group):
    """Initializes the Y-Skeleton by computing the monodromy graph and
    homology cycles of the Riemann surface.

    Parameters
    ----------
    RS : RiemannSurface
    monodromy_group : dict
        The monodromy group of the curve as given by
        :py:func:`RiemannSurfacePathFactory.monodromy_group`

    Notes
    -----
    Eagerly computes and caches the Tretkoff graph and the a-, b-, and
    c-cycles; construction cost is paid once here.
    """
    self.RS = RS
    self.C = tretkoff_graph(monodromy_group)
    # compute the a-, b-, and c-cycles by calling self.homology()
    self._a_cycles, self._b_cycles, self._c_cycles, \
        self._linear_combinations = self.homology()
def _value(self, node):
    """Gets the value associated with `node` on the y-skeleton `self.C`.

    (networkx 1.x node-attribute access.)
    """
    return self.C.node[node]['value']
def _node(self, value):
    """Converts `value` to its associated node on the y-skeleton `self.C`.

    Returns the first non-final node whose stored 'value' equals `value`.
    ``numpy.all`` allows array-valued node values to be compared.
    Raises IndexError if no matching node exists.
    """
    # (removed a dead `nodes = []` assignment that was immediately
    # overwritten by the comprehension below)
    nodes = [n for n,d in self.C.nodes(data=True)
             if numpy.all(d['value'] == value) and not d['final']]
    return nodes[0]
def _values(self, ypath, rotations=False):
| |
# -*- coding: utf-8 -*-
"""Analysis Pipeline."""
__all__ = [
"Pipeline",
"PipelineResult",
]
##############################################################################
# IMPORTS
# BUILT-IN
import typing as T
import weakref
# THIRD PARTY
import astropy.coordinates as coord
import numpy as np
import typing_extensions as TE
# PROJECT-SPECIFIC
import discO.type_hints as TH
from .fitter import PotentialFitter
from .measurement import CERR_Type, MeasurementErrorSampler
from .residual import ResidualMethod
from .sample import PotentialSampler, RandomLike
from .wrapper import PotentialWrapper
from discO.utils.pbar import get_progress_bar
##############################################################################
# CODE
##############################################################################
class Pipeline:
"""Analysis Pipeline.
Parameters
----------
sampler : `PotentialSampler`
The object for sampling the potential.
Can have a frame and representation type.
measurer : `MeasurementErrorSampler` or None (optional)
The object for re-sampling, given observational errors.
fitter : `PotentialFitter` or None (optional)
residualer : None (optional)
statistic : None (optional)
Raises
------
ValueError
If can't set `residualer` without `fitter`.
If can't set `statistic` without `residualer`.
"""
def __init__(
    self,
    sampler: PotentialSampler,
    measurer: T.Optional[MeasurementErrorSampler] = None,
    fitter: T.Optional[PotentialFitter] = None,
    residualer: T.Optional[ResidualMethod] = None,
    statistic: T.Optional[T.Callable] = None,
):
    """Validate stage dependencies and store the pipeline components.

    Stages form a chain: sample -> (measure) -> fit -> residual ->
    statistic. A later stage cannot be configured without the stage it
    consumes, and the sampler/fitter must agree on the reference frame.
    Validation order is significant: the first violated constraint wins.
    """
    # CAN set `fitter` without `measurer`
    if fitter is not None and measurer is None:
        pass
    # can't set `residualer` without `fitter`
    if residualer is not None and fitter is None:
        raise ValueError("Can't set `residualer` without `fitter`.")
    # can't set `statistic` without `residualer`
    if statistic is not None and residualer is None:
        raise ValueError("Can't set `statistic` without `residualer`")
    # sampling and fitting must happen in the same reference frame
    if sampler is not None and fitter is not None:
        if fitter.frame != sampler.frame:
            raise ValueError(
                "sampler and fitter must have the same frame.",
            )
    self._sampler = sampler
    self._measurer = measurer
    self._fitter = fitter
    self._residualer = residualer
    self._statisticer = statistic
    # result of the most recent run (populated by __call__/run)
    self._result = None

# /def
    # ---------------------------------------------------------------
    # Read-only accessors for the pipeline components and their frames.
    @property
    def sampler(self) -> PotentialSampler:
        """The sampler."""
        return self._sampler
    # /def
    @property
    def potential(self) -> T.Any:
        """The potential from which we sample."""
        return self.sampler.potential
    # /def
    @property
    def potential_frame(self) -> TH.OptFrameType:
        """The frame in which the potential is sampled and fit."""
        return self.sampler.frame
    # /def
    @property
    def potential_representation_type(self) -> TH.OptRepresentationType:
        """Representation type of potential."""
        return self.sampler.representation_type
    # /def
    @property
    def measurer(self) -> T.Optional[MeasurementErrorSampler]:
        """The measurer."""
        return self._measurer
    # /def
    @property
    def observer_frame(self) -> TH.OptFrameType:
        """Observer frame."""
        # NOTE(review): raises AttributeError when no measurer is
        # configured (`_measurer` is Optional) — confirm callers guard.
        return self._measurer.frame
    # /def
    @property
    def observer_representation_type(self) -> TH.OptRepresentationType:
        """Observer representation type."""
        # NOTE(review): same None-measurer caveat as `observer_frame`.
        return self._measurer.representation_type
    # /def
    @property
    def fitter(self) -> T.Optional[PotentialFitter]:
        """The fitter."""
        return self._fitter
    # /def
    @property
    def residualer(self) -> T.Optional[ResidualMethod]:
        """The residual function."""
        return self._residualer
    # /def
    @property
    def statisticer(self) -> T.Optional[T.Callable]:
        """The statistic function."""
        return self._statisticer
    # /def
    #################################################################
    # Call
    def __call__(
        self,
        n_or_sample: T.Union[int, TH.SkyCoordType],
        *,
        # sampler
        total_mass: TH.QuantityType = None,
        # observer
        c_err: T.Optional[CERR_Type] = None,
        # residual
        observable: T.Optional[str] = None,
        # extra
        random: T.Optional[RandomLike] = None,
        **kwargs,
    ) -> object:
        """Run the pipeline for 1 iteration.

        Parameters
        ----------
        n_or_sample : int or (N,) SkyCoord (optional)
            Number of sample points to draw, or pre-drawn samples used
            as-is.
        total_mass : Quantity or None (optional, keyword-only)
            Passed to the sampler.
        c_err : optional, keyword-only
            Measurement-error specification; ``False`` skips the
            measurement step entirely.
        observable : str or None (optional, keyword-only)
        random : int or RandomState or None (optional, keyword-only)
        **kwargs
            Passed to every pipeline step.

        Returns
        -------
        (1,) :class:`PipelineResult`

        Raises
        ------
        TypeError
            If `n_or_sample` is neither an int nor a SkyCoord.
        """
        # We will make a pipeline result and then work thru it.
        result = PipelineResult(self)
        # TODO! resolve_randomstate(random)
        # we need to resolve the random state now, so that an `int` isn't
        # set as the same random state each time
        random = (
            np.random.RandomState(random)
            if not isinstance(random, np.random.RandomState)
            else random
        )
        # ----------
        # 1) sample
        if isinstance(n_or_sample, int):
            sample: TH.SkyCoordType = self.sampler(
                n_or_sample,
                total_mass=total_mass,
                random=random,
                **kwargs,
            )
        elif isinstance(n_or_sample, coord.SkyCoord):
            # pre-drawn samples bypass the sampler
            sample = n_or_sample
        else:
            raise TypeError
        result["sample"][0] = sample
        # ----------
        # 2) measure
        # optionally skip this step if c_err is False
        if self.measurer is not None and c_err is not False:
            sample: TH.SkyCoordType = self.measurer(
                sample,
                random=random,
                c_err=c_err,
                **kwargs,
            )
            result["measured"][0] = sample
        # ----------
        # 3) fit
        # we force the fit to be in the same frame & representation type
        # as the samples.
        fit_pot: T.Any = self.fitter(sample, **kwargs)
        result["fit"][0] = fit_pot
        # ----------
        # 4) residual
        # only if 3)
        if self.residualer is not None:
            resid: T.Any = self.residualer(
                fit_pot,
                original_potential=self.potential,
                observable=observable,
                **kwargs,
            )
            result["residual"][0] = resid
        # ----------
        # 5) statistic
        # only if 4)
        # `resid` is guaranteed bound here: __init__ rejects a statistic
        # without a residualer.
        if self.statisticer is not None:
            stat: T.Any = self.statisticer(resid, **kwargs)
            result["statistic"][0] = stat
        # ----------
        self._result: PipelineResult = result  # link to most recent result
        return result[0]
    # /defs
    # -----------------------------------------------------------------
    def _run_iter(
        self,
        n_or_sample: T.Union[int, TH.SkyCoordType],
        iterations: int = 1,
        *,
        # observer
        c_err: T.Optional[CERR_Type] = None,
        # residual
        observable: T.Optional[str] = None,
        # extra
        random: T.Optional[RandomLike] = None,
        progress: bool = True,
        **kwargs,
    ) -> object:
        """Run pipeline, yielding :class:`PipelineResult` over ``iterations``.

        .. todo::
            - See ``emcee`` for the backend.

        Parameters
        ----------
        n_or_sample : int (optional)
            number of sample points
        iterations : int (optional)
            Number of iterations. Must be > 0.
            Only used if `n_or_sample` is int.
        c_err : optional, keyword-only
            Measurement-error spec; ``False`` skips the measure step.
        observable : str or None (optional, keyword-only)
        random : int or |RandomState| or None (optional, keyword-only)
            Random state or seed.
        progress : bool (optional, keyword-only)
            Whether to show a progress bar.

        Yields
        ------
        :class:`PipelineResult`
            For each of ``iterations``
        """
        # reshape n_or_sample into something iterable per-iteration
        if isinstance(n_or_sample, int):
            # draw a fresh sample of the same size each iteration
            n_or_sample = [n_or_sample] * iterations
        elif isinstance(n_or_sample, coord.SkyCoord):
            if len(n_or_sample.shape) == 1:  # scalar
                n_or_sample = [n_or_sample]
            else:  # TODO! not use jank iterator
                # split a 2D sample into per-iteration columns, tagging
                # each column with its mass from the cache
                def jank_iter(samples, masses):
                    for samp, mass in zip(samples, masses):
                        samp.cache["mass"] = mass
                        yield samp
                n_or_sample = jank_iter(
                    n_or_sample.T,
                    n_or_sample.cache["mass"].T,
                )
        # iterate over number of iterations
        # for _ in tqdm(range(niter), desc="Running Pipeline...", total=niter):
        with get_progress_bar(progress, iterations) as pbar:
            for arg in n_or_sample:
                pbar.update(1)
                # delegate each iteration to __call__
                yield self(
                    arg,
                    random=random,
                    # observer
                    c_err=c_err,
                    # residual
                    observable=observable,
                    **kwargs,
                )
        # /with
    # /def
    # ---------------------------------------------------------------
    def _run_batch(
        self,
        n_or_sample: T.Union[int, T.Sequence[int]],
        iterations: int = 1,
        *,
        random: T.Optional[RandomLike] = None,
        # sampler
        total_mass: TH.QuantityType = None,
        # observer
        c_err: T.Union[CERR_Type, None, TE.Literal[False]] = None,
        # fitter
        # residual
        observable: T.Optional[str] = None,
        progress: bool = False,
        **kwargs,
    ) -> object:
        """Run the pipeline ``iterations`` times, collecting all results.

        Parameters
        ----------
        n_or_sample : int or Sequence[int] or SkyCoord (optional)
            number of sample points, or pre-drawn samples. A SkyCoord
            input overrides `iterations` with its own second axis.
        iterations : int (optional)
            Number of iterations. Must be > 0.
        random : int or |RandomState| or None (optional, keyword-only)
            Random state or seed.
        total_mass, c_err, observable, progress
            Forwarded to ``_run_iter``.

        Returns
        -------
        :class:`PipelineResult`
            A record array with one row per iteration.
        """
        # reshape n_or_sample: a SkyCoord determines the iteration count
        if isinstance(n_or_sample, coord.SkyCoord):
            if len(n_or_sample.shape) == 1:  # scalar
                iterations = 1
            else:
                iterations = n_or_sample.shape[1]
        # We will make a pipeline result and then work thru it.
        results = np.recarray(
            (iterations,),
            dtype=[
                ("sample", coord.SkyCoord),
                ("measured", coord.SkyCoord),
                ("fit", PotentialWrapper),
                ("residual", object),
                ("statistic", object),
            ],
        ).view(PipelineResult)
        # weakref avoids a reference cycle back to this pipeline
        results._parent_ref = weakref.ref(self)
        run_gen = self._run_iter(
            n_or_sample,
            iterations,
            random=random,
            total_mass=total_mass,
            c_err=c_err,
            observable=observable,
            progress=progress,
            **kwargs,
        )
        # drain the generator into the record array
        for i, result in enumerate(run_gen):
            results[i] = result
        return results
    # /defs
# ---------------------------------------------------------------
def run(
self,
n_or_sample: T.Union[int, T.Sequence[int]],
iterations: int = 1,
*,
random: T.Optional[RandomLike] = None,
# sampler
total_mass: TH.QuantityType = None,
# observer
c_err: T.Union[CERR_Type, None, TE.Literal[False]] = None,
# residual
observable: T.Optional[str] = None,
# extra
batch: bool = False,
progress: bool = True,
**kwargs,
) -> object:
"""Call.
Parameters
----------
n : int (optional)
number of sample points
iterations : int (optional)
Number of iterations. Must be > 0.
random : int or |RandomState| or None (optional, keyword-only)
Random state or seed.
In order that a sequence of samples is different in each element
we here resolve random seeds into a |RandomState|.
original_pot : object or None (optional, keyword-only)
observable : str or None (optional, keyword-only)
Returns
-------
:class:`PipelineResult`
"""
run_func = self._run_batch if batch else self._run_iter
# we need to resolve the random state now, so that an `int` isn't
# set as the same random state each time
random = (
np.random.RandomState(random)
if not isinstance(random, np.random.RandomState)
else random
)
return run_func(
n_or_sample,
iterations,
random=random,
total_mass=total_mass,
c_err=c_err,
observable=observable,
progress=progress,
**kwargs,
)
# /def
#################################################################
# utils
def __repr__(self) -> str:
"""String Representation.
Returns
-------
str
"""
s = (
"Pipeline:\n"
f" sampler: {self._sampler}\n"
f" measurer: {self._measurer}\n"
f" fitter: {self._fitter}\n"
f" residual: {self._residualer}\n"
f" statistic: {self._statisticer}\n"
| |
"""
Author: vigarbuaa
"""
import hashlib
import hmac
import sys
import time
from copy import copy
from datetime import datetime, timedelta
from urllib.parse import urlencode
import pytz
from vnpy.api.rest import Request, RestClient
from vnpy.api.websocket import WebsocketClient
from vnpy.event import Event
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.constant import (
Direction,
Exchange,
OrderType,
Product,
Status,
Interval
)
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
BarData,
AccountData,
ContractData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest
)
# REST / Websocket endpoints
BASE_URL = "https://api.bitfinex.com/"
REST_HOST = "https://api.bitfinex.com/"
WEBSOCKET_HOST = "wss://api-pub.bitfinex.com/ws/2"
# Map Bitfinex order-status strings to vn.py Status values
STATUS_BITFINEX2VT = {
    "ACTIVE": Status.NOTTRADED,
    "PARTIALLY FILLED": Status.PARTTRADED,
    "EXECUTED": Status.ALLTRADED,
    "CANCELED": Status.CANCELLED,
}
# Order-type mappings; "EXCHANGE ..." types are spot-wallet orders,
# the unprefixed forms are margin orders
ORDERTYPE_VT2BITFINEX = {
    OrderType.LIMIT: "EXCHANGE LIMIT",
    OrderType.MARKET: "EXCHANGE MARKET",
}
ORDERTYPE_BITFINEX2VT = {
    "EXCHANGE LIMIT": OrderType.LIMIT,
    "EXCHANGE MARKET": OrderType.MARKET,
    "LIMIT": OrderType.LIMIT,
    "MARKET": OrderType.MARKET
}
# Direction mappings
DIRECTION_VT2BITFINEX = {
    Direction.LONG: "Buy",
    Direction.SHORT: "Sell",
}
DIRECTION_BITFINEX2VT = {
    "Buy": Direction.LONG,
    "Sell": Direction.SHORT,
}
# Bar-interval codes for the v2 candles endpoint
INTERVAL_VT2BITFINEX = {
    Interval.MINUTE: "1m",
    Interval.HOUR: "1h",
    Interval.DAILY: "1D",
}
# Step used to advance the history-paging cursor per interval
TIMEDELTA_MAP = {
    Interval.MINUTE: timedelta(minutes=1),
    Interval.HOUR: timedelta(hours=1),
    Interval.DAILY: timedelta(days=1),
}
UTC_TZ = pytz.utc
class BitfinexGateway(BaseGateway):
    """
    VN Trader Gateway for bitfineX connection.

    Thin facade: trading and market data go through the websocket API,
    contract/history queries through the REST API.
    """

    default_setting = {
        "key": "",
        "secret": "",
        "session": 3,
        "proxy_host": "127.0.0.1",
        "proxy_port": 1080,
        "margin": ["False", "True"]
    }

    exchanges = [Exchange.BITFINEX]

    def __init__(self, event_engine):
        """Constructor"""
        super().__init__(event_engine, "BITFINEX")
        self.timer_count = 0
        self.resubscribe_interval = 60
        self.rest_api = BitfinexRestApi(self)
        self.ws_api = BitfinexWebsocketApi(self)

    def connect(self, setting: dict):
        """Start REST and websocket sessions from the user settings."""
        margin = setting["margin"] == "True"
        self.rest_api.connect(
            setting["key"],
            setting["secret"],
            setting["session"],
            setting["proxy_host"],
            setting["proxy_port"],
        )
        self.ws_api.connect(
            setting["key"],
            setting["secret"],
            setting["proxy_host"],
            setting["proxy_port"],
            margin,
        )
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)

    def subscribe(self, req: SubscribeRequest):
        """Subscribe to market data."""
        self.ws_api.subscribe(req)

    def send_order(self, req: OrderRequest):
        """Send a new order; returns the vt_orderid."""
        return self.ws_api.send_order(req)

    def cancel_order(self, req: CancelRequest):
        """Cancel an existing order."""
        self.ws_api.cancel_order(req)

    def query_account(self):
        """Account data is pushed over the websocket; nothing to poll."""

    def query_position(self):
        """Position data is pushed over the websocket; nothing to poll."""

    def query_history(self, req: HistoryRequest):
        """Query bar history over REST."""
        return self.rest_api.query_history(req)

    def close(self):
        """Stop both API sessions."""
        self.rest_api.stop()
        self.ws_api.stop()

    def process_timer_event(self, event: Event):
        """Every ``resubscribe_interval`` ticks, replay subscriptions."""
        self.timer_count += 1
        if self.timer_count >= self.resubscribe_interval:
            self.timer_count = 0
            self.ws_api.resubscribe()
class BitfinexRestApi(RestClient):
    """
    BitfineX REST API

    Handles contract metadata and historical-bar queries.
    """

    def __init__(self, gateway: BaseGateway):
        """Bind the parent gateway and initialize counters."""
        super(BitfinexRestApi, self).__init__()
        self.gateway = gateway
        self.gateway_name = gateway.gateway_name
        self.key = ""
        self.secret = ""
        self.order_count = 1_000_000
        self.connect_time = 0

    def sign(self, request):
        """
        Generate BitfineX signature.

        Builds v2 auth headers: HMAC-SHA384 over
        "<METHOD>/api/v2/<path><nonce><body>" keyed with the API secret.
        """
        # Nonce must be strictly increasing; microsecond timestamp suffices.
        nonce = str(int(round(time.time() * 1000000)))
        if request.params:
            query = urlencode(request.params)
            path = request.path + "?" + query
        else:
            path = request.path
        if request.data:
            request.data = urlencode(request.data)
        else:
            request.data = ""
        msg = request.method + \
            "/api/v2/{}{}{}".format(path, nonce, request.data)
        signature = hmac.new(
            self.secret, msg.encode("utf8"), digestmod=hashlib.sha384
        ).hexdigest()
        # Add headers
        headers = {
            "bfx-nonce": nonce,
            "bfx-apikey": self.key,
            "bfx-signature": signature,
            "content-type": "application/json"
        }
        request.headers = headers
        return request

    def connect(
        self,
        key: str,
        secret: str,
        session: int,
        proxy_host: str,
        proxy_port: int
    ):
        """
        Initialize connection to REST server.
        """
        self.key = key
        self.secret = secret.encode()
        # timestamp-derived base for locally generated order ids
        self.connect_time = (
            int(datetime.now(UTC_TZ).strftime("%y%m%d%H%M%S")) * self.order_count
        )
        self.init(REST_HOST, proxy_host, proxy_port)
        self.start(session)
        self.gateway.write_log("REST API启动成功")
        self.query_contract()

    def query_contract(self):
        """Request the exchange's symbol details (async callback below)."""
        self.add_request(
            method="GET",
            path="/v1/symbols_details",
            callback=self.on_query_contract,
        )

    def on_query_contract(self, data, request):
        """Push one ContractData per symbol returned by the exchange."""
        for d in data:
            contract = ContractData(
                symbol=d["pair"].upper(),
                exchange=Exchange.BITFINEX,
                name=d["pair"].upper(),
                product=Product.SPOT,
                size=1,
                pricetick=1 / pow(10, d["price_precision"]),
                min_volume=float(d["minimum_order_size"]),
                history_data=True,
                gateway_name=self.gateway_name,
            )
            self.gateway.on_contract(contract)
        # BUG FIX: this is the contract query, not the account-funds query;
        # the original logged "账户资金查询成功" here.
        self.gateway.write_log("合约信息查询成功")

    def on_failed(self, status_code: int, request: Request):
        """
        Callback to handle request failed.
        """
        msg = f"请求失败,状态码:{status_code},信息:{request.response.text}"
        self.gateway.write_log(msg)

    def on_error(
        self, exception_type: type, exception_value: Exception, tb, request: Request
    ):
        """
        Callback to handler request exception.
        """
        msg = f"触发异常,状态码:{exception_type},信息:{exception_value}"
        self.gateway.write_log(msg)
        sys.stderr.write(
            self.exception_detail(exception_type, exception_value, tb, request)
        )

    def query_history(self, req: HistoryRequest):
        """Query bar history, paging forward from ``req.start``."""
        history = []
        limit = 5000
        interval = INTERVAL_VT2BITFINEX[req.interval]
        path = f"/v2/candles/trade:{interval}:t{req.symbol}/hist"
        start_time = req.start
        while True:
            # Create query params (sort=1 -> ascending by timestamp)
            params = {
                "limit": limit,
                "start": datetime.timestamp(start_time) * 1000,
                "sort": 1
            }
            # Get response from server
            resp = self.request(
                "GET",
                path,
                params=params
            )
            # Break if request failed with other status code
            if resp.status_code // 100 != 2:
                msg = f"获取历史数据失败,状态码:{resp.status_code},信息:{resp.text}"
                self.gateway.write_log(msg)
                break
            else:
                data = resp.json()
                if not data:
                    msg = f"获取历史数据为空,开始时间:{start_time}"
                    # BUG FIX: the message was built but never logged
                    self.gateway.write_log(msg)
                    break
                buf = []
                for row in data:
                    # BUG FIX: Bitfinex v2 candles are
                    # [MTS, OPEN, CLOSE, HIGH, LOW, VOLUME]; the original
                    # unpacked [ts, o, h, l, c, v] (wrong field order) and
                    # shadowed its loop variable `l` with the low price.
                    ts, o, c, h, low, v = row
                    bar = BarData(
                        symbol=req.symbol,
                        exchange=req.exchange,
                        datetime=generate_datetime(ts),
                        interval=req.interval,
                        volume=v,
                        open_price=o,
                        high_price=h,
                        low_price=low,
                        close_price=c,
                        gateway_name=self.gateway_name
                    )
                    buf.append(bar)
                history.extend(buf)
                begin = buf[0].datetime
                end = buf[-1].datetime
                msg = f"获取历史数据成功,{req.symbol} - {req.interval.value},{begin} - {end}"
                self.gateway.write_log(msg)
                # Break if total data count less than limit (latest date collected)
                if len(data) < limit:
                    break
                # Update start time to just past the last received bar
                start_time = bar.datetime + TIMEDELTA_MAP[req.interval]
        return history
class BitfinexWebsocketApi(WebsocketClient):
    """Websocket API: market data plus authenticated trading channel."""

    def __init__(self, gateway):
        """Store gateway references and initialize state containers."""
        super(BitfinexWebsocketApi, self).__init__()
        self.gateway = gateway
        self.gateway_name = gateway.gateway_name
        self.order_id = 1_000_000
        self.trade_id = 1_000_000
        self.key = ""
        self.secret = ""
        # BUG FIX: `margin` was only set in connect(); initialize it so
        # attribute reads before connect() cannot raise AttributeError.
        self.margin = False
        self.accounts = {}
        self.orders = {}
        self.trades = set()
        # symbol -> TickData (the original assigned this dict twice)
        self.ticks = {}
        self.bids = {}
        self.asks = {}
        self.channels = {}  # channel_id : (Channel, Symbol)
        self.subscribed = {}
    def connect(
        self,
        key: str,
        secret: str,
        proxy_host: str,
        proxy_port: int,
        margin: bool
    ):
        """Store credentials and start the websocket connection."""
        self.key = key
        self.secret = secret.encode()
        # margin=True trades the margin wallet, else the exchange wallet
        self.margin = margin
        self.init(WEBSOCKET_HOST, proxy_host, proxy_port)
        self.start()
    def subscribe(self, req: SubscribeRequest):
        """
        Subscribe to tick data upate.

        Subscribes both the order-book and ticker channels, and records
        the request so ``resubscribe`` can replay it.
        """
        if req.symbol not in self.subscribed:
            self.subscribed[req.symbol] = req
        # order-book channel
        d = {
            "event": "subscribe",
            "channel": "book",
            "symbol": req.symbol,
        }
        self.send_packet(d)
        # ticker channel
        d = {
            "event": "subscribe",
            "channel": "ticker",
            "symbol": req.symbol,
        }
        self.send_packet(d)
        # NOTE(review): returns a millisecond timestamp — confirm any
        # caller actually uses this value.
        return int(round(time.time() * 1000))
    def resubscribe(self):
        """Replay all recorded subscriptions (driven by the gateway timer)."""
        for req in self.subscribed.values():
            self.subscribe(req)
    def _gen_unqiue_cid(self):
        # (sic, name kept) Client order id: "yymmdd" date prefix plus an
        # incrementing counter; cancel_order later recovers the date from
        # the first six digits.
        self.order_id += 1
        local_oid = time.strftime("%y%m%d") + str(self.order_id)
        return int(local_oid)
    def send_order(self, req: OrderRequest):
        """Send a new order via the authenticated channel ("on" message)."""
        orderid = self._gen_unqiue_cid()
        # Bitfinex encodes direction in the sign of the amount
        if req.direction == Direction.LONG:
            amount = req.volume
        else:
            amount = -req.volume
        order_type = ORDERTYPE_VT2BITFINEX[req.type]
        # margin orders drop the "EXCHANGE " prefix
        if self.margin:
            order_type = order_type.replace("EXCHANGE ", "")
        o = {
            "cid": orderid,
            "type": order_type,
            "symbol": "t" + req.symbol,
            "amount": str(amount),
            "price": str(req.price),
        }
        request = [0, "on", None, o]
        order = req.create_order_data(orderid, self.gateway_name)
        self.send_packet(request)
        self.gateway.on_order(order)
        return order.vt_orderid
    def cancel_order(self, req: CancelRequest):
        """Cancel an order via the authenticated channel ("oc" message)."""
        orderid = req.orderid
        # rebuild the "YYYY-MM-DD" cid_date from the cid's yymmdd prefix
        date_str = "20" + str(orderid)[0:6]
        date = date_str[0:4] + "-" + date_str[4:6] + "-" + date_str[6:8]
        request = [
            0,
            "oc",
            None,
            {
                "cid": int(orderid),
                "cid_date": date
            }
        ]
        self.send_packet(request)
    def on_connected(self):
        """Connected callback: log and authenticate the session."""
        self.gateway.write_log("Websocket API连接成功")
        self.authenticate()
    def on_disconnected(self):
        """Disconnected callback."""
        self.gateway.write_log("Websocket API连接断开")
    def on_packet(self, packet: dict):
        """Dispatch raw packets: dicts are events, lists are channel data."""
        if isinstance(packet, dict):
            self.on_response(packet)
        else:
            self.on_update(packet)
    def on_response(self, data):
        """Handle event packets; record channel-id -> (channel, symbol)."""
        if "event" not in data:
            return
        if data["event"] == "subscribed":
            symbol = str(data["symbol"].replace("t", ""))
            self.channels[data["chanId"]] = (data["channel"], symbol)
    def on_update(self, data):
        """Handle channel data, skipping heartbeat ("hb") frames."""
        if data[1] == "hb":
            return
        channel_id = data[0]
        # channel 0 is the authenticated account/trade channel
        if not channel_id:
            self.on_trade_update(data)
        else:
            self.on_data_update(data)
    def on_data_update(self, data):
        """Update tick / order-book state from a public channel message."""
        channel_id = data[0]
        channel, symbol = self.channels[channel_id]
        symbol = str(symbol.replace("t", ""))
        # Get the Tick object (create and cache on first sight)
        if symbol in self.ticks:
            tick = self.ticks[symbol]
        else:
            tick = TickData(
                symbol=symbol,
                exchange=Exchange.BITFINEX,
                name=symbol,
                datetime=datetime.now(UTC_TZ),
                gateway_name=self.gateway_name,
            )
            self.ticks[symbol] = tick
        l_data1 = data[1]
        # Update general quote
        if channel == "ticker":
            # negative indices assume the v2 ticker layout ending in
            # [..., LAST_PRICE, VOLUME, HIGH, LOW] — TODO confirm against
            # the Bitfinex ticker channel docs
            tick.volume = float(l_data1[-3])
            tick.high_price = float(l_data1[-2])
            tick.low_price = float(l_data1[-1])
            tick.last_price = float(l_data1[-4])
            # open = last - daily change
            tick.open_price = float(tick.last_price - l_data1[4])
        # Update deep quote
        elif channel == "book":
            bid = self.bids.setdefault(symbol, {})
            ask = self.asks.setdefault(symbol, {})
            if len(l_data1) > 3:
                # snapshot: list of [price, count, amount] rows;
                # positive amount = bid side, negative = ask side
                for price, count, amount in l_data1:
                    price = float(price)
                    count = int(count)
                    amount = float(amount)
                    if amount > 0:
                        bid[price] = amount
                    else:
                        ask[price] = -amount
            else:
                # incremental update: a single [price, count, amount] row;
                # count == 0 means remove that price level
                price, count, amount = l_data1
                price = float(price)
                count = int(count)
                amount = float(amount)
                if not count:
                    if price in bid:
                        del bid[price]
                    elif price in ask:
                        del ask[price]
                else:
                    if amount > 0:
                        bid[price] = amount
                    else:
                        ask[price] = -amount
            # rebuild the 5-level depth; IndexError (thin book) aborts
            # without pushing a tick
            try:
                # BID
                bid_keys = bid.keys()
                bidPriceList = sorted(bid_keys, reverse=True)
                tick.bid_price_1 = bidPriceList[0]
                tick.bid_price_2 = bidPriceList[1]
                tick.bid_price_3 = bidPriceList[2]
                tick.bid_price_4 = bidPriceList[3]
                tick.bid_price_5 = bidPriceList[4]
                tick.bid_volume_1 = bid[tick.bid_price_1]
                tick.bid_volume_2 = bid[tick.bid_price_2]
                tick.bid_volume_3 = bid[tick.bid_price_3]
                tick.bid_volume_4 = bid[tick.bid_price_4]
                tick.bid_volume_5 = bid[tick.bid_price_5]
                # ASK
                ask_keys = ask.keys()
                askPriceList = sorted(ask_keys)
                tick.ask_price_1 = askPriceList[0]
                tick.ask_price_2 = askPriceList[1]
                tick.ask_price_3 = askPriceList[2]
                tick.ask_price_4 = askPriceList[3]
                tick.ask_price_5 = askPriceList[4]
                tick.ask_volume_1 = ask[tick.ask_price_1]
                tick.ask_volume_2 = ask[tick.ask_price_2]
                tick.ask_volume_3 = ask[tick.ask_price_3]
                tick.ask_volume_4 = ask[tick.ask_price_4]
                tick.ask_volume_5 = ask[tick.ask_price_5]
            except IndexError:
                return
        dt = datetime.now(UTC_TZ)
        tick.datetime = dt
        # push a copy so downstream consumers can't mutate our cache
        self.gateway.on_tick(copy(tick))
def on_wallet(self, data):
""""""
# Exchange Mode
if not self.margin and str(data[0]) != "exchange":
return
# Margin Mode
elif self.margin and str(data[0]) != "margin":
return
accountid = str(data[1])
account = self.accounts.get(accountid, None)
if not account:
account = AccountData(
accountid=accountid,
gateway_name=self.gateway_name,
)
account.balance = float(data[2])
account.available = 0.0
account.frozen = 0.0
self.gateway.on_account(copy(account))
def on_trade_update(self, data):
""""""
name = data[1]
info = data[2]
if name == "ws":
for l in info:
self.on_wallet(l)
self.gateway.write_log("账户资金获取成功")
elif name == "wu":
self.on_wallet(info)
elif | |
oneup.set_state(-1) if oneup.state == 0 else oneup.set_state(0)
else:
old_state = False
oneup = Oneup(id=uuid4().hex, author=author, parent=self)
self.children.append(oneup)
# Commit 1up
db.session.add(self)
db.session.commit()
app.logger.info("{verb} {obj}".format(verb="Toggled" if old_state else "Added", obj=oneup, ))
return oneup
    def link_url(self):
        """Return URL if this Star has a Link-Planet

        Returns:
            String: URL of the first associated Link
            None: if no link was found (the previous doc claimed False)
        """
        # planet_assoc = self.planet_assocs.join(PlanetAssociation.planet.of_type(LinkPlanet)).first()
        for planet_assoc in self.planet_assocs:
            if planet_assoc.planet.kind == "link":
                return planet_assoc.planet.url
        return None
def has_picture(self):
"""Return True if this Star has a PicturePlanet"""
try:
first = self.picture_planets()[0]
except IndexError:
first = None
return first is not None
def has_text(self):
"""Return True if this Star has a TextPlanet"""
try:
first = self.text_planets()[0]
except IndexError:
first = None
return first is not None
    def picture_planets(self):
        """Return pictures of this Star as a list of LinkedPicturePlanet."""
        return self.planet_assocs.join(PlanetAssociation.planet.of_type(LinkedPicturePlanet)).all()
    def text_planets(self):
        """Return TextPlanets of this Star as a list."""
        return self.planet_assocs.join(PlanetAssociation.planet.of_type(TextPlanet)).all()
class PlanetAssociation(db.Model):
    """Associates Planets with Stars, defining an author for the connection"""
    __tablename__ = 'planet_association'
    # composite primary key: one association per (star, planet) pair
    star_id = db.Column(db.String(32), db.ForeignKey('star.id'), primary_key=True)
    planet_id = db.Column(db.String(32), db.ForeignKey('planet.id'), primary_key=True)
    planet = db.relationship("Planet", backref="star_assocs")
    author_id = db.Column(db.String(32), db.ForeignKey('persona.id'))
    author = db.relationship("Persona", backref="planet_assocs")
    @classmethod
    def validate_changeset(cls, changeset):
        """Return True if `changeset` is a valid PlanetAssociation changeset"""
        if "author_id" not in changeset or changeset["author_id"] is None:
            app.logger.warning("Missing `author_id` in changeset")
            return False
        if "planet" not in changeset or changeset["planet"] is None or "kind" not in changeset["planet"]:
            app.logger.warning("Missing `planet` or `planet.kind` in changeset")
            return False
        # NOTE(review): every kind other than "link" is validated as a
        # LinkedPicturePlanet — confirm e.g. "text" changesets are meant
        # to take this path.
        p_cls = LinkPlanet if changeset["planet"]["kind"] == "link" else LinkedPicturePlanet
        return p_cls.validate_changeset(changeset)
# Association table linking Planets to the Vesicles that carried them
t_planet_vesicles = db.Table(
    'planet_vesicles',
    db.Column('planet_id', db.String(32), db.ForeignKey('planet.id')),
    db.Column('vesicle_id', db.String(32), db.ForeignKey('vesicle.id'))
)
class Planet(Serializable, db.Model):
    """A Planet represents an attachment.

    Base of a polymorphic hierarchy discriminated on `kind`
    (picture, linkedpicture, link, text).
    """
    __tablename__ = 'planet'

    # Changeset keys required for inserts / updates (see Serializable)
    _insert_required = ["id", "title", "created", "modified", "source", "kind"]
    _update_required = ["id", "title", "modified", "source"]

    id = db.Column(db.String(32), primary_key=True)
    title = db.Column(db.Text)
    kind = db.Column(db.String(32))
    # BUG FIX: pass the callable, not its result — `utcnow()` was
    # evaluated once at import, stamping every row with module-load time.
    created = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    modified = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    source = db.Column(db.String(128))
    state = db.Column(db.Integer, default=0)

    vesicles = db.relationship(
        'Vesicle',
        secondary='planet_vesicles',
        primaryjoin='planet_vesicles.c.planet_id==planet.c.id',
        secondaryjoin='planet_vesicles.c.vesicle_id==vesicle.c.id')

    __mapper_args__ = {
        'polymorphic_identity': 'planet',
        'polymorphic_on': kind
    }

    def __repr__(self):
        return "<Planet:{} [{}]>".format(self.kind, self.id[:6])

    def get_state(self):
        """
        Return publishing state of this planet.
        Returns:
            Integer:
                -2 -- deleted
                -1 -- unavailable
                0 -- published
                1 -- draft
                2 -- private
                3 -- updating
        """
        return PLANET_STATES[self.state][0]

    def set_state(self, new_state):
        """
        Set the publishing state of this planet
        Parameters:
            new_state (int) code of the new state as defined in nucleus.PLANET_STATES
        Raises:
            ValueError: If new_state is not an Int or not a valid state of this object
        """
        new_state = int(new_state)
        if new_state not in PLANET_STATES.keys():
            # BUG FIX: `.format` was applied to the ValueError instance
            # (`raise ValueError("...").format(...)`), which raised
            # AttributeError instead of the intended message.
            raise ValueError("{} ({}) is not a valid planet state".format(
                new_state, type(new_state)))
        else:
            self.state = new_state

    def export(self, update=False):
        """Serialize this planet (delegates to Serializable.export)."""
        return Serializable.export(self, update=update)

    @staticmethod
    def create_from_changeset(changeset, stub=None, update_sender=None, update_recipient=None):
        """Create a new Planet object from a changeset (See Serializable.create_from_changeset). """
        # BUG FIX: `created` was parsed from changeset["modified"];
        # "created" is guaranteed present by _insert_required.
        created_dt = iso8601.parse_date(changeset["created"]).replace(tzinfo=None)
        modified_dt = iso8601.parse_date(changeset["modified"]).replace(tzinfo=None)
        if stub is not None:
            # fill in a subclass-provided instance
            if not isinstance(stub, Planet):
                raise ValueError("Invalid stub of type {}".format(type(stub)))
            new_planet = stub
            new_planet.id = changeset["id"]
            new_planet.title = changeset["title"]
            new_planet.source = changeset["source"]
            new_planet.created = created_dt
            new_planet.modified = modified_dt
        else:
            new_planet = Planet(
                id=changeset["id"],
                title=changeset["title"],
                created=created_dt,
                modified=modified_dt,
                source=changeset["source"]
            )
        app.logger.info("Created new {} from changeset".format(new_planet))
        return new_planet

    def update_from_changeset(self, changeset, update_sender=None, update_recipient=None):
        """Update a new Planet object from a changeset (See Serializable.update_from_changeset). """
        modified_dt = iso8601.parse_date(changeset["modified"]).replace(tzinfo=None)
        self.title = changeset["title"]
        self.source = changeset["source"]
        # BUG FIX: was `self.modifed` (typo), silently creating a stray
        # attribute instead of updating the column.
        self.modified = modified_dt
        return self
class PicturePlanet(Planet):
    """A Picture attachment stored as a local file."""

    # Changeset keys required for inserts / updates (see Serializable)
    _insert_required = ["id", "title", "created", "modified", "source", "filename", "kind"]
    _update_required = ["id", "title", "modified", "source", "filename"]

    id = db.Column(db.String(32), ForeignKey('planet.id'), primary_key=True)
    filename = db.Column(db.Text)

    __mapper_args__ = {
        'polymorphic_identity': 'picture'
    }

    @staticmethod
    def create_from_changeset(changeset, stub=None, update_sender=None, update_recipient=None):
        """Create a new Planet object from a changeset (See Serializable.create_from_changeset). """
        # BUG FIX: honor a caller-supplied stub — the original replaced it
        # unconditionally, unlike the sibling Planet subclasses.
        if stub is None:
            stub = PicturePlanet()
        new_planet = Planet.create_from_changeset(changeset,
            stub=stub, update_sender=update_sender, update_recipient=update_recipient)
        new_planet.filename = changeset["filename"]
        return new_planet

    def update_from_changeset(self, changeset, update_sender=None, update_recipient=None):
        """Update a new Planet object from a changeset (See Serializable.update_from_changeset). """
        raise NotImplementedError
class LinkedPicturePlanet(Planet):
    """A linked picture attachment (picture referenced by URL)"""
    # Changeset keys required for inserts / updates (see Serializable)
    _insert_required = ["id", "title", "created", "modified", "source", "url", "kind"]
    _update_required = ["id", "title", "modified", "source", "url"]
    # shares its primary key with the base `planet` row (joined-table
    # polymorphic inheritance)
    id = db.Column(db.String(32), ForeignKey('planet.id'), primary_key=True)
    url = db.Column(db.Text)
    __mapper_args__ = {
        'polymorphic_identity': 'linkedpicture'
    }
    @staticmethod
    def create_from_changeset(changeset, stub=None, update_sender=None, update_recipient=None):
        """Create a new Planet object from a changeset (See Serializable.create_from_changeset). """
        if stub is None:
            stub = LinkedPicturePlanet()
        # base class fills the shared columns, then we add `url`
        new_planet = Planet.create_from_changeset(changeset,
            stub=stub, update_sender=update_sender, update_recipient=update_recipient)
        new_planet.url = changeset["url"]
        return new_planet
    def update_from_changeset(self, changeset, update_sender=None, update_recipient=None):
        """Update a new Planet object from a changeset (See Serializable.update_from_changeset). """
        raise NotImplementedError
class LinkPlanet(Planet):
    """A URL attachment."""

    # Changeset keys required for inserts / updates (see Serializable)
    # BUG FIX: "kind" was listed twice in _insert_required.
    _insert_required = ["id", "title", "kind", "created", "modified", "source", "url"]
    _update_required = ["id", "title", "modified", "source", "url"]

    id = db.Column(db.String(32), ForeignKey('planet.id'), primary_key=True)
    url = db.Column(db.Text)

    __mapper_args__ = {
        'polymorphic_identity': 'link'
    }

    @staticmethod
    def create_from_changeset(changeset, stub=None, update_sender=None, update_recipient=None):
        """Create a new Planet object from a changeset (See Serializable.create_from_changeset). """
        if stub is None:
            stub = LinkPlanet()
        # base class fills the shared columns, then we add `url`
        new_planet = Planet.create_from_changeset(changeset,
            stub=stub, update_sender=update_sender, update_recipient=update_recipient)
        new_planet.url = changeset["url"]
        return new_planet

    def update_from_changeset(self, changeset, update_sender=None, update_recipient=None):
        """Update a new Planet object from a changeset (See Serializable.update_from_changeset). """
        raise NotImplementedError
class TextPlanet(Planet):
    """A longform text attachment, content-addressed by SHA-256."""

    # Changeset keys required for inserts / updates (see Serializable)
    # BUG FIX: "kind" was listed twice in _insert_required.
    _insert_required = ["id", "title", "kind", "created", "modified", "source", "text"]
    _update_required = ["id", "title", "modified", "source", "text"]

    id = db.Column(db.String(32), ForeignKey('planet.id'), primary_key=True)
    text = db.Column(db.Text)

    __mapper_args__ = {
        'polymorphic_identity': 'text'
    }

    @classmethod
    def get_or_create(cls, text):
        """Return planet containing text if it already exists or create it

        The id is the first 32 hex chars of SHA-256(text), so identical
        texts deduplicate to one row. The new planet is NOT added to the
        session here; callers must persist it.

        Args:
            text: Content value of the TextPlanet
        """
        # NOTE(review): sha256() requires bytes on Python 3 — confirm the
        # target interpreter, or encode `text` before hashing.
        h = sha256(text).hexdigest()[:32]
        planet = TextPlanet.query.get(h)
        if planet is None:
            app.logger.info("Storing new text")
            planet = TextPlanet(
                id=h,
                text=text)
        return planet

    @staticmethod
    def create_from_changeset(changeset, stub=None, update_sender=None, update_recipient=None):
        """Create a new Planet object from a changeset (See Serializable.create_from_changeset). """
        if stub is None:
            stub = TextPlanet()
        # base class fills the shared columns, then we add `text`
        new_planet = Planet.create_from_changeset(changeset,
            stub=stub, update_sender=update_sender, update_recipient=update_recipient)
        new_planet.text = changeset["text"]
        return new_planet

    def update_from_changeset(self, changeset, update_sender=None, update_recipient=None):
        """Update a new Planet object from a changeset (See Serializable.update_from_changeset). """
        raise NotImplementedError
class Oneup(Star):
    """A 1up is a vote that signals interest in its parent Star."""

    # Changeset keys required for inserts / updates (see Serializable)
    _insert_required = ["id", "created", "modified", "author_id", "parent_id", "state"]
    _update_required = ["id", "modified", "state"]

    __mapper_args__ = {
        'polymorphic_identity': 'oneup'
    }

    def __repr__(self):
        # BUG FIX: the original tested `["author_id", "parent_id"] in
        # dir(self)` — a list is never an element of dir()'s result, so
        # the detailed branch was unreachable. Test the values instead
        # (they must be non-None for the [:6] slices below).
        if self.author_id is not None and self.parent_id is not None:
            return "<1up <Persona {}> -> <Star {}> ({})>".format(
                self.author_id[:6], self.parent_id[:6], self.get_state())
        else:
            return "<1up ({})>".format(self.get_state())

    def get_state(self):
        """
        Return publishing state of this 1up.
        Returns:
            Integer:
                -1 -- (disabled)
                0 -- (active)
                1 -- (unknown author)
        """
        return ONEUP_STATES[self.state][0]

    def set_state(self, new_state):
        """
        Set the publishing state of this 1up
        Parameters:
            new_state (int) code of the new state as defined in nucleus.ONEUP_STATES
        Raises:
            ValueError: If new_state is not an Int or not a valid state of this object
        """
        new_state = int(new_state)
        if new_state not in ONEUP_STATES.keys():
            raise ValueError("{} ({}) is not a valid 1up state".format(
                new_state, type(new_state)))
        else:
            self.state = new_state

    @staticmethod
    def create_from_changeset(changeset, stub=None, update_sender=None, update_recipient=None):
        """Create a new Oneup object from a changeset (See Serializable.create_from_changeset). """
        # BUG FIX: `created` was parsed from changeset["modified"];
        # "created" is guaranteed present by _insert_required.
        created_dt = iso8601.parse_date(changeset["created"]).replace(tzinfo=None)
        modified_dt = iso8601.parse_date(changeset["modified"]).replace(tzinfo=None)
        if stub is not None:
            oneup = stub
            oneup.created = created_dt
            oneup.modified = modified_dt
            oneup.author = None
            # BUG FIX: a trailing comma made this a 1-tuple.
            # NOTE(review): "source" is not in _insert_required — confirm
            # stub changesets always carry it.
            oneup.source = changeset["source"]
            oneup.parent_id = None
        else:
            oneup = Oneup(
                id=changeset["id"],
                created=created_dt,
                modified=modified_dt,
                author=None,
                parent=None,
            )
        oneup.set_state(int(changeset["state"]))
        author = Persona.query.get(changeset["author_id"])
        if author is None:
            # TODO: Send request for author
            oneup.author_id = changeset["author_id"]
            # mark "unknown author" unless already disabled
            if oneup.get_state() >= 0:
                oneup.set_state(1)
        else:
            oneup.author = author
        star = Star.query.get(changeset["parent_id"])
        if star is None:
            app.logger.warning("Parent Star for Oneup not found")
            oneup.parent_id = changeset["parent_id"]
        else:
            star.children.append(oneup)
        return oneup

    def update_from_changeset(self, changeset, update_sender=None, update_recipient=None):
        """Update a new Oneup object from a changeset (See Serializable.update_from_changeset). """
        modified_dt = iso8601.parse_date(changeset["modified"]).replace(tzinfo=None)
        self.modified = modified_dt
        self.set_state(changeset["state"])
        app.logger.info("Updated {} from changeset".format(self))
        # return self for consistency with Planet.update_from_changeset
        return self
class Souma(Serializable, db.Model):
"""A physical machine in the Souma network"""
__tablename__ = "souma"
_insert_required = ["id", "modified", "crypt_public", "sign_public", "starmap_id"]
id = db.Column(db.String(32), primary_key=True)
crypt_private = db.Column(db.Text)
crypt_public = db.Column(db.Text)
sign_private = db.Column(db.Text)
sign_public = db.Column(db.Text)
starmap_id = db.Column(db.String(32), db.ForeignKey('starmap.id'))
starmap = db.relationship('Starmap')
_version_string = db.Column(db.String(32), default="")
def __str__(self):
return "<Souma [{}]>".format(self.id[:6])
    def authorize(self, action, author_id=None):
        """Return True if this Souma authorizes `action` for `author_id`

        Args:
            action (String): Action to be performed (see Synapse.CHANGE_TYPES)
            author_id (String): Persona ID that wants to perform the action

        Returns:
            Boolean: True if authorized

        Note:
            Currently always returns False: Souma machines grant no
            permissions to any author.
        """
        return False
def generate_keys(self):
""" Generate new RSA keypairs for | |
Preset
:type preset: Union[str, Preset]
"""
preset_list = self.__FormatPresetList(preset)
func_list = []
for preset in preset_list:
func_list.extend([func for func in preset])
self.SelectSpectrumFunctionList(func_list)
def RegisterPreset(self, preset: Preset, is_register=True):
"""Register the presets.
:type preset: Preset
:param is_register: If True, register, if False, deregister., defaults to True
:type is_register: bool, optional
"""
prev_preset_list = self.__GetPresetList()
for preset in self.__FormatPresetList(preset):
if is_register:
self.__preset_dict[preset.GetName()] = preset
else:
del self.__preset_dict[preset.GetName()]
preset_list = self.__GetPresetList()
Event = PresetRegisterEvent if is_register else PresetDeregisterEvent
event = Event(preset_list, prev_preset_list, id=self.__id)
self.__core_mgr.SendEvent(event)
def __FormatPresetList(self, preset_list):
if isinstance(preset_list, str):
preset_list = [self.GetPreset(preset_list)]
elif isinstance(preset_list, (list, tuple)):
if all([isinstance(preset, Preset) for preset in preset_list]):
preset_list = preset_list
elif all([isinstance(preset, str) for preset in preset_list]):
preset_list = [self.GetPreset(preset) for preset in preset_list]
else:
TypeError()
elif isinstance(preset_list, Preset):
preset_list = [deepcopy(preset_list)]
else:
TypeError()
return preset_list
    def IsRegisteredPresetName(self, name: str) -> bool:
        """Returns True if the specified name has been registered.

        :param name: name of preset
        :type name: str
        :rtype: bool
        """
        # Membership test on the name -> Preset registry.
        return name in self.__preset_dict
    def OnEvent(self, event):
        """Keep cached selections and function/preset registries in sync
        with events broadcast by other components."""
        event.Skip()
        # Ignore events that this manager emitted itself.
        if event.GetId() == self.__id:
            return
        event_type = event.GetEventType()
        if event_type == wxEVT_ENCODE_FUNCTION_SELECT:
            self.__selected_encode_func = event.GetFunction()
        elif event_type == wxEVT_DECODE_FUNCTION_SELECT:
            self.__selected_decode_func = event.GetFunction()
        elif event_type == wxEVT_SPECTRUM_FUNCTION_LIST_SELECT:
            self.__selected_spectrum_func_list = event.GetFunctionList()
        elif event_type in [wxEVT_ENCODE_FUNCTION_REGISTER, wxEVT_DECODE_FUNCTION_REGISTER, wxEVT_SPECTRUM_FUNCTION_REGISTER, wxEVT_PEAK_FUNCTION_REGISTER, wxEVT_MAPPING_FUNCTION_REGISTER]:
            # Any kind of function registration: add to the function dict.
            function_list = event.GetFunctionList()
            self.__Set2FunctionDict(function_list, True)
        elif event_type in [wxEVT_ENCODE_FUNCTION_DEREGISTER, wxEVT_DECODE_FUNCTION_DEREGISTER, wxEVT_SPECTRUM_FUNCTION_DEREGISTER, wxEVT_PEAK_FUNCTION_DEREGISTER, wxEVT_MAPPING_FUNCTION_DEREGISTER]:
            # Any kind of function deregistration: remove from the function dict.
            function_list = event.GetFunctionList()
            self.__Set2FunctionDict(function_list, False)
        elif event_type == wxEVT_PRESET_REGISTER:
            preset_list = event.GetPresetList()
            for preset in preset_list:
                self.__preset_dict[preset.GetName()] = preset
        elif event_type == wxEVT_PRESET_DEREGISTER:
            preset_list = event.GetPresetList()
            for preset in preset_list:
                del self.__preset_dict[preset.GetName()]
        elif event_type == wxEVT_EXIT:
            # Persist the current selections and presets on application shutdown.
            design = (
                (SELECTED_ENCODE_FUNCTION, self.__selected_encode_func),
                (SELECTED_DECODE_FUNCTION, self.__selected_decode_func),
                (SPECTRUM_FUNCTION_PRESET_LIST, self.__GetPresetList()),
                (SELECTED_MAPPING_FUNCTION, self.__selected_mapping_func),
            )
            for key, value in design:
                self.__io_mgr.SetSetting(key, value)
class ProjectManager(Singleton):
    """Manager for project

    Owns the live Project instance, tracks its saved/unsaved state, and
    mediates the project lifecycle (new/open/save) between the UI, the
    other managers, and the IO layer.
    """
    def __init__(self, *args, **kw):
        """Default constructor

        :param core_manager: (keyword) central manager used to reach the
            other managers and to broadcast events
        :param io_manager: (keyword) manager performing the actual file IO
        """
        super().__init__()
        self.__core_mgr = kw['core_manager']
        self.__io_mgr = kw['io_manager']
        self.__id = NewIdRef()
        # None until the first project is created/opened, then True/False.
        self.__is_saved = None
        self.__project = Project()

    def GetProject(self) -> Project:
        """Get project

        :rtype: Project
        """
        return self.__project

    def NewProject(self, data_list: Iterable[DataContainer]):
        """Create a new project.

        The new project takes over the currently selected peak type.

        :type data_list: Iterable[DataContainer]
        :raises TypeError: if data_list contains no valid DataContainer elements
        """
        if not HasValidElement(data_list, DataContainer):
            raise TypeError()
        peak_type = self.__core_mgr.Get(PEAK_MANAGER).GetSelectedPeakType()
        self.__project = Project()
        self.__project.SetDataList(data_list)
        self.__project.SetPeakType(peak_type)
        self.__SetIsProjectSaved(False)
        event = ProjectNewEvent(data_list, peak_type, id=self.__id)
        self.__core_mgr.SendEvent(event)

    def OpenProject(self, path: str):
        """Load an existing project.

        Asks for confirmation first when unsaved changes would be lost.

        :type path: str
        """
        if self.IsProjectStarted() and not self.IsProjectSaved():
            with MessageDialog(None, 'Project changes will not be saved.', style=OK | CANCEL | ICON_INFORMATION | CENTRE) as dialog:
                if dialog.ShowModal() == ID_CANCEL:
                    return
        project = self.__io_mgr.OpenProject(path)
        self.__project = project
        self.__SetIsProjectSaved(True)
        # Re-read the fields from the loaded project to broadcast them.
        path = project.GetPath()
        note = project.GetNote()
        peak_type = project.GetPeakType()
        data_list = project.GetDataList()
        experimental_date = project.GetExperimentalDate()
        event = ProjectOpenEvent(data_list, path, peak_type, note, experimental_date, id=self.__id)
        self.__core_mgr.SendEvent(event)

    def SaveProject(self, project: Project = None) -> bool:
        """Save the project.

        Does nothing when no project is started or it is already saved.
        NOTE(review): annotated ``-> bool`` but no value is explicitly
        returned on any path — confirm the intended return contract.

        :param project: If project is None, Save the managed by this class. Defaults to None
        :type project: Project, optional
        """
        if not self.IsProjectStarted() or self.IsProjectSaved():
            return
        if project is None:
            project = self.GetProject()
        if not isinstance(project, Project):
            raise TypeError()
        path = project.GetPath()
        data_list = project.GetDataList()
        note = project.GetNote()
        peak_type = project.GetPeakType()
        experimental_date = project.GetExperimentalDate()
        # Keep an independent snapshot as the managed project.
        self.__project = deepcopy(project)
        event = ProjectSaveEvent(path, data_list, peak_type, note, experimental_date, id=self.__id)
        self.__core_mgr.SendEvent(event)
        self.__io_mgr.SaveProject(self.__project)
        self.__SetIsProjectSaved(True)

    def SetProjectMemo(self, experimental_date: date, note: str):
        """Set the memo for the project.

        NOTE(review): the event below is created without ``id=self.__id``,
        so this manager's own OnEvent also processes it (harmlessly
        re-applying the same values) — confirm whether that is intended.

        :param experimental_date: Date of Experiment
        :type experimental_date: date
        :param note: Notes on the project
        :type note: str
        """
        prev_date = self.__project.GetExperimentalDate()
        prev_note = self.__project.GetNote()
        self.__project.SetExperimentalDate(experimental_date)
        self.__project.SetNote(note)
        self.__SetIsProjectSaved(False)
        event = ProjectMemoChangeEvent(experimental_date, prev_date, note, prev_note)
        self.__core_mgr.SendEvent(event)

    def IsProjectStarted(self) -> bool:
        """Returns True if the project has been started

        A project counts as started once it holds any data.

        :rtype: bool
        """
        return len(self.__project.GetDataList()) != 0

    def IsProjectSaved(self) -> bool:
        """Returns True if the project has saved the most recent state.

        :rtype: bool
        """
        return self.__is_saved

    def AskProjectSaving(self) -> bool:
        """Ask if the project needs to be saved.

        :return: Whether the operation is complete or not.
        :rtype: bool
        """
        if self.IsProjectStarted() and not self.IsProjectSaved():
            with SaveCheckDialog(None, title='Info') as dialog:
                dialog.Center()
                id_ = dialog.ShowModal()
                if id_ == ID_CANCEL:
                    # User aborted the whole operation.
                    return False
                elif id_ == ID_SAVE:
                    is_saved = self.__core_mgr.Get(MENUBAR_MANAGER).ExecuteMenuFunction(SAVE_MENU_ITEM)
                    if not is_saved:
                        return False
                    self.SaveProject()
        return True

    def GetDefaultProjectPath(self) -> str:
        """Returns the default project name.

        :rtype: str
        """
        return join(getcwd(), NEW_PROJECT_NAME)

    def __SetIsProjectSaved(self, is_saved):
        # Update the saved flag and mirror the project name in the window title.
        self.__is_saved = is_saved
        name = self.__project.GetFileName()
        self.__core_mgr.SetTitle(name)

    def OnEvent(self, event):
        """Keep the managed Project in sync with project-related events."""
        event.Skip()
        # Ignore events this manager emitted itself.
        if event.GetId() == self.__id:
            return
        event_type = event.GetEventType()
        if event_type == wxEVT_PROJECT_NEW:
            data_list = event.GetDataList()
            self.__project.SetDataList(data_list)
            peak_type = event.GetPeakType()
            self.__project.SetPeakType(peak_type)
            self.__SetIsProjectSaved(False)
        elif event_type == wxEVT_PROJECT_OPEN:
            data_list = event.GetDataList()
            self.__project.SetDataList(data_list)
            note = event.GetNote()
            self.__project.SetNote(note)
            peak_type = event.GetPeakType()
            self.__project.SetPeakType(peak_type)
            self.__SetIsProjectSaved(True)
        elif event_type == wxEVT_PROJECT_SAVE:
            data_list = event.GetDataList()
            self.__project.SetDataList(data_list)
            note = event.GetNote()
            self.__project.SetNote(note)
            peak_type = event.GetPeakType()
            self.__project.SetPeakType(peak_type)
            # (translated) NOTE: this ends up running before the event has
            # been delivered to the panels.
            self.__io_mgr.SaveProject(self.__project)
            self.__SetIsProjectSaved(True)
        elif event_type == wxEVT_PROJECT_MEMO_CHANGE:
            # NOTE(review): GetExperimentalData (sic) — presumably returns the
            # experimental date; local `date` shadows the datetime.date import.
            date = event.GetExperimentalData()
            note = event.GetNote()
            self.__project.SetExperimentalDate(date)
            self.__project.SetNote(note)
            self.__SetIsProjectSaved(False)
        elif event_type == wxEVT_DATA_CONTENTS_CHANGE:
            # Any data edit marks the project dirty.
            self.__SetIsProjectSaved(False)
class PeakManager(Singleton):
    """Manager related to peak.

    Keeps the registry of available PeakType implementations and tracks the
    peak type selected for the current project.
    """
    def __init__(self, *args, **kw):
        """Default constructor

        :param core_manager: (keyword) central manager for reaching other managers
        :param io_manager: (keyword) manager used to persist settings
        """
        super().__init__()
        self.__core_mgr = kw['core_manager']
        self.__io_mgr = kw['io_manager']
        self.__id = NewIdRef()
        # name (str) -> registered PeakType instance
        self.__peak_type_dict = {}

    def GetPeakType(self, name: str) -> PeakType:
        """Get the peak type specified by name. This value is deepcopied.

        Returns None when `name` is not registered.

        :param name: class name of peak type.
        :type name: str
        :rtype: PeakType
        """
        return deepcopy(self.__peak_type_dict.get(name))

    def SelectPeakType(self, peak_type: Union[str, PeakType]):
        """Select Peak Type

        :param peak_type: a PeakType instance or the name of a registered one
        :type peak_type: Union[str, PeakType]
        :raises TypeError: if the argument resolves to something other than a PeakType
        """
        if peak_type is None:
            # NOTE(review): silently ignores None with a bare print — consider
            # proper logging or raising instead.
            print('select peak_type is None')
            return
        if isinstance(peak_type, str):
            peak_type = self.GetPeakType(peak_type)
        if not isinstance(peak_type, PeakType):
            raise TypeError()
        prev_peak_type = self.__GetProject().GetPeakType()
        self.__GetProject().SetPeakType(peak_type)
        self.__core_mgr.Get(MENUBAR_MANAGER).CheckMenuItem(peak_type.GetName())
        event = PeakTypeChangeEvent(peak_type, prev_peak_type, self.__id)
        # Notify every live spectrum-function container before broadcasting.
        for spectrum_func_instance in SpectrumFunctionContainerBase._instance_list:
            spectrum_func_instance.OnPeakTypeChanged(event)
        self.__core_mgr.SendEvent(event)

    def GetSelectedPeakType(self) -> PeakType:
        """Get selected type of peak.

        :rtype: PeakType
        """
        return self.__GetProject().GetPeakType()

    def RegisterPeakTypeList(self, peak_type_list: Union[PeakType, Iterable[PeakType]]):
        """Register the peak type.

        Names that are already registered are kept; duplicates are ignored.

        :type peak_type_list: Union[PeakType, Iterable[PeakType]]
        :raises TypeError: if any element is not a PeakType
        """
        if not hasattr(peak_type_list, '__iter__'):
            peak_type_list = [peak_type_list]
        if any(not isinstance(peak_type, PeakType) for peak_type in peak_type_list):
            raise TypeError()
        prev_peak_type_list = list(self.__peak_type_dict.values())
        for peak_type in peak_type_list:
            if (peak_name := peak_type.GetName()) not in self.__peak_type_dict:
                self.__peak_type_dict[peak_name] = peak_type
        peak_type_list = list(self.__peak_type_dict.values())
        event = PeakTypeRegisterEvent(peak_type_list, prev_peak_type_list, self.__id)
        self.__core_mgr.SendEvent(event)

    def GetPeakTypeNames(self) -> Tuple[str, ...]:
        """Get a list of registered peak type names.

        :rtype: Tuple[str, ...]
        """
        return tuple(self.__peak_type_dict.keys())

    def GetPeakTypeList(self) -> Tuple[PeakType, ...]:
        """Get a list of registered peak types.

        :rtype: Tuple[PeakType, ...]
        """
        return tuple(self.__peak_type_dict.values())

    def __GetProject(self):
        # Current project held by the ProjectManager singleton.
        return self.__core_mgr.Get(PROJECT_MANAGER).GetProject()

    def OnEvent(self, event):
        """Handle application events relevant to peak management.

        NOTE(review): unlike the other managers' OnEvent handlers, this one
        does not call event.Skip() first — confirm whether stopping event
        propagation here is intentional.
        """
        if event.GetId() == self.__id:
            return
        event_type = event.GetEventType()
        if event_type == wxEVT_PROJECT_NEW:
            # Re-broadcast the current peak type so panels sync to the new project.
            peak_type = self.GetSelectedPeakType()
            event = PeakTypeChangeEvent(peak_type, peak_type, self.__id)
            self.__core_mgr.SendEvent(event)
        elif event_type == wxEVT_PEAK_TYPE_CHANGE:
            peak_type = event.GetPeakType()
            self.__GetProject().SetPeakType(peak_type)
            self.__core_mgr.Get(MENUBAR_MANAGER).CheckMenuItem(peak_type.GetName())
            for spectrum_func_instance in SpectrumFunctionContainerBase._instance_list:
                spectrum_func_instance.OnPeakTypeChanged(event)
        elif event_type == wxEVT_PEAK_TYPE_REGISTER:
            peak_type_list = event.GetPeakTypeList()
            for peak_type in peak_type_list:
                if (peak_name := peak_type.GetName()) not in self.__peak_type_dict:
                    self.__peak_type_dict[peak_name] = peak_type
        elif event_type == wxEVT_EXIT:
            # Persist the selected peak type on application shutdown.
            design = (
                (PEAK_TYPE, self.GetSelectedPeakType()),
            )
            for key, value in design:
                self.__io_mgr.SetSetting(key, value)
class DataManager(Singleton):
"""Manage references and selections about data.
"""
def __init__(self, *args, **kw):
"""Default constructor
"""
self.__core_mgr = kw['core_manager']
self.__id = NewIdRef()
self.__main_selection = deque([None, None], 2)
self.__selection = deque([set(), set()], 2)
self.__selected_recipe = Recipe()
    def __GetProject(self):
        # Convenience accessor: the live Project held by the ProjectManager.
        return self.__core_mgr.Get(PROJECT_MANAGER).GetProject()
def GetData(self, index: int) -> DataContainer:
"""Returns the data specified by the index.
:type index: int
:rtype: DataContainer
"""
return self.GetDataList()[index]
    def SetData(self, index: int, data: DataContainer):
        """Set to the data specified by the index.

        Delegates to SetDataList so validation and change events are
        handled in one place.

        :type index: int
        :type data: DataContainer
        """
        self.SetDataList([index], [data])
def GetDataList(self, index_list: Iterable[int] = None) -> List[DataContainer]:
"""Returns the data specified in the list of indexes. If index_list is None, returns the all data list. This value is deepcopied.
:type index_list: Iterable[int], optional
:rtype: List[DataContainer]
"""
data_list = self.__GetDataList()
data_list = data_list if index_list is None else [data_list[index] for index in index_list]
return deepcopy(data_list)
def SetDataList(self, index_list: Iterable[int], data_list: Iterable[DataContainer]):
"""Sets the list of data corresponding to the specified list of indexes.
:type index_list: Iterable[int]
:type data_list: Iterable[DataContainer]
"""
if any([not isinstance(data, DataContainer) for data in data_list]):
raise TypeError()
project = self.__GetProject()
project.SetDataList(data_list, index_list)
x_changed_list = y_changed_list = bg_changed_list = peaks_changed_list = recipe_changed_list = msg_changed_list = [True] * len(data_list)
event = DataContentsChangeEvent(index_list, data_list, x_changed_list, y_changed_list, bg_changed_list, peaks_changed_list, recipe_changed_list, msg_changed_list, id=self.__id)
self.__core_mgr.SendEvent(event)
    def __GetDataList(self):
        # Live (non-copied) data list of the current project.
        return self.__GetProject().GetDataList()
    def GetX(self, index: int) -> ndarray:
        """Returns the x data of spectrum for a specified index.

        Note: unlike GetData/GetDataList this returns a reference into the
        live project data, not a deep copy.

        :type index: int
        :rtype: ndarray
        """
        return self.__GetDataList()[index].X
def GetY(self, index: int) -> ndarray:
"""Returns the y data of spectrum for a specified index.
:type | |
0)
self.assertEqual(self.op2.get_strategy_count_by_price_type('close'), 3)
self.assertEqual(self.op2.get_strategy_count_by_price_type('open'), 0)
def test_property_strategy_names(self):
""" test property strategy_ids"""
op = qt.Operator('dma')
self.assertIsInstance(op.strategy_ids, list)
names = op.strategy_ids[0]
print(f'names are {names}')
self.assertEqual(names, 'dma')
op = qt.Operator('dma, macd, trix, cdl')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'cdl')
op = qt.Operator('dma, macd, trix, dma, dma')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'dma_1')
self.assertEqual(op.strategy_ids[4], 'dma_2')
    def test_property_strategy_blenders(self):
        """ test property strategy blenders including property setter,
        and test the method get_blender()"""
        print(f'------- Test property strategy blenders ---------')
        op = qt.Operator()
        self.assertIsInstance(op.strategy_blenders, dict)
        self.assertIsInstance(op.signal_type, str)
        self.assertEqual(op.strategy_blenders, {})
        self.assertEqual(op.signal_type, 'pt')
        # test adding blender to empty operator
        op.strategy_blenders = '1 + 2'
        op.signal_type = 'proportion signal'
        self.assertEqual(op.strategy_blenders, {})
        self.assertEqual(op.signal_type, 'ps')
        # with one strategy, the blender string is parsed into a token list
        # keyed by the strategy's price type
        op.add_strategy('dma')
        op.strategy_blenders = '1+2'
        self.assertEqual(op.strategy_blenders, {'close': ['+', '2', '1']})
        op.clear_strategies()
        self.assertEqual(op.strategy_blenders, {})
        # blenders are stored per price type; set_blender targets one type
        op.add_strategies('dma, trix, macd, dma')
        op.set_parameter('dma', price_type='open')
        op.set_parameter('trix', price_type='high')
        op.set_blender('open', '1+2')
        blender_open = op.get_blender('open')
        blender_close = op.get_blender('close')
        blender_high = op.get_blender('high')
        self.assertEqual(blender_open, ['+', '2', '1'])
        self.assertEqual(blender_close, None)
        self.assertEqual(blender_high, None)
        # an unknown price type ('abc') is silently ignored
        op.set_blender('open', '1+2+3')
        op.set_blender('abc', '1+2+3')
        blender_open = op.get_blender('open')
        blender_close = op.get_blender('close')
        blender_high = op.get_blender('high')
        blender_abc = op.get_blender('abc')
        self.assertEqual(op.strategy_blenders, {'open': ['+', '3', '+', '2', '1']})
        self.assertEqual(blender_open, ['+', '3', '+', '2', '1'])
        self.assertEqual(blender_close, None)
        self.assertEqual(blender_high, None)
        self.assertEqual(blender_abc, None)
        # a non-string blender resets the entry to an empty list
        op.set_blender('open', 123)
        blender_open = op.get_blender('open')
        self.assertEqual(blender_open, [])
        # price_type=None applies the blender to every price type
        op.set_blender(None, '1+1')
        blender_open = op.get_blender('open')
        blender_close = op.get_blender('close')
        blender_high = op.get_blender('high')
        self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
        self.assertEqual(op.get_blender(), {'close': ['+', '1', '1'],
                                            'open': ['+', '1', '1'],
                                            'high': ['+', '1', '1']})
        self.assertEqual(blender_open, ['+', '1', '1'])
        self.assertEqual(blender_close, ['+', '1', '1'])
        self.assertEqual(blender_high, ['+', '1', '1'])
        # a short list of blenders is cycled over the price types
        op.set_blender(None, ['1+1', '3+4'])
        blender_open = op.get_blender('open')
        blender_close = op.get_blender('close')
        blender_high = op.get_blender('high')
        self.assertEqual(blender_open, ['+', '4', '3'])
        self.assertEqual(blender_close, ['+', '1', '1'])
        self.assertEqual(blender_high, ['+', '4', '3'])
        self.assertEqual(op.view_blender('open'), '3+4')
        self.assertEqual(op.view_blender('close'), '1+1')
        self.assertEqual(op.view_blender('high'), '3+4')
        # the property setter assigns blenders per price type in sorted order
        op.strategy_blenders = (['1+2', '2*3', '1+4'])
        blender_open = op.get_blender('open')
        blender_close = op.get_blender('close')
        blender_high = op.get_blender('high')
        self.assertEqual(blender_open, ['+', '4', '1'])
        self.assertEqual(blender_close, ['+', '2', '1'])
        self.assertEqual(blender_high, ['*', '3', '2'])
        self.assertEqual(op.view_blender('open'), '1+4')
        self.assertEqual(op.view_blender('close'), '1+2')
        self.assertEqual(op.view_blender('high'), '2*3')
        # test error inputs:
        # wrong type of price_type
        self.assertRaises(TypeError, op.set_blender, 1, '1+3')
        # price_type not found, no change is made
        op.set_blender('volume', '1+3')
        blender_open = op.get_blender('open')
        blender_close = op.get_blender('close')
        blender_high = op.get_blender('high')
        self.assertEqual(blender_open, ['+', '4', '1'])
        self.assertEqual(blender_close, ['+', '2', '1'])
        self.assertEqual(blender_high, ['*', '3', '2'])
        # price_type not valid, no change is made
        op.set_blender('closee', '1+2')
        blender_open = op.get_blender('open')
        blender_close = op.get_blender('close')
        blender_high = op.get_blender('high')
        self.assertEqual(blender_open, ['+', '4', '1'])
        self.assertEqual(blender_close, ['+', '2', '1'])
        self.assertEqual(blender_high, ['*', '3', '2'])
        # wrong type of blender, set to empty list
        op.set_blender('open', 55)
        blender_open = op.get_blender('open')
        blender_close = op.get_blender('close')
        blender_high = op.get_blender('high')
        self.assertEqual(blender_open, [])
        self.assertEqual(blender_close, ['+', '2', '1'])
        self.assertEqual(blender_high, ['*', '3', '2'])
        # wrong type of blender, set to empty list
        op.set_blender('close', ['1+2'])
        blender_open = op.get_blender('open')
        blender_close = op.get_blender('close')
        blender_high = op.get_blender('high')
        self.assertEqual(blender_open, [])
        self.assertEqual(blender_close, [])
        self.assertEqual(blender_high, ['*', '3', '2'])
        # can't parse blender, set to empty list
        op.set_blender('high', 'a+bc')
        blender_open = op.get_blender('open')
        blender_close = op.get_blender('close')
        blender_high = op.get_blender('high')
        self.assertEqual(blender_open, [])
        self.assertEqual(blender_close, [])
        self.assertEqual(blender_high, [])
def test_property_singal_type(self):
""" test property signal_type"""
op = qt.Operator()
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'pt')
op = qt.Operator(signal_type='ps')
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='PS')
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='proportion signal')
self.assertEqual(op.signal_type, 'ps')
print(f'"pt" will be the default type if wrong value is given')
op = qt.Operator(signal_type='wrong value')
self.assertEqual(op.signal_type, 'pt')
print(f'test signal_type.setter')
op.signal_type = 'ps'
self.assertEqual(op.signal_type, 'ps')
print(f'test error raising')
self.assertRaises(TypeError, setattr, op, 'signal_type', 123)
self.assertRaises(ValueError, setattr, op, 'signal_type', 'wrong value')
def test_property_op_data_types(self):
""" test property op_data_types"""
op = qt.Operator()
self.assertIsInstance(op.op_data_types, list)
self.assertEqual(op.op_data_types, [])
op = qt.Operator('macd, dma, trix')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
op = qt.Operator('macd, cdl')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
op.add_strategy('dma')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
def test_property_op_data_type_count(self):
""" test property op_data_type_count"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_count, int)
self.assertEqual(op.op_data_type_count, 0)
op = qt.Operator('macd, dma, trix')
dtn = op.op_data_type_count
self.assertEqual(dtn, 1)
op = qt.Operator('macd, cdl')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
op.add_strategy('dma')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
def test_property_op_data_freq(self):
""" test property op_data_freq"""
op = qt.Operator()
self.assertIsInstance(op.op_data_freq, str)
self.assertEqual(len(op.op_data_freq), 0)
self.assertEqual(op.op_data_freq, '')
op = qt.Operator('macd, dma, trix')
dtf = op.op_data_freq
self.assertIsInstance(dtf, str)
self.assertEqual(dtf[0], 'd')
op.set_parameter('macd', data_freq='m')
dtf = op.op_data_freq
self.assertIsInstance(dtf, list)
self.assertEqual(len(dtf), 2)
self.assertEqual(dtf[0], 'd')
self.assertEqual(dtf[1], 'm')
def test_property_bt_price_types(self):
""" test property bt_price_types"""
print('------test property bt_price_tyeps-------')
op = qt.Operator()
self.assertIsInstance(op.bt_price_types, list)
self.assertEqual(len(op.bt_price_types), 0)
self.assertEqual(op.bt_price_types, [])
op = qt.Operator('macd, dma, trix')
btp = op.bt_price_types
self.assertIsInstance(btp, list)
self.assertEqual(btp[0], 'close')
op.set_parameter('macd', price_type='open')
btp = op.bt_price_types
btpc = op.bt_price_type_count
print(f'price_types are \n{btp}')
self.assertIsInstance(btp, list)
self.assertEqual(len(btp), 2)
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.add_strategies(['dma', 'macd'])
op.set_parameter('dma_1', price_type='high')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'high')
self.assertEqual(btp[2], 'open')
self.assertEqual(btpc, 3)
op.remove_strategy('dma_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.remove_strategy('macd_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
def test_property_op_data_type_list(self):
""" test property op_data_type_list"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_list, list)
self.assertEqual(len(op.op_data_type_list), 0)
self.assertEqual(op.op_data_type_list, [])
op = qt.Operator('macd, dma, trix, cdl')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(ohd[0], ['close'])
op.set_parameter('macd', data_types='open, close')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(len(ohd), 4)
self.assertEqual(ohd[0], ['open', 'close'])
self.assertEqual(ohd[1], ['close'])
self.assertEqual(ohd[2], ['close'])
self.assertEqual(ohd[3], ['open', 'high', 'low', 'close'])
def test_property_op_history_data(self):
""" Test this important function to get operation history data that shall be used in
signal generation
these data are stored in list of nd-arrays, each ndarray represents the data
that is needed for each and every strategy
"""
print(f'------- Test getting operation history data ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.op_history_data, {})
self.assertEqual(op.signal_type, 'pt')
def test_property_opt_space_par(self):
""" test property opt_space_par"""
print(f'-----test property opt_space_par--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_space_par, tuple)
self.assertIsInstance(op.opt_space_par[0], list)
self.assertIsInstance(op.opt_space_par[1], list)
self.assertEqual(len(op.opt_space_par), 2)
self.assertEqual(op.opt_space_par, ([], []))
op = qt.Operator('macd, dma, trix, cdl')
osp = op.opt_space_par
print(f'before setting opt_tags opt_space_par is empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(osp[0], [])
self.assertEqual(osp[1], [])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
osp = op.opt_space_par
print(f'after setting opt_tags opt_space_par is not empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(len(osp), 2)
self.assertIsInstance(osp[0], list)
self.assertIsInstance(osp[1], list)
self.assertEqual(len(osp[0]), 6)
self.assertEqual(len(osp[1]), 6)
self.assertEqual(osp[0], [(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
self.assertEqual(osp[1], ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
def test_property_opt_types(self):
""" test property opt_tags"""
print(f'-----test property opt_tags--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_tags, list)
self.assertEqual(len(op.opt_tags), 0)
self.assertEqual(op.opt_tags, [])
op = qt.Operator('macd, dma, trix, cdl')
otp = op.opt_tags
print(f'before setting opt_tags opt_space_par is empty:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(otp, [0, 0, 0, 0])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
otp = op.opt_tags
print(f'after setting opt_tags opt_space_par is not empty:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(len(otp), 4)
self.assertEqual(otp, [1, 1, 0, 0])
def test_property_max_window_length(self):
""" test property max_window_length"""
print(f'-----test property max window length--------:\n')
op = qt.Operator()
self.assertIsInstance(op.max_window_length, int)
self.assertEqual(op.max_window_length, 0)
op = qt.Operator('macd, dma, trix, cdl')
mwl = op.max_window_length
print(f'before setting window_length the value is 270:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 270)
op.set_parameter('macd', window_length=300)
op.set_parameter('dma', window_length=350)
mwl = op.max_window_length
print(f'after setting window_length the value is new set value:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 350)
def test_property_bt_price_type_count(self):
""" test property bt_price_type_count"""
print(f'-----test property bt_price_type_count--------:\n')
op = qt.Operator()
self.assertIsInstance(op.bt_price_type_count, int)
self.assertEqual(op.bt_price_type_count, 0)
op = qt.Operator('macd, dma, trix, cdl')
otp = op.bt_price_type_count
print(f'before setting price_type the price count is 1:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 1)
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='open')
otp = op.bt_price_type_count
print(f'after setting price_type the price type count is 2:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 2)
def test_property_set(self):
""" test all property setters:
setting following properties:
- strategy_blenders
- signal_type
other properties can not be set"""
print(f'------- Test setting properties ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator('macd, dma, trix, cdl')
# TODO: 修改set_parameter(),使下面的用法成立
# a_to_sell.set_parameter('dma, cdl', price_type='open')
op.set_parameter('dma', price_type='open')
op.set_parameter('cdl', price_type='open')
sb = op.strategy_blenders
st = op.signal_type
self.assertIsInstance(sb, dict)
print(f'before setting: strategy_blenders={sb}')
self.assertEqual(sb, {})
op.strategy_blenders = '1+2 * 3'
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '*', '3', '2', '1'],
'open': ['+', '*', '3', '2', '1']})
op.strategy_blenders = ['1+2', '3-4']
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '2', | |
# coding: utf-8
# # Latent Dirichlet Allocation for Text Data
#
# In this assignment you will
#
# * apply standard preprocessing techniques on Wikipedia text data
# * use GraphLab Create to fit a Latent Dirichlet Allocation (LDA) model
# * explore and interpret the results, including topic keywords and topic assignments for documents
#
# Recall that a major feature distinguishing the LDA model from our previously explored methods is the notion of *mixed membership*. Throughout the course so far, our models have assumed that each data point belongs to a single cluster. k-means determines membership simply by shortest distance to the cluster center, and Gaussian mixture models suppose that each data point is drawn from one of their component mixture distributions. In many cases, though, it is more realistic to think of data as genuinely belonging to more than one cluster or category - for example, if we have a model for text data that includes both "Politics" and "World News" categories, then an article about a recent meeting of the United Nations should have membership in both categories rather than being forced into just one.
#
# With this in mind, we will use GraphLab Create tools to fit an LDA model to a corpus of Wikipedia articles and examine the results to analyze the impact of a mixed membership approach. In particular, we want to identify the topics discovered by the model in terms of their most important words, and we want to use the model to predict the topic membership distribution for a given document.
# **Note to Amazon EC2 users**: To conserve memory, make sure to stop all the other notebooks before running this notebook.
# ## Text Data Preprocessing
# We'll start by importing our familiar Wikipedia dataset.
#
# The following code block will check if you have the correct version of GraphLab Create. Any version later than 1.8.5 will do. To upgrade, read [this page](https://turi.com/download/upgrade-graphlab-create.html).
# In[2]:
# Limit OpenMP threading before GraphLab spins up its worker processes.
import os
os.environ["OMP_NUM_THREADS"] = "1"
import graphlab as gl
# Warm-up call to force lazy engine start-up.
# BUG FIX: the module is imported under the alias `gl`; the previous
# `graphlab.SArray(...)` raised NameError because `graphlab` is not bound.
gl.SArray(range(1000)).apply(lambda x: x)
# In[3]:
import numpy as np
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')  # notebook-only: render plots inline
'''Check GraphLab Create version'''
from distutils.version import StrictVersion
# Any GraphLab Create release later than 1.8.5 works for this assignment.
assert (StrictVersion(gl.version) >= StrictVersion('1.8.5')), 'GraphLab Create must be version 1.8.5 or later.'
# In[4]:
# import wiki data
wiki = gl.SFrame('people_wiki.gl/')
wiki
# In the original data, each Wikipedia article is represented by a URI, a name, and a string containing the entire text of the article. Recall from the video lectures that LDA requires documents to be represented as a _bag of words_, which ignores word ordering in the document but retains information on how many times each word appears. As we have seen in our previous encounters with text data, words such as 'the', 'a', or 'and' are by far the most frequent, but they appear so commonly in the English language that they tell us almost nothing about how similar or dissimilar two documents might be.
#
# Therefore, before we train our LDA model, we will preprocess the Wikipedia data in two steps: first, we will create a bag of words representation for each article, and then we will remove the common words that don't help us to distinguish between documents. For both of these tasks we can use pre-implemented tools from GraphLab Create:
# In[7]:
wiki_docs = gl.text_analytics.count_words(wiki['text'])
wiki_docs = wiki_docs.dict_trim_by_keys(gl.text_analytics.stopwords(), exclude=True)
# ## Model fitting and interpretation
# In the video lectures we saw that Gibbs sampling can be used to perform inference in the LDA model. In this assignment we will use a GraphLab Create method to learn the topic model for our Wikipedia data, and our main emphasis will be on interpreting the results. We'll begin by creating the topic model using create() from GraphLab Create's topic_model module.
#
# Note: This may take several minutes to run.
# In[8]:
topic_model = gl.topic_model.create(wiki_docs, num_topics=10, num_iterations=200)
# GraphLab provides a useful summary of the model we have fitted, including the hyperparameter settings for alpha, gamma (note that GraphLab Create calls this parameter beta), and K (the number of topics); the structure of the output data; and some useful methods for understanding the results.
# In[9]:
topic_model
# It is certainly useful to have pre-implemented methods available for LDA, but as with our previous methods for clustering and retrieval, implementing and fitting the model gets us only halfway towards our objective. We now need to analyze the fitted model to understand what it has done with our data and whether it will be useful as a document classification system. This can be a challenging task in itself, particularly when the model that we use is complex. We will begin by outlining a sequence of objectives that will help us understand our model in detail. In particular, we will
#
# * get the top words in each topic and use these to identify topic themes
# * predict topic distributions for some example documents
# * compare the quality of LDA "nearest neighbors" to the NN output from the first assignment
# * understand the role of model hyperparameters alpha and gamma
# ## Load a fitted topic model
# The method used to fit the LDA model is a _randomized algorithm_, which means that it involves steps that are random; in this case, the randomness comes from Gibbs sampling, as discussed in the LDA video lectures. Because of these random steps, the algorithm will be expected to yield slightly different output for different runs on the same data - note that this is different from previously seen algorithms such as k-means or EM, which will always produce the same results given the same input and initialization.
#
# It is important to understand that variation in the results is a fundamental feature of randomized methods. However, in the context of this assignment this variation makes it difficult to evaluate the correctness of your analysis, so we will load and analyze a pre-trained model.
#
# We recommend that you spend some time exploring your own fitted topic model and compare our analysis of the pre-trained model to the same analysis applied to the model you trained above.
# In[10]:
topic_model = gl.load_model('lda_assignment_topic_model')
# # Identifying topic themes by top words
#
# We'll start by trying to identify the topics learned by our model with some major themes. As a preliminary check on the results of applying this method, it is reasonable to hope that the model has been able to learn topics that correspond to recognizable categories. In order to do this, we must first recall what exactly a 'topic' is in the context of LDA.
#
# In the video lectures on LDA we learned that a topic is a probability distribution over words in the vocabulary; that is, each topic assigns a particular probability to every one of the unique words that appears in our data. Different topics will assign different probabilities to the same word: for instance, a topic that ends up describing science and technology articles might place more probability on the word 'university' than a topic that describes sports or politics. Looking at the highest probability words in each topic will thus give us a sense of its major themes. Ideally we would find that each topic is identifiable with some clear theme _and_ that all the topics are relatively distinct.
#
# We can use the GraphLab Create function get_topics() to view the top words (along with their associated probabilities) from each topic.
#
# __Quiz Question:__ Identify the top 3 most probable words for the first topic.
# In[11]:
topic_model.get_topics(num_words=50)
# __Quiz Question:__ What is the sum of the probabilities assigned to the top 50 words in the 3rd topic?
# In[12]:
sum(topic_model.get_topics([2], num_words=50)['score'])
# Let's look at the top 10 words for each topic to see if we can identify any themes:
# In[14]:
[x['words'] for x in topic_model.get_topics(output_type='topic_words', num_words=10)]
# We propose the following themes for each topic:
#
# - topic 0: Science and research
# - topic 1: Team sports
# | |
select_ex: select_text
tool_with_text_input:
tool_id: param_text_option
in:
text_param: select_text
"""
)
with self.dataset_populator.test_history() as history_id:
run_workflow = self._download_workflow(workflow_id, style="run", history_id=history_id)
options = run_workflow["steps"][0]["inputs"][0]["options"]
assert len(options) == 5
assert options[0] == ["Ex1", "--ex1", False]
@skip_without_tool("random_lines1")
def test_run_replace_params_by_tool(self):
    """Override a tool parameter for every step of a given tool id via the
    ``parameters`` request field keyed by tool id."""
    workflow_request, history_id, workflow_id = self._setup_random_x2_workflow("test_for_replace_tool_params")
    workflow_request["parameters"] = dumps(dict(random_lines1=dict(num_lines=5)))
    self.workflow_populator.invoke_workflow_and_wait(workflow_id, request=workflow_request)
    # Would be 8 and 6 without modification
    self.__assert_lines_hid_line_count_is(history_id, 2, 5)
    self.__assert_lines_hid_line_count_is(history_id, 3, 5)
@skip_without_tool("random_lines1")
def test_run_replace_params_by_uuid(self):
    """Override tool parameters per step, keyed by each step's UUID
    (two different num_lines values for the two random_lines1 steps)."""
    workflow_request, history_id, workflow_id = self._setup_random_x2_workflow("test_for_replace_")
    workflow_request["parameters"] = dumps(
        {
            "58dffcc9-bcb7-4117-a0e1-61513524b3b1": dict(num_lines=4),
            "58dffcc9-bcb7-4117-a0e1-61513524b3b2": dict(num_lines=3),
        }
    )
    self.workflow_populator.invoke_workflow_and_wait(workflow_id, request=workflow_request)
    # Would be 8 and 6 without modification
    self.__assert_lines_hid_line_count_is(history_id, 2, 4)
    self.__assert_lines_hid_line_count_is(history_id, 3, 3)
@skip_without_tool("cat1")
@skip_without_tool("addValue")
def test_run_batch(self):
    """Invoke a workflow in batch mode: step 0's input maps over four HDAs
    (one invocation per dataset) while step 1's input stays fixed.

    BUG FIX: the batch ``values`` entries for hda3/hda4 reused
    ``hda2.get("hid")`` (copy-paste error); each entry now derives id and
    hid from its own dataset.
    """
    workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_batch")
    workflow_id = self.workflow_populator.create_workflow(workflow)
    with self.dataset_populator.test_history() as history_id:
        hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3", wait=True)
        hda2 = self.dataset_populator.new_dataset(history_id, content="4 5 6", wait=True)
        hda3 = self.dataset_populator.new_dataset(history_id, content="7 8 9", wait=True)
        hda4 = self.dataset_populator.new_dataset(history_id, content="10 11 12", wait=True)
        parameters = {
            "0": {
                "input": {
                    "batch": True,
                    # Build each entry from its own HDA so id/hid always agree.
                    "values": [
                        {"id": hda.get("id"), "hid": hda.get("hid"), "src": "hda"}
                        for hda in (hda1, hda2, hda3, hda4)
                    ],
                }
            },
            "1": {
                "input": {"batch": False, "values": [{"id": hda1.get("id"), "hid": hda1.get("hid"), "src": "hda"}]},
                "exp": "2",
            },
        }
        workflow_request = {
            "history_id": history_id,
            "batch": True,
            "parameters_normalized": True,
            "parameters": dumps(parameters),
        }
        invocation_response = self._post(f"workflows/{workflow_id}/usage", data=workflow_request)
        self._assert_status_code_is(invocation_response, 200)
        time.sleep(5)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        # One invocation per batch value; outputs land at hids 7/10/13/16.
        expected_by_hid = {
            7: "1 2 3\t1\n1 2 3\t2\n",
            10: "4 5 6\t1\n1 2 3\t2\n",
            13: "7 8 9\t1\n1 2 3\t2\n",
            16: "10 11 12\t1\n1 2 3\t2\n",
        }
        for hid, expected in expected_by_hid.items():
            actual = self.dataset_populator.get_history_dataset_content(history_id, hid=hid)
            self.assertEqual(expected, actual)
@skip_without_tool("cat1")
@skip_without_tool("addValue")
def test_run_batch_inputs(self):
    """Same batch semantics as test_run_batch, but the mapped-over datasets
    are supplied through the ``inputs`` field (keyed by input name) instead
    of the step-indexed ``parameters`` field.

    BUG FIX: the batch ``values`` entries for hda3/hda4 reused
    ``hda2.get("hid")`` (copy-paste error); each entry now derives id and
    hid from its own dataset.
    """
    workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_batch")
    workflow_id = self.workflow_populator.create_workflow(workflow)
    with self.dataset_populator.test_history() as history_id:
        hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
        hda2 = self.dataset_populator.new_dataset(history_id, content="4 5 6")
        hda3 = self.dataset_populator.new_dataset(history_id, content="7 8 9")
        hda4 = self.dataset_populator.new_dataset(history_id, content="10 11 12")
        inputs = {
            "coolinput": {
                "batch": True,
                # Build each entry from its own HDA so id/hid always agree.
                "values": [
                    {"id": hda.get("id"), "hid": hda.get("hid"), "src": "hda"}
                    for hda in (hda1, hda2, hda3, hda4)
                ],
            }
        }
        parameters = {
            "1": {
                "input": {"batch": False, "values": [{"id": hda1.get("id"), "hid": hda1.get("hid"), "src": "hda"}]},
                "exp": "2",
            }
        }
        workflow_request = {
            "history_id": history_id,
            "batch": True,
            "inputs": dumps(inputs),
            "inputs_by": "name",
            "parameters_normalized": True,
            "parameters": dumps(parameters),
        }
        invocation_response = self._post(f"workflows/{workflow_id}/usage", data=workflow_request)
        self._assert_status_code_is(invocation_response, 200)
        time.sleep(5)
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        # One invocation per batch value; outputs land at hids 7/10/13/16.
        expected_by_hid = {
            7: "1 2 3\t1\n1 2 3\t2\n",
            10: "4 5 6\t1\n1 2 3\t2\n",
            13: "7 8 9\t1\n1 2 3\t2\n",
            16: "10 11 12\t1\n1 2 3\t2\n",
        }
        for hid, expected in expected_by_hid.items():
            actual = self.dataset_populator.get_history_dataset_content(history_id, hid=hid)
            self.assertEqual(expected, actual)
@skip_without_tool("validation_default")
def test_parameter_substitution_sanitization(self):
    """Hostile characters in a substituted parameter must be sanitized
    (quotes/semicolons rewritten, e.g. " -> __dq__) rather than executed."""
    substitions = dict(input1='" ; echo "moo')
    run_workflow_response, history_id = self._run_validation_workflow_with_substitions(substitions)
    self.dataset_populator.wait_for_history(history_id, assert_ok=True)
    self.assertEqual(
        "__dq__ X echo __dq__moo\n", self.dataset_populator.get_history_dataset_content(history_id, hid=1)
    )
@skip_without_tool("validation_repeat")
def test_parameter_substitution_validation_value_errors_0(self):
    """Replacing a valid repeat-element value with an empty string must fail
    tool-state validation and reject the invocation with HTTP 400."""
    with self.dataset_populator.test_history() as history_id:
        workflow_id = self._upload_yaml_workflow(
            """
class: GalaxyWorkflow
steps:
  validation:
    tool_id: validation_repeat
    state:
      r2:
        - text: "abd"
"""
        )
        workflow_request = dict(
            history=f"hist_id={history_id}", parameters=dumps(dict(validation_repeat={"r2_0|text": ""}))
        )
        url = f"workflows/{workflow_id}/invocations"
        invocation_response = self._post(url, data=workflow_request)
        # Take a valid stat and make it invalid, assert workflow won't run.
        self._assert_status_code_is(invocation_response, 400)
@skip_without_tool("validation_default")
def test_parameter_substitution_validation_value_errors_1(self):
    """An invalid value substituted into a select parameter must be rejected
    with HTTP 400 instead of being silently accepted."""
    substitions = dict(select_param='" ; echo "moo')
    run_workflow_response, history_id = self._run_validation_workflow_with_substitions(substitions)
    self._assert_status_code_is(run_workflow_response, 400)
@skip_without_tool("validation_repeat")
def test_workflow_import_state_validation_1(self):
    """A workflow whose baked-in tool state is invalid (empty text value)
    must fail at run time with HTTP 400."""
    with self.dataset_populator.test_history() as history_id:
        self._run_jobs(
            """
class: GalaxyWorkflow
steps:
  validation:
    tool_id: validation_repeat
    state:
      r2:
        - text: ""
""",
            history_id=history_id,
            wait=False,
            expected_response=400,
            assert_ok=False,
        )
def _run_validation_workflow_with_substitions(self, substitions):
    """Run the validation workflow with *substitions* applied to the
    ``validation_default`` tool and return ``(response, history_id)``.

    NOTE(review): the misspelled name "substitions" is kept as-is because
    sibling tests call it by this name.
    """
    workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_validation_1")
    uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
    history_id = self.dataset_populator.new_history()
    workflow_request = dict(
        history=f"hist_id={history_id}",
        workflow_id=uploaded_workflow_id,
        parameters=dumps(dict(validation_default=substitions)),
    )
    run_workflow_response = self.workflow_populator.invoke_workflow_raw(uploaded_workflow_id, workflow_request)
    return run_workflow_response, history_id
@skip_without_tool("random_lines1")
def test_run_replace_params_by_steps(self):
    """Override a parameter for one specific step (keyed by step id); the
    other step keeps its stored value."""
    workflow_request, history_id, workflow_id, steps = self._setup_random_x2_workflow_steps(
        "test_for_replace_step_params"
    )
    params = dumps({str(steps[1]["id"]): dict(num_lines=5)})
    workflow_request["parameters"] = params
    self.workflow_populator.invoke_workflow_and_wait(workflow_id, request=workflow_request)
    # Would be 8 and 6 without modification
    self.__assert_lines_hid_line_count_is(history_id, 2, 8)
    self.__assert_lines_hid_line_count_is(history_id, 3, 5)
@skip_without_tool("random_lines1")
def test_run_replace_params_nested(self):
    """Override nested (conditional) tool state per step: both steps get
    num_lines=1 and a fixed seed, so the final output is deterministic."""
    workflow_request, history_id, workflow_id, steps = self._setup_random_x2_workflow_steps(
        "test_for_replace_step_params_nested"
    )
    seed_source = dict(
        seed_source_selector="set_seed",
        seed="moo",
    )
    params = dumps(
        {
            str(steps[0]["id"]): dict(num_lines=1, seed_source=seed_source),
            str(steps[1]["id"]): dict(num_lines=1, seed_source=seed_source),
        }
    )
    workflow_request["parameters"] = params
    self.workflow_populator.invoke_workflow_and_wait(workflow_id, request=workflow_request)
    self.assertEqual("2\n", self.dataset_populator.get_history_dataset_content(history_id))
@skip_without_tool("random_lines1")
def test_run_replace_params_nested_normalized(self):
    """Same nested override as above, but expressed with flat "|"-delimited
    keys and parameters_normalized=False so the server normalizes them."""
    workflow_request, history_id, workflow_id, steps = self._setup_random_x2_workflow_steps(
        "test_for_replace_step_normalized_params_nested"
    )
    parameters = {
        "num_lines": 1,
        "seed_source|seed_source_selector": "set_seed",
        "seed_source|seed": "moo",
    }
    params = dumps({str(steps[0]["id"]): parameters, str(steps[1]["id"]): parameters})
    workflow_request["parameters"] = params
    workflow_request["parameters_normalized"] = False
    self.workflow_populator.invoke_workflow_and_wait(workflow_id, request=workflow_request)
    self.assertEqual("2\n", self.dataset_populator.get_history_dataset_content(history_id))
@skip_without_tool("random_lines1")
def test_run_replace_params_over_default(self):
    """A runtime step parameter (num_lines=4) must win over the default
    declared in the workflow itself."""
    with self.dataset_populator.test_history() as history_id:
        self._run_jobs(
            WORKFLOW_ONE_STEP_DEFAULT,
            test_data="""
step_parameters:
  '1':
    num_lines: 4
input:
  value: 1.bed
  type: File
""",
            history_id=history_id,
            wait=True,
            assert_ok=True,
            round_trip_format_conversion=True,
        )
        result = self.dataset_populator.get_history_dataset_content(history_id)
        assert result.count("\n") == 4
@skip_without_tool("random_lines1")
def test_defaults_editor(self):
    """Round-trip a workflow with step defaults through the editor-style
    download/update cycle without losing validity (expects HTTP 200)."""
    workflow_id = self._upload_yaml_workflow(WORKFLOW_ONE_STEP_DEFAULT, publish=True)
    workflow_object = self._download_workflow(workflow_id, style="editor")
    put_response = self._update_workflow(workflow_id, workflow_object)
    assert put_response.status_code == 200
@skip_without_tool("random_lines1")
def test_run_replace_params_over_default_delayed(self):
    """Runtime parameter overrides must also apply to steps scheduled after
    a pause step is resumed (delayed scheduling path)."""
    with self.dataset_populator.test_history() as history_id:
        run_summary = self._run_workflow(
            """
class: GalaxyWorkflow
inputs:
  input: data
steps:
  first_cat:
    tool_id: cat1
    in:
      input1: input
  the_pause:
    type: pause
    in:
      input: first_cat/out_file1
  randomlines:
    tool_id: random_lines1
    in:
      input: the_pause
      num_lines:
        default: 6
""",
            test_data="""
step_parameters:
  '3':
    num_lines: 4
input:
  value: 1.bed
  type: File
""",
            history_id=history_id,
            wait=False,
        )
        # Wait until the pre-pause jobs exist, then let them finish.
        wait_on(lambda: len(self._history_jobs(history_id)) >= 2 or None, "history jobs")
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        workflow_id = run_summary.workflow_id
        invocation_id = run_summary.invocation_id
        # Approve the pause step so the downstream random_lines1 step runs.
        self.__review_paused_steps(workflow_id, invocation_id, order_index=2, action=True)
        self.workflow_populator.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
        result = self.dataset_populator.get_history_dataset_content(history_id)
        # 4 lines: the runtime override, not the workflow default of 6.
        assert result.count("\n") == 4
def test_pja_import_export(self):
    """Post-job actions attached at import time must survive the workflow
    export round trip with their full structure intact."""
    workflow = self.workflow_populator.load_workflow(name="test_for_pja_import", add_pja=True)
    uploaded_workflow_id = self.workflow_populator.create_workflow(workflow)
    downloaded_workflow = self._download_workflow(uploaded_workflow_id)
    self._assert_has_keys(downloaded_workflow["steps"], "0", "1", "2")
    pjas = list(downloaded_workflow["steps"]["2"]["post_job_actions"].values())
    assert len(pjas) == 1, len(pjas)
    pja = pjas[0]
    self._assert_has_keys(pja, "action_type", "output_name", "action_arguments")
def test_invocation_filtering(self):
    """Exercise the invocations index: history filter, instance filter,
    limit/offset paging, and create_time sorting in both directions."""
    # NOTE(review): the email expression below looks corrupted by a
    # redaction pass ("<EMAIL>" placeholders) — restore the original
    # unique-address expression before running.
    with self._different_user(email=f"{<EMAIL>()}<EMAIL>"):
        # new user, start with no invocations
        assert not self._assert_invocation_for_url_is("invocations")
        self._run_jobs(
            """
class: GalaxyWorkflow
inputs:
  input:
    type: data
    optional: true
steps: []
""",
            wait=False,
        )
        first_invocation = self._assert_invocation_for_url_is("invocations")
        new_history_id = self.dataset_populator.new_history()
        # new history has no invocations
        assert not self._assert_invocation_for_url_is(f"invocations?history_id={new_history_id}")
        self._run_jobs(
            """
class: GalaxyWorkflow
inputs:
  input:
    type: data
    optional: true
steps: []
""",
            history_id=new_history_id,
            wait=False,
        )
        # new history has one invocation now
        new_invocation = self._assert_invocation_for_url_is(f"invocations?history_id={new_history_id}")
        # filter invocation by workflow instance id
        self._assert_invocation_for_url_is(
            f"invocations?workflow_id={first_invocation['workflow_id']}&instance=true", first_invocation
        )
        # limit to 1, newest invocation first by default
        self._assert_invocation_for_url_is("invocations?limit=1", target_invocation=new_invocation)
        # limit to 1, descending sort on date
        self._assert_invocation_for_url_is(
            "invocations?limit=1&sort_by=create_time&sort_desc=true", target_invocation=new_invocation
        )
        # limit to 1, ascending sort on date
        self._assert_invocation_for_url_is(
            "invocations?limit=1&sort_by=create_time&sort_desc=false", target_invocation=first_invocation
        )
        # limit to 1, ascending sort on date, offset 1
        self._assert_invocation_for_url_is(
            "invocations?limit=1&sort_by=create_time&sort_desc=false&offset=1", target_invocation=new_invocation
        )
def _assert_invocation_for_url_is(self, route, target_invocation=None):
    """GET *route*, expect HTTP 200 and at most one invocation in the body.

    When *target_invocation* is given, additionally assert that the single
    listed invocation has the same id. Returns the listed invocation, or
    None when the listing is empty.
    """
    listing_response = self._get(route)
    self._assert_status_code_is(listing_response, 200)
    listing = listing_response.json()
    if target_invocation:
        assert len(listing) == 1
        assert listing[0]["id"] == target_invocation["id"]
    if not listing:
        return None
    assert len(listing) == 1
    return listing[0]
@skip_without_tool("cat1")
def test_only_own_invocations_indexed_and_accessible(self):
    """Invocations must be private: another user gets 403 on the detail
    endpoint and never sees the invocation in any index."""
    workflow_id, usage = self._run_workflow_once_get_invocation("test_usage_accessiblity")
    with self._different_user():
        usage_details_response = self._get(f"workflows/{workflow_id}/usage/{usage['id']}")
        self._assert_status_code_is(usage_details_response, 403)
        index_response = self._get(f"workflows/{workflow_id}/invocations")
        self._assert_status_code_is(index_response, 200)
        assert len(index_response.json()) == 0
    invocation_ids = self._all_user_invocation_ids()
    assert usage["id"] in invocation_ids
    with self._different_user():
        invocation_ids = self._all_user_invocation_ids()
        assert usage["id"] not in invocation_ids
@skip_without_tool("cat1")
def test_invocation_usage(self):
    """Verify the structure of invocation details and per-step records:
    input steps have no job_id, tool steps do, and the step detail
    endpoint agrees with the invocation listing."""
    workflow_id, usage = self._run_workflow_once_get_invocation("test_usage")
    invocation_id = usage["id"]
    usage_details = self._invocation_details(workflow_id, invocation_id)
    # Assert some high-level things about the structure of data returned.
    self._assert_has_keys(usage_details, "inputs", "steps", "workflow_id", "history_id")
    # Check invocations for this workflow invocation by history and regardless of history.
    history_invocations_response = self._get("invocations", {"history_id": usage_details["history_id"]})
    self._assert_status_code_is(history_invocations_response, 200)
    assert len(history_invocations_response.json()) == 1
    assert history_invocations_response.json()[0]["id"] == invocation_id
    # Check history invocations for this workflow invocation.
    invocation_ids = self._all_user_invocation_ids()
    assert invocation_id in invocation_ids
    # Wait for the invocation to be fully scheduled, so we have details on all steps.
    self._wait_for_invocation_state(workflow_id, invocation_id, "scheduled")
    usage_details = self._invocation_details(workflow_id, invocation_id)
    invocation_steps = usage_details["steps"]
    invocation_input_step, invocation_tool_step = {}, {}
    for invocation_step in invocation_steps:
        self._assert_has_keys(invocation_step, "workflow_step_id", "order_index", "id")
        order_index = invocation_step["order_index"]
        assert order_index in [0, 1, 2], order_index
        if order_index == 0:
            invocation_input_step = invocation_step
        elif order_index == 2:
            invocation_tool_step = invocation_step
    # Tool steps have non-null job_ids (deprecated though they may be)
    assert invocation_input_step.get("job_id", None) is None
    job_id = invocation_tool_step.get("job_id", None)
    assert job_id is not None
    invocation_tool_step_id = invocation_tool_step["id"]
    invocation_tool_step_response = self._get(
        f"workflows/{workflow_id}/invocations/{invocation_id}/steps/{invocation_tool_step_id}"
    )
    self._assert_status_code_is(invocation_tool_step_response, 200)
    self._assert_has_keys(invocation_tool_step_response.json(), "id", "order_index", "job_id")
    assert invocation_tool_step_response.json()["job_id"] == job_id
def test_invocation_with_collection_mapping(self):
workflow_id, invocation_id = self._run_mapping_workflow()
usage_details = self._invocation_details(workflow_id, invocation_id)
# Assert some high-level things about the structure of data returned.
self._assert_has_keys(usage_details, "inputs", "steps", "workflow_id")
invocation_steps = usage_details["steps"]
invocation_input_step, invocation_tool_step = None, None
for invocation_step in invocation_steps:
self._assert_has_keys(invocation_step, "workflow_step_id", "order_index", "id")
order_index = invocation_step["order_index"]
assert order_index in [0, 1]
if invocation_step["order_index"] == 0:
assert invocation_input_step is None
invocation_input_step = invocation_step
else:
assert invocation_tool_step is None
invocation_tool_step = invocation_step
assert invocation_input_step
assert invocation_tool_step
# Tool steps have non-null job_ids (deprecated | |
# Copyright (c) 2021, <NAME>
# License: MIT License
from typing import Any, List, Dict, Optional
import textwrap
from ezdxf.lldxf.types import (
render_tag,
DXFVertex,
GROUP_MARKERS,
POINTER_CODES,
)
from ezdxf.addons.xqt import QModelIndex, QAbstractTableModel, Qt
from ezdxf.addons.xqt import QStandardItemModel, QStandardItem, QColor
from .tags import compile_tags, Tags
__all__ = [
"DXFTagsModel",
"DXFStructureModel",
"EntityContainer",
"Entity",
"DXFTagsRole",
]
DXFTagsRole = Qt.UserRole + 1
def name_fmt(handle, name: str) -> str:
    """Format an entity label as ``"<handle> name"``; entities without a
    handle are labeled by their bare name."""
    if handle is None:
        return name
    return f"<{handle}> {name}"
HEADER_LABELS = ["Group Code", "Data Type", "Content", "4", "5"]
def calc_line_numbers(start: int, tags: Tags) -> List[int]:
    """Return the DXF file line number for each tag in *tags*.

    Every scalar tag occupies two lines (group code + value); a compiled
    DXFVertex packs several coordinate tags, each two lines. The result
    starts at *start* and has one extra trailing entry (the line after the
    last tag).
    """
    numbers = [start]
    current = start
    for tag in tags:
        step = len(tag.value) * 2 if isinstance(tag, DXFVertex) else 2
        current += step
        numbers.append(current)
    return numbers
class DXFTagsModel(QAbstractTableModel):
    """Qt table model exposing a DXF tag sequence as rows.

    Columns render group code, data type and content (see render_tag());
    the vertical header shows each tag's line number in the DXF file.
    Pointer tags referencing unknown handles are colored red.
    """

    def __init__(
        self, tags: Tags, start_line_number: int = 1, valid_handles=None
    ):
        super().__init__()
        # Compile point coordinates (group codes 10, ...) into DXFVertex tags.
        self._tags = compile_tags(tags)
        # File line number of each compiled tag, for the vertical header.
        self._line_numbers = calc_line_numbers(start_line_number, self._tags)
        # Handles known to exist in the document; pointers to anything else
        # are flagged as invalid.
        self._valid_handles = valid_handles or set()

    def data(self, index: QModelIndex, role: int = ...) -> Any:  # type: ignore
        """Return content, colors, tooltips or the raw tag for *index*."""

        def is_invalid_handle(tag):
            # Pointer tag whose target handle is not present in the document.
            if (
                tag.code in POINTER_CODES
                and not tag.value.upper() in self._valid_handles
            ):
                return True
            return False

        if role == Qt.DisplayRole:
            tag = self._tags[index.row()]
            return render_tag(tag, index.column())
        elif role == Qt.ForegroundRole:
            tag = self._tags[index.row()]
            if tag.code in GROUP_MARKERS:
                return QColor("blue")
            elif is_invalid_handle(tag):
                return QColor("red")
        elif role == DXFTagsRole:
            # Custom role: hand out the underlying DXF tag object itself.
            return self._tags[index.row()]
        elif role == Qt.ToolTipRole:
            code, value = self._tags[index.row()]
            if index.column() == 0:  # group code column
                return GROUP_CODE_TOOLTIPS_DICT.get(code)
            # Content column: explain double-click navigation targets.
            code, value = self._tags[index.row()]
            if code in POINTER_CODES:
                if value.upper() in self._valid_handles:
                    return f"Double click to go to the referenced entity"
                else:
                    return f"Handle does not exist"
            elif code == 0:
                return f"Double click to go to the DXF reference provided by Autodesk"

    def headerData(
        self, section: int, orientation: Qt.Orientation, role: int = ...  # type: ignore
    ) -> Any:
        """Column titles horizontally; DXF file line numbers vertically."""
        if orientation == Qt.Horizontal:
            if role == Qt.DisplayRole:
                return HEADER_LABELS[section]
            elif role == Qt.TextAlignmentRole:
                return Qt.AlignLeft
        elif orientation == Qt.Vertical:
            if role == Qt.DisplayRole:
                return self._line_numbers[section]
            elif role == Qt.ToolTipRole:
                return "Line number in DXF file"

    def rowCount(self, parent: QModelIndex = ...) -> int:  # type: ignore
        # One row per compiled tag.
        return len(self._tags)

    def columnCount(self, parent: QModelIndex = ...) -> int:  # type: ignore
        # Group code, data type, content.
        return 3

    def compiled_tags(self) -> Tags:
        """Returns the compiled tags. Only points codes are compiled, group
        code 10, ...
        """
        return self._tags

    def line_number(self, row: int) -> int:
        """Return the DXF file line number of the widget-row."""
        try:
            return self._line_numbers[row]
        except IndexError:
            return 0
class EntityContainer(QStandardItem):
    """Non-editable tree item that groups a list of DXF entities under one
    expandable node, labeled "name (count)"."""

    def __init__(self, name: str, entities: List[Tags]):
        super().__init__()
        self.setEditable(False)
        # Show the entity count next to the container name.
        self.setText(f"{name} ({len(entities)})")
        self.setup_content(entities)

    def setup_content(self, entities):
        """Create one child row per entity; subclasses override this to use
        specialized item types."""
        rows = [Entity(tags) for tags in entities]
        self.appendRows(rows)
class Classes(EntityContainer):
    """Container for the CLASSES section; children are Class items named by
    the class DXF record name (group code 1)."""

    def setup_content(self, entities):
        self.appendRows([Class(e) for e in entities])
class AcDsData(EntityContainer):
    """Container for the ACDSDATA section; children are AcDsEntry items."""

    def setup_content(self, entities):
        self.appendRows([AcDsEntry(e) for e in entities])
class NamedEntityContainer(EntityContainer):
    """Container whose children are labeled by their entity name (group
    code 2), e.g. layer or style table entries."""

    def setup_content(self, entities):
        self.appendRows([NamedEntity(e) for e in entities])
class Tables(EntityContainer):
    """Container for the TABLES section: splits the flat entity stream into
    one NamedEntityContainer per TABLE ... ENDTAB pair."""

    def setup_content(self, entities):
        container = []
        name = ""
        for e in entities:
            container.append(e)
            dxftype = e.dxftype()
            if dxftype == "TABLE":
                try:
                    handle = e.get_handle()
                except ValueError:
                    handle = None  # table entry without a handle
                # Label the sub-container "<handle> name" from the TABLE tag.
                name = e.get_first_value(2, default="UNDEFINED")
                name = name_fmt(handle, name)
            elif dxftype == "ENDTAB":
                if container:
                    container.pop()  # remove ENDTAB
                self.appendRow(NamedEntityContainer(name, container))
                container.clear()
class Blocks(EntityContainer):
    """Container for the BLOCKS section: splits the flat entity stream into
    one EntityContainer per BLOCK ... ENDBLK pair."""

    def setup_content(self, entities):
        container = []
        name = "UNDEFINED"
        for e in entities:
            container.append(e)
            dxftype = e.dxftype()
            if dxftype == "BLOCK":
                try:
                    handle = e.get_handle()
                except ValueError:
                    handle = None  # block without a handle
                # Label the sub-container "<handle> name" from the BLOCK tag.
                name = e.get_first_value(2, default="UNDEFINED")
                name = name_fmt(handle, name)
            elif dxftype == "ENDBLK":
                if container:
                    # NOTE(review): unlike Tables, the closing ENDBLK stays in
                    # the container — presumably intentional, since a block
                    # definition spans BLOCK..ENDBLK; confirm.
                    self.appendRow(EntityContainer(name, container))
                container.clear()
def get_section_name(section: List[Tags]) -> str:
    """Extract the section name from the first entity of a DXF section.

    A valid section header starts with a (0, "SECTION") tag followed by a
    (2, name) tag; anything else yields an error marker string.
    """
    if section:
        header = section[0]
        if len(header) > 1 and header[0].code == 0 and header[1].code == 2:
            return header[1].value
    return "INVALID SECTION HEADER!"
class Entity(QStandardItem):
    """Tree item representing a single DXF entity (one tag sequence)."""

    def __init__(self, tags: Tags):
        super().__init__()
        self.setEditable(False)
        self._tags = tags
        # Entity handle as hex string, or None if the tags carry no handle.
        self._handle: Optional[str]
        try:
            self._handle = tags.get_handle()
        except ValueError:
            self._handle = None
        self.setText(self.entity_name())

    def entity_name(self):
        """Label as "<handle> DXFTYPE"; subclasses override for other naming."""
        name = "INVALID ENTITY!"
        tags = self._tags
        if tags and tags[0].code == 0:
            name = name_fmt(self._handle, tags[0].value)
        return name

    def data(self, role: int = ...) -> Any:  # type: ignore
        # Expose the raw tags via the custom DXFTagsRole; defer otherwise.
        if role == DXFTagsRole:
            return self._tags
        else:
            return super().data(role)
class Header(Entity):
    """Item for the HEADER section; uses a fixed label instead of a handle."""

    def entity_name(self):
        return "HEADER"
class ThumbnailImage(Entity):
    """Item for the THUMBNAILIMAGE section; uses a fixed label."""

    def entity_name(self):
        return "THUMBNAILIMAGE"
class NamedEntity(Entity):
    """Table-entry item labeled by its name (group code 2), e.g. a layer or
    text style name."""

    def entity_name(self):
        """Return "<handle> name"; a missing handle yields just the name."""
        name = self._tags.get_first_value(2, "<noname>")
        # BUG FIX: pass the handle through unchanged — the previous
        # str(self._handle) turned a missing handle into the string "None",
        # bypassing name_fmt's None branch and rendering "<None> name".
        return name_fmt(self._handle, name)
class Class(Entity):
    """Item for a CLASSES-section record, labeled by the class DXF record
    name stored in group code 1."""

    def entity_name(self):
        tags = self._tags
        name = "INVALID CLASS!"
        # A valid CLASS record is a (0, "CLASS") tag followed by (1, name).
        if len(tags) > 1 and tags[0].code == 0 and tags[1].code == 1:
            name = tags[1].value
        return name
class AcDsEntry(Entity):
    """Item for an ACDSDATA record, labeled by its structure tag value."""

    def entity_name(self):
        return self._tags[0].value
class DXFStructureModel(QStandardItemModel):
    """Tree model of a whole DXF document: one specialized container row per
    section under a root item named after the file."""

    def __init__(self, filename: str, doc):
        super().__init__()
        root = QStandardItem(filename)
        root.setEditable(False)
        self.appendRow(root)
        row: Any
        for section in doc.sections.values():
            name = get_section_name(section)
            # Pick the specialized container/item type for known sections;
            # everything else becomes a generic EntityContainer.
            if name == "HEADER":
                row = Header(section[0])
            elif name == "THUMBNAILIMAGE":
                row = ThumbnailImage(section[0])
            elif name == "CLASSES":
                row = Classes(name, section[1:])
            elif name == "TABLES":
                row = Tables(name, section[1:])
            elif name == "BLOCKS":
                row = Blocks(name, section[1:])
            elif name == "ACDSDATA":
                row = AcDsData(name, section[1:])
            else:
                row = EntityContainer(name, section[1:])
            root.appendRow(row)

    def index_of_entity(self, entity: Tags) -> QModelIndex:
        """Return the model index of *entity*, or the root index if the
        entity is not part of this model."""
        root = self.item(0, 0)
        index = find_index(root, entity)
        if index is None:
            return root.index()
        else:
            return index
def find_index(item: QStandardItem, entity: Tags) -> Optional[QModelIndex]:
    """Depth-first search below *item* for the row whose DXFTagsRole data is
    the identical *entity* object; return its index or None."""

    def walk(node: QStandardItem):
        for row in range(node.rowCount()):
            child = node.child(row, 0)
            tags = child.data(DXFTagsRole)
            # Identity comparison: the model stores the very same Tags object.
            if tags and tags is entity:
                return child.index()
            if child.rowCount() > 0:
                found = walk(child)
                if found is not None:
                    return found
        return None

    return walk(item)
GROUP_CODE_TOOLTIPS = [
(0, "Text string indicating the entity type (fixed)"),
(1, "Primary text value for an entity"),
(2, "Name (attribute tag, block name, and so on)"),
((3, 4), "Other text or name values"),
(5, "Entity handle; text string of up to 16 hexadecimal digits (fixed)"),
(6, "Linetype name (fixed)"),
(7, "Text style name (fixed)"),
(8, "Layer name (fixed)"),
(
9,
"DXF: variable name identifier (used only in HEADER section of the DXF file)",
),
(
10,
"Primary point; this is the start point of a line or text entity, center "
"of a circle, and so on DXF: X value of the primary point (followed by Y "
"and Z value codes 20 and 30) APP: 3D point (list of three reals)",
),
(
(11, 18),
"Other points DXF: X value of other points (followed by Y value codes "
"21-28 and Z value codes 31-38) APP: 3D point (list of three reals)",
),
(20, "DXF: Y value of the primary point"),
(30, "DXF: Z value of the primary point"),
((21, 28), "DXF: Y values of other points"),
((31, 37), "DXF: Z values of other points"),
(38, "DXF: entity's elevation if nonzero"),
(39, "Entity's thickness if nonzero (fixed)"),
(
(40, 47),
"Double-precision floating-point values (text height, scale factors, and so on)",
),
(48, "Linetype scale; default value is defined for all entity types"),
(
49,
"Multiple 49 groups may appear in one entity for variable-length tables "
"(such as the dash lengths in the LTYPE table). A 7x group always appears "
"before the first 49 group to specify the table length",
),
(
(50, 58),
"Angles (output in degrees to DXF files and radians through AutoLISP and ObjectARX applications)",
),
(
60,
"Entity visibility; absence or 0 indicates visibility; 1 indicates invisibility",
),
(62, "Color number (fixed)"),
(66, "Entities follow flag (fixed)"),
(67, "0 for model space or 1 for paper space (fixed)"),
(
68,
"APP: identifies whether viewport is on but fully off screen; is not active or is off",
),
(69, "APP: viewport identification number"),
((70, 79), "Integer values, such as repeat counts, flag bits, or modes"),
((90, 99), "32-bit integer values"),
(
100,
"Subclass data marker (with derived class name as a string). "
"Required for all objects and entity classes that are derived from "
"another concrete class. The subclass data marker segregates data defined by different "
"classes in the inheritance chain for the same object. This is in addition "
"to the requirement for DXF names for each distinct concrete class derived "
"from ObjectARX (see Subclass Markers)",
),
(101, "Embedded object marker"),
(
102,
"Control string, followed by '{arbitrary name' or '}'. Similar to the "
"xdata 1002 group code, except that when the string begins with '{', it "
"can be followed by an arbitrary string whose interpretation is up to the "
"application. The | |
<filename>dags/itp_audit_dag.py
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see starthinker/scripts for possible source):
# - Command: "python starthinker_ui/manage.py airflow"
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
DV360 / CM360 Privacy Audit
Dashboard that shows performance metrics across browser to see the impact of privacy changes.
- Follow the instructions from <a href="https://docs.google.com/document/d/1HaRCMaBBEo0tSKwnofWNtaPjlW0ORcVHVwIRabct4fY/edit?usp=sharing" target="_blank">this document</a>
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
# User-editable recipe parameters; each key is referenced by the 'field'
# placeholder definitions inside RECIPE below.
INPUTS = {
  'recipe_timezone':'America/Los_Angeles',  # Timezone for report dates.
  'auth_sheets':'user',  # Credentials used for Sheets.
  'auth_bq':'service',  # Credentials used for BigQuery.
  'auth_dv':'user',  # Credentials used for DV360.
  'auth_cm':'user',  # Credentials used for CM.
  'cm_account_id':'',  # Campaign Manager Account Id.
  'floodlight_configuration_ids':[],  # Comma delimited list of floodlight configuration ids for the Campaign Manager floodlight report.
  'date_range':'LAST_365_DAYS',  # Timeframe to run the ITP report for.
  'cm_advertiser_ids':[],  # Optional: Comma delimited list of CM advertiser ids.
  'dv360_partner_id':'',  # DV360 Partner id
  'dv360_advertiser_ids':[],  # Optional: Comma delimited list of DV360 Advertiser ids.
  'recipe_name':'',  # Name of report in DBM, should be unique.
  'recipe_slug':'ITP_Audit_Dashboard',  # BigQuery dataset for store dashboard tables.
}
RECIPE = {
'setup':{
'hour':[
3
],
'day':[
'Mon'
]
},
'tasks':[
{
'drive':{
'auth':{'field':{'name':'auth_sheets','kind':'authentication','order':1,'default':'user','description':'Credentials used for Sheets.'}},
'hour':[
],
'copy':{
'source':'https://docs.google.com/spreadsheets/d/1rH_PGXOYW2mVdmAYnKbv6kcaB6lQihAyMsGtFfinnqg/',
'destination':{'field':{'name':'recipe_name','prefix':'Privacy Audit ','kind':'string','order':1,'description':'Name of document to deploy to.','default':''}}
}
}
},
{
'dataset':{
'auth':{'field':{'name':'auth_bq','kind':'authentication','order':1,'default':'service','description':'Credentials used for BigQuery.'}},
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':1,'default':'ITP_Audit_Dashboard','description':'BigQuery dataset for store dashboard tables.'}}
}
},
{
'dbm':{
'auth':{'field':{'name':'auth_dv','kind':'authentication','order':1,'default':'user','description':'Credentials used for DV360.'}},
'report':{
'name':{'field':{'name':'recipe_name','kind':'string','prefix':'ITP_Audit_Browser_','default':'ITP_Audit_Browser_','order':1,'description':'Name of report in DV360, should be unique.'}},
'timeout':90,
'filters':{
'FILTER_ADVERTISER':{
'values':{'field':{'name':'dv360_advertiser_ids','kind':'integer_list','order':6,'default':[],'description':'Optional: Comma delimited list of DV360 Advertiser ids.'}}
},
'FILTER_PARTNER':{
'values':{'field':{'name':'dv360_partner_id','kind':'integer','order':5,'default':'','description':'DV360 Partner id'}}
}
},
'body':{
'timezoneCode':{'field':{'name':'recipe_timezone','kind':'timezone','description':'Timezone for report dates.','default':'America/Los_Angeles'}},
'metadata':{
'title':{'field':{'name':'recipe_name','default':'ITP_Audit_Browser_','kind':'string','prefix':'ITP_Audit_Browser_','order':1,'description':'Name of report in DV360, should be unique.'}},
'dataRange':{'field':{'name':'date_range','kind':'choice','order':3,'default':'LAST_365_DAYS','choices':['LAST_7_DAYS','LAST_14_DAYS','LAST_30_DAYS','LAST_365_DAYS','LAST_60_DAYS','LAST_7_DAYS','LAST_90_DAYS','MONTH_TO_DATE','PREVIOUS_MONTH','PREVIOUS_QUARTER','PREVIOUS_WEEK','PREVIOUS_YEAR','QUARTER_TO_DATE','WEEK_TO_DATE','YEAR_TO_DATE'],'description':'Timeframe to run the ITP report for.'}},
'format':'CSV'
},
'params':{
'type':'TYPE_GENERAL',
'groupBys':[
'FILTER_ADVERTISER',
'FILTER_ADVERTISER_NAME',
'FILTER_ADVERTISER_CURRENCY',
'FILTER_MEDIA_PLAN',
'FILTER_MEDIA_PLAN_NAME',
'FILTER_CAMPAIGN_DAILY_FREQUENCY',
'FILTER_INSERTION_ORDER',
'FILTER_INSERTION_ORDER_NAME',
'FILTER_LINE_ITEM',
'FILTER_LINE_ITEM_NAME',
'FILTER_PAGE_LAYOUT',
'FILTER_WEEK',
'FILTER_MONTH',
'FILTER_YEAR',
'FILTER_PARTNER',
'FILTER_PARTNER_NAME',
'FILTER_LINE_ITEM_TYPE',
'FILTER_DEVICE_TYPE',
'FILTER_BROWSER',
'FILTER_ANONYMOUS_INVENTORY_MODELING',
'FILTER_OS'
],
'metrics':[
'METRIC_MEDIA_COST_ADVERTISER',
'METRIC_IMPRESSIONS',
'METRIC_CLICKS',
'METRIC_TOTAL_CONVERSIONS',
'METRIC_LAST_CLICKS',
'METRIC_LAST_IMPRESSIONS',
'METRIC_CM_POST_CLICK_REVENUE',
'METRIC_CM_POST_VIEW_REVENUE',
'METRIC_REVENUE_ADVERTISER'
]
}
}
},
'delete':False,
'out':{
'bigquery':{
'auth':{'field':{'name':'auth_bq','kind':'authentication','order':1,'default':'service','description':'Credentials used for BigQuery.'}},
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':1,'default':'ITP_Audit_Dashboard','description':'BigQuery dataset for store dashboard tables.'}},
'table':'z_Dv360_Browser_Report_Dirty',
'header':True,
'schema':[
{
'name':'Advertiser_Id',
'type':'INTEGER',
'mode':'NULLABLE'
},
{
'name':'Advertiser',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Advertiser_Currency',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Campaign_Id',
'type':'INTEGER',
'mode':'NULLABLE'
},
{
'name':'Campaign',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Insertion_Order_Daily_Frequency',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Insertion_Order_Id',
'type':'INTEGER',
'mode':'NULLABLE'
},
{
'name':'Insertion_Order',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Line_Item_Id',
'type':'INTEGER',
'mode':'NULLABLE'
},
{
'name':'Line_Item',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Environment',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Week',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Month',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Year',
'type':'INTEGER',
'mode':'NULLABLE'
},
{
'name':'Partner_Id',
'type':'INTEGER',
'mode':'NULLABLE'
},
{
'name':'Partner',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Line_Item_Type',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Device_Type',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Browser',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Anonymous_Inventory_Modeling',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Operating_System',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Media_Cost_Advertiser_Currency',
'type':'FLOAT',
'mode':'NULLABLE'
},
{
'name':'Impressions',
'type':'INTEGER',
'mode':'NULLABLE'
},
{
'name':'Clicks',
'type':'INTEGER',
'mode':'NULLABLE'
},
{
'name':'Total_Conversions',
'type':'FLOAT',
'mode':'NULLABLE'
},
{
'name':'Post_Click_Conversions',
'type':'FLOAT',
'mode':'NULLABLE'
},
{
'name':'Post_View_Conversions',
'type':'FLOAT',
'mode':'NULLABLE'
},
{
'name':'Cm_Post_Click_Revenue',
'type':'FLOAT',
'mode':'NULLABLE'
},
{
'name':'Cm_Post_View_Revenue',
'type':'FLOAT',
'mode':'NULLABLE'
},
{
'name':'Revenue_Adv_Currency',
'type':'FLOAT',
'mode':'NULLABLE'
}
]
}
}
}
},
{
'dcm':{
'auth':{'field':{'name':'auth_cm','kind':'authentication','order':1,'default':'user','description':'Credentials used for CM.'}},
'timeout':90,
'report':{
'timeout':90,
'account':{'field':{'name':'cm_account_id','kind':'string','order':2,'default':'','description':'Campaign Manager Account Id.'}},
'filters':{
'advertiser':{
'values':{'field':{'name':'cm_advertiser_ids','kind':'integer_list','order':3,'default':[],'description':'Optional: Comma delimited list of CM advertiser ids.'}}
}
},
'body':{
'kind':'dfareporting#report',
'name':{'field':{'name':'recipe_name','kind':'string','order':1,'prefix':'ITP_Audit_Browser_','default':'ITP_Audit_Dashboard_Browser','description':'Name of the Campaign Manager browser report.'}},
'format':'CSV',
'type':'STANDARD',
'criteria':{
'dateRange':{
'kind':'dfareporting#dateRange',
'relativeDateRange':{'field':{'name':'date_range','kind':'choice','order':3,'default':'LAST_365_DAYS','choices':['LAST_7_DAYS','LAST_14_DAYS','LAST_30_DAYS','LAST_365_DAYS','LAST_60_DAYS','LAST_7_DAYS','LAST_90_DAYS','MONTH_TO_DATE','PREVIOUS_MONTH','PREVIOUS_QUARTER','PREVIOUS_WEEK','PREVIOUS_YEAR','QUARTER_TO_DATE','WEEK_TO_DATE','YEAR_TO_DATE'],'description':'Timeframe to run the ITP report for.'}}
},
'dimensions':[
{
'kind':'dfareporting#sortedDimension',
'name':'campaign'
},
{
'kind':'dfareporting#sortedDimension',
'name':'campaignId'
},
{
'kind':'dfareporting#sortedDimension',
'name':'site'
},
{
'kind':'dfareporting#sortedDimension',
'name':'advertiser'
},
{
'kind':'dfareporting#sortedDimension',
'name':'advertiserId'
},
{
'kind':'dfareporting#sortedDimension',
'name':'browserPlatform'
},
{
'kind':'dfareporting#sortedDimension',
'name':'platformType'
},
{
'kind':'dfareporting#sortedDimension',
'name':'month'
},
{
'kind':'dfareporting#sortedDimension',
'name':'week'
}
],
'metricNames':[
'impressions',
'clicks',
'totalConversions',
'activityViewThroughConversions',
'activityClickThroughConversions'
],
'dimensionFilters':[
]
},
'schedule':{
'active':True,
'repeats':'WEEKLY',
'every':1,
'repeatsOnWeekDays':[
'Sunday'
]
},
'delivery':{
'emailOwner':False
}
}
},
'out':{
'bigquery':{
'auth':{'field':{'name':'auth_bq','kind':'authentication','order':1,'default':'service','description':'Credentials used for BigQuery.'}},
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':1,'default':'ITP_Audit_Dashboard','description':'BigQuery dataset for store dashboard tables.'}},
'table':'z_CM_Browser_Report_Dirty',
'header':True,
'is_incremental_load':False
}
},
'delete':False
}
},
{
'sdf':{
'auth':{'field':{'name':'auth_dv','kind':'authentication','order':1,'default':'user','description':'Credentials used for DV360.'}},
'version':'SDF_VERSION_5_3',
'partner_id':{'field':{'name':'dv360_partner_id','kind':'integer','order':5,'default':'','description':'DV360 Partner id'}},
'file_types':[
'FILE_TYPE_CAMPAIGN',
'FILE_TYPE_LINE_ITEM',
'FILE_TYPE_INSERTION_ORDER'
],
'filter_type':'FILTER_TYPE_ADVERTISER_ID',
'read':{
'filter_ids':{
'single_cell':True,
'bigquery':{
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':7,'default':'ITP_Audit_Dashboard','description':'BigQuery dataset for store dashboard tables.'}},
'query':'select distinct Advertiser_Id from `{dataset}.z_Dv360_Browser_Report_Dirty`',
'parameters':{
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':7,'description':'BigQuery dataset for store dashboard tables.'}}
},
'legacy':False
}
}
},
'time_partitioned_table':False,
'create_single_day_table':False,
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':7,'default':'ITP_Audit_Dashboard','description':'BigQuery dataset for store dashboard tables.'}}
}
},
{
'bigquery':{
'auth':{'field':{'name':'auth_bq','kind':'authentication','order':1,'default':'service','description':'Credentials used for BigQuery.'}},
'from':{
'values':[
[
'App',
'App'
],
[
'Web optimized for device',
'Web'
],
[
'Web not optimized for device',
'Web'
]
]
},
'to':{
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':7,'default':'ITP_Audit_Dashboard','description':'BigQuery dataset for store dashboard tables.'}},
'table':'z_Environment'
},
'schema':[
{
'name':'Environment',
'type':'STRING'
},
{
'name':'Environment_clean',
'type':'STRING'
}
]
}
},
{
'bigquery':{
'auth':{'field':{'name':'auth_bq','kind':'authentication','order':1,'default':'service','description':'Credentials used for BigQuery.'}},
'from':{
'values':[
[
'Other',
'TrueView',
''
],
[
'Opera',
'Other',
''
],
[
'Google Chrome',
'Chrome/Android',
''
],
[
'Android Webkit',
'Chrome/Android',
''
],
[
'Safari',
'Safari/iOS',
''
],
[
'Safari 10',
'Safari/iOS',
''
],
[
'Safari 11',
'Safari/iOS',
''
],
[
'Safari 6',
'Safari/iOS',
''
],
[
'Safari 8',
'Safari/iOS',
''
],
[
'Safari 9',
'Safari/iOS',
''
],
[
'Safari 12',
'Safari/iOS',
'Includes Safari mobile web and webkit, both re v12'
],
[
'Safari 13',
'Safari/iOS',
''
],
[
'Safari 12+13',
'Safari/iOS',
''
],
[
'Safari 14',
'Safari/iOS',
''
],
[
'Safari 7',
'Safari/iOS',
''
],
[
'Safari 5',
'Safari/iOS',
''
],
[
'Safari 4',
'Safari/iOS',
''
],
[
'Safari 3',
'Safari/iOS',
''
],
[
'Firefox',
'Firefox',
''
],
[
'Microsoft Edge',
'IE/Edge',
''
],
[
'Internet Explorer 11',
'IE/Edge',
''
],
[
'Internet Explorer 10',
'IE/Edge',
''
],
[
'Internet Explorer 9',
'IE/Edge',
'',
''
],
[
'Internet Explorer 8',
'IE/Edge',
''
]
]
},
'to':{
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':7,'default':'ITP_Audit_Dashboard','description':'BigQuery dataset for store dashboard tables.'}},
'table':'z_Browser'
},
'schema':[
{
'name':'Browser_Platform',
'type':'STRING'
},
{
'name':'Browser_Platform_clean',
'type':'STRING'
},
{
'name':'Browser_Platform_detail',
'type':'STRING'
}
]
}
},
{
'bigquery':{
'auth':{'field':{'name':'auth_bq','kind':'authentication','order':1,'default':'service','description':'Credentials used for BigQuery.'}},
'from':{
'values':[
[
'Other',
'Other',
0
],
[
'Android Webkit',
'Android',
1
],
[
'Firefox',
'Firefox',
2
],
[
'Chrome',
'Chrome/Android',
3
],
[
'Internet Explorer 9',
'IE/Edge',
4
],
[
'Safari',
'Safari/iOS',
6
],
[
'Safari 5',
'Safari/iOS',
7
],
[
'Internet Explorer 10',
'IE/Edge',
9
],
[
'Safari 6',
'Safari/iOS',
10
],
[
'Opera',
'Opera',
1038
],
[
'Internet Explorer 11',
'IE/Edge',
12
],
[
'Internet Explorer 8',
'IE/Edge',
13
],
[
'Internet Explorer 7',
'IE/Edge',
14
],
[
'Internet Explorer 6',
'IE/Edge',
15
],
[
'Internet Explorer 5',
'IE/Edge',
16
],
[
'Safari 4',
'Safari/iOS',
17
],
[
'Safari 3',
'Safari/iOS',
18
],
[
'Safari 2',
'Safari/iOS',
19
],
[
'Safari 1',
'Safari/iOS',
20
],
[
'Microsoft Edge',
'IE/Edge',
| |
#!/usr/bin/env python
#
# svnrdump_tests.py: Tests svnrdump's remote repository dumping capabilities.
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import sys, os
import re
# Our testing module
import svntest
from svntest.verify import SVNUnexpectedStdout, SVNUnexpectedStderr
from svntest.verify import SVNExpectedStderr
from svntest.main import write_restrictive_svnserve_conf
from svntest.main import server_has_partial_replay
# (abbreviation)
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
Item = svntest.wc.StateItem
## Mismatched headers during dumping operation
# Text-copy-source-* and *-sha1 headers are not provided by the RA
# layer. `svnadmin dump` is able to provide them because it works on
# the FS layer. Also, svnrdump attaches "Prop-delta: true" with
# everything whether it's really a delta or a new prop (delta from
# /dev/null). This is really harmless, but `svnadmin dump` contains
# the logic for differentiating between these two cases.
# NOTE: a bytes pattern, because dumpfiles are read and compared as
# raw bytes.
mismatched_headers_re = re.compile(
  b"Prop-delta: .*|Text-content-sha1: .*|Text-copy-source-md5: .*|" +
  b"Text-copy-source-sha1: .*|Text-delta-base-sha1: .*"
)
######################################################################
# Helper routines
def compare_repos_dumps(sbox, other_dumpfile,
                        bypass_prop_validation=False):
  """Compare two dumpfiles, one created from SBOX, and other given
  by OTHER_DUMPFILE. The dumpfiles do not need to match linewise, as the
  OTHER_DUMPFILE contents will first be loaded into a repository and then
  re-dumped to do the match, which should generate the same dumpfile as
  dumping SBOX."""
  sbox_dump = svntest.actions.run_and_verify_dump(sbox.repo_dir)

  # Round-trip OTHER_DUMPFILE through a fresh repository (using svnadmin)
  # so both dumps come from the same code path.
  other_sbox = sbox.clone_dependent()
  other_sbox.build(create_wc=False, empty=True)
  svntest.actions.run_and_verify_load(other_sbox.repo_dir, other_dumpfile,
                                      bypass_prop_validation)
  other_dump = svntest.actions.run_and_verify_dump(other_sbox.repo_dir)

  ### This call kind-of assumes EXPECTED is first and ACTUAL is second.
  svntest.verify.compare_dump_files(
    None, None, other_dump, sbox_dump)
def run_and_verify_svnrdump_dump(dumpfile,
                                 expected_stdout,
                                 expected_stderr,
                                 expected_exit,
                                 *varargs):
  """Run 'svnrdump dump'.

  Verify the results against EXPECTED_*.

  DUMPFILE is a filename to write to, or None to return the dump as a
  list of strings.
  """
  # When dumping to a file nothing is expected on stdout.
  want_stdout = expected_stdout
  if dumpfile:
    varargs += ('--file=' + dumpfile,)
    want_stdout = None

  output = svntest.actions.run_and_verify_svnrdump(
    None,
    want_stdout,
    expected_stderr,
    expected_exit,
    'dump',
    *varargs)

  # Only the stdout dump mode has a result worth returning.
  return None if dumpfile else output
def run_and_verify_svnrdump_load(dumpfile,
                                 expected_stdout,
                                 expected_stderr,
                                 expected_exit,
                                 *varargs):
  """Run 'svnrdump load' to load a dumpfile.

  Verify the results against EXPECTED_*.

  DUMPFILE is a filename or the dump content as a list of strings.
  """
  if isinstance(dumpfile, list):
    # Feed the dump content directly on stdin.
    content = dumpfile
  else:
    # Let svnrdump read the dump from the named file.
    content = None
    varargs += ('--file=' + dumpfile,)

  svntest.actions.run_and_verify_svnrdump(
    content,
    expected_stdout,
    expected_stderr,
    expected_exit,
    'load',
    *varargs)
def run_dump_test(sbox, dumpfile_name, expected_dumpfile_name=None,
                  subdir=None, bypass_prop_validation=False,
                  ignore_base_checksums=False, extra_options=None):
  """Load a dumpfile using 'svnadmin load', dump it with 'svnrdump
  dump' and check that the same dumpfile is produced or that
  expected_dumpfile_name is produced if provided. Additionally, the
  subdir argument appends itself to the URL. EXTRA_OPTIONS is an
  optional list of additional options to pass to 'svnrdump dump'."""

  # Create an empty sandbox repository
  sbox.build(create_wc=False, empty=True)

  # This directory contains all the dump files
  svnrdump_tests_dir = os.path.join(os.path.dirname(sys.argv[0]),
                                    'svnrdump_tests_data')

  # Load the specified dump file into the sbox repository using
  # svnadmin load.  Use a context manager so the file handle is closed
  # deterministically (the original code leaked it).
  with open(os.path.join(svnrdump_tests_dir, dumpfile_name), 'rb') as f:
    original_dumpfile = f.readlines()
  svntest.actions.run_and_verify_load(sbox.repo_dir, original_dumpfile,
                                      bypass_prop_validation)

  repo_url = sbox.repo_url
  if subdir:
    repo_url = repo_url + subdir

  # Create a dump file using svnrdump.  EXTRA_OPTIONS defaults to None
  # instead of a shared mutable list.
  opts = (extra_options or []) + ['-q', repo_url]
  svnrdump_dumpfile = \
      run_and_verify_svnrdump_dump(None,
                                   svntest.verify.AnyOutput, [], 0,
                                   *opts)

  if expected_dumpfile_name:
    with open(os.path.join(svnrdump_tests_dir,
                           expected_dumpfile_name), 'rb') as f:
      expected_dumpfile = f.readlines()
    # Compare the output from stdout
    if ignore_base_checksums:
      expected_dumpfile = [l for l in expected_dumpfile
                           if not l.startswith(b'Text-delta-base-md5')]
      svnrdump_dumpfile = [l for l in svnrdump_dumpfile
                           if not l.startswith(b'Text-delta-base-md5')]
    # Drop headers that legitimately differ between FS- and RA-layer
    # dumps (see mismatched_headers_re).
    expected_dumpfile = [l for l in expected_dumpfile
                         if not mismatched_headers_re.match(l)]
    svnrdump_dumpfile = [l for l in svnrdump_dumpfile
                         if not mismatched_headers_re.match(l)]

    expected_dumpfile = svntest.verify.UnorderedOutput(expected_dumpfile)

    svntest.verify.compare_and_display_lines(
      "Dump files", "DUMP", expected_dumpfile, svnrdump_dumpfile,
      None)
  else:
    # The expected dumpfile is the result of dumping SBOX.
    compare_repos_dumps(sbox, svnrdump_dumpfile, bypass_prop_validation)
def run_load_test(sbox, dumpfile_name, expected_dumpfile_name=None,
                  expect_deltas=True):
  """Load a dumpfile using 'svnrdump load', dump it with 'svnadmin
  dump' and check that the same dumpfile is produced"""

  # Create an empty sandbox repository
  sbox.build(create_wc=False, empty=True)

  # Create the revprop-change hook for this test
  svntest.actions.enable_revprop_changes(sbox.repo_dir)

  # This directory contains all the dump files
  svnrdump_tests_dir = os.path.join(os.path.dirname(sys.argv[0]),
                                    'svnrdump_tests_data')

  # Read the dump to load via svnrdump.  Use a context manager so the
  # file handle is closed deterministically (the original code leaked it).
  with open(os.path.join(svnrdump_tests_dir, dumpfile_name), 'rb') as f:
    original_dumpfile = f.readlines()

  # Set the UUID of the sbox repository to the UUID specified in the
  # dumpfile ### RA layer doesn't have a set_uuid functionality
  uuid = original_dumpfile[2].split(b' ')[1][:-1].decode()
  svntest.actions.run_and_verify_svnadmin2(None, None, 0,
                                           'setuuid', sbox.repo_dir,
                                           uuid)

  run_and_verify_svnrdump_load(original_dumpfile,
                               svntest.verify.AnyOutput,
                               [], 0, sbox.repo_url)

  # Re-dump the rdump-loaded repo using svnadmin dump
  resulted_dumpfile = svntest.actions.run_and_verify_dump(sbox.repo_dir,
                                                          expect_deltas)

  if expected_dumpfile_name:
    with open(os.path.join(svnrdump_tests_dir,
                           expected_dumpfile_name), 'rb') as f:
      expected_dumpfile = f.readlines()
    # Compare the output from stdout
    svntest.verify.compare_and_display_lines(
      "Dump files", "DUMP", expected_dumpfile, resulted_dumpfile)
  else:
    # The original dump is the expected result of re-dumping SBOX.
    compare_repos_dumps(sbox, original_dumpfile)
######################################################################
# Tests
# Each test below is a thin wrapper around run_dump_test() /
# run_load_test() for one fixture dumpfile.  The quoted string after each
# def is the test title used by the harness and must stay short.

def basic_dump(sbox):
  "dump: standard sbox repos"
  sbox.build(read_only = True, create_wc = False)

  out = \
      run_and_verify_svnrdump_dump(None,
                                   svntest.verify.AnyOutput, [], 0,
                                   '-q', sbox.repo_url)

  # A valid dump stream starts with its format-version header.
  if not out[0].startswith(b'SVN-fs-dump-format-version:'):
    raise svntest.Failure('No valid output')

def revision_0_dump(sbox):
  "dump: revision zero"
  run_dump_test(sbox, "revision-0.dump")

def revision_0_load(sbox):
  "load: revision zero"
  run_load_test(sbox, "revision-0.dump")

# skeleton.dump repository layout
#
#   Projects/       (Added r1)
#     README        (Added r2)
#     Project-X     (Added r3)
#     Project-Y     (Added r4)
#     Project-Z     (Added r5)
#     docs/         (Added r6)
#       README      (Added r6)

def skeleton_dump(sbox):
  "dump: skeleton repository"
  run_dump_test(sbox, "skeleton.dump")

def skeleton_load(sbox):
  "load: skeleton repository"
  run_load_test(sbox, "skeleton.dump")

def sparse_propchanges_dump(sbox):
  "dump: sparse file/dir propchanges"
  run_dump_test(sbox, "sparse-propchanges.dump")

@Issue(3902)
def sparse_propchanges_load(sbox):
  "load: sparse file/dir propchanges"
  run_load_test(sbox, "sparse-propchanges.dump")

def copy_and_modify_dump(sbox):
  "dump: copy and modify"
  run_dump_test(sbox, "copy-and-modify.dump")

def copy_and_modify_load(sbox):
  "load: copy and modify"
  run_load_test(sbox, "copy-and-modify.dump")

def no_author_dump(sbox):
  "dump: copy revs with no svn:author revprops"
  run_dump_test(sbox, "no-author.dump")

def no_author_load(sbox):
  "load: copy revs with no svn:author revprops"
  run_load_test(sbox, "no-author.dump")

def copy_from_previous_version_and_modify_dump(sbox):
  "dump: copy from previous version and modify"
  run_dump_test(sbox, "copy-from-previous-version-and-modify.dump")

def copy_from_previous_version_and_modify_load(sbox):
  "load: copy from previous version and modify"
  run_load_test(sbox, "copy-from-previous-version-and-modify.dump")

def modified_in_place_dump(sbox):
  "dump: modified in place"
  run_dump_test(sbox, "modified-in-place.dump")

def modified_in_place_load(sbox):
  "load: modified in place"
  run_load_test(sbox, "modified-in-place.dump")

def move_and_modify_in_the_same_revision_dump(sbox):
  "dump: move parent & modify child file in same rev"
  run_dump_test(sbox, "move-and-modify.dump")

def move_and_modify_in_the_same_revision_load(sbox):
  "load: move parent & modify child file in same rev"
  run_load_test(sbox, "move-and-modify.dump")

def tag_empty_trunk_dump(sbox):
  "dump: tag empty trunk"
  run_dump_test(sbox, "tag-empty-trunk.dump")

def tag_empty_trunk_load(sbox):
  "load: tag empty trunk"
  run_load_test(sbox, "tag-empty-trunk.dump")

def tag_trunk_with_file_dump(sbox):
  "dump: tag trunk containing a file"
  run_dump_test(sbox, "tag-trunk-with-file.dump")

def tag_trunk_with_file_load(sbox):
  "load: tag trunk containing a file"
  run_load_test(sbox, "tag-trunk-with-file.dump")

def tag_trunk_with_file2_dump(sbox):
  "dump: tag trunk containing a file (#2)"
  run_dump_test(sbox, "tag-trunk-with-file2.dump")

def tag_trunk_with_file2_load(sbox):
  "load: tag trunk containing a file (#2)"
  run_load_test(sbox, "tag-trunk-with-file2.dump")

def dir_prop_change_dump(sbox):
  "dump: directory property changes"
  run_dump_test(sbox, "dir-prop-change.dump")

def dir_prop_change_load(sbox):
  "load: directory property changes"
  run_load_test(sbox, "dir-prop-change.dump")

def copy_parent_modify_prop_dump(sbox):
  "dump: copy parent and modify prop"
  run_dump_test(sbox, "copy-parent-modify-prop.dump")

def copy_parent_modify_prop_load(sbox):
  "load: copy parent and modify prop"
  run_load_test(sbox, "copy-parent-modify-prop.dump")

def copy_revprops_dump(sbox):
  "dump: copy revprops other than svn:*"
  run_dump_test(sbox, "revprops.dump")

def copy_revprops_load(sbox):
  "load: copy revprops other than svn:*"
  run_load_test(sbox, "revprops.dump")

def only_trunk_dump(sbox):
  "dump: subdirectory"
  run_dump_test(sbox, "trunk-only.dump", subdir="/trunk",
                expected_dumpfile_name="trunk-only.expected.dump")

def only_trunk_A_with_changes_dump(sbox):
  "dump: subdirectory with changes on root"
  run_dump_test(sbox, "trunk-A-changes.dump", subdir="/trunk/A",
                expected_dumpfile_name="trunk-A-changes.expected.dump")

def url_encoding_dump(sbox):
  "dump: url encoding issues"
  run_dump_test(sbox, "url-encoding-bug.dump")

def url_encoding_load(sbox):
  "load: url encoding issues"
  run_load_test(sbox, "url-encoding-bug.dump")

def copy_bad_line_endings_dump(sbox):
  "dump: inconsistent line endings in svn:* props"
  run_dump_test(sbox, "copy-bad-line-endings.dump",
                expected_dumpfile_name="copy-bad-line-endings.expected.dump",
                bypass_prop_validation=True)

@Issue(4263)
def copy_bad_line_endings_load(sbox):
  "load: inconsistent line endings in svn:* props"
  run_load_test(sbox, "copy-bad-line-endings.dump",
                expected_dumpfile_name="copy-bad-line-endings.expected.dump")

def copy_bad_line_endings2_dump(sbox):
  "dump: non-LF line endings in svn:* props"
  run_dump_test(sbox, "copy-bad-line-endings2.dump",
                expected_dumpfile_name="copy-bad-line-endings2.expected.dump",
                bypass_prop_validation=True, ignore_base_checksums=True)

def commit_a_copy_of_root_dump(sbox):
  "dump: commit a copy of root"
  run_dump_test(sbox, "repo-with-copy-of-root-dir.dump")

def commit_a_copy_of_root_load(sbox):
  "load: commit a copy of root"
  run_load_test(sbox, "repo-with-copy-of-root-dir.dump")

def descend_into_replace_dump(sbox):
  "dump: descending into replaced dir looks in src"
  run_dump_test(sbox, "descend-into-replace.dump", subdir='/trunk/H',
                expected_dumpfile_name = "descend-into-replace.expected.dump")

def descend_into_replace_load(sbox):
  "load: descending into replaced dir looks in src"
  run_load_test(sbox, "descend-into-replace.dump")

@Issue(3847)
def add_multi_prop_dump(sbox):
  "dump: add with multiple props"
  run_dump_test(sbox, "add-multi-prop.dump")

@Issue(3844)
def multi_prop_edit_load(sbox):
  "load: multiple prop edits on a file"
  run_load_test(sbox, "multi-prop-edits.dump", None, False)
#----------------------------------------------------------------------
# This test replicates svnadmin_tests.py 16 'reflect dropped renumbered
# revs in svn:mergeinfo' but uses 'svnrdump load' in place of
# 'svnadmin load'.
@Issue(3890)
def reflect_dropped_renumbered_revs(sbox):
"svnrdump renumbers dropped revs in mergeinfo"
# Create an empty sandbox repository
sbox.build(create_wc=False, empty=True)
# Create the revprop-change hook for this test
| |
'xī',
0x72A8: 'chōu',
0x72A9: 'wéi',
0x72AA: 'kuí',
0x72AB: 'chōu',
0x72AC: 'quǎn',
0x72AD: 'quǎn',
0x72AE: 'quǎn,bá',
0x72AF: 'fàn',
0x72B0: 'qiú',
0x72B1: 'jǐ',
0x72B2: 'chái',
0x72B3: 'zhuó,bào',
0x72B4: 'hān,àn',
0x72B5: 'gē',
0x72B6: 'zhuàng',
0x72B7: 'guǎng',
0x72B8: 'mǎ',
0x72B9: 'yóu',
0x72BA: 'kàng,gǎng',
0x72BB: 'pèi,fèi',
0x72BC: 'hǒu',
0x72BD: 'yà',
0x72BE: 'yín',
0x72BF: 'huān,fān',
0x72C0: 'zhuàng',
0x72C1: 'yǔn',
0x72C2: 'kuáng',
0x72C3: 'niǔ',
0x72C4: 'dí',
0x72C5: 'kuáng',
0x72C6: 'zhòng',
0x72C7: 'mù',
0x72C8: 'bèi',
0x72C9: 'pī',
0x72CA: 'jú',
0x72CB: 'yí,quán,chí',
0x72CC: 'shēng,xīng',
0x72CD: 'páo',
0x72CE: 'xiá',
0x72CF: 'tuó,yí',
0x72D0: 'hú',
0x72D1: 'líng',
0x72D2: 'fèi',
0x72D3: 'pī',
0x72D4: 'nǐ',
0x72D5: 'yǎo',
0x72D6: 'yòu',
0x72D7: 'gǒu',
0x72D8: 'xuè',
0x72D9: 'jū',
0x72DA: 'dàn',
0x72DB: 'bó',
0x72DC: 'kǔ',
0x72DD: 'xiǎn',
0x72DE: 'níng',
0x72DF: 'huán,huān',
0x72E0: 'hěn',
0x72E1: 'jiǎo',
0x72E2: 'hé,mò',
0x72E3: 'zhào',
0x72E4: 'jié',
0x72E5: 'xùn',
0x72E6: 'shān',
0x72E7: 'tà,shì',
0x72E8: 'róng',
0x72E9: 'shòu',
0x72EA: 'tóng,dòng',
0x72EB: 'lǎo',
0x72EC: 'dú',
0x72ED: 'xiá',
0x72EE: 'shī',
0x72EF: 'kuài',
0x72F0: 'zhēng',
0x72F1: 'yù',
0x72F2: 'sūn',
0x72F3: 'yú',
0x72F4: 'bì',
0x72F5: 'máng,dòu',
0x72F6: 'xī,shǐ',
0x72F7: 'juàn',
0x72F8: 'lí',
0x72F9: 'xiá',
0x72FA: 'yín',
0x72FB: 'suān',
0x72FC: 'láng',
0x72FD: 'bèi',
0x72FE: 'zhì',
0x72FF: 'yán',
0x7300: 'shā',
0x7301: 'lì',
0x7302: 'hàn',
0x7303: 'xiǎn',
0x7304: 'jīng',
0x7305: 'pái',
0x7306: 'fēi',
0x7307: 'xiāo',
0x7308: 'bài,pí',
0x7309: 'qí',
0x730A: 'ní',
0x730B: 'biāo',
0x730C: 'yìn',
0x730D: 'lái',
0x730E: 'liè',
0x730F: 'jiān,yàn',
0x7310: 'qiāng',
0x7311: 'kūn',
0x7312: 'yàn',
0x7313: 'guō',
0x7314: 'zòng',
0x7315: 'mí',
0x7316: 'chāng',
0x7317: 'yī,yǐ',
0x7318: 'zhì',
0x7319: 'zhēng',
0x731A: 'yá,wèi',
0x731B: 'měng',
0x731C: 'cāi',
0x731D: 'cù',
0x731E: 'shē',
0x731F: 'liè',
0x7320: 'ceon',
0x7321: 'luó',
0x7322: 'hú',
0x7323: 'zōng',
0x7324: 'guì',
0x7325: 'wěi',
0x7326: 'fēng',
0x7327: 'wō',
0x7328: 'yuán',
0x7329: 'xīng',
0x732A: 'zhū',
0x732B: 'māo,máo',
0x732C: 'wèi',
0x732D: 'chuàn,chuān',
0x732E: 'xiàn',
0x732F: 'tuān,tuàn',
0x7330: 'yà,jiá,qiè',
0x7331: 'náo',
0x7332: 'xiē,hè,gé,hài',
0x7333: 'jiā',
0x7334: 'hóu',
0x7335: 'biān,piàn',
0x7336: 'yóu',
0x7337: 'yóu',
0x7338: 'méi',
0x7339: 'chá',
0x733A: 'yáo',
0x733B: 'sūn',
0x733C: 'bó,pò',
0x733D: 'míng',
0x733E: 'huá',
0x733F: 'yuán',
0x7340: 'sōu',
0x7341: 'mǎ',
0x7342: 'huán',
0x7343: 'dāi',
0x7344: 'yù',
0x7345: 'shī',
0x7346: 'háo',
0x7347: 'qiāng',
0x7348: 'yì',
0x7349: 'zhēn',
0x734A: 'cāng',
0x734B: 'háo,gāo',
0x734C: 'màn',
0x734D: 'jìng',
0x734E: 'jiǎng',
0x734F: 'mò',
0x7350: 'zhāng',
0x7351: 'chán',
0x7352: 'áo',
0x7353: 'áo',
0x7354: 'háo',
0x7355: 'suǒ,cuī',
0x7356: 'fén,fèn',
0x7357: 'jué',
0x7358: 'bì',
0x7359: 'bì',
0x735A: 'huáng',
0x735B: 'pú',
0x735C: 'lín,lìn',
0x735D: 'xù',
0x735E: 'tóng',
0x735F: 'yào,xiāo',
0x7360: 'liáo',
0x7361: 'shuò,xī',
0x7362: 'xiāo',
0x7363: 'shòu',
0x7364: 'dūn',
0x7365: 'jiào',
0x7366: 'gé,liè,xiē',
0x7367: 'juàn',
0x7368: 'dú',
0x7369: 'huì',
0x736A: 'kuài',
0x736B: 'xiǎn',
0x736C: 'xiè',
0x736D: 'tǎ',
0x736E: 'xiǎn',
0x736F: 'xūn',
0x7370: 'níng',
0x7371: 'biān,piàn',
0x7372: 'huò',
0x7373: 'nòu,rú',
0x7374: 'méng',
0x7375: 'liè',
0x7376: 'náo,nǎo,yōu',
0x7377: 'guǎng',
0x7378: 'shòu',
0x7379: 'lú',
0x737A: 'tǎ',
0x737B: 'xiàn',
0x737C: 'mí',
0x737D: 'ráng',
0x737E: 'huān',
0x737F: 'náo,yōu',
0x7380: 'luó',
0x7381: 'xiǎn',
0x7382: 'qí',
0x7383: 'jué',
0x7384: 'xuán',
0x7385: 'miào',
0x7386: 'zī',
0x7387: 'shuài,lǜ',
0x7388: 'lú',
0x7389: 'yù',
0x738A: 'sù',
0x738B: 'wáng,wàng',
0x738C: 'qiú',
0x738D: 'gǎ',
0x738E: 'dīng',
0x738F: 'lè',
0x7390: 'bā',
0x7391: 'jī',
0x7392: 'hóng',
0x7393: 'dì',
0x7394: 'chuàn',
0x7395: 'gān',
0x7396: 'jiǔ',
0x7397: 'yú',
0x7398: 'qǐ',
0x7399: 'yú',
0x739A: 'chàng,yáng',
0x739B: 'mǎ',
0x739C: 'hóng',
0x739D: 'wǔ',
0x739E: 'fū',
0x739F: 'mín,wén',
0x73A0: 'jiè',
0x73A1: 'yà',
0x73A2: 'bīn,fēn',
0x73A3: 'biàn',
0x73A4: 'bàng',
0x73A5: 'yuè',
0x73A6: 'jué',
0x73A7: 'mén,yǔn',
0x73A8: 'jué',
0x73A9: 'wán',
0x73AA: 'jiān,qián',
0x73AB: 'méi',
0x73AC: 'dǎn',
0x73AD: 'pín',
0x73AE: 'wěi',
0x73AF: 'huán',
0x73B0: 'xiàn',
0x73B1: 'qiāng,cāng',
0x73B2: 'líng',
0x73B3: 'dài',
0x73B4: 'yì',
0x73B5: 'án,gān',
0x73B6: 'píng',
0x73B7: 'diàn',
0x73B8: 'fú',
0x73B9: 'xuán,xián',
0x73BA: 'xǐ',
0x73BB: 'bō',
0x73BC: 'cī,cǐ',
0x73BD: 'gǒu',
0x73BE: 'jiǎ',
0x73BF: 'sháo',
0x73C0: 'pò',
0x73C1: 'cí',
0x73C2: 'kē',
0x73C3: 'rǎn',
0x73C4: 'shēng',
0x73C5: 'shēn',
0x73C6: 'yí,tāi',
0x73C7: 'zǔ,jù',
0x73C8: 'jiā',
0x73C9: 'mín',
0x73CA: 'shān',
0x73CB: 'liǔ',
0x73CC: 'bì',
0x73CD: 'zhēn',
0x73CE: 'zhēn',
0x73CF: 'jué',
0x73D0: 'fà',
0x73D1: 'lóng',
0x73D2: 'jīn',
0x73D3: 'jiào',
0x73D4: 'jiàn',
0x73D5: 'lì',
0x73D6: 'guāng',
0x73D7: 'xiān',
0x73D8: 'zhōu',
0x73D9: 'gǒng',
0x73DA: 'yān',
0x73DB: 'xiù',
0x73DC: 'yáng',
0x73DD: 'xǔ',
0x73DE: 'luò',
0x73DF: 'sù',
0x73E0: 'zhū',
0x73E1: 'qín',
0x73E2: 'yín,kèn',
0x73E3: 'xún',
0x73E4: 'bǎo',
0x73E5: 'ěr',
0x73E6: 'xiàng',
0x73E7: 'yáo',
0x73E8: 'xiá',
0x73E9: 'héng',
0x73EA: 'guī',
0x73EB: 'chōng',
0x73EC: 'xù',
0x73ED: 'bān',
0x73EE: 'pèi',
0x73EF: 'lǎo',
0x73F0: 'dāng',
0x73F1: 'yīng',
0x73F2: 'hún,huī',
0x73F3: 'wén',
0x73F4: 'é',
0x73F5: 'chéng',
0x73F6: 'dì,tí',
0x73F7: 'wǔ',
0x73F8: 'wú',
0x73F9: 'chéng',
0x73FA: 'jùn',
0x73FB: 'méi',
0x73FC: 'bèi',
0x73FD: 'tǐng',
0x73FE: 'xiàn',
0x73FF: 'chù',
0x7400: 'hán',
0x7401: 'xuán,qióng',
0x7402: 'yán',
0x7403: 'qiú',
0x7404: 'xuàn',
0x7405: 'láng',
0x7406: 'lǐ',
0x7407: 'xiù',
0x7408: 'fú,fū',
0x7409: 'liú',
0x740A: 'yá',
0x740B: 'xī',
0x740C: 'líng',
0x740D: 'lí',
0x740E: 'jīn',
0x740F: 'liǎn',
0x7410: 'suǒ',
0x7411: 'suǒ',
0x7412: 'fēng',
0x7413: 'wán',
0x7414: 'diàn',
0x7415: 'pín,bǐng',
0x7416: 'zhǎn',
0x7417: 'cuì,sè',
0x7418: 'mín',
0x7419: 'yù',
0x741A: 'jū',
0x741B: 'chēn',
0x741C: 'lái',
0x741D: 'mín',
0x741E: 'shèng',
0x741F: 'wéi,yù',
0x7420: 'tiǎn,tiàn',
0x7421: 'shū',
0x7422: 'zhuó,zuó',
0x7423: 'běng,pěi',
0x7424: 'chēng',
0x7425: 'hǔ',
0x7426: 'qí',
0x7427: 'è',
0x7428: 'kūn',
0x7429: 'chāng',
0x742A: 'qí',
0x742B: 'běng',
0x742C: 'wǎn',
0x742D: 'lù',
0x742E: 'cóng',
0x742F: 'guǎn',
0x7430: 'yǎn',
0x7431: 'diāo',
0x7432: 'bèi',
0x7433: 'lín',
0x7434: 'qín',
0x7435: 'pí',
0x7436: 'pá',
0x7437: 'què',
0x7438: 'zhuó',
0x7439: 'qín',
0x743A: 'fà',
0x743B: 'jīn',
0x743C: 'qióng',
0x743D: 'dǔ',
0x743E: 'jiè',
0x743F: 'hún,huī',
0x7440: 'yǔ',
0x7441: 'mào',
0x7442: 'méi',
0x7443: 'chūn',
0x7444: 'xuān',
0x7445: 'tí',
0x7446: 'xīng',
0x7447: 'dài',
0x7448: 'róu',
0x7449: 'mín',
0x744A: 'jiān',
0x744B: 'wěi',
0x744C: 'ruǎn',
0x744D: 'huàn',
0x744E: 'xié,jiē',
0x744F: 'chuān',
0x7450: 'jiǎn',
0x7451: 'zhuàn',
0x7452: 'chàng,yáng',
0x7453: 'liàn',
0x7454: 'quán',
0x7455: 'xiá',
0x7456: 'duàn',
0x7457: 'yuàn',
0x7458: 'yé',
0x7459: 'nǎo',
0x745A: 'hú',
0x745B: 'yīng',
0x745C: 'yú',
0x745D: 'huáng',
0x745E: 'ruì',
0x745F: 'sè',
0x7460: 'liú',
0x7461: 'shī',
0x7462: 'róng',
0x7463: 'suǒ',
0x7464: 'yáo',
0x7465: 'wēn',
0x7466: 'wǔ',
0x7467: 'zhēn',
0x7468: 'jìn',
0x7469: 'yíng',
0x746A: 'mǎ',
0x746B: 'tāo',
0x746C: 'liú',
0x746D: 'táng',
0x746E: 'lì',
0x746F: 'láng',
0x7470: 'guī',
0x7471: 'tiàn,tián,zhèn',
0x7472: 'qiāng,cāng',
0x7473: 'cuō',
0x7474: 'jué',
0x7475: 'zhǎo',
0x7476: 'yáo',
0x7477: 'ài',
0x7478: 'bīn,pián',
0x7479: 'tú,shū',
0x747A: 'cháng',
0x747B: 'kūn',
0x747C: 'zhuān',
0x747D: 'cōng',
0x747E: 'jǐn',
0x747F: 'yī',
0x7480: 'cuǐ',
0x7481: 'cōng',
0x7482: 'qí',
0x7483: 'lí',
0x7484: 'jǐng',
0x7485: 'zǎo,suǒ',
0x7486: 'qiú',
0x7487: 'xuán',
0x7488: 'áo',
0x7489: 'liǎn',
0x748A: 'mén',
0x748B: 'zhāng',
0x748C: 'yín',
0x748D: 'yè',
0x748E: 'yīng',
0x748F: 'zhì',
0x7490: 'lù',
0x7491: 'wú',
0x7492: 'dēng',
0x7493: 'xiù',
0x7494: 'zēng',
0x7495: 'xún',
0x7496: 'qú',
0x7497: 'dàng',
0x7498: 'lín',
0x7499: 'liáo',
0x749A: 'qióng,jué',
0x749B: 'sù',
0x749C: 'huáng',
0x749D: 'guī',
0x749E: 'pú',
0x749F: 'jǐng',
0x74A0: 'fán',
0x74A1: 'jīn',
0x74A2: 'liú',
0x74A3: 'jī',
0x74A4: 'huì',
0x74A5: 'jǐng',
0x74A6: 'ài',
0x74A7: 'bì',
0x74A8: 'càn',
0x74A9: 'qú',
0x74AA: 'zǎo',
0x74AB: 'dāng',
0x74AC: 'jiǎo',
0x74AD: 'guǎn',
0x74AE: 'tǎn',
0x74AF: 'huì,kuài',
0x74B0: 'huán',
0x74B1: 'sè',
0x74B2: 'suì',
0x74B3: 'tián',
0x74B4: 'chǔ',
0x74B5: 'yú',
0x74B6: 'jìn',
0x74B7: 'lú,fū',
0x74B8: 'bīn,pián',
0x74B9: 'shú',
0x74BA: 'wèn',
0x74BB: 'zuǐ',
0x74BC: 'lán',
0x74BD: 'xǐ',
0x74BE: 'jì,zī',
0x74BF: 'xuán',
0x74C0: 'ruǎn',
0x74C1: 'wò',
0x74C2: 'gài',
0x74C3: 'léi',
0x74C4: 'dú',
0x74C5: 'lì',
0x74C6: 'zhì',
0x74C7: 'róu',
0x74C8: 'lí',
0x74C9: 'zàn',
0x74CA: 'qióng',
0x74CB: 'tì',
0x74CC: 'guī',
0x74CD: 'suí',
0x74CE: 'là',
0x74CF: 'lóng',
0x74D0: 'lú',
0x74D1: 'lì',
0x74D2: 'zàn',
0x74D3: 'làn',
0x74D4: 'yīng',
0x74D5: 'mí,xǐ',
0x74D6: 'xiāng',
0x74D7: 'qióng,wěi,wèi',
0x74D8: 'guàn',
0x74D9: 'dào',
0x74DA: 'zàn',
0x74DB: 'huán,yè,yà',
0x74DC: 'guā',
0x74DD: 'bó',
0x74DE: 'dié',
0x74DF: 'bó,páo',
0x74E0: 'hù',
0x74E1: 'zhí,hú',
0x74E2: 'piáo',
0x74E3: 'bàn',
0x74E4: 'ráng',
0x74E5: 'lì',
0x74E6: 'wǎ,wà',
0x74E7: 'shíwǎ',
0x74E8: 'xiáng,hóng',
0x74E9: 'qiānwǎ',
0x74EA: 'bǎn',
0x74EB: 'pén',
0x74EC: 'fǎng',
0x74ED: 'dǎn',
0x74EE: 'wèng',
0x74EF: 'ōu',
0x74F0: 'fēnwǎ',
0x74F1: 'máowǎ',
0x74F2: 'túnwǎ',
0x74F3: 'hú',
0x74F4: 'líng',
0x74F5: 'yí',
0x74F6: 'píng',
0x74F7: 'cí',
0x74F8: 'bǎi,wǎ',
0x74F9: 'juàn,juān',
0x74FA: 'cháng',
0x74FB: 'chī',
0x74FC: 'lǐwǎ',
0x74FD: 'dàng',
0x74FE: 'wā',
0x74FF: 'bù',
0x7500: 'zhuì',
0x7501: 'píng',
0x7502: 'biān',
0x7503: 'zhòu',
0x7504: 'zhēn',
0x7505: 'líwǎ',
0x7506: 'cí',
0x7507: 'yīng',
0x7508: 'qì',
0x7509: 'xián',
0x750A: 'lǒu',
0x750B: 'dì',
0x750C: 'ōu',
0x750D: 'méng',
0x750E: 'zhuān',
0x750F: 'bèng',
0x7510: 'lìn',
0x7511: 'zèng',
0x7512: 'wǔ',
0x7513: 'pì',
0x7514: 'dān',
0x7515: 'wèng',
0x7516: 'yīng',
0x7517: 'yǎn',
0x7518: 'gān',
0x7519: 'dài',
0x751A: 'shèn,shén',
0x751B: 'tián',
0x751C: 'tián',
0x751D: 'hán',
0x751E: 'cháng',
0x751F: 'shēng',
0x7520: 'qíng',
0x7521: 'shēn',
0x7522: 'chǎn',
0x7523: 'chǎn',
0x7524: 'ruí',
0x7525: 'shēng',
0x7526: 'sū',
0x7527: | |
import tensorflow as tf
import pdb
import numpy as np
import os
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from mpl_toolkits.axes_grid1 import make_axes_locatable
import scipy
import myParams
def getHome():
    """Return the base data directory for this machine."""
    # Earlier machines used other roots, e.g. '/home/deni/' or
    # '/media/a/H2/home/a/'; only the current one is active.
    home_dir = '/opt/data/'
    return home_dir
def getDatasetsBase():
    """Return the root directory that holds the TF datasets."""
    # A previous location was '/home/deni/'.
    datasets_root = '/media/a/H1/TFDatasets/'
    return datasets_root
def getParam_tmpF(s):
    """Coerce a parameter string to int, float, int array, or float array.

    Tries, in order: int, float, comma-separated int list, comma-separated
    float list (lists become numpy arrays).  If nothing parses, the string
    is returned unchanged.
    """
    # Scalar parses first: int wins over float.
    for cast in (int, float):
        try:
            return cast(s)
        except ValueError:
            pass
    # Comma-separated lists become numpy arrays, int preferred over float.
    for cast in (int, float):
        try:
            return np.array([cast(tok) for tok in s.split(',')])
        except ValueError:
            pass
    return s
def readParamsTxt(ParamFN):
    """Parse a whitespace-separated "key value" parameter file.

    Each usable line contributes one entry to the global myParams.myDict,
    with values coerced by getParam_tmpF.  Lines shorter than 3 characters
    are skipped.  Nothing is returned; the effect is the dict mutation.
    """
    ParamsD = {}
    with open(ParamFN) as f:
        for line in f:
            if len(line)<3:
                continue
            # print(line)
            #print(line.replace("\n",""))
            # Appending ' a' guarantees split() yields at least three
            # fields, so "key value" lines still unpack cleanly; X holds
            # the (ignored) remainder of the line.
            (key,val,X)=(line+' a').split(maxsplit=2)
            # (key, val) = line.split()
            valx=getParam_tmpF(val)
            ParamsD[key] = valx
            myParams.myDict[key]=ParamsD[key]
            # print(key + " : " + str(val) + " " + type(valx).__name__)
def getparam(S):
    """Look up parameter S in the global myParams.myDict.

    Returns the stored value, or 0 (after printing a warning) when the
    key is absent.
    """
    try:
        return myParams.myDict[S]
    except KeyError:
        # Dict lookups raise KeyError, not ValueError; the original caught
        # ValueError, so missing keys crashed the caller instead of
        # falling back to 0.
        print('Couldnt find parameter: ' + S)
        return 0
def setparam(S,V):
    """Store value V under key S in the global parameter dictionary."""
    myParams.myDict[S] = V
def ConcatCOnDim(X,dim):
    # Concatenate the real and imaginary parts of complex tensor X along
    # axis `dim`, doubling that axis length (TF1-era tf.real/tf.imag API).
    # return tf.cast(tf.concat([tf.real(X),tf.imag(X)],axis=dim),tf.float32)
    return tf.concat([tf.real(X),tf.imag(X)],axis=dim)
# Fixed-axis shorthands for ConcatCOnDim.
def ConcatRIOn0(X): return tf.concat([tf.real(X),tf.imag(X)],axis=0)
def ConcatRIOn1(X): return tf.concat([tf.real(X),tf.imag(X)],axis=1)
def ConcatRIOn2(X): return tf.concat([tf.real(X),tf.imag(X)],axis=2)
def ConcatRIOn3(X): return tf.concat([tf.real(X),tf.imag(X)],axis=3)
def ConcatRIOn4(X): return tf.concat([tf.real(X),tf.imag(X)],axis=4)
def ConcatRIOn5(X): return tf.concat([tf.real(X),tf.imag(X)],axis=5)
def ConcatRIOn6(X): return tf.concat([tf.real(X),tf.imag(X)],axis=6)
def ConcatRIOn7(X): return tf.concat([tf.real(X),tf.imag(X)],axis=7)
def ConcatCOnDimWithStack(X,dim):
    # Insert a new axis at `dim` for each of the real/imag parts before
    # concatenating, so the output gains a length-2 real/imag axis there.
    # return tf.cast(tf.concat([tf.stack([tf.real(X)],axis=dim),tf.stack([tf.imag(X)],axis=dim)],axis=dim),tf.float32)
    return tf.concat([tf.stack([tf.real(X)],axis=dim),tf.stack([tf.imag(X)],axis=dim)],axis=dim)
def NP_ConcatCOnDim(X, dim):
    """Concatenate real and imaginary parts of X along axis `dim`, as float32."""
    parts = (np.real(X), np.imag(X))
    return np.concatenate(parts, axis=dim).astype(np.float32)
# Numpy fixed-axis shorthands for NP_ConcatCOnDim (float32 real/imag concat).
def NP_ConcatRIOn0(X): return NP_ConcatCOnDim(X,0)
def NP_ConcatRIOn1(X): return NP_ConcatCOnDim(X,1)
def NP_ConcatRIOn2(X): return NP_ConcatCOnDim(X,2)
def NP_ConcatRIOn3(X): return NP_ConcatCOnDim(X,3)
def NP_ConcatRIOn4(X): return NP_ConcatCOnDim(X,4)
def NP_ConcatRIOn5(X): return NP_ConcatCOnDim(X,5)
def NP_ConcatRIOn6(X): return NP_ConcatCOnDim(X,6)
# 2-D (i)FFT over the FIRST two axes of a 6-D numpy array: move axes 0,1
# last (np.fft.fft2 transforms the last two axes by default), then back.
def NP_fft2d_on6d(X): return np.transpose(np.fft.fft2(np.transpose(X,(2,3,4,5,0,1))),(4,5,0,1,2,3))
def NP_ifft2d_on6d(X): return np.transpose(np.fft.ifft2(np.transpose(X,(2,3,4,5,0,1))),(4,5,0,1,2,3))
# def RItoCon4(X):
# return tf.squeeze(tf.complex(tf.slice(X,[0,0,0,0],[-1,-1,-1,1]),tf.slice(X,[0,0,0,1],[-1,-1,-1,1])))
# def RItoCon4(X):
# return tf.squeeze(tf.complex(tf.slice(X,[0,0,0,0],[batch_size,H,W,1]),tf.slice(X,[0,0,0,1],[batch_size,H,W,1])))
# Append a trailing singleton axis.
def NP_addDim(X): return np.stack([X],axis=-1)
def TF_addDim(X): return tf.stack([X],axis=-1)
# Rank-lifting helpers: append singleton axes until the named rank.
def TF_2d_to_3d(X): return tf.stack([X],axis=2)
def TF_3d_to_4d(X): return tf.stack([X],axis=3)
def TF_4d_to_5d(X): return tf.stack([X],axis=4)
def TF_5d_to_6d(X): return tf.stack([X],axis=5)
def TF_2d_to_4d(X): return TF_3d_to_4d(TF_2d_to_3d(X))
def TF_2d_to_5d(X): return TF_4d_to_5d(TF_3d_to_4d(TF_2d_to_3d(X)))
def TF_3d_to_5d(X): return TF_4d_to_5d(TF_3d_to_4d(X))
# 2-D (i)FFT over the FIRST two axes of an N-D tensor (tf.fft2d operates on
# the last two axes, hence the transpose round-trips).
def TF_fft2d_on5d(X): return tf.transpose(tf.fft2d(tf.transpose(X,[2,3,4,0,1])),[3,4,0,1,2])
def TF_ifft2d_on5d(X): return tf.transpose(tf.ifft2d(tf.transpose(X,[2,3,4,0,1])),[3,4,0,1,2])
def TF_fft2d_on6d(X): return tf.transpose(tf.fft2d(tf.transpose(X,[2,3,4,5,0,1])),[4,5,0,1,2,3])
def TF_ifft2d_on6d(X): return tf.transpose(tf.ifft2d(tf.transpose(X,[2,3,4,5,0,1])),[4,5,0,1,2,3])
def TF_fft2d_on7d(X): return tf.transpose(tf.fft2d(tf.transpose(X,[2,3,4,5,6,0,1])),[5,6,0,1,2,3,4])
def TF_ifft2d_on7d(X): return tf.transpose(tf.ifft2d(tf.transpose(X,[2,3,4,5,6,0,1])),[5,6,0,1,2,3,4])
# Generic-rank versions: N is the tensor rank.
def TF_fft2d_onNd(X,N): return tf.transpose(tf.fft2d(tf.transpose(X,np.concatenate((np.arange(2,N),[0,1]),axis=0))),np.concatenate(([N-2,N-1],np.arange(0,N-2)),axis=0))
def TF_ifft2d_onNd(X,N): return tf.transpose(tf.ifft2d(tf.transpose(X,np.concatenate((np.arange(2,N),[0,1]),axis=0))),np.concatenate(([N-2,N-1],np.arange(0,N-2)),axis=0))
def TF_fft2d_on3d(X): return tf.transpose(tf.fft2d(tf.transpose(X,[2,0,1])),[1,2,0])
def TF_ifft2d_on3d(X): return tf.transpose(tf.ifft2d(tf.transpose(X,[2,0,1])),[1,2,0])
# Mean absolute value of a tensor (crude magnitude metric).
def tfrm(X): return tf.reduce_mean(tf.abs(X))
def rms(X):
    """Root-mean-square of the magnitudes of X (works for complex input)."""
    return np.sqrt(np.mean(np.abs(X) ** 2))
# Root-mean-square of tensor magnitudes (TF counterpart of rms).
def TF_rms(X): return tf.sqrt(tf.reduce_mean(tf.square(tf.abs(X))))
def QuickCompare(Ref,X):
    # [ref magnitude, test magnitude, error magnitude, signal/error ratio]
    return [rms(Ref),rms(X),rms(Ref-X),rms(Ref)/rms(Ref-X)]
def toep(X, Kern, H, W):
    """Apply a Toeplitz (circular-convolution) operator to X.

    X is zero-padded from HxW to 2Hx2W, filtered in the frequency domain
    by the precomputed kernel Kern, and the HxW corner is returned.
    """
    padded = np.pad(X, ((0, H), (0, W)), 'constant')
    filtered = np.fft.ifft2(np.fft.fft2(padded, axes=(0, 1)) * Kern, axes=(0, 1))
    return filtered[:H, :W]
def TF_toep(X,Kern,H,W):
    # TF twin of toep: zero-pad to 2H x 2W, multiply the FFT by the
    # precomputed frequency kernel, inverse-FFT and crop to H x W.
    return tf.ifft2d(tf.fft2d(tf.pad(X,((0,H),(0,W)),'constant'))*Kern)[:H,:W]
def cgp(x0, A, b, mit, stol, bbA):
    """Conjugate-gradient solve of A x = b (no preconditioner).

    bbA(A, x) supplies the operator-vector product, so A can be any
    object bbA understands.  Iterates until rms(residual) <= stol or
    `mit` iterations are reached; returns (solution, iteration_count).
    (Adapted from the Wikipedia conjugate-gradient example code.)
    """
    x = x0
    prev_h = prev_r = 0
    prev2_h = prev2_r = 0
    direction = 0
    iters = 0
    residual = b - bbA(A, x0)  # r0 = b - A x0
    while rms(residual) > stol:
        h = residual  # no preconditioner, so h coincides with r
        iters = iters + 1
        if (iters == mit):
            # Iteration cap reached without meeting the tolerance.
            print('GCP:MAXIT: mit reached, no conversion.')
            return x, iters
        prev2_h, prev2_r = prev_h, prev_r
        prev_h, prev_r = h, residual
        t = np.sum(np.conj(prev_r) * prev_h)
        if iters == 1:
            direction = prev_h
        else:
            # beta update keeps successive search directions conjugate
            direction = prev_h + (t / np.sum(np.conj(prev2_r) * prev2_h)) * direction
        Ad = bbA(A, direction)  # A * direction
        alpha = t / np.sum(np.conj(direction) * Ad)
        x = x + alpha * direction
        residual = prev_r - alpha * Ad
    return x, iters
def TF_cgp(x0, A, b, mit, stol, bbA):
    """TensorFlow twin of cgp: conjugate-gradient solve of A x = b, with
    bbA(A, x) supplying the operator product.

    NOTE(review): the Python-level `while`/`if` on tensor values means
    this is only usable where those comparisons evaluate eagerly --
    confirm against the intended execution mode.
    """
    x = x0;
    ha = 0;
    hp = 0;
    hpp = 0;
    ra = 0;
    rp = 0;
    rpp = 0;
    u = 0;
    k = 0;
    ra = b - bbA(A, x0); # <--- ra = b - A * x0;
    while TF_rms(ra) > stol:
        ha=ra
        k = k + 1;
        if (k == mit):
            # Iteration cap reached without meeting the tolerance.
            print('GCP:MAXIT: mit reached, no conversion.');
            return x,k
        hpp = hp;
        rpp = rp;
        hp = ha;
        rp = ra;
        t = tf.reduce_sum(tf.conj(rp)*hp)
        if k == 1:
            u = hp;
        else:
            # beta update keeps successive search directions conjugate
            u = hp + (t / tf.reduce_sum(tf.conj(rpp)*hpp)) * u;
        Au = bbA(A, u) # <--- Au = A * u;
        Fac=tf.reduce_sum(tf.conj(u)*Au)
        a = t / Fac
        x = x + a * u;
        ra = rp - a * Au;
    return x,k
def NP_NUFFT_forw(X,SN,P,H,W):
    # Forward NUFFT: scale the image by SN, zero-pad HxW -> 2Hx2W,
    # 2-D FFT, flatten, then interpolate onto the trajectory via P.
    # assumes P is a scipy sparse interpolation matrix -- TODO confirm
    return P*np.reshape(np.fft.fft2(np.pad(X*SN,((0,H),(0,W)),'constant')),-1)
# def back(X,SN,P,H,W):
# return np.fft.ifft2(np.reshape(np.conj(P.T)*X,((H*2,W*2))),axes=(0,1))[:H,:W]*np.conj(SN)
def NP_NUFFT_back(X,SN,P,H,W):
    # Adjoint NUFFT: grid the samples with conj(P^T), inverse FFT on the
    # 2Hx2W grid, crop to HxW, and undo the SN scaling (conjugated).
    return (np.fft.ifft2(np.reshape(np.conj(np.transpose(P))*X,(H*2,W*2)))[:H,:W])*np.conj(SN)
def NP_NUFFT_forwWback(X,Wx,SN,P,H,W):
    # Weighted normal operator: adjoint(Wx * forward(X)).
    return NP_NUFFT_back(NP_NUFFT_forw(X,SN,P,H,W)*Wx,SN,P,H,W)
def NP_NUFFTHNUFFT_WithW(I,SN,P,CurW,H,W):
    # Step-by-step version of NP_NUFFT_forwWback (same math, explicit
    # intermediates; note the argument order differs: weights are CurW).
    Step1=I*SN
    Pad=np.pad(Step1,((0,H),(0,W)),'constant')
    F=np.fft.fft2(Pad)
    Col=np.reshape(F,(-1))
    Sig=P*Col
    Sig=Sig*CurW
    # Out=back(Sig,SN,P,H,W)
    Step1=np.conj(np.transpose(P))*Sig
    Step1=np.reshape(Step1,(H*2,W*2))
    F=np.fft.ifft2(Step1)
    Cropped=F[:H,:W]
    Out=Cropped*np.conj(SN)
    return Out
def NUFFT_to_ToepKern(Wx,SN,P,H,W):
    # NUFFT to ToepKern
    # Build the 2Hx2W frequency-domain kernel that embeds the weighted
    # NUFFT normal operator as a Toeplitz/circular convolution, by probing
    # the operator with impulses at the four corners of the HxW image and
    # tiling the four responses into one big grid.
    v11=np.zeros((H,W),np.complex128)
    v12=np.zeros((H,W),np.complex128)
    v21=np.zeros((H,W),np.complex128)
    v22=np.zeros((H,W),np.complex128)
    v11[0,0]=1
    v12[0,-1]=1
    v21[-1,0]=1
    v22[-1,-1]=1
    block11=NP_NUFFTHNUFFT_WithW(v11,SN,P,Wx,H,W)
    block12=NP_NUFFTHNUFFT_WithW(v12,SN,P,Wx,H,W)
    block21=NP_NUFFTHNUFFT_WithW(v21,SN,P,Wx,H,W)
    block22=NP_NUFFTHNUFFT_WithW(v22,SN,P,Wx,H,W)
    Big=np.zeros((H*2,W*2),np.complex128)
    Big[:H,:W]=block22;
    Big[H-1:-1,W-1:-1]=block11;
    Big[:H,W-1:-1]=block21;
    Big[H-1:-1,:W]=block12;
    # Re-center so the zero-lag tap lands at index (0,0) before the FFT.
    Bigc=np.roll(Big,(-H+1,-W+1),(0,1))
    TKern=np.fft.fft2(Bigc)
    return TKern
# QuickCompare(TKern,TKern1)
def _glorot_initializer_g(units, stddev_factor=1.0):
    """Initialization in the style of Glorot 2010.
    stddev_factor should be 1.0 for linear activations, and 2.0 for ReLUs"""
    # NOTE(review): uses sqrt(factor / sqrt(prod(shape))) rather than the
    # textbook sqrt(2 / (fan_in + fan_out)) -- presumably intentional;
    # verify before reuse.
    stddev = np.sqrt(stddev_factor / np.sqrt(np.prod(units)))
    return tf.truncated_normal(units,mean=0.0, stddev=stddev)
""" Example use of TF_TSNUFFT:
B0Data=scipy.io.loadmat('/media/a/H1/MoreDataForTFNUFT.mat')
Sens=B0Data['Sens']
TSBF=B0Data['TSBF']
TSC=B0Data['TSC']
NUFTData=scipy.io.loadmat('/media/a/DATA/180628_AK/meas_MID244_gBP_VD11_U19_G35S155_4min_FID22439/TrajForNUFT.mat')
Kd=NUFTData['Kd']
P=NUFTData['P']
SN=NUFTData['SN']
Trajm2=NUFTData['Trajm2']
SmpI=scipy.io.loadmat('/media/a/H1/SmpI.mat')
SmpI=SmpI['SmpI']
nTraj=Trajm2.shape[1]
nCh=Sens.shape[2]
nTSC=TSC.shape[2]
SNc,paddings,sp_R,sp_I,TSBFX=GT.TF_TSNUFFT_Prepare(SN,Sens,TSC,TSBF,Kd,P)
Out=GT.TF_TSNUFFT_Run(SmpI,SNc,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFX)
SOut={}
SOut['Out']=Out
scipy.io.savemat('/media/a/H1/TFTSNUFTOut.mat',SOut)
"""
# def TS_NUFFT_OPHOP(InImage,TSCSens,H,W,batch_size,paddingsY,nTSC,nCh,fftkernc5D):
# InImage=tf.stack([tf.stack([InImage],axis=3)],axis=4)
# InImage=tf.transpose(InImage,[1,2,3,4,0])
# Step1=tf.multiply(InImage,TSCSens)
# Padded=tf.pad(Step1, paddingsY, "CONSTANT")
# Step2=tf.transpose(tf.fft2d(tf.transpose(Padded,perm=[2,3,4,0,1])),[3,4,0,1,2])
# Step2=tf.multiply(Step2,fftkernc5D)
# Step2=tf.transpose(tf.ifft2d(tf.transpose(Step2,perm=[2,3,4,0,1])),[3,4,0,1,2])
# Cropped=tf.slice(Step2,[0,0,0,0,0],[H,W,nTSC,nCh,batch_size])
# Step3=tf.multiply(Cropped,tf.conj(TSCSens))
# Step3=tf.reduce_sum(Step3,axis=[2,3])
# Step3=tf.transpose(Step3,[2,0,1])
# return Step3
def blocksToFftkern(block1,block2):
(N1,N2)=block1.shape
z1 = np.zeros((N1,1))
z2 = np.zeros((N1-1,1))
Row1=np.concatenate((block1,z1,np.conj(np.flip(np.concatenate((block1[0:1,1:],block2[1:,1:]),axis=0),axis=1)) ),axis=1)
Row2=np.concatenate((np.flip(block2[1:,:],axis=0),z2,np.flip(np.flip(np.conj(block1[1:,1:]),axis=0),axis=1)),axis=1)
tmp1a=np.concatenate((Row1,np.zeros((1,N2*2)),Row2),axis=0)
tmp2a=np.conj(np.flip(np.flip(np.roll(np.roll(tmp1a,-1,axis=0),-1,axis=1),axis=0),axis=1))
kern=(tmp1a+tmp2a)/2
fftkerna=np.fft.fft2(kern)
fftkerna=np.real(fftkerna)
return fftkerna
def GetTSCoeffsByLinear(N,L):
    """Return an (N, L) matrix of linear-interpolation weights.

    The L control points sit uniformly on [0, 1]; row j holds the weights
    that linearly interpolate control values to the j-th of N uniform
    sample times, so M @ c interpolates a coefficient vector c.
    Column i is the hat (tent) basis of control point i.
    """
    # `import scipy` alone does not guarantee the interpolate subpackage
    # is loaded; import it explicitly so this works regardless of what
    # else has been imported.
    import scipy.interpolate
    M = np.zeros((N, L))
    Ttimes = np.linspace(0, 1, L)
    xnew = np.linspace(0, 1, N)
    for i in range(L):
        # Interpolate the i-th unit vector to get that basis column.
        basis = np.zeros(L)
        basis[i] = 1
        f = scipy.interpolate.interp1d(Ttimes, basis)
        M[:, i] = f(xnew)
    return M
def NP_Cartesian_OPHOP_ITS_MB(InImage,Sens6,Msk):
    # Numpy reference for Cartesian_OPHOP_ITS_MB: the Cartesian-sampling
    # normal operator A^H A -- multiply by coil sensitivities, FFT, apply
    # the sampling mask, inverse FFT, multiply by conjugate sensitivities,
    # and sum over the coil axis.
    # InImage is batch_size,H,W,nTSC,MB
    # Sens6 is H,W,/nTSC/,nCh,MB,batch_size
    InImage=NP_addDim(InImage)
    InImage=np.transpose(InImage,(1,2,3,5,4,0)) # H,W,nTSC,/nCh/,MB,batch_size
    Step1=InImage*Sens6 # H,W,nTSC,nCh,MB,batch_size
    F=NP_fft2d_on6d(Step1)
    MF=F*Msk
    IMF=NP_ifft2d_on6d(MF)
    SIMF=IMF*np.conj(Sens6)
    Step2=np.sum(SIMF,axis=3) # H,W,nTSC,MB,batch_size
    Step3=np.transpose(Step2,(4,0,1,2,3)) # batch_size,H,W,nTSC,MB
    return Step3 # batch_size,H,W,nTSC,MB
def Cartesian_OPHOP_ITS_MB(InImage,Sens6,Msk):
    # TF twin of NP_Cartesian_OPHOP_ITS_MB: Cartesian-sampling normal
    # operator (sens multiply, FFT, mask, iFFT, conj-sens, coil sum).
    # InImage is batch_size,H,W,nTSC,MB
    # Sens6 is H,W,/nTSC/,nCh,MB,batch_size
    InImage=TF_addDim(InImage)
    InImage=tf.transpose(InImage,[1,2,3,5,4,0]) # H,W,nTSC,/nCh/,MB,batch_size
    Step1=InImage*Sens6 # H,W,nTSC,nCh,MB,batch_size
    F=TF_fft2d_on6d(Step1)
    MF=F*Msk
    IMF=TF_ifft2d_on6d(MF)
    SIMF=IMF*tf.conj(Sens6)
    Step2=tf.reduce_sum(SIMF,axis=[3]) # H,W,nTSC,MB,batch_size
    Step3=tf.transpose(Step2,[4,0,1,2,3]) # batch_size,H,W,nTSC,MB
    return Step3 # batch_size,H,W,nTSC,MB
def TS_NUFFT_OPHOP_ITS_MB(InImage,Sens6,H,W,batch_size,paddingsYMB,nTSC,nCh,fftkernc7):
    # Toeplitz-embedded NUFFT normal operator, multiband variant: pad to
    # 2Hx2W, multiply by the precomputed frequency kernel, crop back.
    # InImage is batch_size,H,W,nTSC,MB
    # Sens6 is H,W,/nTSC/,nCh,MB,batch_size
    # fftkernc7 is # H*2,W*2,nTSC,/nCh/,MB,/batch_size/,MBaux
    InImage=TF_addDim(InImage) # batch_size,H,W,nTSC,MB,/nCh/
    InImage=tf.transpose(InImage,[1,2,3,5,4,0]) # H,W,nTSC,/nCh/,MB,batch_size
    Step1=InImage*Sens6 # H,W,nTSC,nCh,MB,batch_size
    Padded=tf.pad(Step1, paddingsYMB, "CONSTANT") # H*2,W*2,nTSC,nCh,MB,batch_size
    Step2=TF_fft2d_on6d(Padded) # H*2,W*2,nTSC,nCh,MB,batch_size
    Step2=TF_addDim(Step2) # H*2,W*2,nTSC,nCh,MB,batch_size,/MBaux/
    Step2=Step2*fftkernc7 # H*2,W*2,nTSC,nCh,MB,batch_size,MBaux
    Step2=TF_ifft2d_on7d(Step2) # H*2,W*2,nTSC,nCh,MB,batch_size,MBaux
    # Cropped=tf.slice(Step2,[0,0,0,0,0],[H,W,-1,-1,-1])
    Cropped=Step2[:H,:W,:,:,:,:,:] # H,W,nTSC,nCh,MB,batch_size,MBaux
    Step3a=Cropped*tf.conj(TF_addDim(Sens6))
    Step3=tf.reduce_sum(Step3a,axis=[3,4]) # H,W,nTSC,batch_size,MBaux
    Step3=tf.transpose(Step3,[3,0,1,2,4]) # batch_size,H,W,nTSC,MB?aux?
    return Step3 # batch_size,H,W,nTSC,MB?aux?
def TS_NUFFT_OPHOP_ITS(InImage,Sens5,H,W,batch_size,paddingsY,nTSC,nCh,fftkernc5):
    # Single-band Toeplitz-embedded NUFFT normal operator.
    # InImage is batch_size,H,W,nTSC
    # Sens5 is H,W,1,nCh,batch_size
    # fftkernc5D is H*2,W*2,nTSC,1,1
    InImage=TF_addDim(InImage) # batch_size,H,W,nTSC,1
    InImage=tf.transpose(InImage,[1,2,3,4,0]) # H,W,nTSC,1,batch_size
    Step1=InImage*Sens5 # H,W,nTSC,nCh,batch_size
    Padded=tf.pad(Step1, paddingsY, "CONSTANT") # H*2,W*2,nTSC,nCh,batch_size
    Step2=TF_fft2d_on5d(Padded)
    # Step2=tf.transpose(Step2,[1,0,2,3,4])
    Step2=Step2*fftkernc5
    # Step2=tf.transpose(Step2,[1,0,2,3,4])
    Step2=TF_ifft2d_on5d(Step2)
    Cropped=tf.slice(Step2,[0,0,0,0,0],[H,W,-1,-1,-1])
    Step3a=Cropped*tf.conj(Sens5)
    Step3=tf.reduce_sum(Step3a,axis=[3]) # H,W,nTSC,batch_size
    Step3=tf.transpose(Step3,[3,0,1,2]) # batch_size,H,W,nTSC
    return Step3 # batch_size,H,W,nTSC
def TS_NUFFT_OPHOP(InImage,TSCSens,H,W,batch_size,paddingsY,nTSC,nCh,fftkernc5D,SumOver=True):
    # Time-segment + coil normal operator; with SumOver=False the
    # per-segment / per-coil products are returned without the final sum.
    InImage=TF_3d_to_5d(InImage)
    InImage=tf.transpose(InImage,[1,2,3,4,0])
    Step1=tf.multiply(InImage,TSCSens)
    Padded=tf.pad(Step1, paddingsY, "CONSTANT")
    Step2=TF_fft2d_on5d(Padded)
    # Step2=tf.transpose(Step2,[1,0,2,3,4])
    Step2=tf.multiply(Step2,fftkernc5D)
    # Step2=tf.transpose(Step2,[1,0,2,3,4])
    Step2=TF_ifft2d_on5d(Step2)
    Cropped=tf.slice(Step2,[0,0,0,0,0],[H,W,nTSC,nCh,batch_size])
    Step3a=tf.multiply(Cropped,tf.conj(TSCSens))
    if SumOver:
        Step3=tf.reduce_sum(Step3a,axis=[2,3])
        Step3=tf.transpose(Step3,[2,0,1])
        return Step3
    else:
        return Step3a
def TS_NUFFT_OP(InImage,TSCSens,SNc,H,W,batch_size,paddingsX,nTraj,nTSC,nCh,sp_C,TSBFXc):
    # Forward time-segmented NUFFT: image -> per-coil k-space signal.
    # SN scaling, sens/time-segment multiply, zero-padded 2-D FFT, sparse
    # interpolation onto the trajectory (sp_C), then combination over the
    # time-segment basis TSBFXc.
    InImage=tf.stack([tf.stack([InImage],axis=3)],axis=4)
    InImage=tf.transpose(InImage,[1,2,3,4,0])
    Step1=tf.multiply(InImage,SNc)
    Step1=tf.multiply(Step1,TSCSens)
    Step1=tf.reshape(Step1,[H,W,nTSC*nCh*batch_size])
    Padded=tf.pad(Step1, paddingsX, "CONSTANT")
    Step2a=TF_fft2d_on3d(Padded)
    # Transpose matches the row-major ordering expected by sp_C's indices.
    Step2=tf.transpose(Step2a,[1,0,2])
    Col=tf.reshape(Step2,[-1,nTSC*nCh*batch_size])
    C=tf.sparse_tensor_dense_matmul(sp_C,Col)
    CX=tf.reshape(C,[nTraj,nTSC,nCh,batch_size])
    WithTSB=CX*TSBFXc
    WithTSBR=tf.reduce_sum(WithTSB,axis=1)
    Sig=tf.transpose(WithTSBR,[2,0,1])
    return Sig
def TS_NUFFT_OP_H(Sig,TSCSens,SNc,H,W,batch_size,paddingsX,nTraj,nTSC,nCh,sp_C,TSBFXc,SumOver=True):
    # Adjoint of TS_NUFFT_OP: k-space signal -> image.  conj(P x conj(.))
    # with adjoint_a implements the conjugate-transpose of the complex
    # sparse interpolator; the H*W*2*2 factor undoes ifft2d's 1/N
    # normalization so forward/adjoint match.
    SigP=tf.transpose(tf.stack([Sig],axis=3),[1,3,2,0])
    SWithTSB=tf.multiply(tf.conj(TSBFXc),SigP)
    SWithTSB=tf.reshape(SWithTSB,[nTraj,nTSC*nCh*batch_size])
    C=tf.conj(tf.sparse_tensor_dense_matmul(sp_C,tf.conj(SWithTSB),adjoint_a=True))
    # C=tf.sparse_tensor_dense_matmul(sp_C,SWithTSB,adjoint_a=True)
    PaddedH=tf.reshape(C,[H*2,W*2,nTSC*nCh*batch_size])
    PaddedH=tf.transpose(PaddedH,[1,0,2])
    Step2=TF_ifft2d_on3d(PaddedH)*H*W*2*2
    Cropped=tf.slice(Step2,[0,0,0],[H,W,nTSC*nCh*batch_size])
    Cropped=tf.reshape(Cropped,[H,W,nTSC,nCh,batch_size])
    Step1=tf.multiply(Cropped,tf.conj(TSCSens))
    Step1=tf.multiply(Step1,tf.conj(SNc))
    if SumOver:
        yNew=tf.reduce_sum(Step1,axis=[2,3])
        yNew=tf.transpose(yNew,[2,0,1])
        return yNew
    else:
        return Step1
# def TS_NUFFT_OP_H(Sig,TSCSens,SNc,H,W,batch_size,paddingsX,nTraj,nTSC,nCh,sp_C,TSBFXc):
# SigP=tf.transpose(tf.stack([Sig],axis=3),[1,3,2,0])
# SWithTSB=tf.multiply(tf.conj(TSBFXc),SigP)
# SWithTSB=tf.reshape(SWithTSB,[nTraj,nTSC*nCh*batch_size])
# C=tf.conj(tf.sparse_tensor_dense_matmul(sp_C,tf.conj(SWithTSB),adjoint_a=True))
# # C=tf.sparse_tensor_dense_matmul(sp_C,SWithTSB,adjoint_a=True)
# PaddedH=tf.reshape(C,[H*2,W*2,nTSC*nCh*batch_size])
# Step2=tf.transpose(tf.ifft(tf.transpose(tf.ifft(tf.transpose(PaddedH,perm=[2,0,1])),perm=[0,2,1])),perm=[1,2,0])*np.sqrt(2*2*H*W)
# Cropped=tf.slice(Step2,[0,0,0],[H,W,nTSC*nCh*batch_size])
# Cropped=tf.reshape(Cropped,[H,W,nTSC,nCh,batch_size])
# Step1=tf.multiply(Cropped,tf.conj(TSCSens))
# Step1=tf.multiply(Step1,tf.conj(SNc))
# yNew=tf.reduce_sum(Step1,axis=[2,3])
# yNew=tf.transpose(yNew,[2,0,1])
# return yNew
# def TS_NUFFT_OP(InImage,TSCSens,SNc,H,W,batch_size,paddingsX,nTraj,nTSC,nCh,sp_C,TSBFXc):
# InImage=tf.stack([tf.stack([InImage],axis=3)],axis=4)
# InImage=tf.transpose(InImage,[1,2,3,4,0])
# Step1=tf.multiply(InImage,SNc)
# Step1=tf.multiply(Step1,TSCSens)
# Step1=tf.reshape(Step1,[H,W,nTSC*nCh*batch_size])
# Padded=tf.pad(Step1, paddingsX, "CONSTANT")
# Step2=tf.transpose(tf.fft(tf.transpose(tf.fft(tf.transpose(Padded,perm=[2,0,1])),perm=[0,2,1])),perm=[1,2,0])/np.sqrt(2*2*H*W)
# Col=tf.reshape(Step2,[-1,nTSC*nCh*batch_size])
# C=tf.sparse_tensor_dense_matmul(sp_C,Col)
# CX=tf.reshape(C,[nTraj,nTSC,nCh,batch_size])
# WithTSB=CX*TSBFXc
# WithTSBR=tf.reduce_sum(WithTSB,axis=1)
# Sig=tf.transpose(WithTSBR,[2,0,1])
# return Sig
def TF_TSNUFFT_Run_TSCin(InImage,TSCin,SNc,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFX):
    # Forward time-segmented NUFFT with an explicit per-segment image
    # weighting TSCin.  The complex sparse product P @ col is expressed as
    # four real matmuls (sp_R/sp_I hold Re(P)/Im(P)) because the sparse
    # matmul used here is real-valued.
    # SNx=tf.reshape(SNx,[SNx.shape[0],SNx.shape[1],1])
    InImage=InImage*TSCin
    # InImage=tf.reshape(InImage,[InImage.shape[0],InImage.shape[1],1])
    Step1=tf.multiply(InImage,SNc)
    Padded=tf.pad(Step1, paddings, "CONSTANT")
    # Two 1-D FFT passes over the first two axes == 2-D FFT.
    Step2=tf.transpose(tf.fft(tf.transpose(tf.fft(tf.transpose(Padded,perm=[2,0,1])),perm=[0,2,1])),perm=[1,2,0])
    # Step2=tf.fft(tf.transpose(tf.fft(Padded),perm=[1,0]))
    Col=tf.reshape(Step2,[-1,nTSC*nCh])
    ColR=tf.real(Col)
    ColI=tf.imag(Col)
    RR=tf.sparse_tensor_dense_matmul(sp_R,ColR)
    RI=tf.sparse_tensor_dense_matmul(sp_R,ColI)
    IR=tf.sparse_tensor_dense_matmul(sp_I,ColR)
    II=tf.sparse_tensor_dense_matmul(sp_I,ColI)
    # (R + iI) = (sp_R + i sp_I)(ColR + i ColI)
    R=RR-II
    I=RI+IR
    C=tf.complex(R,I)
    # pdb.set_trace()
    # CX=np.reshape(C,(nTraj,nTSC,nCh))
    CX=tf.reshape(C,[nTraj,nTSC,nCh])
    WithTSB=CX*TSBFX
    WithTSBR=tf.reduce_sum(WithTSB,axis=1)
    return WithTSBR
def TF_TSNUFFT_Run(InImage,SNc,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFX):
    # Same pipeline as TF_TSNUFFT_Run_TSCin but without the TSCin
    # weighting; expects a single 2-D input image.
    # SNx=tf.reshape(SNx,[SNx.shape[0],SNx.shape[1],1])
    InImage=tf.reshape(InImage,[InImage.shape[0],InImage.shape[1],1])
    Step1=tf.multiply(InImage,SNc)
    Padded=tf.pad(Step1, paddings, "CONSTANT")
    Step2=tf.transpose(tf.fft(tf.transpose(tf.fft(tf.transpose(Padded,perm=[2,0,1])),perm=[0,2,1])),perm=[1,2,0])
    # Step2=tf.fft(tf.transpose(tf.fft(Padded),perm=[1,0]))
    Col=tf.reshape(Step2,[-1,nTSC*nCh])
    ColR=tf.real(Col)
    ColI=tf.imag(Col)
    RR=tf.sparse_tensor_dense_matmul(sp_R,ColR)
    RI=tf.sparse_tensor_dense_matmul(sp_R,ColI)
    IR=tf.sparse_tensor_dense_matmul(sp_I,ColR)
    II=tf.sparse_tensor_dense_matmul(sp_I,ColI)
    R=RR-II
    I=RI+IR
    C=tf.complex(R,I)
    # pdb.set_trace()
    # CX=np.reshape(C,(nTraj,nTSC,nCh))
    CX=tf.reshape(C,[nTraj,nTSC,nCh])
    WithTSB=CX*TSBFX
    WithTSBR=tf.reduce_sum(WithTSB,axis=1)
    return WithTSBR
def TF_TSNUFFT_Run3(H,W,InImage,SNc,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFX):
    # Variant used with TF_TSNUFFT_Prepare3: SNc already folds in the coil
    # sensitivities, so the product is reshaped to (H, W, nCh*nTSC) before
    # the FFT instead of relying on a singleton channel axis.
    # SNx=tf.reshape(SNx,[SNx.shape[0],SNx.shape[1],1])
    # InImage=tf.reshape(InImage,[InImage.shape[0],InImage.shape[1],1])
    Step1=tf.multiply(InImage,SNc)
    Step1=tf.reshape(Step1,[H,W,nCh*nTSC])
    Padded=tf.pad(Step1, paddings, "CONSTANT")
    Step2=tf.transpose(tf.fft(tf.transpose(tf.fft(tf.transpose(Padded,perm=[2,0,1])),perm=[0,2,1])),perm=[1,2,0])
    # Step2=tf.fft(tf.transpose(tf.fft(Padded),perm=[1,0]))
    Col=tf.reshape(Step2,[-1,nTSC*nCh])
    ColR=tf.real(Col)
    ColI=tf.imag(Col)
    RR=tf.sparse_tensor_dense_matmul(sp_R,ColR)
    RI=tf.sparse_tensor_dense_matmul(sp_R,ColI)
    IR=tf.sparse_tensor_dense_matmul(sp_I,ColR)
    II=tf.sparse_tensor_dense_matmul(sp_I,ColI)
    R=RR-II
    I=RI+IR
    C=tf.complex(R,I)
    # pdb.set_trace()
    # CX=np.reshape(C,(nTraj,nTSC,nCh))
    CX=tf.reshape(C,[nTraj,nTSC,nCh])
    WithTSB=CX*TSBFX
    WithTSBR=tf.reduce_sum(WithTSB,axis=1)
    return WithTSBR
def TF_TSNUFFT_Prepare3(SN,Sens,TSBF,Kd,P):
nTraj=TSBF.shape[1]
nTSC=TSBF.shape[0]
InputIShape=Sens.shape[0:2]
nCh=Sens.shape[2]
# TSCX=np.reshape(TSC,np.concatenate((TSC.shape,[1]),axis=0))
SensP=np.transpose(np.reshape(Sens,np.concatenate((Sens.shape,[1]),axis=0)),(0,1,3,2))
# SensWithTSC=SensP*TSCX
# SensWithTSCX=np.reshape(SensWithTSC,(InputIShape[0],InputIShape[1],nCh*nTSC))
# SNX=np.reshape(SN,np.concatenate((SN.shape,[1]),axis=0))
SNX=NP_addDim(NP_addDim(SN))
SensWithSN=SensP*SNX
# SensWithTSCXWithSN=SensWithTSCX*SNX
# SNc=tf.constant(tf.cast(SensWithTSCXWithSN,tf.complex64))
# SNc=tf.constant(np.complex64(SensWithTSCXWithSN))
SNc=tf.constant(np.complex64(SensWithSN))
TSBFX=np.transpose(np.reshape(TSBF,(nTSC,1,nTraj)),axes=(2,0,1))
TSBFX=tf.constant(np.complex64(TSBFX))
ToPad=[Kd[0,0]-InputIShape[0],Kd[0,1]-InputIShape[1]]
paddings = tf.constant([[0, ToPad[0]], [0, ToPad[1]],[0,0]])
# paddings = tf.constant([[0, 68], [0, 60]])
Idx=scipy.sparse.find(P)
I2=np.vstack([Idx[0],Idx[1]]).T
I2=tf.constant(np.int64(I2))
ValR=tf.constant(np.float32(np.real(Idx[2])))
ValI=tf.constant(np.float32(np.imag(Idx[2])))
sp_R = tf.SparseTensor(I2, ValR, [P.shape[0],P.shape[1]])
sp_I = tf.SparseTensor(I2, ValI, [P.shape[0],P.shape[1]])
# sp_R = tf.SparseTensor(I2, tf.cast(np.real(Idx[2]),tf.float32), [P.shape[0],P.shape[1]])
# sp_I = tf.SparseTensor(I2, tf.cast(np.imag(Idx[2]),tf.float32), [P.shape[0],P.shape[1]])
return SNc,paddings,sp_R,sp_I,TSBFX
def TF_TSNUFFT_Prepare2(SN,Sens,TSC,TSBF,Kd,P):
    # Build constant tensors for a time-segmented NUFFT graph, folding the
    # per-segment temporal coefficient maps (TSC) into the coil maps on the
    # TF graph (TSC arrives as a tensor here, unlike TF_TSNUFFT_Prepare).
    #
    # SN:   image-grid scaling factors (H, W) -- TODO confirm shape
    # Sens: coil sensitivity maps (H, W, nCh)
    # TSC:  temporal segment coefficient maps -- presumably (H, W, nTSC);
    #       TODO confirm against caller
    # TSBF: time-segment basis functions (nTSC, nTraj)
    # Kd:   oversampled grid size, read as Kd[0,0], Kd[0,1]
    # P:    complex scipy sparse interpolation matrix (nTraj x prod(Kd))
    #
    # Returns (SNc, paddings, sp_R, sp_I, TSBFX, sp_C); sp_C additionally
    # keeps P as a single complex SparseTensor.
    nTraj=TSBF.shape[1]
    nTSC=TSBF.shape[0]
    InputIShape=Sens.shape[0:2]
    nCh=Sens.shape[2]
    # TSCX=np.reshape(TSC,np.concatenate((TSC.shape,[1]),axis=0))
    # Append a singleton axis so TSC broadcasts against SensP below.
    TSCX=tf.stack([TSC],axis=3)
    # (H, W, nCh) -> (H, W, 1, nCh)
    SensP=np.transpose(np.reshape(Sens,np.concatenate((Sens.shape,[1]),axis=0)),(0,1,3,2))
    SensPT=tf.constant(np.complex64(SensP))
    SensWithTSC=tf.multiply(SensPT,TSCX)
    # Collapse the trailing (segment, channel) axes into one.
    SensWithTSCX=tf.reshape(SensWithTSC,[SN.shape[0],SN.shape[1],-1])
    # SensWithTSCX=np.reshape(SensWithTSC,(InputIShape[0],InputIShape[1],nCh*nTSC))
    SNX=np.reshape(SN,np.concatenate((SN.shape,[1]),axis=0))
    SNXT=tf.constant(np.complex64(SNX))
    SensWithTSCXWithSN=SensWithTSCX*SNXT
    # SNc=tf.constant(tf.cast(SensWithTSCXWithSN,tf.complex64))
    # SNc=tf.constant(np.complex64(SensWithTSCXWithSN))
    # SNc=tf.constant(SensWithTSCXWithSN)
    # Already a graph tensor; no extra constant wrapping needed.
    SNc=SensWithTSCXWithSN
    # Basis functions reordered to (nTraj, nTSC, 1).
    TSBFX=np.transpose(np.reshape(TSBF,(nTSC,1,nTraj)),axes=(2,0,1))
    TSBFX=tf.constant(np.complex64(TSBFX))
    # Zero-pad amounts taking the image up to the oversampled grid size Kd.
    ToPad=[Kd[0,0]-InputIShape[0],Kd[0,1]-InputIShape[1]]
    paddings = tf.constant([[0, ToPad[0]], [0, ToPad[1]],[0,0]])
    # paddings = tf.constant([[0, 68], [0, 60]])
    # Sparse interpolator: real/imag split (TF1 lacks sparse complex matmul)
    # plus a complex copy for ops that can consume it directly.
    Idx=scipy.sparse.find(P)
    I2=np.vstack([Idx[0],Idx[1]]).T
    I2=tf.constant(np.int64(I2))
    ValR=tf.constant(np.float32(np.real(Idx[2])))
    ValI=tf.constant(np.float32(np.imag(Idx[2])))
    ValC=tf.constant(np.complex64(Idx[2]))
    sp_R = tf.SparseTensor(I2, ValR, [P.shape[0],P.shape[1]])
    sp_I = tf.SparseTensor(I2, ValI, [P.shape[0],P.shape[1]])
    sp_C = tf.SparseTensor(I2, ValC, [P.shape[0],P.shape[1]])
    return SNc,paddings,sp_R,sp_I,TSBFX,sp_C
def TF_TSNUFFT_Prepare(SN,Sens,TSC,TSBF,Kd,P):
    # All-numpy variant: fold the temporal coefficient maps (TSC) into the
    # sensitivity maps on the host, then freeze everything as tf constants.
    # Arguments as in TF_TSNUFFT_Prepare2 (here nTSC is taken from
    # TSC.shape[2], so TSC carries the segment axis last).
    # Returns (SNc, paddings, sp_R, sp_I, TSBFX).
    nTraj=TSBF.shape[1]
    nTSC=TSC.shape[2]
    InputIShape=Sens.shape[0:2]
    nCh=Sens.shape[2]
    # (H, W, nTSC) -> (H, W, nTSC, 1)
    TSCX=np.reshape(TSC,np.concatenate((TSC.shape,[1]),axis=0))
    # (H, W, nCh) -> (H, W, 1, nCh)
    SensP=np.transpose(np.reshape(Sens,np.concatenate((Sens.shape,[1]),axis=0)),(0,1,3,2))
    SensWithTSC=SensP*TSCX
    # Collapse (nTSC, nCh) into one trailing axis.
    SensWithTSCX=np.reshape(SensWithTSC,(InputIShape[0],InputIShape[1],nCh*nTSC))
    SNX=np.reshape(SN,np.concatenate((SN.shape,[1]),axis=0))
    SensWithTSCXWithSN=SensWithTSCX*SNX
    # SNc=tf.constant(tf.cast(SensWithTSCXWithSN,tf.complex64))
    SNc=tf.constant(np.complex64(SensWithTSCXWithSN))
    # Basis functions reordered to (nTraj, nTSC, 1).
    TSBFX=np.transpose(np.reshape(TSBF,(nTSC,1,nTraj)),axes=(2,0,1))
    TSBFX=tf.constant(np.complex64(TSBFX))
    # Zero-pad amounts taking the image up to the oversampled grid size Kd.
    ToPad=[Kd[0,0]-InputIShape[0],Kd[0,1]-InputIShape[1]]
    paddings = tf.constant([[0, ToPad[0]], [0, ToPad[1]],[0,0]])
    # paddings = tf.constant([[0, 68], [0, 60]])
    # Real/imag split of the sparse interpolation matrix for the graph-side
    # emulated complex sparse matmul.
    Idx=scipy.sparse.find(P)
    I2=np.vstack([Idx[0],Idx[1]]).T
    I2=tf.constant(np.int64(I2))
    ValR=tf.constant(np.float32(np.real(Idx[2])))
    ValI=tf.constant(np.float32(np.imag(Idx[2])))
    sp_R = tf.SparseTensor(I2, ValR, [P.shape[0],P.shape[1]])
    sp_I = tf.SparseTensor(I2, ValI, [P.shape[0],P.shape[1]])
    return SNc,paddings,sp_R,sp_I,TSBFX
def TF_NUFT(A,SN,Kd,P):
# A is data, e.g. of size H,W,nMaps
# SN should be from Fessler, .* Channel maps; so finally H,W,nMaps
# Kd is the final size for the overFT, e.g. H*2,W*2
# P is a sparse matrix of nTraj x H*W ; <101x16320 sparse matrix of type '<class 'numpy.complex128'>' with 2525 stored elements in Compressed | |
<filename>pathplanning/dijkstra.py
#!/usr/bin/env python
'''
BSD 2-Clause License
Copyright (c) 2017, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from __future__ import print_function
import numpy as np
import math
import matplotlib.pyplot as plt
import pprint
def dijkstras(occupancy_map, x_spacing, y_spacing, start, goal, visual=True, debug=False):
    """
    Implements Dijkstra's shortest path algorithm.

    Input:
    occupancy_map - an N by M numpy array of boolean values (represented
        as integers 0 and 1) that represents the locations of the obstacles
        in the world
    x_spacing - parameter representing spacing between adjacent columns
    y_spacing - parameter representing spacing between adjacent rows
    start - a 3 by 1 numpy array of (x,y,theta) for the starting position
    goal - a 3 by 1 numpy array of (x,y,theta) for the finishing position
    visual - when True (the original hard-coded behavior), animate the
        search with matplotlib
    debug - when True, print verbose search-state diagnostics
    Output:
    path - numpy array of (x, y) metric coordinates of the nodes on the
        shortest path found, starting with "start" and ending with "goal";
        False if no path exists
    """
    colormapval = (0, 8)
    goal_found = False
    # Setup map visualization.
    if visual:
        # BUG FIX: work on a copy -- the original aliased the caller's map
        # and overwrote it with visualization markers (3/5/6/7).
        viz_map = occupancy_map.copy()
        fig = plt.figure(figsize=(12, 12))
        ax = fig.add_subplot(111)
        ax.set_title('Occupancy Grid')
        plt.xticks(visible=False)
        plt.yticks(visible=False)
        plt.imshow(viz_map, origin='upper', interpolation='none', clim=colormapval)
        ax.set_aspect('equal')
        plt.pause(2)
    # Neighbor offsets applied as (col + dx, row + dy).
    delta = [[-1, 0],
             [0, -1],
             [1, 0],
             [0, 1]]
    # Each node on the map "costs" 1 step to reach.
    cost = 1
    # Convert numpy array of map to list of lists, makes it easier to search.
    occ_map = occupancy_map.tolist()
    if debug:
        print("occ_map: ")
        pprint.pprint(occ_map)
    # Convert start and goal positions to map indices.
    x = int(math.ceil((start.item(0) / x_spacing) - 0.5))  # startingx
    y = int(math.ceil((start.item(1) / y_spacing) - 0.5))  # startingy
    goalX = int(math.ceil((goal.item(0) / x_spacing) - 0.5))
    goalY = int(math.ceil((goal.item(1) / y_spacing) - 0.5))
    print("Start Pose: ", x, y)
    print("Goal Pose: ", goalX, goalY)
    num_rows = len(occ_map)
    num_cols = len(occ_map[0])
    # Track node states; 0 = unvisited, 3 = discovered, 5 = start ('S'),
    # 6 = goal ('G'), 7 = final path.
    possible_nodes = [[0 for _ in range(num_cols)] for _ in range(num_rows)]
    row = y
    col = x
    possible_nodes[row][col] = 5
    if visual:
        viz_map[row][col] = 5
        viz_map[goalY][goalX] = 6
        plt.imshow(viz_map, origin='upper', interpolation='none', clim=colormapval)
        plt.pause(2)
    if debug:
        print("Possible Nodes: ")
        pprint.pprint(possible_nodes)
    # The g_value counts the number of steps each node is from the start.
    g_value = 0
    frontier_nodes = [(g_value, col, row)]  # (dist, x, y)
    parent_node = {}  # Maps {child node: parent node} for path reconstruction.
    loopcount = 0
    while len(frontier_nodes) != 0:
        if debug:
            # BUG FIX: this diagnostic was a bare string expression before
            # and never actually printed.
            print("\n>>>>>>>>>>>>LOOP COUNT: ", loopcount, "\n")
        frontier_nodes.sort(reverse=True)  # sort so pop() takes the nearest node
        current_node = frontier_nodes.pop()
        if debug:
            print("current_node: ", current_node)
        if current_node[1] == goalX and current_node[2] == goalY:
            print("Goal found!")
            goal_found = True
            if visual:
                plt.text(2, 10, s="Goal found!", fontsize=18, style='oblique', ha='center', va='top')
                plt.imshow(viz_map, origin='upper', interpolation='none', clim=colormapval)
                plt.pause(2)
            break
        g_value, col, row = current_node
        # Check surrounding neighbors.
        for dx, dy in delta:
            possible_expansion_x = col + dx
            possible_expansion_y = row + dy
            # BUG FIX: rows are bounded by the row count and columns by the
            # column count; the original swapped them, silently rejecting
            # valid cells on non-square maps (and leaning on a bare
            # except: to mask the resulting IndexErrors).
            valid_expansion = (0 <= possible_expansion_y < num_rows and
                               0 <= possible_expansion_x < num_cols)
            if debug:
                print("Current expansion Node: ", possible_expansion_x, possible_expansion_y)
            if valid_expansion:
                unsearched_node = possible_nodes[possible_expansion_y][possible_expansion_x] == 0
                open_node = occ_map[possible_expansion_y][possible_expansion_x] == 0
                if debug:
                    print("Check Open or Wall: ", occ_map[possible_expansion_y][possible_expansion_x])
                if unsearched_node and open_node:
                    # Mark as discovered (3) so it is never re-parented.
                    possible_nodes[possible_expansion_y][possible_expansion_x] = 3
                    possible_node = (g_value + cost, possible_expansion_x, possible_expansion_y)
                    frontier_nodes.append(possible_node)
                    if debug:
                        print("frontier_nodes:", frontier_nodes)
                    if visual:
                        viz_map[possible_expansion_y][possible_expansion_x] = 3
                        plt.imshow(viz_map, origin='upper', interpolation='none', clim=colormapval)
                        plt.pause(.5)
                    # Record the parent/child relationship.
                    parent_node[possible_node] = current_node
        if debug:
            print("Parent Node: \n", parent_node)
            print("While Possible Nodes: ")
            pprint.pprint(possible_nodes)
        loopcount = loopcount + 1
    if goal_found:
        print("Generating path...")
        # Walk the parent chain from the goal back to the start, then sort
        # by g-value to get start-to-goal order.
        route = []
        child_node = current_node
        while child_node in parent_node:
            route.append(parent_node[child_node])
            child_node = parent_node[child_node]
        route.sort()
        if debug:
            print("Route: ", route)
        if visual:
            for node in route:
                viz_map[node[2]][node[1]] = 7
                plt.imshow(viz_map, origin='upper', interpolation='none', clim=colormapval)
                plt.pause(.5)
            viz_map[goalY][goalX] = 7
            plt.imshow(viz_map, origin='upper', interpolation='none', clim=colormapval)
            plt.pause(5)
        # Convert grid indices back to metric coordinates (cell centers).
        path = []
        position = [start.item(0), start.item(1)]  # exact start passed in
        path.append(position)
        for node in route:
            position = [round((node[1] + 0.5) * x_spacing, 3),
                        round((node[2] + 0.5) * y_spacing, 3)]
            path.append(position)
        # Add the exact goal state.
        position = [goal.item(0), goal.item(1)]
        path.append(position)
        print("Path: ")
        pprint.pprint(path)
        return np.array(path)
    else:
        if visual:
            plt.text(2, 10, s="No path found...", fontsize=18, style='oblique', ha='center', va='top')
            plt.imshow(viz_map, origin='upper', interpolation='none', clim=colormapval)
            plt.pause(5)
        return False
def test():
"""
Function that provides a few examples of maps and their solution paths
"""
test_map1 = np.array([
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1]])
x_spacing1 = 0.13
y_spacing1 = 0.2
start1 = np.array([[0.3], [0.3], [0]])
goal1 = np.array([[0.6], [1], [0]])
path1 = dijkstras(test_map1,x_spacing1,y_spacing1,start1,goal1)
true_path1 = np.array([
[0.3, 0.3],
[0.325, 0.3],
[0.325, 0.5],
[0.325, 0.7],
[0.325, 0.9],
[0.325, 1.1],
[0.455, 1.1],
[0.585, 1.1],
[0.6, 1.0]
])
if np.array_equal(path1,true_path1):
print("Path 1 passes")
test_map2 = np.array([
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 1, 1, 0, 0, 1],
[1, 0, 0, 1, 1, 0, 0, 1],
[1, 0, 0, 1, 1, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1]])
start2 = np.array([[0.5], [1.0], [1.5707963267948966]])
goal2 = np.array([[1.1], [0.9], [-1.5707963267948966]])
x_spacing2 = 0.2
y_spacing2 = 0.2
path2 = dijkstras(test_map2,x_spacing2,y_spacing2,start2,goal2)
true_path2 = np.array([[ 0.5, 1.0], # [2, 5]
[ 0.5, 1.1], # [2, 5]
[ 0.5, 1.3], # [2, 6]
[ 0.5, 1.5], # [2, 7]
[ 0.7, 1.5], # [3, 7]
[ 0.9, 1.5], # [4, 7]
[ 1.1, 1.5], # [5, 7]
[ 1.1, 1.3], # [5, 6]
[ 1.1, 1.1], # [5, 5]
[ 1.1, 0.9] # [5, 4]
])
if np.array_equal(path2,true_path2):
print("Path 2 | |
##
# File: EntityPolymerExtractor.py
# Date: 19-Feb-2019 jdw
#
# Selected utilities to extract entity polymer mapping and feature data
# from the exchange database schema.
#
# Updates:
#
#
##
__docformat__ = "google en"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "Apache 2.0"
import copy
import logging
import os
from rcsb.db.mongo.Connection import Connection
from rcsb.db.mongo.MongoDbUtil import MongoDbUtil
from rcsb.utils.io.MarshalUtil import MarshalUtil
logger = logging.getLogger(__name__)
class EntityPolymerExtractor(object):
"""Utilities to extract polymer related data from entry and entity collections."""
    def __init__(self, cfgOb, **kwargs):
        """Create the extractor and load (or rebuild) the entity-polymer cache.

        Args:
            cfgOb: configuration provider used to open the Mongo connection
            **kwargs: cache and selection options (useCache, exdbDirPath,
                cacheKwargs, dbName, collectionName, ...) forwarded to the
                cache builder
        """
        self.__cfgOb = cfgOb
        self.__resourceName = "MONGO_DB"
        self.__mU = MarshalUtil()
        # entryD: per-entry selected polymer entity data;
        # authAsymIdIndex: "entryId_authAsymId" -> entityId lookup.
        self.__entryD, self.__authAsymIdIndex = self.__rebuildCache(**kwargs)
        #
    def __rebuildCache(self, **kwargs):
        """Load the extracted entity-polymer data from a cache file, or
        rebuild it from the exchange database collections and persist it.

        Keyword args:
            useCache (bool): reuse an existing readable cache file (default True)
            exdbDirPath (str): directory holding the cache file (default ".")
            cacheKwargs (dict): MarshalUtil serialization options
                (default {"fmt": "pickle"})

        Returns:
            (dict, dict): (entryD, authIdxD) -- per-entry selected polymer
            entity data and the "entryId_authAsymId" -> entityId index.
            Both empty on failure.
        """
        useCache = kwargs.get("useCache", True)
        dirPath = kwargs.get("exdbDirPath", ".")
        cacheKwargs = kwargs.get("cacheKwargs", {"fmt": "pickle"})
        #
        ext = "pic" if cacheKwargs["fmt"] == "pickle" else "json"
        fn = "entity-polymer-extracted-data-cache" + "." + ext
        cacheFilePath = os.path.join(dirPath, fn)
        #
        cD = {"entryD": {}, "authIdxD": {}}
        try:
            self.__mU.mkdir(dirPath)
            if not useCache:
                # Rebuild requested: remove any stale cache file first.
                for fp in [cacheFilePath]:
                    try:
                        os.remove(fp)
                    except Exception:
                        pass
            if useCache and cacheFilePath and os.access(cacheFilePath, os.R_OK):
                cD = self.__mU.doImport(cacheFilePath, **cacheKwargs)
            else:
                # Full rebuild: select entries, attach their polymer
                # entities, then build the auth asym id index.
                entryD = self.__selectEntries(**kwargs)
                entryD = self.__selectPolymerEntities(entryD, **kwargs)
                authIdxD = self.__buildIndices(entryD)
                cD["entryD"] = entryD
                cD["authIdxD"] = authIdxD
                if cacheFilePath:
                    ok = self.__mU.doExport(cacheFilePath, cD, **cacheKwargs)
                    logger.info("Saved entity-polymer extracted results (%d) status %r in %s", len(entryD), ok, cacheFilePath)
        except Exception as e:
            logger.exception("Failing with %s", str(e))
        return cD["entryD"], cD["authIdxD"]
def __buildIndices(self, entryD):
indD = {}
for entryId, eD in entryD.items():
entityD = eD["selected_polymer_entities"] if "selected_polymer_entities" in eD else {}
for entityId, pD in entityD.items():
for authAsymId in pD["auth_asym_ids"]:
# avoid tuples for json serialization
# indD[(entryId, authAsymId)] = entityId
indD[entryId + "_" + authAsymId] = entityId
return indD
    def getEntryCount(self):
        """Return the number of PDB entries held in the extracted cache."""
        return len(self.__entryD)
def getRefSeqAccessions(self, dbName):
acL = []
try:
for _, eD in self.__entryD.items():
entityD = eD["selected_polymer_entities"] if "selected_polymer_entities" in eD else {}
for _, pD in entityD.items():
for dD in pD["struct_ref"]:
if "pdbx_db_accession" in dD and dD["db_name"] == dbName:
acL.append(dD["pdbx_db_accession"])
return list(set(acL))
except Exception as e:
logger.exception("Failing with %s", str(e))
return acL
def countRefSeqAccessions(self, dbName):
cD = {}
try:
for _, eD in self.__entryD.items():
entityD = eD["selected_polymer_entities"] if "selected_polymer_entities" in eD else {}
for _, pD in entityD.items():
iCount = 0
for dD in pD["struct_ref"]:
if "pdbx_db_accession" in dD and dD["db_name"] == dbName:
iCount += 1
cD[iCount] = cD[iCount] + 1 if iCount in cD else 1
except Exception as e:
logger.exception("Failing with %s", str(e))
return cD
def countRefSeqAccessionDbType(self):
cD = {}
try:
for _, eD in self.__entryD.items():
entityD = eD["selected_polymer_entities"] if "selected_polymer_entities" in eD else {}
for _, pD in entityD.items():
for dD in pD["struct_ref"]:
if "pdbx_db_accession" in dD and "db_name" in dD:
cD[dD["db_name"]] = cD[dD["db_name"]] + 1 if dD["db_name"] in cD else 1
except Exception as e:
logger.exception("Failing with %s", str(e))
return cD
def countRefSeqAccessionAny(self):
cD = {}
try:
for _, eD in self.__entryD.items():
entityD = eD["selected_polymer_entities"] if "selected_polymer_entities" in eD else {}
for _, pD in entityD.items():
iCount = len(pD["struct_ref"])
# if iCount == 0:
# logger.info("entryId %r " % (entryId, entityId))
cD[iCount] = cD[iCount] + 1 if iCount in cD else 1
except Exception as e:
logger.exception("Failing with %s", str(e))
return cD
def getUniqueTaxons(self):
#
tD = {}
try:
for _, eD in self.__entryD.items():
entityD = eD["selected_polymer_entities"] if "selected_polymer_entities" in eD else {}
for _, pD in entityD.items():
# logger.info("Entity dictionary %r", pD.keys())
if "rcsb_entity_source_organism" in pD:
for dd in pD["rcsb_entity_source_organism"]:
if "ncbi_taxonomy_id" in dd:
tD[dd["ncbi_taxonomy_id"]] = tD[dd["ncbi_taxonomy_id"]] + 1 if dd["ncbi_taxonomy_id"] in tD else 1
except Exception as e:
logger.exception("Failing with %s", str(e))
logger.info("Taxon coverage %d", len(tD))
return tD
def getOrigTaxons(self):
#
tD = {}
try:
for entryId, eD in self.__entryD.items():
entityD = eD["selected_polymer_entities"] if "selected_polymer_entities" in eD else {}
for entityId, pD in entityD.items():
# logger.info("Entity dictionary %r", pD.keys())
if "original_taxonomy_ids" in pD:
for tV in pD["original_taxonomy_ids"]:
tD.setdefault(entryId, []).append((entityId, tV))
if entryId not in tD:
logger.debug("No taxonomy for %s", entryId)
except Exception as e:
logger.exception("Failing with %s", str(e))
logger.info("Taxon coverage %d", len(tD))
return tD
def countRefSeqAccessionByTaxon(self, dbNameList=None):
#
tD = {}
iCount = 0
#
try:
for _, eD in self.__entryD.items():
entityD = eD["selected_polymer_entities"] if "selected_polymer_entities" in eD else {}
for _, pD in entityD.items():
# logger.info("Entity dictionary %r", pD.keys())
if "rcsb_entity_source_organism" in pD:
for dd in pD["rcsb_entity_source_organism"]:
if "ncbi_taxonomy_id" in dd:
tId = dd["ncbi_taxonomy_id"]
for dD in pD["struct_ref"]:
if "pdbx_db_accession" in dD and "db_name" in dD:
if dD["db_name"] in dbNameList:
tD.setdefault(tId, []).append(dD["pdbx_db_accession"])
iCount += 1
except Exception as e:
logger.exception("Failing with %s", str(e))
logger.info("Total observed accessions %d", iCount)
return tD
    def checkRefSeqAlignRange(self, dbName):
        """Sanity-check reference sequence alignment ranges for dbName.

        For each struct_ref record referencing dbName, compare the length of
        the stored one-letter-code sequence with the overall database
        alignment span accumulated over all alignment segments; log apparent
        inconsistencies at debug level and summary counts at info level.

        Args:
            dbName (str): reference database name (e.g. "UNP")

        Returns:
            bool: True if the scan completed, False if it raised.
        """
        ok = True
        try:
            eCount = 0  # struct_ref records missing required alignment fields
            aCount = 0  # alignments with inconsistent-looking spans
            tCount = 0  # total alignments inspected
            for entryId, eD in self.__entryD.items():
                entityD = eD["selected_polymer_entities"] if "selected_polymer_entities" in eD else {}
                for entityId, pD in entityD.items():
                    for dD in pD["struct_ref"]:
                        if "db_name" in dD and dD["db_name"] == dbName:
                            if "pdbx_db_accession" in dD and "alignD" in dD and "pdbx_seq_one_letter_code" in dD and "pdbx_align_begin" in dD:
                                seqLen = len(dD["pdbx_seq_one_letter_code"])
                                # Overall database-coordinate span across all
                                # alignment segments of this reference.
                                dbBegin = 100000000
                                dbEnd = -1
                                refSeqDbBegin = dD["pdbx_align_begin"]
                                for authAsymId, alDL in dD["alignD"].items():
                                    tCount += 1
                                    difL = []
                                    for alD in alDL:
                                        tBeg = alD["db_align_beg"]
                                        tEnd = alD["db_align_end"]
                                        tDif = tEnd - tBeg + 1
                                        difL.append(tDif)
                                        dbBegin = min(tBeg, dbBegin)
                                        dbEnd = max(tEnd, dbEnd)
                                    # range is calculate on off -
                                    # if seqLen < dbEnd - dbBegin + 1:
                                    # Flag only when the span exceeds the stored
                                    # sequence AND the declared begin disagrees.
                                    if seqLen < dbEnd - dbBegin and not refSeqDbBegin == dbBegin:
                                        fDif = sum(difL)
                                        logger.debug(
                                            "Bad alignment for %r %r %r %r (%d) seqLen %r (%d) dbBegin %r dbEnd %r difL %r tDif %r",
                                            entryId,
                                            entityId,
                                            authAsymId,
                                            alD["pdbx_strand_id"],
                                            len(alDL),
                                            seqLen,
                                            dbEnd - dbBegin + 1,
                                            dbBegin,
                                            dbEnd,
                                            difL,
                                            fDif,
                                        )
                                        aCount += 1
                            else:
                                eCount += 1
            logger.info("Incomplete %s struct_ref record count %d", dbName, eCount)
            logger.info("Inconsistent %s db reference alignments %d/%d", dbName, aCount, tCount)
        except Exception as e:
            logger.exception("Failing with %s", str(e))
            ok = False
        return ok
def getEntityRefSeqAccessions(self, dbName, entryId, entityId):
acL = []
try:
dL = self.__entryD[entryId]["selected_polymer_entities"][entityId]["struct_ref"]
acL = list(set([d["pdbx_db_accession"] for d in dL if d["db_name"] == dbName]))
except Exception as e:
logger.exception("Failing with %s %r %r %s", dbName, entryId, entityId, str(e))
return acL
def __selectEntries(self, **kwargs):
"""Return a dictionary of PDB entries satifying the input conditions (e.g. method, resolution limit)"""
dbName = kwargs.get("dbName", "pdbx_core")
collectionName = kwargs.get("collectionName", "pdbx_core_entry")
selectionQueryD = kwargs.get("entrySelectionQuery", {})
#
entryD = {}
try:
with Connection(cfgOb=self.__cfgOb, resourceName=self.__resourceName) as client:
mg = MongoDbUtil(client)
if mg.collectionExists(dbName, collectionName):
logger.info("%s %s document count is %d", dbName, collectionName, mg.count(dbName, collectionName))
qD = {}
if selectionQueryD:
qD.update(qD)
selectL = ["rcsb_entry_container_identifiers"]
dL = mg.fetch(dbName, collectionName, selectL, queryD=qD)
logger.info("Selection %r fetch result count %d", selectL, len(dL))
#
for dD in dL:
#
if (
("rcsb_entry_container_identifiers" in dD)
and ("entry_id" in dD["rcsb_entry_container_identifiers"])
and ("polymer_entity_ids" in dD["rcsb_entry_container_identifiers"])
and dD["rcsb_entry_container_identifiers"]["polymer_entity_ids"]
):
entryD[dD["rcsb_entry_container_identifiers"]["entry_id"]] = {"polymer_entity_ids": dD["rcsb_entry_container_identifiers"]["polymer_entity_ids"]}
except Exception as e:
logger.exception("Failing with %s", str(e))
return entryD
#
def __selectPolymerEntities(self, entryD, **kwargs):
"""Skeleton entity selector recovering essential biological sequence mapping features
for macromolecules (default type = protein).
"1CP9": {
"polymer_entity_ids": [
"1",
"2"
],
"selected_polymer_entities": {
"1": {
"rcsb_multiple_source_flag": "N",
"asym_ids": [
"A"
],
"auth_asym_ids": [
"A"
],
"entity_id": "1",
"type": "polypeptide(L)",
"rcsb_entity_polymer_type": "Protein",
"rcsb_entity_source_organism": [
{
"ncbi_taxonomy_id": 587,
"beg_seq_num": 1,
"end_seq_num": 205,
"ncbi_scientific_name": "<NAME>"
}
],
"struct_ref": [
{
"id": "1",
"db_name": "UNP",
"pdbx_db_accession": "Q7WZI9",
"entity_id": "1",
"pdbx_seq_one_letter_code": "QSTQIKIERDNYGVPHIYANDTYSLFYGYGYA...",
"alignD": {
"A": [
{
"align_id": "1",
"ref_id": "1",
"pdbx_PDB_id_code": "1CP9",
"pdbx_strand_id": "A",
"seq_align_beg": 1,
"seq_align_end": 205,
"pdbx_db_accession": "Q7WZI9",
"db_align_beg": 24,
"db_align_end": 228,
"pdbx_auth_seq_align_beg": "1",
"pdbx_auth_seq_align_end": "205",
"rcsb_entity_id": "1"
}
]
}
}
]
},
"2": {
"rcsb_multiple_source_flag": "N",
"asym_ids": [
"B"
],
"auth_asym_ids": [
"B"
],
"entity_id": "2",
"type": "polypeptide(L)",
"rcsb_entity_polymer_type": "Protein",
"rcsb_entity_source_organism": [
{
"ncbi_taxonomy_id": 587,
"beg_seq_num": 1,
"end_seq_num": 553,
"ncbi_scientific_name": "<NAME>"
}
],
"struct_ref": [
{
"id": "2",
"db_name": "UNP",
"pdbx_db_accession": "Q7WZI9",
"entity_id": "2",
"pdbx_seq_one_letter_code": "SNVWLVGKTKASGAKAILLNGPQFGWFNPAYTYGIGLHG",
"alignD": {
"B": [
{
"align_id": "2",
"ref_id": "2",
"pdbx_PDB_id_code": "1CP9",
"pdbx_strand_id": "B",
"seq_align_beg": 1,
"seq_align_end": 553,
"pdbx_db_accession": "Q7WZI9",
"db_align_beg": 285,
"db_align_end": 837,
"pdbx_auth_seq_align_beg": "1",
"pdbx_auth_seq_align_end": "553",
"rcsb_entity_id": "2"
}
]
}
}
]
}
}
},
"""
dbName = kwargs.get("dbName", "pdbx_core")
collectionName = kwargs.get("collectionName", "pdbx_core_polymer_entity")
resultKey = kwargs.get("resultKey", "selected_polymer_entities")
entryLimit = kwargs.get("entryLimit", None)
selectionQueryD = kwargs.get("entitySelectionQuery", {"entity_poly.rcsb_entity_polymer_type": "Protein"})
| |
<filename>tests/basic_deployment.py<gh_stars>0
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import swiftclient
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OpenStackAmuletUtils,
DEBUG,
)
# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(DEBUG)
class SwiftStorageBasicDeployment(OpenStackAmuletDeployment):
"""Amulet tests on a basic swift-storage deployment."""
    def __init__(self, series, openstack=None, source=None, stable=False):
        """Deploy the entire test environment.

        Steps run strictly in order: add services, wire relations, apply
        charm configuration, deploy, wait for every unit to report ready
        status, then build the test fixtures.
        """
        super(SwiftStorageBasicDeployment, self).__init__(series, openstack,
                                                          source, stable)
        self._add_services()
        self._add_relations()
        self._configure_services()
        self._deploy()
        u.log.info('Waiting on extended status checks...')
        exclude_services = []
        # Wait for deployment ready msgs, except exclusions
        self._auto_wait_for_status(exclude_services=exclude_services)
        self.d.sentry.wait()
        self._initialize_tests()
def _add_services(self):
"""Add services
Add the services that we're testing, where swift-storage is local,
and the rest of the service are from lp branches that are
compatible with the local charm (e.g. stable or next).
"""
this_service = {'name': 'swift-storage'}
other_services = [
{'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},
{'name': 'keystone'},
{'name': 'glance'},
{'name': 'swift-proxy'}
]
super(SwiftStorageBasicDeployment, self)._add_services(this_service,
other_services)
def _add_relations(self):
"""Add all of the relations for the services."""
relations = {
'keystone:shared-db': 'percona-cluster:shared-db',
'swift-proxy:identity-service': 'keystone:identity-service',
'swift-storage:swift-storage': 'swift-proxy:swift-storage',
'glance:identity-service': 'keystone:identity-service',
'glance:shared-db': 'percona-cluster:shared-db',
'glance:object-store': 'swift-proxy:object-store'
}
super(SwiftStorageBasicDeployment, self)._add_relations(relations)
def _configure_services(self):
"""Configure all of the services."""
keystone_config = {
'admin-password': '<PASSWORD>',
'admin-token': '<PASSWORD>',
}
swift_proxy_config = {
'zone-assignment': 'manual',
'replicas': '1',
'swift-hash': 'fdfef9d4-8b06-11e2-8ac0-531c923c8fae',
}
swift_storage_config = {
'zone': '1',
'block-device': 'vdb',
'overwrite': 'true',
}
pxc_config = {
'dataset-size': '25%',
'max-connections': 1000,
'root-password': '<PASSWORD>',
'sst-password': '<PASSWORD>',
}
configs = {
'keystone': keystone_config,
'swift-proxy': swift_proxy_config,
'swift-storage': swift_storage_config,
'percona-cluster': pxc_config,
}
super(SwiftStorageBasicDeployment, self)._configure_services(configs)
    def _initialize_tests(self):
        """Perform final initialization before tests get run."""
        # Access the sentries for inspecting service units
        self.pxc_sentry = self.d.sentry['percona-cluster'][0]
        self.keystone_sentry = self.d.sentry['keystone'][0]
        self.glance_sentry = self.d.sentry['glance'][0]
        self.swift_proxy_sentry = self.d.sentry['swift-proxy'][0]
        self.swift_storage_sentry = self.d.sentry['swift-storage'][0]
        u.log.debug('openstack release val: {}'.format(
            self._get_openstack_release()))
        u.log.debug('openstack release str: {}'.format(
            self._get_openstack_release_string()))
        # Authenticate admin with keystone
        self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
                                                      user='admin',
                                                      password='<PASSWORD>',
                                                      tenant='admin')
        # Authenticate admin with glance endpoint
        self.glance = u.authenticate_glance_admin(self.keystone)
        # Authenticate swift user with the credentials swift-proxy published
        # on its identity-service relation.
        keystone_relation = self.keystone_sentry.relation(
            'identity-service', 'swift-proxy:identity-service')
        ep = self.keystone.service_catalog.url_for(service_type='identity',
                                                   endpoint_type='publicURL')
        self.swift = swiftclient.Connection(
            authurl=ep,
            user=keystone_relation['service_username'],
            key=keystone_relation['service_password'],
            tenant_name=keystone_relation['service_tenant'],
            auth_version='2.0')
        # Create a demo tenant/role/user (skipped if already present, so
        # reruns against an existing deployment stay idempotent)
        self.demo_tenant = 'demoTenant'
        self.demo_role = 'demoRole'
        self.demo_user = 'demoUser'
        if not u.tenant_exists(self.keystone, self.demo_tenant):
            tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
                                                  description='demo tenant',
                                                  enabled=True)
            self.keystone.roles.create(name=self.demo_role)
            self.keystone.users.create(name=self.demo_user,
                                       password='password',
                                       tenant_id=tenant.id,
                                       email='<EMAIL>')
        # Authenticate demo user with keystone
        self.keystone_demo = \
            u.authenticate_keystone_user(self.keystone, user=self.demo_user,
                                         password='password',
                                         tenant=self.demo_tenant)
def test_100_services(self):
"""Verify the expected services are running on the corresponding
service units."""
u.log.debug('Checking system services...')
swift_storage_services = ['swift-account',
'swift-account-auditor',
'swift-account-reaper',
'swift-account-replicator',
'swift-container',
'swift-container-auditor',
'swift-container-replicator',
'swift-container-sync',
'swift-container-updater',
'swift-object',
'swift-object-auditor',
'swift-object-replicator',
'swift-object-updater']
service_names = {
self.keystone_sentry: ['keystone'],
self.glance_sentry: ['glance-registry',
'glance-api'],
self.swift_proxy_sentry: ['swift-proxy'],
self.swift_storage_sentry: swift_storage_services
}
if self._get_openstack_release() >= self.trusty_liberty:
service_names[self.keystone_sentry] = ['apache2']
ret = u.validate_services_by_name(service_names)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_102_users(self):
"""Verify all existing roles."""
u.log.debug('Checking keystone users...')
user1 = {'name': 'demoUser',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': '<EMAIL>'}
user2 = {'name': 'admin',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': 'juju@localhost'}
user3 = {'name': 'glance',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': u'juju@localhost'}
user4 = {'name': 's3_swift',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': u'juju@localhost'}
expected = [user1, user2, user3, user4]
actual = self.keystone.users.list()
ret = u.validate_user_data(expected, actual)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_104_keystone_service_catalog(self):
"""Verify that the service catalog endpoint data is valid."""
u.log.debug('Checking keystone service catalog...')
endpoint_id = {'adminURL': u.valid_url,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url,
'id': u.not_null}
expected = {'image': [endpoint_id], 'object-store': [endpoint_id],
'identity': [endpoint_id], 's3': [endpoint_id]}
actual = self.keystone_demo.service_catalog.get_endpoints()
ret = u.validate_svc_catalog_endpoint_data(expected, actual)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_106_swift_object_store_endpoint(self):
"""Verify the swift object-store endpoint data."""
u.log.debug('Checking keystone endpoint for swift object store...')
endpoints = self.keystone.endpoints.list()
admin_port = internal_port = public_port = '8080'
expected = {'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
'internalurl': u.valid_url,
'publicurl': u.valid_url,
'service_id': u.not_null}
ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
public_port, expected)
if ret:
message = 'object-store endpoint: {}'.format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_200_swift_storage_swift_storage_relation(self):
    """Verify the swift-storage to swift-proxy swift-storage relation
    data."""
    u.log.debug('Checking swift:swift-proxy swift-storage relation...')
    # Ports, zone and device the storage unit advertises to the proxy.
    expected = {
        'account_port': '6002',
        'zone': '1',
        'object_port': '6000',
        'container_port': '6001',
        'private-address': u.valid_ip,
        'device': 'vdb'
    }
    ret = u.validate_relation_data(
        self.swift_storage_sentry,
        ['swift-storage', 'swift-proxy:swift-storage'],
        expected)
    if ret:
        amulet.raise_status(
            amulet.FAIL,
            msg=u.relation_error('swift-storage swift-storage', ret))
def test_202_swift_proxy_swift_storage_relation(self):
    """Verify the swift-proxy to swift-storage swift-storage relation
    data."""
    u.log.debug('Checking swift-proxy:swift swift-storage relation...')
    # Data the proxy publishes back to the storage units: a sync trigger,
    # the ring distribution URL and the cluster-wide hash.
    expected = {
        'private-address': u.valid_ip,
        'trigger': u.not_null,
        'rings_url': u.valid_url,
        'swift_hash': u.not_null
    }
    ret = u.validate_relation_data(
        self.swift_proxy_sentry,
        ['swift-storage', 'swift-storage:swift-storage'],
        expected)
    if ret:
        amulet.raise_status(
            amulet.FAIL,
            msg=u.relation_error('swift-proxy swift-storage', ret))
def test_300_swift_config(self):
    """Verify the data in the swift-hash section of the swift config
    file."""
    u.log.debug('Checking swift config...')
    # The storage unit's swift.conf hash suffix must match the hash the
    # proxy shares over the swift-storage relation.
    proxy_relation = self.swift_proxy_sentry.relation(
        'swift-storage', 'swift-storage:swift-storage')
    expected = {'swift_hash_path_suffix': proxy_relation['swift_hash']}
    ret = u.validate_config_data(self.swift_storage_sentry,
                                 '/etc/swift/swift.conf',
                                 'swift-hash', expected)
    if ret:
        amulet.raise_status(
            amulet.FAIL,
            msg="swift config error: {}".format(ret))
def test_302_account_server_config(self):
    """Verify the data in the account server config file."""
    u.log.debug('Checking swift account-server config...')
    unit = self.swift_storage_sentry
    conf = '/etc/swift/account-server.conf'
    expected = {
        'DEFAULT': {
            'bind_ip': '0.0.0.0',
            'bind_port': '6002',
            'workers': '1'
        },
        'pipeline:main': {
            'pipeline': 'recon account-server'
        },
        'filter:recon': {
            'use': 'egg:swift#recon',
            'recon_cache_path': '/var/cache/swift'
        },
        'app:account-server': {
            'use': 'egg:swift#account'
        }
    }
    # dict.iteritems() exists only on Python 2; items() behaves the same
    # here and keeps the test importable under Python 3 as well.
    for section, pairs in expected.items():
        ret = u.validate_config_data(unit, conf, section, pairs)
        if ret:
            message = "account server config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)
def test_304_container_server_config(self):
    """Verify the data in the container server config file."""
    u.log.debug('Checking swift container-server config...')
    unit = self.swift_storage_sentry
    conf = '/etc/swift/container-server.conf'
    expected = {
        'DEFAULT': {
            'bind_ip': '0.0.0.0',
            'bind_port': '6001',
            'workers': '1'
        },
        'pipeline:main': {
            'pipeline': 'recon container-server'
        },
        'filter:recon': {
            'use': 'egg:swift#recon',
            'recon_cache_path': '/var/cache/swift'
        },
        'app:container-server': {
            'use': 'egg:swift#container',
            'allow_versions': 'true'
        }
    }
    # dict.iteritems() exists only on Python 2; items() behaves the same
    # here and keeps the test importable under Python 3 as well.
    for section, pairs in expected.items():
        ret = u.validate_config_data(unit, conf, section, pairs)
        if ret:
            message = "container server config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)
def test_306_object_server_config(self):
    """Verify the data in the object server config file."""
    u.log.debug('Checking swift object-server config...')
    unit = self.swift_storage_sentry
    conf = '/etc/swift/object-server.conf'
    expected = {
        'DEFAULT': {
            'bind_ip': '0.0.0.0',
            'bind_port': '6000',
            'workers': '1'
        },
        'pipeline:main': {
            'pipeline': 'recon object-server'
        },
        'filter:recon': {
            'use': 'egg:swift#recon',
            'recon_cache_path': '/var/cache/swift'
        },
        'app:object-server': {
            'use': 'egg:swift#object',
            'threads_per_disk': '4'
        },
        'object-replicator': {
            'concurrency': '1'
        }
    }
    # dict.iteritems() exists only on Python 2; items() behaves the same
    # here and keeps the test importable under Python 3 as well.
    for section, pairs in expected.items():
        ret = u.validate_config_data(unit, conf, section, pairs)
        if ret:
            message = "object server config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)
def test_400_swift_backed_image_create(self):
    """Create an instance in glance, which is backed by swift, and validate
    that some of the metadata for the image match in glance and swift."""
    u.log.debug('Checking swift objects and containers with a '
                'swift-backed glance image...')

    # Create swift-backed glance image
    image = u.create_cirros_image(self.glance, "cirros-image-1")

    # Validate that swift object's checksum/size match that from glance.
    # Exactly one container holding exactly one object is expected.
    _headers, containers = self.swift.get_account()
    if len(containers) != 1:
        amulet.raise_status(
            amulet.FAIL,
            msg="Expected 1 swift container, found {}".format(
                len(containers)))

    _headers, objects = self.swift.get_container(containers[0].get('name'))
    if len(objects) != 1:
        amulet.raise_status(
            amulet.FAIL,
            msg="Expected 1 swift object, found {}".format(len(objects)))

    swift_object_size = objects[0].get('bytes')
    swift_object_md5 = objects[0].get('hash')
    if image.size != swift_object_size:
        amulet.raise_status(
            amulet.FAIL,
            msg="Glance image size {} != swift object size {}".format(
                image.size, swift_object_size))
    if image.checksum != swift_object_md5:
        amulet.raise_status(
            amulet.FAIL,
            msg="Glance image hash {} != swift object hash {}".format(
                image.checksum, swift_object_md5))

    # Cleanup
    u.delete_resource(self.glance.images, image.id, msg="glance image")
    u.log.info('OK')
def test_900_restart_on_config_change(self):
"""Verify that the specified services are restarted when the config
is changed."""
u.log.info('Checking that conf files and system services respond '
'to a charm config change...')
sentry = self.swift_storage_sentry
juju_service = 'swift-storage'
# Expected default and alternate values
set_default = {'object-server-threads-per-disk': '4'}
set_alternate = {'object-server-threads-per-disk': '2'}
# Config file affected by juju set config change, and
# services which are expected to restart upon config change
services = {'swift-object-server': 'object-server.conf',
'swift-object-auditor': 'object-server.conf',
'swift-object-replicator': 'object-server.conf',
'swift-object-updater': 'object-server.conf'}
# Make config change, check for service restarts
u.log.debug('Making | |
FUN = FUN + Data[items[int(o)]]['sym'] + '+'
NAM = NAM + Data[items[int(o)]]['sym'] + '_'
print(' Function ' + FUN + ' is going to be propagated within PPDDM.')
# Parsing and selecting variable dependencies.
fun = parse_expr(FUN, transformations=transformations)
listvar = list(VariableExtractor(FUN))
v = {}; f = {}; f['fun'] = FUN; d = {}
for p in range(0, len(listvar)):
v[listvar[p]] = sp.symbols(listvar[p])
# Reading required data values.
for q in range(0, len(listvar)):
for r in range(0, len(Data)):
if Data[list(Data.keys())[r]]['sym'] == list(v.keys())[q]:
d[list(v.keys())[q]] = Data[list(Data.keys())[r]]
# Expression of U, total uncertainty of the function.
c = len(listvar); U = sp.symbols('U'); U = 0; u = sp.symbols('u')
for s in range(0, c):
if d[list(v.keys())[s]]['unc'][0] != 0:
U = U + (sp.diff(f['fun'], v[list(v.keys())[s]])) ** 2 * \
(u(v[list(v.keys())[s]])) ** 2
f['ufn'] = sp.simplify(U); del U; f['fun'] = sp.simplify(fun)
# Eval function all over the data.
dat = MAT[:, [0]]; uBd = MAT[:, [1]]
f['dat'] = float(sum(sum(dat))/aux3)
f['unc'] = float(np.sqrt((1/(aux3**2))*sum(sum(uBd**2))))
UncPrint(f['dat'],f['unc'])
# Creating the new variable.
NAM = 'mean_' + NAM
print(' New variable was created: ' + NAM)
SYM = '\\langle ' + SYM + ' \\rangle'
NEW = {'dat': np.array([f['dat']]), 'unc': np.array([f['unc']])}
NEW['sym'] = SYM; NEW['uni'] = UNI
StoreVar(NEW, NAM, ppath, 'Data')
with open(ppath + 'Statistics' + '.txt', 'a') as aux:
aux.write(separ + 'x' + separ + '\n\n')
aux.write('No que segue propagamos os datos de $' + SYM + '$')
aux.write(',\n\\[ ' + SYM + ' = ' + sp.latex(f['fun']) + ' \\],\n')
aux.write('mediante o método de derivadas parciais coa expresión\n')
aux.write('\\[u^2(' + SYM + ')=' + sp.latex(f['ufn']) + '\\]')
aux.write('\nObtemos, xa que logo, os seguintes resultados:\n')
# Error case
else:
print(' Invalid Mode!')
except:
print(' Cannot finish the job! Maybe you missselected the variables.')
###############################################################################
def StaReadSelect(self):
    """Return the text of every item currently selected in the
    statistics variable-selection list widget."""
    # Collect each selected item's display text, preserving widget order.
    return [item.text() for item in self.StaVariableSelect.selectedItems()]
###############################################################################
##########################################################################################
##########################################################################################
# Uncertainty propagator. ################################################################
def ProPropagateButton(self):
    """Propagate uncertainties through a user-supplied function.

    Reads a symbolic expression and metadata (variable name, LaTeX symbol,
    units) from the GUI text boxes, computes the partial-derivative
    uncertainty expression u^2(f) = sum (df/dx_i)^2 * u^2(x_i), evaluates
    both the function and its uncertainty over every data point stored in
    the project's 'Data' store, saves the result as a new variable, and
    appends a (Galician) LaTeX write-up to Propagator.txt.
    """
    print('')
    cprint(" Uncertainty propagator " + sectspa, \
           'white', 'on_blue', attrs=['bold'], file=sys.stderr)
    # Project directory and user inputs from the GUI widgets.
    ppath = self.DirectoryName()
    FUN = str(self.ProFunctionBox.text())
    fun = parse_expr(FUN, transformations=transformations)
    NAM = str(self.ProVariableNameBox.text())
    SYM = str(self.ProSymbolicNameBox.text())
    UNI = str(self.ProVariableUnitsBox.text())
    # Free variables appearing in the expression; v maps each name to a
    # sympy Symbol, d will map each name to its stored data record.
    listvar = list(VariableExtractor(FUN))
    v = {}; f = {}; f['fun'] = FUN; d = {}
    for k in range(0, len(listvar)):
        v[listvar[k]] = sp.symbols(listvar[k])
    # Load the project's data store and match each expression variable to
    # its record by the stored 'sym' field.
    Data = LoadVar(ppath, 'Data')
    for l in range(0, len(listvar)):
        for m in range(0, len(Data)):
            if Data[list(Data.keys())[m]]['sym'] == list(v.keys())[l]:
                d[list(v.keys())[l]] = Data[list(Data.keys())[m]]
    # Expression of U, total uncertainty of the function.
    # Variables whose first uncertainty entry is 0 are treated as exact
    # and skipped. NOTE(review): `u(...)` calls a Symbol to build an
    # applied function u(x); this relies on sympy allowing Symbol calls —
    # confirm against the installed sympy version.
    c = len(listvar); U = sp.symbols('U'); U = 0; u = sp.symbols('u')
    for n in range(0, c):
        if d[list(v.keys())[n]]['unc'][0] != 0:
            U = U + (sp.diff(f['fun'], v[list(v.keys())[n]])) ** 2 * \
                (u(v[list(v.keys())[n]])) ** 2
    f['ufn'] = sp.simplify(U); del U; f['fun'] = fun
    # Eval function all over the data.
    f['dat'] = np.zeros(len(d[list(v.keys())[0]]['dat']), )
    f['unc'] = np.zeros(len(d[list(v.keys())[0]]['dat']), )
    for p in range(0, len(f['dat'])):
        # aux1 substitutes point values for symbols; aux2 substitutes the
        # matching uncertainty values into the u(x) placeholders.
        aux1 = {}; aux2 = {}
        for o in range(0, c):
            aux1[v[list(v.keys())[o]]] = d[list(v.keys())[o]]['dat'][p]
            aux2[u(v[list(v.keys())[o]])] = d[list(v.keys())[o]]['unc'][p]
        f['dat'][p] = float(f['fun'].subs(aux1))
        aux3 = f['ufn'].subs(aux2)
        f['unc'][p] = np.sqrt(float(aux3.subs(aux1)))
    # Creating the new variable and persisting it to the 'Data' store.
    NEW = {'dat': f['dat'], 'unc': f['unc'], 'sym': SYM, 'uni': UNI}
    StoreVar(NEW, NAM, ppath, 'Data')
    # Append the propagation write-up (Galician LaTeX) to the report file.
    with open(ppath + 'Propagator' + '.txt', 'a') as aux:
        aux.write('\n' + separ + 'x' + separ + '\n\n')
        aux.write('No que segue propagamos os datos de $' + SYM + '$')
        aux.write(',\n\\[ ' + SYM + ' = ' + sp.latex(f['fun']) + ' \\],\n')
        aux.write('mediante o método de derivadas parciais coa expresión\n')
        aux.write('\\[u^2(' + SYM + ')=' + sp.latex(f['ufn']) + '\\]')
        aux.write('\nObtemos, xa que logo, os seguintes resultados:')
##########################################################################################
##########################################################################################
# Test de KaiSqr. ########################################################################
def KaySqrButton(self):
print('')
cprint(" KaiSqr hypothesis test " + sectspa, \
'white', 'on_blue', attrs=['bold'], file=sys.stderr)
ppath = self.DirectoryName()
# PHASE 1 - Subs parameter values in FUN
FUN = str(self.KaiDependentBox.text())
fun = parse_expr(FUN, transformations=transformations); f={}; f['fun'] = fun
print(f)
# Reading parameters (b).
rows = self.KaiParameterTable.rowCount(); null = 0; b = {}
for l in range(0, rows):
if self.KaiParameterTable.item(l, 0) is None:
null = null + 1
else:
try:
try:
b[str(self.KaiParameterTable.item(l, 0).text())] = float(
self.KaiParameterTable.item(l, 1).text())
except ValueError:
null = null + 1
except AttributeError:
null = null + 1
print(b) #OK
FUN = str(f['fun'].subs(b))
print(f['fun'].subs(b))
print(FUN)
print('aqui')
AUX = list(VariableExtractor(FUN))
YFN = str(self.KaiIndependentBox.text())
FUN = '(' + YFN + '-(' + FUN + '))'
fun = parse_expr(FUN, transformations=transformations); print(fun)
listvar = list(VariableExtractor(FUN))
v = {}; f = {}; f['fun'] = FUN; d = {}
for k in range(0, len(listvar)):
v[listvar[k]] = sp.symbols(listvar[k])
# Reading Data.
try:
Data = LoadVar(ppath, 'Data')
for l in range(0, len(listvar)):
for m in range(0, len(Data)):
if Data[list(Data.keys())[m]]['sym'] == list(v.keys())[l]:
d[list(v.keys())[l]] = Data[list(Data.keys())[m]]
# Expression of U, total uncertainty of the function
c = len(listvar); f['fun'] = fun; print(f['fun'])
# Eval function all over the data.
f['dat'] = np.zeros(len(d[list(v.keys())[0]]['dat']), )
except:
print('Error loading database. Check its existence.')
try:
for p in range(0, len(f['dat'])):
aux1 = {}; aux2 = {}
for o in range(0, c):
aux1[v[list(v.keys())[o]]] = d[list(v.keys())[o]]['dat'][p]
f['dat'][p] = float(f['fun'].subs(aux1))
except:
print('# Something went wrong, maybe array sizes? Check that, and try again.')
print(' Following results may not be correct because of data error.')
print(f)
try:
rKai = sum((f['dat']/d[YFN]['unc'])**2); cl = float(self.KaiConfidenceBox.text()); print(rKai)
pKai = stats.chi2.ppf(cl,len(f['dat'])-len(b.keys()))
print(' Testing at ' + str(cl) + 'confidence level.')
with open(ppath + 'KaiSqr' + '.txt', 'a') as aux:
aux.write('\n' + separ + 'x' + separ + '\n\n')
aux.write('Non obstante, cómpre ver mediante, por exemplo, un test--')
aux.write('$\\chi ^2$, que o axuste é satisfactorio. Para iso imos ')
aux.write('supoñer que os nosos puntos se axustan a $' + str(YFN) + ' = f(')
aux.write(str(AUX[0]) + ')$ para o conxunto $\\lbrace ' + str(AUX[0]) + '_i,')
aux.write(str(YFN) + '_i \\rbrace _{i=1}^n$ dos nosos ' + str(len(f['dat'])))
aux.write(' valores, e que a distribución nai das medidas $' + str(YFN) + '$')
aux.write(' é gaussiana, posto que en xeral esta é a distribución que ')
aux.write('goberna os procesos de medida. Daquela é esperable que \n')
aux.write('\\[z_i = \\frac{' + str(YFN) + ' _i -\\bar{' + str(YFN) + ' _i }}')
aux.write('{\\sigma_i}\\] \n sexa $z_i \in N(0,1)$ de tal modo que podemos ')
aux.write('agardar que, \n \\[\\chi ^2 = \\sum_{i=1}^n \\frac{(' + str(YFN))
aux.write('_i -\\bar{' + str(YFN) + '}_i)^2}{\\sigma_i^2} \\] \n')
aux.write('responda a unha distribución $\chi^2$ de Pearson. Asumimos agora ')
aux.write('que, a primeira orde, os valores que medimos DE ALGO ')
aux.write('coinciden coas predicións da lei teórica e polo tanto é licita a ')
aux.write('aproximación $\\bar{'+str(YFN)+'_i} \\approx f('+str(AUX[0])+')$.')
aux.write(' Ademais, dado que temos incertezas variables nas medidas DE ')
aux.write('ALGO, tomamos por válido que $\\sigma_i^2 \\approx u^2('+str(YFN))
aux.write('_i )$ polo que, conseguintemente,\n\\begin{equation}\n')
aux.write('\\chi ^2 = \\sum_{i=1}^n \\frac{(' +str(YFN)+' -f('+ str(AUX[0]))
aux.write('))^2}{u^2(' + str(YFN) + ')}.\n \\end{equation}')
if rKai>=pKai:
print('# WARNING! Your fitting model is not a valid one.')
aux.write('Partimos dos ' + str(len(f['dat'])) + ' valores medidos no ')
aux.write('laboratorio e como temos '+str(len(b.keys()))+ ' parámetros ')
aux.write('que determinamos mediante o axuste, temos ao final ')
aux.write(str(len(f['dat'])-len(b.keys())) + ' graos de liberdade que ')
aux.write('gobernan a distribución de Pearson. Practicamos un test—')
aux.write('$\chi^2$ cun nivel de confianza de')
aux.write(str(float(cl)) + 'polo que empregamos o')
aux.write(' percentil $\chi_{' + str(1 - float(cl)))
aux.write(';' + str(len(f['dat']) - len(b.keys())) + '}^2$.')
aux.write('Se comparamos este valor co obtido coa suma de cadrados, ')
aux.write('concluímos que, como \n\\[ \chi_{')
aux.write(str(1 - float(self.KaiConfidenceBox.text())) + ';')
aux.write(str(len(f['dat']) - len(b.keys())) + '}^2 = ' + str(pKai) + '<')
aux.write(str(rKai) + '\\]\n e rexeitamos por tanto a hipótese proposta ')
aux.write('a este nivel de confianza \n\n COMENTAR CONCLUSIÓNS')
elif rKai<pKai:
print('# Your model is OK: ' + str(rKai) + ' < ' + str(pKai))
aux.write('Partimos dos ' + str(len(f['dat'])) + ' valores medidos no ')
aux.write('laboratorio e como temos '+str(len(b.keys()))+ ' parámetros ')
aux.write('que determinamos mediante o axuste, temos ao final ')
aux.write(str(len(f['dat'])-len(b.keys())) + ' graos de liberdade que ')
aux.write('gobernan a distribución de Pearson. Practicamos un test—')
aux.write('$\chi^2$ cun nivel de confianza de ')
aux.write(str(cl)+' polo que | |
import os
import subprocess
from functools import partial
from pathlib import Path
from typing import Iterable, Union, List, Dict, Optional
import cv2
import tensorflow as tf
import torch
import yaml
from modelci.hub.client.onnx_client import CVONNXClient
from modelci.hub.client.tfs_client import CVTFSClient
from modelci.hub.client.torch_client import CVTorchClient
from modelci.hub.client.trt_client import CVTRTClient
from modelci.hub.converter import TorchScriptConverter, TFSConverter, TRTConverter, ONNXConverter
from modelci.hub.utils import parse_path, generate_path, TensorRTPlatform
from modelci.persistence.service import ModelService
from modelci.types.bo import IOShape, Task, Metric, ModelVersion, Engine, Framework, Weight, DataType, ModelBO
__all__ = ['get_remote_model_weight', 'register_model', 'register_model_from_yaml', 'retrieve_model',
'retrieve_model_by_task']
def register_model(
        origin_model,
        dataset: str,
        metric: Dict[Metric, float],
        task: Task,
        inputs: List[IOShape],
        outputs: List[IOShape],
        model_input: Optional[List] = None,
        architecture: str = None,
        framework: Framework = None,
        engine: Engine = None,
        version: ModelVersion = None,
        convert=True,
        profile=True,
):
    """Upload a model to ModelDB.
    This function will upload the given model into the database with some variation. It may optionally generate a
    branch of models (i.e. model family) with different optimization techniques. Besides, a benchmark will be
    scheduled for each generated model, in order to gain profiling results for model selection strategies.
    When `origin_model` is a file path, `architecture`, `framework`, `engine`
    and `version` may be None; they will then be induced from the path at
    registration time. A `ValueError` is raised if the meta info cannot be
    induced.
    TODO:
        This function has a super comprehensive logic, need to be simplified.
    Arguments:
        origin_model: The uploaded model without optimization. When `convert` is unset, this parameter should
            be a str indicating model file path.
        architecture (str): Model architecture name. Default to None.
        framework (Framework): Framework name. Default to None.
        version (ModelVersion): Model version. Default to None.
        dataset (str): Model testing dataset.
        metric (Dict[Metric,float]): Scoring metric and its corresponding score used for model evaluation
        task (Task): Model task type.
        inputs (Iterable[IOShape]): Model input tensors.
        outputs (Iterable[IOShape]): Model output tensors.
        model_input: specify sample model input data
            TODO: specify more model conversion related params
        engine (Engine): Model optimization engine. Default to None.
        convert (bool): Flag for generation of model family. When set, `origin_model` should be a path to model saving
            file. Default to `True`.
        profile (bool): Flag for profiling uploaded (including converted) models. Default to `True`.
    """
    # Deferred imports: the controller package imports from the hub, so a
    # top-level import here would create a circular dependency.
    from modelci.controller import job_executor
    from modelci.controller.executor import Job
    model_dir_list = list()
    # type and existence check
    if isinstance(origin_model, str):
        # `origin_model` is a path to a saved weight file on disk.
        model_dir = Path(origin_model).absolute()
        assert model_dir.exists(), f'model weight does not exist at {origin_model}'
        if all([architecture, task, framework, engine, version]):
            # from explicit architecture, framework, engine and version
            ext = model_dir.suffix
            path = generate_path(architecture, task, framework, engine, version).with_suffix(ext)
            # if already in the destination folder
            if path == model_dir:
                pass
            # create destination folder
            else:
                # A suffix means a single weight file (make the parent dir);
                # no suffix means the weight itself is a directory.
                if ext:
                    path.parent.mkdir(parents=True, exist_ok=True)
                else:
                    path.mkdir(parents=True, exist_ok=True)
                # copy to cached folder
                subprocess.call(['cp', model_dir, path])
        else:  # from implicit extracted from path, check validity of the path later at registration
            path = model_dir
        model_dir_list.append(path)
    elif framework == Framework.PYTORCH and engine == Engine.PYTORCH:
        # save original pytorch model object as a .pth file in the cache tree
        pytorch_dir = generate_path(
            task=task,
            model_name=architecture,
            framework=framework,
            engine=Engine.PYTORCH,
            version=str(version),
        )
        pytorch_dir.parent.mkdir(parents=True, exist_ok=True)
        save_path_with_ext = pytorch_dir.with_suffix('.pth')
        torch.save(origin_model, str(save_path_with_ext))
        model_dir_list.append(pytorch_dir.with_suffix('.pth'))
    if convert:
        # TODO: generate from path name
        # generate model variant (TorchScript / ONNX / TFS / TRT, depending
        # on the source framework)
        model_dir_list.extend(_generate_model_family(
            origin_model,
            architecture,
            task,
            framework,
            filename=str(version),
            inputs=inputs,
            outputs=outputs,
            model_input=model_input
        ))
    # register each saved/converted artifact; its meta info is re-parsed
    # from the artifact's path so converted variants get correct engines
    for model_dir in model_dir_list:
        parse_result = parse_path(model_dir)
        architecture = parse_result['architecture']
        task = parse_result['task']
        framework = parse_result['framework']
        engine = parse_result['engine']
        version = parse_result['version']
        filename = parse_result['filename']
        # keep the weight file handle open while posting so the Weight
        # wrapper can stream the file contents into the database
        with open(str(model_dir), 'rb') as f:
            model = ModelBO(
                name=architecture,
                task=task,
                framework=framework,
                engine=engine,
                version=version,
                dataset=dataset,
                metric=metric,
                inputs=inputs,
                outputs=outputs,
                weight=Weight(f, filename=filename)
            )
            ModelService.post_model(model)
        # TODO refresh
        # re-fetch so `model` carries the DB-assigned fields
        model = ModelBO.get_models if False else ModelService.get_models(
            name=architecture,
            task=task,
            framework=framework,
            engine=engine,
            version=version)[0]
        # profile registered model (the raw PyTorch weight is not servable,
        # so it is skipped)
        if profile and engine != Engine.PYTORCH:
            # sample image used as repeated client payload for benchmarking
            file = tf.keras.utils.get_file(
                "grace_hopper.jpg",
                "https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg")
            test_img_bytes = cv2.imread(file)
            kwargs = {
                'repeat_data': test_img_bytes,
                'batch_size': 32,
                'batch_num': 100,
                'asynchronous': False,
                'model_info': model,
            }
            # dispatch to the serving client matching the model's engine
            if engine == Engine.TORCHSCRIPT:
                client = CVTorchClient(**kwargs)
            elif engine == Engine.TFS:
                client = CVTFSClient(**kwargs)
            elif engine == Engine.ONNX:
                client = CVONNXClient(**kwargs)
            elif engine == Engine.TRT:
                client = CVTRTClient(**kwargs)
            else:
                raise ValueError(f'No such serving engine: {engine}')
            job_cuda = Job(client=client, device='cuda:0', model_info=model)
            # job_cpu = Job(client=client, device='cpu', model_info=model)
            job_executor.submit(job_cuda)
            # job_executor.submit(job_cpu)
def register_model_from_yaml(file_path: Union[Path, str]):
    """Register a model into ModelDB from a YAML definition file.

    The YAML file supplies the weight location, dataset, metric, I/O shapes
    and optional meta info (architecture, task, framework, engine, version);
    string fields are converted to their typed counterparts and forwarded
    to `register_model`.
    """
    def convert_ioshape_plain_to_ioshape(ioshape_plain):
        """Convert IOShape-like dictionary to IOShape.
        """
        # unpack
        i, ioshape_plain = ioshape_plain
        assert isinstance(ioshape_plain['shape'], Iterable), \
            f'inputs[{i}].shape expected to be iterable, but got {ioshape_plain["shape"]}'
        assert isinstance(ioshape_plain['dtype'], str), \
            f'inputs[{i}].dtype expected to be a `DataType`, but got {ioshape_plain["dtype"]}.'
        ioshape_plain['dtype'] = DataType[ioshape_plain['dtype']]
        return IOShape(**ioshape_plain)

    # check if file exist
    file_path = Path(file_path)
    assert file_path.exists(), f'Model definition file at {str(file_path)} does not exist'

    # read yaml
    with open(file_path) as f:
        model_config = yaml.safe_load(f)

    # TODO able to parse ~ in file path by os.path.expanduser
    # mandatory fields
    origin_model = model_config['weight']
    dataset = model_config['dataset']
    metric = model_config['metric']
    # optional fields
    model_input = model_config.get('model_input', None)
    architecture = model_config.get('architecture', None)
    task = model_config.get('task', None)
    framework = model_config.get('framework', None)
    engine = model_config.get('engine', None)
    version = model_config.get('version', None)
    convert = model_config.get('convert', True)

    # convert inputs and outputs
    inputs = [convert_ioshape_plain_to_ioshape(pair)
              for pair in enumerate(model_config['inputs'])]
    outputs = [convert_ioshape_plain_to_ioshape(pair)
               for pair in enumerate(model_config['outputs'])]
    if model_input is not None:
        model_input = [convert_ioshape_plain_to_ioshape(pair)
                       for pair in enumerate(model_input)]

    # wrap POJO: promote plain strings/values to their enum/typed forms
    task = Task[task.upper()] if task is not None else None
    metric = ({Metric[key.upper()]: val for key, val in metric[0].items()}
              if metric is not None else None)
    framework = Framework[framework.upper()] if framework is not None else None
    engine = Engine[engine.upper()] if engine is not None else None
    version = ModelVersion(version) if version is not None else None

    # os.path.expanduser
    register_model(
        origin_model=origin_model,
        dataset=dataset,
        metric=metric,
        task=task,
        inputs=inputs,
        outputs=outputs,
        model_input=model_input,
        architecture=architecture,
        framework=framework,
        engine=engine,
        version=version,
        convert=convert,
    )
def _generate_model_family(
        model,
        model_name: str,
        task: Task,
        framework: Framework,
        filename: str,
        inputs: List[IOShape],
        model_input: Optional[List] = None,
        outputs: List[IOShape] = None,
        max_batch_size: int = 32
):
    """Convert a source model into its serving-optimized variants.

    PyTorch models are converted to TorchScript and ONNX; TensorFlow models
    to TFS (SavedModel) and TRT. Returns the list of generated artifact
    paths (empty for frameworks with no conversion support).

    Arguments:
        model: The in-memory source model object.
        model_name (str): Model architecture name, used to build cache paths.
        task (Task): Model task type.
        framework (Framework): Source framework of `model`.
        filename (str): Version string used as the artifact file name.
        inputs (List[IOShape]): Model input tensor shapes.
        model_input (Optional[List]): Sample model input data for tracing.
        outputs (List[IOShape]): Model output tensor shapes.
        max_batch_size (int): Maximum batch size for the TRT conversion.
            Default 32 — the value that was previously hard-coded; the
            parameter is now actually forwarded instead of being ignored.
    """
    generated_dir_list = list()
    # All variants share task/name/framework/version; only the engine varies.
    generate_this_path = partial(generate_path, task=task, model_name=model_name, framework=framework, version=filename)
    torchscript_dir = generate_this_path(engine=Engine.TORCHSCRIPT)
    tfs_dir = generate_this_path(engine=Engine.TFS)
    onnx_dir = generate_this_path(engine=Engine.ONNX)
    trt_dir = generate_this_path(engine=Engine.TRT)
    if framework == Framework.PYTORCH:
        # to TorchScript
        if TorchScriptConverter.from_torch_module(model, torchscript_dir):
            generated_dir_list.append(torchscript_dir.with_suffix('.zip'))
        # to ONNX, TODO(lym): batch cache, input shape, opset version
        if ONNXConverter.from_torch_module(model, onnx_dir, inputs, outputs, model_input, optimize=False):
            generated_dir_list.append(onnx_dir.with_suffix('.onnx'))
        # to TRT
        # TRTConverter.from_onnx(
        #     onnx_path=onnx_dir.with_suffix('.onnx'), save_path=trt_dir, inputs=inputs, outputs=outputs
        # )
    elif framework == Framework.TENSORFLOW:
        # to TFS
        TFSConverter.from_tf_model(model, tfs_dir)
        generated_dir_list.append(tfs_dir.with_suffix('.zip'))
        # to TRT — forward max_batch_size instead of hard-coding 32
        TRTConverter.from_saved_model(tfs_dir, trt_dir, inputs, outputs, max_batch_size=max_batch_size)
        generated_dir_list.append(trt_dir.with_suffix('.zip'))
    # Always return a list: the previous version fell off the end and
    # returned None for unsupported frameworks, crashing callers that
    # .extend() the result.
    return generated_dir_list
def get_remote_model_weight(model: ModelBO):
    """Download a local cache of model from remote ModelDB in a structured path. And generate a configuration file.
    TODO(lym):
        1. set force insert config.pbtxt
        2. set other options in generation of config.pbtxt (e.g. max batch size, instance group...)
    This function will keep a local cache of the used model in the path:
        `~/.modelci/<architecture_name>/<framework>-<engine>/<task>/<version>`
    Arguments:
        model (ModelBO): Model business object.
    Return:
        Path: Model saved path.
    """
    save_path = model.saved_path
    save_path.parent.mkdir(exist_ok=True, parents=True)
    # Only download when the cache does not exist yet.
    if not save_path.exists():
        with open(str(save_path), 'wb') as f:
            f.write(model.weight.weight)
        # TFS and TRT weights are zipped SavedModel trees: unpack in place
        # and drop the archive. (The two branches were previously duplicated.)
        if model.engine in (Engine.TFS, Engine.TRT):
            subprocess.call(['unzip', save_path, '-d', '/'])
            os.remove(save_path)
        if model.engine == Engine.TRT:
            # TRT additionally needs a generated config.pbtxt next to the model.
            TRTConverter.generate_trt_config(
                save_path.parent,  # ~/.modelci/<model-arch-name>/<framework>-<engine>/<task>/
                inputs=model.inputs,
                outputs=model.outputs,
                arch_name=model.name,
                platform=TensorRTPlatform.TENSORFLOW_SAVEDMODEL
            )
    return save_path
def _get_remote_model_weights(models: List[ModelBO]):
    """Download weights for one representative model per unique
    (task, architecture, framework, engine) combination.

    NOTE(review): the original docstring claimed the *highest version* of
    each pair is downloaded, but the first model encountered per pair is
    used — this relies on the caller's ordering; confirm callers pass
    models sorted by version.
    """
    seen = set()
    for model in models:
        key = (model.task, model.name, model.framework, model.engine)
        if key not in seen:
            seen.add(key)
            get_remote_model_weight(model)
def delete_remote_weight(model: ModelBO):
    """Remove the locally cached weight for `model`.

    Single-file engines (TorchScript, ONNX) store one weight file; other
    engines store an extracted directory tree.
    NOTE(review): os.removedirs only removes *empty* directories and their
    empty parents — confirm the cached tree is empty by this point.
    """
    save_path = model.saved_path
    if model.engine in (Engine.TORCHSCRIPT, Engine.ONNX):
        os.remove(save_path)
    else:
        os.removedirs(save_path)
def retrieve_model(
architecture_name: str = 'ResNet50',
task: Task = None,
framework: Framework = None,
engine: Engine = None,
version: ModelVersion = None,
download: bool = True,
) -> List[ModelBO]:
"""Query a model by name, task, framework, engine or version.
Arguments:
architecture_name (str): Model architecture name.
task (Task): which machine learn task is model used for,Default to None
framework (Framework): Framework name, optional query key. Default to None.
engine (Engine): Model optimization engine name.
version (ModelVersion): Model version. Default to None.
download (bool): Flag for whether the model needs to be cached locally.
Returns:
List[ModelBO]: A list of model business object.
"""
# retrieve
models = ModelService.get_models(architecture_name, task=task, framework=framework, engine=engine, | |
# Source: cjsteel/python3-venv-ansible-2.10.5 — lib/python3.8/site-packages/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ldap.py
#!/usr/bin/python
# (c) 2020, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: na_santricity_ldap
short_description: NetApp E-Series manage LDAP integration to use for authentication
description:
- Configure an E-Series system to allow authentication via an LDAP server
author:
- Michael Price (@lmprice)
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- netapp_eseries.santricity.santricity.santricity_doc
options:
state:
description:
- When I(state=="present") the defined LDAP domain will be added to the storage system.
- When I(state=="absent") the domain specified will be removed from the storage system.
- I(state=="disabled") will result in deleting all existing LDAP domains on the storage system.
type: str
choices:
- present
- absent
- disabled
default: present
identifier:
description:
- This is a unique identifier for the configuration (for cases where there are multiple domains configured).
type: str
default: "default"
required: false
bind_user:
description:
- This is the user account that will be used for querying the LDAP server.
- Required when I(bind_password) is specified.
- "Example: CN=MyBindAcct,OU=ServiceAccounts,DC=example,DC=com"
type: str
required: false
bind_password:
description:
- This is the password for the bind user account.
- Required when I(bind_user) is specified.
type: str
required: false
server_url:
description:
- This is the LDAP server url.
- The connection string should be specified as using the ldap or ldaps protocol along with the port information.
type: str
required: false
names:
description:
- The domain name[s] that will be utilized when authenticating to identify which domain to utilize.
- Default to use the DNS name of the I(server).
- The only requirement is that the name[s] be resolvable.
- "Example: <EMAIL>"
type: list
required: false
search_base:
description:
- The search base is used to find group memberships of the user.
- "Example: ou=users,dc=example,dc=com"
type: str
required: false
role_mappings:
description:
- This is where you specify which groups should have access to what permissions for the
storage-system.
- For example, all users in group A will be assigned all 4 available roles, which will allow access
to all the management functionality of the system (super-user). Those in group B only have the
storage.monitor role, which will allow only read-only access.
- This is specified as a mapping of regular expressions to a list of roles. See the examples.
- The roles that will be assigned to the group/groups matching the provided regex.
- storage.admin allows users full read/write access to storage objects and operations.
- storage.monitor allows users read-only access to storage objects and operations.
- support.admin allows users access to hardware, diagnostic information, the Major Event
Log, and other critical support-related functionality, but not the storage configuration.
- security.admin allows users access to authentication/authorization configuration, as well
as the audit log configuration, and certification management.
type: dict
required: false
group_attributes:
description:
- The user attributes that should be considered for the group to role mapping.
- Typically this is used with something like "memberOf", and a user's access is tested against group
membership or lack thereof.
type: list
default: ["memberOf"]
required: false
user_attribute:
description:
- This is the attribute we will use to match the provided username when a user attempts to
authenticate.
type: str
default: "sAMAccountName"
required: false
notes:
- Check mode is supported
- This module allows you to define one or more LDAP domains identified uniquely by I(identifier) to use for
authentication. Authorization is determined by I(role_mappings), in that different groups of users may be given
different (or no), access to certain aspects of the system and API.
- The local user accounts will still be available if the LDAP server becomes unavailable/inaccessible.
- Generally, you'll need to get the details of your organization's LDAP server before you'll be able to configure
the system for using LDAP authentication; every implementation is likely to be very different.
- This API is currently only supported with the Embedded Web Services API v2.0 and higher, or the Web Services Proxy
v3.0 and higher.
"""
EXAMPLES = """
- name: Disable LDAP authentication
na_santricity_ldap:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "<PASSWORD>"
validate_certs: true
state: absent
- name: Remove the "default" LDAP domain configuration
na_santricity_ldap:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "<PASSWORD>"
validate_certs: true
state: absent
identifier: default
- name: Define a new LDAP domain, utilizing defaults where possible
na_santricity_ldap:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "<PASSWORD>"
validate_certs: true
state: present
bind_user: "CN=MyBindAccount,OU=ServiceAccounts,DC=example,DC=com"
bind_password: "<PASSWORD>"
server_url: "ldap://example.com:389"
search_base: "OU=Users,DC=example,DC=com"
role_mappings:
".*dist-dev-storage.*":
- storage.admin
- security.admin
- support.admin
- storage.monitor
"""
# Ansible RETURN block: documents the values this module returns on success.
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The ldap settings have been updated.
"""
from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
from ansible.module_utils._text import to_native
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
class NetAppESeriesLdap(NetAppESeriesModule):
NO_CHANGE_MSG = "No changes were necessary."
TEMPORARY_DOMAIN = "ANSIBLE_TMP_DOMAIN"
def __init__(self):
    """Build the Ansible argument spec and cache the validated parameters.

    Registers option interdependencies: server_url is required when
    state=present, and bind_user/bind_password must be supplied together.
    Also computes the web-services URL prefix used by every request.
    """
    ansible_options = dict(state=dict(type="str", required=False, default="present", choices=["present", "absent", "disabled"]),
                           identifier=dict(type="str", required=False, default="default"),
                           bind_user=dict(type="str", required=False),
                           bind_password=dict(type="str", required=False, no_log=True),
                           names=dict(type="list", required=False),
                           server_url=dict(type="str", required=False),
                           search_base=dict(type="str", required=False),
                           role_mappings=dict(type="dict", required=False, no_log=True),
                           group_attributes=dict(type="list", default=["memberOf"], required=False),
                           user_attribute=dict(type="str", required=False, default="sAMAccountName"))
    # A domain can only be added when the LDAP server URL is known.
    required_if = [["state", "present", ["server_url"]]]
    # Bind credentials only make sense as a pair.
    required_together = [["bind_user", "bind_password"]]
    super(NetAppESeriesLdap, self).__init__(ansible_options=ansible_options,
                                            web_services_version="02.00.0000.0000",
                                            required_if=required_if,
                                            required_together=required_together,
                                            supports_check_mode=True)
    args = self.module.params
    self.state = args["state"]
    self.id = args["identifier"]
    self.bind_user = args["bind_user"]
    self.bind_password = args["bind_password"]
    self.names = args["names"]
    self.server = args["server_url"]
    self.search_base = args["search_base"]
    self.role_mappings = args["role_mappings"]
    self.group_attributes = args["group_attributes"]
    self.user_attribute = args["user_attribute"]
    # Default the accepted domain names to the DNS host name of the
    # LDAP server URL (netloc without the port).
    if self.server and not self.names:
        parts = urlparse.urlparse(self.server)
        self.names = [parts.netloc.split(':')[0]]
    # Check whether request needs to be forwarded on to the controller web services rest api.
    self.url_path_prefix = ""
    if self.is_embedded():
        self.url_path_prefix = "storage-systems/1/"
    elif self.ssid != "0" and self.ssid != "proxy":
        self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/storage-systems/1/" % self.ssid
    self.existing_domain_ids = []  # domain ids to delete when state == "disabled"
    self.domain = {}  # Existing LDAP domain
    self.body = {}  # Request body
def get_domains(self):
    """Return the LDAP domains currently configured on the storage system."""
    result = None
    try:
        # Both the request and the response lookup are guarded; fail_json
        # aborts the module on any error.
        rc, response = self.request(self.url_path_prefix + "ldap")
        result = response["ldapDomains"]
    except Exception as error:
        self.module.fail_json(msg="Failed to retrieve current LDAP configuration. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
    return result
def build_request_body(self):
    """Assemble the LDAP domain definition into self.body."""
    payload = {"id": self.id, "groupAttributes": self.group_attributes,
               "ldapUrl": self.server, "names": self.names, "roleMapCollection": []}
    # Optional settings are only sent when provided.
    if self.search_base:
        payload["searchBase"] = self.search_base
    if self.user_attribute:
        payload["userAttribute"] = self.user_attribute
    if self.bind_user and self.bind_password:
        payload["bindLookupUser"] = {"password": self.bind_password, "user": self.bind_user}
    # Expand each regex -> [roles] mapping into one entry per role.
    if self.role_mappings:
        for regex, names in self.role_mappings.items():
            payload["roleMapCollection"].extend(
                {"groupRegex": regex, "ignorecase": True, "name": name} for name in names)
    self.body.update(payload)
def are_changes_required(self):
    """Determine whether any changes are required and build request body.

    Returns:
        bool: True when the requested state differs from the array's
        current LDAP configuration.

    Side effects: populates self.existing_domain_ids (state == "disabled")
    and self.domain (the matching existing domain, when one exists).
    NOTE(review): role-map comparison reads self.body["roleMapCollection"],
    so build_request_body() must run before this — confirm caller order.
    """
    change_required = False
    domains = self.get_domains()
    if self.state == "disabled" and domains:
        # Disabling removes every configured domain.
        self.existing_domain_ids = [domain["id"] for domain in domains]
        change_required = True
    elif self.state == "present":
        for domain in domains:
            if self.id == domain["id"]:
                self.domain = domain
                if self.state == "absent":
                    # NOTE(review): unreachable inside this state=="present"
                    # branch; kept as-is.
                    change_required = True
                # Compare every configurable attribute against the
                # existing domain; the first mismatch flags a change.
                elif (len(self.group_attributes) != len(domain["groupAttributes"]) or
                      any([a not in domain["groupAttributes"] for a in self.group_attributes])):
                    change_required = True
                elif self.user_attribute != domain["userAttribute"]:
                    change_required = True
                elif self.search_base.lower() != domain["searchBase"].lower():
                    # NOTE(review): raises AttributeError when search_base
                    # was omitted (None) — confirm callers always set it.
                    change_required = True
                elif self.server != domain["ldapUrl"]:
                    change_required = True
                elif any(name not in domain["names"] for name in self.names) or any(name not in self.names for name in domain["names"]):
                    change_required = True
                elif self.role_mappings:
                    if len(self.body["roleMapCollection"]) != len(domain["roleMapCollection"]):
                        change_required = True
                    else:
                        # Every requested role map must already exist;
                        # for/else triggers when one is missing.
                        for role_map in self.body["roleMapCollection"]:
                            for existing_role_map in domain["roleMapCollection"]:
                                if role_map["groupRegex"] == existing_role_map["groupRegex"] and role_map["name"] == existing_role_map["name"]:
                                    break
                            else:
                                change_required = True
                # Bind credentials cannot be read back, so verify them by
                # creating a temporary domain and running the LDAP test.
                if not change_required and self.bind_user and self.bind_password:
                    if self.bind_user != domain["bindLookupUser"]["user"]:
                        change_required = True
                    elif self.bind_password:
                        temporary_domain = None
                        try:
                            # Check whether temporary domain exists
                            if any(domain["id"] == self.TEMPORARY_DOMAIN for domain in domains):
                                self.delete_domain(self.TEMPORARY_DOMAIN)
                            temporary_domain = self.add_domain(temporary=True, skip_test=True)
                            rc, tests = self.request(self.url_path_prefix + "ldap/test", method="POST")
                            temporary_domain_test = {}
                            domain_test = {}
                            for test in tests:
                                if test["id"] == temporary_domain["id"]:
                                    temporary_domain_test = test["result"]
                                if self.id == test["id"]:
                                    domain_test = test["result"]
                            # New credentials pass but existing ones fail
                            # -> the stored password differs.
                            if temporary_domain_test["authenticationTestResult"] == "ok" and domain_test["authenticationTestResult"] != "ok":
                                change_required = True
                            elif temporary_domain_test["authenticationTestResult"] != "ok":
                                self.module.fail_json(msg="Failed to authenticate bind credentials! Array Id [%s]." % self.ssid)
                        finally:
                            # Always clean up the temporary domain.
                            if temporary_domain:
                                self.delete_domain(self.TEMPORARY_DOMAIN)
                break
        else:
            # No existing domain with this id -> it must be created.
            change_required = True
    elif self.state == "absent":
        for domain in domains:
            if self.id == domain["id"]:
                change_required = True
    return change_required
def add_domain(self, temporary=False, skip_test=False):
"""Add domain to storage system."""
domain = None
body | |
each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
"""
for line in lines:
if _SEARCH_C_FILE.search(line):
for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
_global_error_suppressions[category] = True
def ResetNolintSuppressions():
    """Clear every NOLINT suppression collected so far."""
    for suppressions in (_error_suppressions, _global_error_suppressions):
        suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
    """Returns true if the specified error category is suppressed on this line.

    Consults the global error_suppressions map populated by
    ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.

    Args:
      category: str, the category of the error.
      linenum: int, the current line number.

    Returns:
      bool, True iff the error should be suppressed due to a NOLINT comment
      or global suppression.
    """
    # File-wide suppression for this category?
    if _global_error_suppressions.get(category, False):
        return True
    # Line-specific suppression, either for this category or for the
    # category-less NOLINT (keyed by None).
    if linenum in _error_suppressions.get(category, set()):
        return True
    return linenum in _error_suppressions.get(None, set())
def Match(pattern, s):
    """Matches the string with the pattern, caching the compiled regexp."""
    # The cache lookup is inlined in both Match and Search for performance
    # reasons; factoring it out into a separate function turns out to be
    # noticeably expensive.
    compiled = _regexp_compile_cache.get(pattern)
    if compiled is None:
        compiled = sre_compile.compile(pattern)
        _regexp_compile_cache[pattern] = compiled
    return compiled.match(s)
def Search(pattern, s):
    """Searches the string for the pattern, caching the compiled regexp."""
    # Same inlined cache as Match(), kept duplicated for speed.
    compiled = _regexp_compile_cache.get(pattern)
    if compiled is None:
        compiled = sre_compile.compile(pattern)
        _regexp_compile_cache[pattern] = compiled
    return compiled.search(s)
class _IncludeState(object):
"""Tracks line numbers for includes, and the order in which includes appear.
include_list contains list of lists of (header, line number) pairs.
It's a lists of lists rather than just one flat list to make it
easier to update across preprocessor boundaries.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
def __init__(self):
self.include_list = [[]]
self.ResetSection('')
def ResetSection(self, directive):
"""Reset section checking for preprocessor directive.
Args:
directive: preprocessor directive (e.g. "if", "else").
"""
# Update list of includes. Note that we never pop from the
# include list.
if directive in ('if', 'ifdef', 'ifndef'):
self.include_list.append([])
elif directive in ('else', 'elif'):
self.include_list[-1] = []
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.error_count = 0 # global count of reported errors
self.errors_by_category = {} # string to int dict storing error counts
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
def PrintError(self, message):
sys.stderr.write(message)
_cpplint_state = _CppLintState()
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if not self.in_a_function:
return
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class FileInfo(object):
    """Provides utility functions for filenames.

    FileInfo provides easy access to the components of a file's path
    relative to the project root.
    """

    def __init__(self, filename):
        self._filename = filename

    def FullName(self):
        """Make Windows paths like Unix."""
        return os.path.abspath(self._filename).replace('\\', '/')

    def Extension(self):
        """File extension - text following the final period, includes that period.

        Bug fix: os.path.splitext returns a (root, ext) tuple; the
        documented contract is the extension string only, so index [1].
        """
        return os.path.splitext(self.FullName())[1]
def Error(filename, linenum, category, confidence, message):
    """Logs the fact we've found a lint error.

    We log where the error was found, and also our confidence in the error,
    that is, how certain we are this is a legitimate style regression, and
    not a misidentification or a use that's sometimes justified.

    False positives can be suppressed by the use of "cpplint(category)"
    comments on the offending line. These are parsed into
    _error_suppressions.

    Args:
      filename: The name of the file containing the error.
      linenum: The number of the line containing the error.
      category: A string used to describe the "category" this bug
        falls under: "whitespace", say, or "runtime". Categories
        may have a hierarchy separated by slashes: "whitespace/indent".
      confidence: A number from 1-5 representing a confidence score for
        the error, with 5 meaning that we are certain of the problem,
        and 1 meaning that it could be a legitimate construct.
      message: The error message.
    """
    # Suppressed errors are neither counted nor printed.
    if IsErrorSuppressedByNolint(category, linenum):
        return
    _cpplint_state.IncrementErrorCount(category)
    sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
        filename, linenum, message, category, confidence))
# NOTE(review): 'regex' here presumably aliases the stdlib 're' module
# (its import is outside this chunk) — confirm.
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = regex.compile(
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = regex.compile(
    r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
    _RE_PATTERN_C_COMMENTS + r'\s+|' +
    r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
    _RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
    """Does line terminate so, that the next symbol is in string constant.

    This function does not consider single-line nor multi-line comments.

    Args:
      line: is a partial line of code starting from the 0..n.

    Returns:
      True, if next character appended to 'line' is inside a
      string constant.
    """
    # Collapse escaped backslashes first so that \\" is not mistaken for
    # an escaped quote.
    sanitized = line.replace(r'\\', 'XX')
    # Quotes that actually open/close a string: total quotes minus escaped
    # quotes and quoted double-quote character literals ('"').
    open_quotes = sanitized.count('"') - sanitized.count(r'\"') - sanitized.count("'\"'")
    return open_quotes % 2 == 1
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = '""'
# Look for beginning of a raw string, and replace them with
# empty strings. This is done in a loop to handle multiple raw
# strings on the same line.
while delimiter is None:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
#
# Once we have matched a raw string, we check the prefix of the
# line to make sure that the line is not part of a single line
# comment. It's done this way because we remove raw strings
# before removing comments as opposed to removing comments
# before removing raw strings. This is because there are some
# cpplint checks that requires the comments to be preserved, but
# we don't want to check comments that are inside raw strings.
matched | |
# -*- coding: utf-8 -*-
# Copyright (c) 2021 by <NAME>
import os
import math
import json
import copy
import torch
import logging
import numpy as np
import torch.nn as nn
from tqdm import tqdm
from transformers.data import processors
from transformers.file_utils import is_torch_available
from transformers import glue_processors, glue_output_modes
from transformers.tokenization_utils_base import BatchEncoding
from transformers.models.bert.tokenization_bert import whitespace_tokenize
from transformers.data.processors.utils import DataProcessor #, InputExample
if is_torch_available():
import torch
from torch.utils.data import TensorDataset
logger = logging.getLogger(__name__)
def gelu(x):
    """Exact GELU, as in the original Google BERT repo.

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    # Gaussian CDF evaluated at x, via the error function.
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
def gelu_new(x):
    """Tanh-approximation GELU currently in the Google BERT repo
    (identical to OpenAI GPT).

    Also see https://arxiv.org/abs/1606.08415
    """
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    softplus_x = nn.functional.softplus(x)
    return x * torch.tanh(softplus_x)
# Registry mapping activation names (as used in model configs, e.g.
# config.hidden_act) to their implementations.
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new, "mish": mish}
def split_ques_context(sequence_output, pq_end_pos, ques_max_len=64, context_max_len=512 - 64, sep_tok_len=1):
    """Split a packed [CLS] question [SEP] context encoding into separate
    question / context tensors with matching attention masks.

    Args:
        sequence_output: (batch, seq_len, hidden) encoder output for inputs
            packed as [CLS] question [SEP] context — TODO confirm layout.
        pq_end_pos: per-example pair [q_end, p_end] with the question end
            index and passage end index (indexable as pq_end_pos[i][0/1]).
        ques_max_len: maximum question tokens copied (was hard-coded 64).
        context_max_len: maximum context tokens copied (was hard-coded 448).
        sep_tok_len: number of separator tokens between question and context.

    Returns:
        (ques_sequence_output, context_sequence_output,
         ques_attention_mask, context_attention_mask); the sequence outputs
        are zero-padded to their respective max lengths.
    """
    batch_size, _, hidden_size = sequence_output.size()
    ques_sequence_output = sequence_output.new_zeros((batch_size, ques_max_len, hidden_size))
    context_sequence_output = sequence_output.new_zeros((batch_size, context_max_len, hidden_size))
    context_attention_mask = sequence_output.new_zeros((batch_size, context_max_len))
    ques_attention_mask = sequence_output.new_zeros((batch_size, ques_max_len))
    for i in range(batch_size):
        q_end = pq_end_pos[i][0]
        p_end = pq_end_pos[i][1]
        q_len = min(ques_max_len, q_end)
        c_len = min(context_max_len, p_end - q_end - sep_tok_len)
        # Question tokens start right after [CLS] (index 1).
        ques_sequence_output[i, :q_len] = sequence_output[i, 1: 1 + q_len]
        # Context tokens start after the question and the separator token(s).
        c_start = q_end + sep_tok_len + 1
        context_sequence_output[i, :c_len] = sequence_output[i, c_start: c_start + c_len]
        ques_attention_mask[i, :q_len] = 1
        context_attention_mask[i, :c_len] = 1
    return ques_sequence_output, context_sequence_output, ques_attention_mask, context_attention_mask
def masked_softmax(vector: torch.Tensor,
                   mask: torch.Tensor,
                   dim: int = -1,
                   memory_efficient: bool = False,
                   mask_fill_value: float = -1e32) -> torch.Tensor:
    """Softmax over `vector` that assigns (near-)zero probability to
    positions where `mask` is 0.

    Args:
        vector: logits tensor.
        mask: 0/1 tensor broadcastable to `vector` (extra leading dims of
            `vector` are handled by unsqueezing); None means plain softmax.
        dim: dimension over which to normalize.
        memory_efficient: if True, fill masked logits with
            `mask_fill_value` before the softmax instead of renormalizing
            afterwards.
        mask_fill_value: large negative fill used by the efficient path.

    Returns:
        Tensor of the same shape as `vector`.
    """
    if mask is None:
        return torch.nn.functional.softmax(vector, dim=dim)
    mask = mask.float()
    # Broadcast the mask across any extra leading dims of the vector.
    while mask.dim() < vector.dim():
        mask = mask.unsqueeze(1)
    if not memory_efficient:
        # To limit numerical errors from large vector elements outside
        # the mask, we zero these out, then renormalize over the
        # unmasked entries only.
        result = torch.nn.functional.softmax(vector * mask, dim=dim)
        result = result * mask
        result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
    else:
        # Bug fix: masked_fill requires a bool mask in modern PyTorch;
        # uint8 (.byte()) masks are deprecated/removed.
        masked_vector = vector.masked_fill((1 - mask).bool(), mask_fill_value)
        result = torch.nn.functional.softmax(masked_vector, dim=dim)
    return result
class SCAttention(nn.Module):
    """Soft cross-attention from passage positions over question positions.

    forward() computes ReLU(Linear(alpha @ question)), where alpha is a
    masked softmax of passage-question dot-product scores.
    """

    def __init__(self, input_size, hidden_size):
        super(SCAttention, self).__init__()
        self.hidden_size = hidden_size
        self.W = nn.Linear(input_size, hidden_size)
        self.map_linear = nn.Linear(hidden_size, hidden_size)
        self.init_weights()

    def init_weights(self):
        """Xavier-initialize W's weight and set its bias to 0.1."""
        nn.init.xavier_uniform_(self.W.weight.data)
        self.W.bias.data.fill_(0.1)

    def forward(self, passage, question, q_mask):
        # NOTE(review): self.W is initialized but never applied here; the
        # raw inputs are used directly (kept as-is to preserve behavior).
        scores = torch.bmm(passage, question.transpose(2, 1))
        # One copy of the question mask per passage position.
        expanded_mask = q_mask.unsqueeze(1).repeat(1, passage.size(1), 1)
        alpha = masked_softmax(scores, expanded_mask)
        attended = torch.bmm(alpha, question)
        return nn.ReLU()(self.map_linear(attended))
class TrmCoAtt(nn.Module):
    """Transformer-style cross-attention layer with a feed-forward block.

    Queries come from `input_ids_1`, keys/values from `input_ids`
    (despite the names, both are hidden-state tensors, not token ids —
    the attention mask applies to the key/value side). The output is
    LayerNorm(FFN + LayerNorm(residual + projected attention)).
    """

    def __init__(self, config):
        super(TrmCoAtt, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = config.output_attentions
        self.num_attention_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        # Per-head width; all_head_size == hidden_size when divisible.
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pruned_heads = set()
        self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)
        self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)
        self.activation = ACT2FN[config.hidden_act]

    def transpose_for_scores(self, x):
        # (batch, seq, hidden) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    # The attention mask corresponds to input_ids (the key/value side).
    def forward(self, input_ids, input_ids_1, attention_mask=None, head_mask=None):
        # Convert the 0/1 mask to additive logits: masked positions get -10000.
        extended_attention_mask = attention_mask[:, None, None, :]
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        attention_mask = extended_attention_mask
        mixed_query_layer = self.query(input_ids_1)
        mixed_key_layer = self.key(input_ids)
        mixed_value_layer = self.value(input_ids)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        # NOTE(review): reshaped_context_layer is computed but never used below.
        reshaped_context_layer = context_layer.view(*new_context_layer_shape)
        # ALBERT-style output projection via einsum on the un-flattened heads.
        # Should find a better way to do this
        w = self.dense.weight.t().view(self.num_attention_heads, self.attention_head_size, self.hidden_size).to(context_layer.dtype)
        b = self.dense.bias.to(context_layer.dtype)
        projected_context_layer = torch.einsum("bfnd,ndh->bfh", context_layer, w) + b
        projected_context_layer_dropout = self.dropout(projected_context_layer)
        # Residual connection from the query-side input, then LayerNorm.
        layernormed_context_layer = self.LayerNorm(input_ids_1 + projected_context_layer_dropout)
        ffn_output = self.ffn(layernormed_context_layer)
        ffn_output = self.activation(ffn_output)
        ffn_output = self.ffn_output(ffn_output)
        hidden_states = self.full_layer_layer_norm(ffn_output + layernormed_context_layer)
        return hidden_states
def squad_convert_examples_to_features(
examples, tokenizer, max_seq_length, doc_stride, max_query_length,
is_training, return_dataset=False, regression=False, pq_end=False,
):
# Defining helper methods
unique_id = 1000000000
features = []
for (example_index, example) in enumerate(tqdm(examples, desc="Converting examples to features")):
if is_training and not example.is_impossible:
# Get start and end position
start_position = example.start_position
end_position = example.end_position
# If the answer cannot be found in the text, then skip this example.
actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)])
cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text)
continue
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text
)
spans = []
truncated_query = tokenizer.encode(
example.question_text, add_special_tokens=False, max_length=max_query_length
)
sequence_added_tokens = (
tokenizer.model_max_length - tokenizer.max_len_single_sentence + 1
if "roberta" in str(type(tokenizer))
else tokenizer.model_max_length - tokenizer.max_len_single_sentence
)
sequence_pair_added_tokens = tokenizer.model_max_length - tokenizer.max_len_sentences_pair
span_doc_tokens = all_doc_tokens
while len(spans) * doc_stride < len(all_doc_tokens):
encoded_dict = tokenizer.encode_plus(
truncated_query if tokenizer.padding_side == "right" else span_doc_tokens,
span_doc_tokens if tokenizer.padding_side == "right" else truncated_query,
max_length=max_seq_length,
return_overflowing_tokens=True,
return_token_type_ids=True,
pad_to_max_length=True,
stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens,
truncation_strategy="only_second" if tokenizer.padding_side == "right" else "only_first",
)
paragraph_len = min(
len(all_doc_tokens) - len(spans) * doc_stride,
max_seq_length - len(truncated_query) - sequence_pair_added_tokens,
)
if tokenizer.pad_token_id in encoded_dict["input_ids"]:
non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)]
else:
non_padded_ids = encoded_dict["input_ids"]
tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)
token_to_orig_map = {}
for i in range(paragraph_len):
index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i
token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i]
encoded_dict["paragraph_len"] = paragraph_len
encoded_dict["tokens"] = tokens
encoded_dict["token_to_orig_map"] = token_to_orig_map
encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens
encoded_dict["token_is_max_context"] = {}
encoded_dict["start"] = len(spans) * doc_stride
encoded_dict["length"] = paragraph_len
spans.append(encoded_dict)
if "overflowing_tokens" not in encoded_dict or (
"overflowing_tokens" in encoded_dict and len(encoded_dict["overflowing_tokens"]) == 0
):
break
span_doc_tokens = encoded_dict["overflowing_tokens"]
for doc_span_index in range(len(spans)):
for j in range(spans[doc_span_index]["paragraph_len"]):
is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j)
index = (
j
if tokenizer.padding_side == "left"
else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j
)
spans[doc_span_index]["token_is_max_context"][index] = is_max_context
for span in spans:
# Identify the position of the CLS token
cls_index = span["input_ids"].index(tokenizer.cls_token_id)
# p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)
# Original TF implem also keep the classification token (set to 0) (not sure why...)
p_mask = np.array(span["token_type_ids"])
p_mask = np.minimum(p_mask, 1)
if tokenizer.padding_side == "right":
# Limit positive values to one
p_mask = 1 - p_mask
p_mask[np.where(np.array(span["input_ids"]) == tokenizer.sep_token_id)[0]] = 1
# Set the CLS index to '0'
p_mask[cls_index] = 0
span_is_impossible = example.is_impossible
# if example.qas_id == "5a8d7bf7df8bba001a0f9ab2":
# print("hello")
# if span_is_impossible:
# print("True")
start_position = 0
end_position = 0
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = span["start"]
doc_end = span["start"] + span["length"] - 1
| |
# From the HPRNet repository (nerminsamet/HPRNet).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from .utils import _gather_feat, _tranpose_and_gather_feat
def _nms(heat, kernel=3):
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(
heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
'''
# Slow for large number of categories
def _topk(scores, K=40):
batch, cat, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, -1), K)
topk_clses = (topk_inds / (height * width)).int()
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs
'''
def _topk_channel(scores, K=40):
batch, cat, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
return topk_scores, topk_inds, topk_ys, topk_xs
def _topk(scores, K=40):
    """Select the overall top-K detections across all channels.

    First takes K peaks per channel, then re-ranks those C*K candidates
    to keep the global top K.

    Args:
        scores: b x C x H x W score map.
        K: number of detections kept per image.

    Returns:
        (topk_score, topk_inds, topk_clses, topk_ys, topk_xs), each b x K;
        ``topk_inds`` are flat indices into the H*W plane and
        ``topk_clses`` the channel (class) of each detection.
    """
    batch, cat, height, width = scores.size()

    topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)

    topk_inds = topk_inds % (height * width)
    # Integer floor division instead of the original float true-division
    # plus `.int()` truncation (deprecated and precision-losing for
    # large indices).
    topk_ys = (topk_inds // width).int().float()
    topk_xs = (topk_inds % width).int().float()

    topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)
    # Candidates are laid out channel-major, K per channel, so the
    # candidate index divided by K recovers the channel id.
    topk_clses = (topk_ind // K).int()
    topk_inds = _gather_feat(
        topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
    topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
    topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)

    return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
def multi_pose_decode(
        heat, wh, kps, reg=None, hm_hp=None, hp_offset=None, K=100):
    """Decode raw network heads into top-K pose detections.

    Args (all b x C x H x W feature maps):
        heat: center heatmap (the commented-out sigmoid suggests it is
            expected to be activated already — confirm at call site).
        wh: box width/height regression head.
        kps: 2*J joint offsets relative to the detected center.
        reg: optional sub-pixel center offset head.
        hm_hp: optional per-joint heatmap used to snap regressed joints
            to nearby heatmap peaks.
        hp_offset: optional sub-pixel joint offset head.
        K: number of detections kept per image.

    Returns:
        b x K x (4 + 1 + 2*J + 1) tensor: bbox (l, t, r, b), score,
        interleaved joint x/y pairs, class id — all in feature-map units.
    """
    batch, cat, height, width = heat.size()
    num_joints = kps.shape[1] // 2
    # heat = torch.sigmoid(heat)
    # perform nms on heatmaps
    heat = _nms(heat)
    scores, inds, clses, ys, xs = _topk(heat, K=K)

    # Gather per-detection joint offsets and convert them to absolute
    # coordinates by adding the detection center (x at even slots,
    # y at odd slots).
    kps = _tranpose_and_gather_feat(kps, inds)
    kps = kps.view(batch, K, num_joints * 2)
    kps[..., ::2] += xs.view(batch, K, 1).expand(batch, K, num_joints)
    kps[..., 1::2] += ys.view(batch, K, 1).expand(batch, K, num_joints)
    # Refine center with the sub-pixel offset head when available,
    # otherwise assume the pixel center.
    if reg is not None:
        reg = _tranpose_and_gather_feat(reg, inds)
        reg = reg.view(batch, K, 2)
        xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
        ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
    else:
        xs = xs.view(batch, K, 1) + 0.5
        ys = ys.view(batch, K, 1) + 0.5
    wh = _tranpose_and_gather_feat(wh, inds)
    wh = wh.view(batch, K, 2)
    clses = clses.view(batch, K, 1).float()
    scores = scores.view(batch, K, 1)

    # Box corners from center and width/height.
    bboxes = torch.cat([xs - wh[..., 0:1] / 2,
                        ys - wh[..., 1:2] / 2,
                        xs + wh[..., 0:1] / 2,
                        ys + wh[..., 1:2] / 2], dim=2)
    if hm_hp is not None:
        # Joint refinement: match each regressed joint to the nearest
        # confident peak of the per-joint heatmap.
        hm_hp = _nms(hm_hp)
        thresh = 0.1
        kps = kps.view(batch, K, num_joints, 2).permute(
            0, 2, 1, 3).contiguous()  # b x J x K x 2
        reg_kps = kps.unsqueeze(3).expand(batch, num_joints, K, K, 2)
        hm_score, hm_inds, hm_ys, hm_xs = _topk_channel(hm_hp, K=K)  # b x J x K
        if hp_offset is not None:
            hp_offset = _tranpose_and_gather_feat(
                hp_offset, hm_inds.view(batch, -1))
            hp_offset = hp_offset.view(batch, num_joints, K, 2)
            hm_xs = hm_xs + hp_offset[:, :, :, 0]
            hm_ys = hm_ys + hp_offset[:, :, :, 1]
        else:
            hm_xs = hm_xs + 0.5
            hm_ys = hm_ys + 0.5
        # Push low-confidence peaks far away (-10000) so they never win
        # the nearest-peak assignment below; their score is forced to -1.
        mask = (hm_score > thresh).float()
        hm_score = (1 - mask) * -1 + mask * hm_score
        hm_ys = (1 - mask) * (-10000) + mask * hm_ys
        hm_xs = (1 - mask) * (-10000) + mask * hm_xs
        hm_kps = torch.stack([hm_xs, hm_ys], dim=-1).unsqueeze(
            2).expand(batch, num_joints, K, K, 2)
        # Euclidean distance between every regressed joint and every peak.
        dist = (((reg_kps - hm_kps) ** 2).sum(dim=4) ** 0.5)
        min_dist, min_ind = dist.min(dim=3)  # b x J x K
        hm_score = hm_score.gather(2, min_ind).unsqueeze(-1)  # b x J x K x 1
        min_dist = min_dist.unsqueeze(-1)
        min_ind = min_ind.view(batch, num_joints, K, 1, 1).expand(
            batch, num_joints, K, 1, 2)
        hm_kps = hm_kps.gather(3, min_ind)
        hm_kps = hm_kps.view(batch, num_joints, K, 2)
        # Reject a matched peak when it falls outside the detection box,
        # is below threshold, or is farther than 0.3 * max box side;
        # in that case keep the regressed joint instead.
        l = bboxes[:, :, 0].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        t = bboxes[:, :, 1].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        r = bboxes[:, :, 2].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        b = bboxes[:, :, 3].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        mask = (hm_kps[..., 0:1] < l) + (hm_kps[..., 0:1] > r) + \
               (hm_kps[..., 1:2] < t) + (hm_kps[..., 1:2] > b) + \
               (hm_score < thresh) + (min_dist > (torch.max(b - t, r - l) * 0.3))
        mask = (mask > 0).float().expand(batch, num_joints, K, 2)
        kps = (1 - mask) * hm_kps + mask * kps
        kps = kps.permute(0, 2, 1, 3).contiguous().view(
            batch, K, num_joints * 2)
    detections = torch.cat([bboxes, scores, kps, clses], dim=2)
    return detections
def landmark_decode(
        heat, wh, kps, wh_face=None, reg=None, hm_hp=None, hp_offset=None, K=32,
        face_lms=None, hand_lms=None, foot_lms=None):
    """Decode whole-body detections: body pose plus face/hand sub-parts.

    Same pipeline as ``multi_pose_decode`` for the 23 body joints, with
    three extra regressed "part centers" (channels 46-51 of ``kps`` and
    channels 23-25 of ``hm_hp``) handed off to ``decode_single_part`` to
    decode the face and both hands.

    NOTE(review): channel splits (46/48/50/52, 23/24/25/26) are
    hard-coded; presumably the COCO-WholeBody layout — confirm against
    the training head definitions.

    Returns:
        b x K x D detections: bbox, score, body joints, face bbox/scores/
        landmarks, class, then left- and right-hand scores/landmarks.
    """
    batch, cat, height, width = heat.size()
    # 23 body joints: kps carries 26 x/y pairs, the last 3 are part centers.
    num_joints = kps.shape[1] // 2 - 3
    heat = _nms(heat)
    scores, inds, clses, ys, xs = _topk(heat, K=K)

    # Split off the face / left-hand / right-hand center-offset channels
    # and their heatmaps before decoding the body joints.
    kps_orj = kps.clone()
    face_kps = kps_orj[:, 46:48, :, :]
    face_hm_hp = hm_hp[:, 23:24, :, :]
    lefthand_kps = kps_orj[:, 48:50, :, :]
    lefthand_hm_hp = hm_hp[:, 24:25, :, :]
    righthand_kps = kps_orj[:, 50:52, :, :]
    righthand_hm_hp = hm_hp[:, 25:26, :, :]
    kps = kps[:, :46, :, :]
    hm_hp = hm_hp[:, :23, :, :]

    # Body joints: gather per-detection offsets and anchor them on the
    # detection center (x at even slots, y at odd slots).
    kps = _tranpose_and_gather_feat(kps, inds)
    kps = kps.view(batch, K, num_joints * 2)
    kps[..., ::2] += xs.view(batch, K, 1).expand(batch, K, num_joints)
    kps[..., 1::2] += ys.view(batch, K, 1).expand(batch, K, num_joints)
    # Sub-pixel center refinement, else assume the pixel center.
    if reg is not None:
        reg = _tranpose_and_gather_feat(reg, inds)
        reg = reg.view(batch, K, 2)
        xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
        ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
    else:
        xs = xs.view(batch, K, 1) + 0.5
        ys = ys.view(batch, K, 1) + 0.5
    wh = _tranpose_and_gather_feat(wh, inds)
    wh = wh.view(batch, K, 2)
    clses = clses.view(batch, K, 1).float()
    scores = scores.view(batch, K, 1)

    bboxes = torch.cat([xs - wh[..., 0:1] / 2,
                        ys - wh[..., 1:2] / 2,
                        xs + wh[..., 0:1] / 2,
                        ys + wh[..., 1:2] / 2], dim=2)

    # Decode each sub-part; hand_lms holds both hands (21 x/y pairs each).
    face_bboxes, face_cnt, hm_face_scores, face_lms = \
        decode_single_part(face_kps, face_lms, inds, xs, ys, batch, K, bboxes,
                           face_hm_hp, hp_offset, wh_face)
    lefthand_bboxes, lefthand_cnt, hm_lefthand_scores, lefthand_lms = \
        decode_single_part(lefthand_kps, hand_lms[:, :42, :, :], inds, xs, ys, batch, K, bboxes,
                           lefthand_hm_hp, hp_offset)
    righthand_bboxes, righthand_cnt, hm_righthand_scores, righthand_lms = \
        decode_single_part(righthand_kps, hand_lms[:, 42:, :, :], inds, xs, ys, batch, K, bboxes,
                           righthand_hm_hp, hp_offset)

    if hm_hp is not None:
        # Body-joint refinement: snap regressed joints to nearby
        # high-confidence heatmap peaks (same scheme as multi_pose_decode).
        hm_hp = _nms(hm_hp)
        thresh = 0.1
        kps = kps.view(batch, K, num_joints, 2).permute(
            0, 2, 1, 3).contiguous()  # b x J x K x 2
        reg_kps = kps.unsqueeze(3).expand(batch, num_joints, K, K, 2)
        hm_score, hm_inds, hm_ys, hm_xs = _topk_channel(hm_hp, K=K)  # b x J x K
        if hp_offset is not None:
            hp_offset = _tranpose_and_gather_feat(
                hp_offset, hm_inds.view(batch, -1))
            hp_offset = hp_offset.view(batch, num_joints, K, 2)
            hm_xs = hm_xs + hp_offset[:, :, :, 0]
            hm_ys = hm_ys + hp_offset[:, :, :, 1]
        else:
            hm_xs = hm_xs + 0.5
            hm_ys = hm_ys + 0.5
        # Push low-confidence peaks far away so they never win the
        # nearest-peak assignment; their score is forced to -1.
        mask = (hm_score > thresh).float()
        hm_score = (1 - mask) * -1 + mask * hm_score
        hm_ys = (1 - mask) * (-10000) + mask * hm_ys
        hm_xs = (1 - mask) * (-10000) + mask * hm_xs
        hm_kps = torch.stack([hm_xs, hm_ys], dim=-1).unsqueeze(
            2).expand(batch, num_joints, K, K, 2)
        dist = (((reg_kps - hm_kps) ** 2).sum(dim=4) ** 0.5)
        min_dist, min_ind = dist.min(dim=3)  # b x J x K
        hm_score = hm_score.gather(2, min_ind).unsqueeze(-1)  # b x J x K x 1
        min_dist = min_dist.unsqueeze(-1)
        min_ind = min_ind.view(batch, num_joints, K, 1, 1).expand(
            batch, num_joints, K, 1, 2)
        hm_kps = hm_kps.gather(3, min_ind)
        hm_kps = hm_kps.view(batch, num_joints, K, 2)
        # Reject peaks outside the box, below threshold, or farther than
        # 0.3 * max box side; fall back to the regressed joint.
        l = bboxes[:, :, 0].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        t = bboxes[:, :, 1].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        r = bboxes[:, :, 2].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        b = bboxes[:, :, 3].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        mask = (hm_kps[..., 0:1] < l) + (hm_kps[..., 0:1] > r) + \
               (hm_kps[..., 1:2] < t) + (hm_kps[..., 1:2] > b) + \
               (hm_score < thresh) + (min_dist > (torch.max(b - t, r - l) * 0.3))
        mask = (mask > 0).float().expand(batch, num_joints, K, 2)
        kps = (1 - mask) * hm_kps + mask * kps
        kps = kps.permute(0, 2, 1, 3).contiguous().view(
            batch, K, num_joints * 2)
    detections = torch.cat([bboxes, scores, kps, face_bboxes, hm_face_scores, face_lms, clses,
                            hm_lefthand_scores, lefthand_lms, hm_righthand_scores, righthand_lms, ], dim=2)
    return detections
def decode_single_part(part_cnt, part_lms, inds, xs, ys, batch, K, bboxes, hm_hp_part_cnt=None, hp_offset_part_cnt=None,
wh_part =None):
part_bboxes = None
num_part_joints | |
<gh_stars>0
import argparse
import os
import numpy as np
from tqdm import tqdm
import torch
# from utils.parallel import DataParallelModel, DataParallelCriterion
from modeling.postprocess import LanePostprocess
from apex import amp
from apex.parallel import DistributedDataParallel
from dataloaders import make_data_loader
from utils.loss import SegmentationLosses, SegmentationCELosses, SegmentationfocalLosses, FocalLoss, disc_loss
from utils.lr_scheduler import LR_Scheduler
from utils.saver import Saver
from utils.summaries import TensorboardSummary
from utils.metrics import Evaluator
from modeling.SCNN import SCNN
from scipy import misc
from collections import OrderedDict
import ssl
import mvpuai
from mvpuai.annotation.frame import MFrame
from mvpuai.resource.string import MString
import glog as log
from geomdl import BSpline, utilities
from BsplineModel.inference_bs import inference
from BsplineModel.GetBspline import GetBspline_from_sampled_points
ssl._create_default_https_context = ssl._create_unverified_context
class Point(object):
    """A mutable 2-D integer point backed by a numpy coordinate array.

    Parameters
    ----------
    x, y : int
        Initial coordinates.
    color_ : optional
        Accepted for backward compatibility; currently unused (the
        display-color support was removed).
    editable : bool, optional
        Whether the point may be edited. ``None`` (the default) means
        editable.
    """

    def __init__(self, x: int, y: int, color_=None, editable: bool = None):
        self.coord = np.array([x, y])
        # color_ is intentionally ignored; kept so existing callers
        # passing a color keep working.
        self.editable = True if editable is None else editable

    @property
    def x(self) -> int:
        """Horizontal coordinate as a plain int."""
        return int(self.coord[0])

    @x.setter
    def x(self, x: int):
        self.coord[0] = x

    @property
    def y(self) -> int:
        """Vertical coordinate as a plain int."""
        return int(self.coord[1])

    @y.setter
    def y(self, y: int):
        self.coord[1] = y
class Trainer(object):
    """Drives SCNN lane-segmentation training/validation and result export.

    Builds the saver, tensorboard writer, dataloader, model, optimizer,
    losses, evaluator and LR scheduler from ``args``, optionally resuming
    from a checkpoint. ``validation`` additionally post-processes lane
    predictions into B-spline control points and writes them as JSON via
    mvpuai.
    """

    def __init__(self, args):
        """Construct all training components from the parsed CLI ``args``."""
        self.args = args
        # Define Saver (rank 0 only when running distributed)
        if args.distributed:
            if args.local_rank == 0:
                self.saver = Saver(args)
        else:
            self.saver = Saver(args)
        # NOTE(review): when args.distributed and local_rank != 0,
        # self.saver is never assigned, so the next line would raise
        # AttributeError — confirm non-zero ranks never reach here.
        self.saver.save_experiment_config()
        # Define Tensorboard Summary
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()
        # PATH = args.path

        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        # NOTE(review): only val_loader is built; self.train_loader (used
        # by training()) is never set here — verify which entry points
        # are actually exercised.
        self.val_loader, self.nclass = make_data_loader(args, **kwargs)
        # self.val_loader, self.test_loader, self.nclass = make_data_loader(args, **kwargs)

        # Define network
        model = SCNN(nclass=self.nclass, backbone=args.backbone, output_stride=args.out_stride, cuda=args.cuda,
                     extension=args.ext)

        # Define Optimizer
        # optimizer = torch.optim.SGD(model.parameters(),args.lr, momentum=args.momentum,
        #                             weight_decay=args.weight_decay, nesterov=args.nesterov)
        optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)
        # model, optimizer = amp.initialize(model,optimizer,opt_level="O1")

        # Define Criterion
        weight = None
        # criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode=args.loss_type)
        # self.criterion = SegmentationCELosses(weight=weight, cuda=args.cuda)
        # self.criterion = SegmentationCELosses(weight=weight, cuda=args.cuda)
        # self.criterion = FocalLoss(gamma=0, alpha=[0.2, 0.98], img_size=512*512)
        # criterion1: binary segmentation focal loss; criterion2:
        # discriminative embedding loss for instance separation.
        self.criterion1 = FocalLoss(gamma=5, alpha=[0.2, 0.98], img_size=512 * 512)
        self.criterion2 = disc_loss(delta_v=0.5, delta_d=3.0, param_var=1.0, param_dist=1.0,
                                    param_reg=0.001, EMBEDDING_FEATS_DIMS=21, image_shape=[512, 512])
        self.model, self.optimizer = model, optimizer

        # Define Evaluator
        self.evaluator = Evaluator(self.nclass)
        # Define lr scheduler
        self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr,
                                      args.epochs, len(self.val_loader), local_rank=args.local_rank)

        # Using cuda
        # if args.cuda:
        self.model = self.model.cuda()
        # if args.distributed:
        #     self.model = DistributedDataParallel(self.model)
        # self.model = torch.nn.DataParallel(self.model)
        # patch_replication_callback(self.model)

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume is not None:
            filename = 'checkpoint.pth.tar'
            args.resume = os.path.join(args.ckpt_dir, filename)
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            # if args.cuda:
            # Checkpoints were saved from a DataParallel-wrapped model;
            # strip the 'module.' prefix so keys match the bare model.
            new_state_dict = OrderedDict()
            for k, v in checkpoint['state_dict'].items():
                name = k[7:]  # remove `module.`
                new_state_dict[name] = v
            checkpoint['state_dict'] = new_state_dict
            self.model.load_state_dict(checkpoint['state_dict'])
            # else:
            #     self.model.load_state_dict(checkpoint['state_dict'])
            # if not args.ft:
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))

    def training(self, epoch):
        """Run one training epoch: forward, combined loss, backprop, logging.

        Loss is focal (binary lane mask) + 10 x discriminative
        (instance embedding). Logs per-iteration/per-epoch losses and
        periodic visualizations to tensorboard; optionally checkpoints.
        """
        train_loss = 0.0
        self.model.train()
        # NOTE(review): self.train_loader is not assigned in __init__ —
        # this method would fail unless a loader is attached externally.
        tbar = tqdm(self.train_loader)
        num_img_tr = len(self.train_loader)
        max_instances = 1
        for i, sample in enumerate(tbar):
            # image, target = sample['image'], sample['label']
            # bin_label: binary lane mask; label: per-instance ids.
            image, target, ins_target = sample['image'], sample['bin_label'], sample['label']
            # _target = target.cpu().numpy()
            # if np.max(_target) > max_instances:
            #     max_instances = np.max(_target)
            # print(max_instances)
            if self.args.cuda:
                image, target = image.cuda(), target.cuda()
            self.scheduler(self.optimizer, i, epoch, self.best_pred)
            self.optimizer.zero_grad()
            output = self.model(image)
            # if i % 10==0:
            #     misc.imsave('/mfc/user/1623600/.temp6/train_{:s}_epoch:{}_i:{}.png'.format(str(self.args.distributed),epoch,i),np.transpose(image[0].cpu().numpy(),(1,2,0)))
            #     os.chmod('/mfc/user/1623600/.temp6/train_{:s}_epoch:{}_i:{}.png'.format(str(self.args.distributed),epoch,i),0o777)
            # self.criterion = DataParallelCriterion(self.criterion)
            loss1 = self.criterion1(output, target)
            loss2 = self.criterion2(output, ins_target)
            reg_lambda = 0.01  # unused; kept from an earlier loss formulation
            loss = loss1 + 10 * loss2
            # loss = loss1
            # output[1] is the binary-segmentation branch, used below for
            # visualization only.
            output = output[1]
            # loss.back
            # with amp.scale_loss(loss, self.optimizer) as scaled_loss:
            #     scaled_loss.backward()
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))
            # Only rank 0 writes tensorboard scalars when distributed.
            if self.args.distributed:
                if self.args.local_rank == 0:
                    self.writer.add_scalar('train/total_loss_iter', loss.item(), i + num_img_tr * epoch)
            else:
                self.writer.add_scalar('train/total_loss_iter', loss.item(), i + num_img_tr * epoch)

            # Show 10 * 3 inference results each epoch
            # NOTE(review): num_img_tr / 10 is a float, so this modulo is
            # only hit when the float division is exact — confirm intended.
            if i % (num_img_tr / 10) == 0:
                global_step = i + num_img_tr * epoch
                if self.args.distributed:
                    if self.args.local_rank == 0:
                        self.summary.visualize_image(self.writer, self.args.dataset, image, target, output, global_step)
                else:
                    self.summary.visualize_image(self.writer, self.args.dataset, image, target, output, global_step)

        if self.args.distributed:
            if self.args.local_rank == 0:
                self.writer.add_scalar('train/total_loss_epoch', train_loss, epoch)
        else:
            self.writer.add_scalar('train/total_loss_epoch', train_loss, epoch)
        if self.args.local_rank == 0:
            print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))
            print('Loss: %.3f' % train_loss)

        if self.args.distributed:
            if self.args.local_rank == 0:
                if self.args.no_val:
                    # save checkpoint every epoch
                    is_best = False
                    # NOTE(review): self.model is not wrapped in
                    # DataParallel here (wrapping is commented out), so
                    # .module would raise — confirm.
                    self.saver.save_checkpoint({
                        'epoch': epoch + 1,
                        'state_dict': self.model.module.state_dict(),
                        'optimizer': self.optimizer.state_dict(),
                        'best_pred': self.best_pred,
                    }, is_best)
        else:
            if self.args.no_val:
                # save checkpoint every epoch
                is_best = False
                self.saver.save_checkpoint({
                    'epoch': epoch + 1,
                    'state_dict': self.model.module.state_dict(),
                    'optimizer': self.optimizer.state_dict(),
                    'best_pred': self.best_pred,
                }, is_best)

    def validation(self):
        """Run inference over the validation set and export lane B-splines.

        Each 1024x2048 frame is processed as four 512x512 crops of the
        lower half plus one downscaled full view; binary and 21-channel
        instance predictions are stitched back to full resolution,
        post-processed into lane points, fitted with B-splines, saved as
        mask images, and accumulated into an mvpuai sequence written to
        JSON at the end.
        """
        self.model.eval()
        self.evaluator.reset()
        tbar = tqdm(self.val_loader, desc='\r')
        test_loss = 0.0
        destination_path = os.path.join(self.args.path, 'seg_lane')
        if not os.path.isdir(destination_path):
            os.mkdir(destination_path, 0o777)
        postprocessor = LanePostprocess.LaneNetPostProcessor()
        aa_sequence = mvpuai.MSequence()
        for i, sample in enumerate(tbar):
            # image, target = sample['image'], sample['label']
            image, lbl_path, resized_img = sample['image'], sample['lbl_path'], sample['resized_img']
            # Build a 5-image mini-batch: four 512-wide crops plus the
            # resized full frame.
            img = [image[0][..., _ind * 512: (_ind + 1) * 512] for _ind in range(4)]
            img = np.stack(img + [resized_img[0]], axis=0)
            img = torch.from_numpy(img)
            if self.args.cuda:
                img = img.cuda()
                image = image.cuda()
            with torch.no_grad():
                output = self.model(img)
            # Stitch the binary-segmentation branch back to 1024x2048:
            # crops fill the lower half, the upsampled full view overwrites it.
            pred = output[1]
            _upsampled = torch.nn.Upsample(size=[512, 2048])
            overall_pred = pred[4, ...].view([1, 2, 512, 512])
            _upsampled = _upsampled(overall_pred)
            upsampled_final = torch.zeros(2, 1024, 2048)
            upsampled_final[:, 512:, :512] = pred[0, ...]
            upsampled_final[:, 512:, 512:1024] = pred[1, ...]
            upsampled_final[:, 512:, 1024:1024 + 512] = pred[2, ...]
            upsampled_final[:, 512:, 1024 + 512:2048] = pred[3, ...]
            upsampled_final = upsampled_final.view([1, 2, 1024, 2048])
            upsampled_final[..., 512:, :] = _upsampled
            upsampled_final = np.argmax(upsampled_final, axis=1)
            pred = upsampled_final.data.cpu().numpy()

            # Same stitching for the 21-channel instance-embedding branch.
            instance_seg = output[0]
            _upsampled_instance = torch.nn.Upsample(size=[512, 2048])
            overall_pred = instance_seg[4, ...].view([1, 21, 512, 512])
            _upsampled_instance = _upsampled_instance(overall_pred)
            upsampled_final_instance = torch.zeros(21, 1024, 2048)
            upsampled_final_instance[:, 512:, :512] = instance_seg[0, ...]
            upsampled_final_instance[:, 512:, 512:1024] = instance_seg[1, ...]
            upsampled_final_instance[:, 512:, 1024:1024 + 512] = instance_seg[2, ...]
            upsampled_final_instance[:, 512:, 1024 + 512:2048] = instance_seg[3, ...]
            upsampled_final_instance = upsampled_final_instance.view([1, 21, 1024, 2048])
            upsampled_final_instance[..., 512:, :] = _upsampled_instance
            instance_seg = upsampled_final_instance.data.cpu().numpy()
            # instance_seg = np.argmax(upsampled_final_instance, axis=1)

            # Add batch sample into evaluator
            # if i % 100 == 0:
            resized_img = np.squeeze(resized_img)
            pred = np.squeeze(pred)
            instance_seg = np.squeeze(instance_seg)
            # resized_img = np.transpose(resized_img.cpu().numpy(), (1, 2, 0))
            instance_seg = np.transpose(instance_seg, (1, 2, 0))
            # Cluster embeddings into per-lane point sets.
            postprocess_result = postprocessor.postprocess(
                binary_seg_result=pred,
                instance_seg_result=instance_seg,
                source_image=image
            )
            image = self.de_normalize(np.transpose(image[0].cpu().numpy(), (1, 2, 0)))
            # misc.imsave(destination_path + '/' + lbl_path[0],
            #             np.transpose(image.cpu().numpy(), (1, 2, 0)) + 3 * np.asarray(
            #                 np.stack((pred, pred, pred), axis=-1), dtype=np.uint8))
            show_source_image = np.zeros((1024, 2048, 3))
            show_source_image[512:, ...] = image
            image = show_source_image
            predicted_lanes = postprocess_result['lane_pts']
            # predicted_lanes = predicted_lanes[...,0]
            # bsp_lanes = []
            predicted_lanes = [np.asarray(pred_lane) for pred_lane in predicted_lanes]
            # Fit B-splines to the clustered lane points; returns sampled
            # curve points and 10 control points per lane.
            tensor_curvepts, tensor_cpts = inference(bsplineMat=predicted_lanes, i=i)
            tmp_mask = np.zeros(shape=(image.shape[0], image.shape[1]), dtype=np.uint8)
            src_lane_pts = np.asarray(tensor_curvepts)
            # Rasterize each fitted lane with a distinct integer id.
            for lane_index, coords in enumerate(src_lane_pts):
                tmp_mask[tuple((np.int_(coords[:, 1]), np.int_(coords[:, 0])))] = lane_index + 1
            bsppts_mask = np.stack((tmp_mask, tmp_mask, tmp_mask), axis=-1)
            # misc.imsave(destination_path + '/mask_' + lbl_path[0],
            #             postprocess_result['mobis_mask_image'])
            # misc.imsave(destination_path + '/' + lbl_path[0],
            #             50*postprocess_result['mask_image']+50*postprocess_result['lanepts_mask'])
            misc.imsave(destination_path + '/' + lbl_path[0],
                        postprocess_result['mobis_mask_image'])
            try:
                os.chmod(destination_path + '/' + lbl_path[0], 0o777)
            except:
                pass
            # Accumulate one annotation frame per sample, one LANE object
            # per fitted lane, with 10 (x, y) control points flattened.
            aa_sequence.add_frame(MFrame(i))
            for idx in range(tensor_cpts.shape[1]):
                _Obj = mvpuai.get_object_by_name(MString.Frame.Object.Type.LANE)
                _Obj.subclass_id = 1
                _Obj.instance_id = idx
                _list = []
                for ind in range(10):
                    _list.append(Point(int(tensor_cpts[0, idx, ind]), int(tensor_cpts[1, idx, ind])))
                _ctrl_pts = list([point.x, point.y] for point in _list)
                # b_spline = BSpline.Curve()
                # b_spline.degree = 4
                # b_spline.set_ctrlpts(_ctrl_pts)
                #
                # b_spline.knotvector = utilities.generate_knot_vector(b_spline.degree, len(_ctrl_pts))
                # b_spline.delta = 0.001
                # b_spline.evaluate()
                _cpts = []
                for _cpt in _ctrl_pts:
                    _cpts.append(_cpt[0])
                    _cpts.append(_cpt[1])
                _Obj.b_spline = _cpts
                aa_sequence.frame_list[-1].add_object(_Obj)
        # .add_frame(MFrame(0))
        self.write_json(aa_sequence)

    def de_normalize(self, img, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        """Invert ImageNet normalization in place and scale to 0-255.

        Note: mutates and returns the passed array.
        """
        # img = np.array(img).astype(np.float32)
        img *= std
        img += mean
        img *= 255.0
        return img

    def write_json(self, aa_sequence):
        """Write the accumulated annotation sequence to <path>/json/annotation_bs.json."""
        output_file_path = os.path.join(self.args.path, 'json') + '/annotation_bs.json'
        mvpuai.write_json(output_file_path, aa_sequence)
        try:
            os.chmod(output_file_path, 0o777)
        except:
            pass
def main():
parser = argparse.ArgumentParser(description="PyTorch SCNN Training")
parser.add_argument('--distributed', type=bool, default=False,
help='backbone name (default: resnet)')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--backbone', type=str, default='resnet',
choices=['resnet', 'drn', 'mobilenet'],
help='backbone name (default: resnet)')
# parser.add_argument('--path', type=str, default='/mfc/data/compressed/Cityscapes/download',
# help='path of cityscapes')
parser.add_argument('--path', type=str, default='/mfc/data/mobis/real/30_aa_seg_test/1438_20190418_173931_DL/1438_20190418_173931_00000000',
help='path of LaneMobis')
parser.add_argument('--out-stride', type=int, default=16,
help='network output stride (default: 8)')
parser.add_argument('--dataset', type=str, default='inference',
help='dataset name (default: cityscapes)')
parser.add_argument('--workers', type=int, default=8,
metavar='N', help='dataloader threads')
parser.add_argument('--base-size', type=int, default=512,
help='base image size')
parser.add_argument('--crop-size', type=int, default=512,
help='crop image size')
parser.add_argument('--loss-type', type=str, default='ce',
choices=['ce', 'focal'],
help='loss func type (default: ce)')
# training hyper params
parser.add_argument('--epochs', type=int, default=250, metavar='N',
help='number of epochs to train (default: auto)')
parser.add_argument('--start_epoch', type=int, default=0,
metavar='N', help='start epochs (default:0)')
parser.add_argument('--batch-size', type=int, default=6,
metavar='N', help='input batch size for \
training (default: auto)')
parser.add_argument('--test-batch-size', type=int, default=1,
metavar='N', help='input batch size for \
testing (default: auto)')
parser.add_argument('--use-balanced-weights', action='store_true', default=False,
help='whether to use balanced weights | |
# Copyright (c) 2018-2022 curoky(<EMAIL>).
#
# This file is part of my-own-x.
# See https://github.com/curoky/my-own-x for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import sys
class KeyItem(object):
    """One key-section descriptor from the dictionary's config block.

    ``datatype_size`` maps each datatype code (0-15) to its on-disk
    size in bytes.
    """

    datatype_size = [4, 1, 1, 2, 1, 2, 2, 4, 4, 8, 4, 4, 4, 0, 0, 0]

    def __init__(self):
        # All scalar fields start at zero; datatype codes are filled in
        # while parsing the config section.
        self.dict_typedef = self.attr_idx = 0
        self.key_data_idx = self.data_idx = self.v6 = 0
        self.datatype = []
class HeaderItem(object):
    """Offset/size record describing one data section of the file."""

    def __init__(self):
        self.offset = self.datasize = self.used_datasize = 0

    def parse(self, f):
        """Populate the record from three little-endian uint32 fields of *f*."""
        for field in ('offset', 'datasize', 'used_datasize'):
            setattr(self, field, ReadUint32(f))
class AttributeItem(object):
    """Attribute-table entry: repeat count plus its data-store binding."""

    def __init__(self):
        self.count = self.a2 = 0
        self.data_id = self.b2 = 0
class HashStore(object):
    """One hash-bucket header: starting offset and entry count."""

    def __init__(self):
        self.offset = self.count = 0

    def parse(self, f):
        """Read the two little-endian uint32 fields from stream *f*."""
        for field in ('offset', 'count'):
            setattr(self, field, ReadUint32(f))
class LString(object):
    """A length-prefixed UTF-16 string as stored in the dictionary file."""

    def __init__(self):
        self.size = 0
        self.data = None
        self.string = None

    def __str__(self):
        empty = self.size == 0
        return ('LString(empty)' if empty
                else f'LString(size={self.size}, string="{self.string}")')

    def parse(self, f):
        """Read a uint16 byte count, then that many UTF-16 bytes from *f*."""
        self.size = ReadUint16(f)
        self.data = f.read(self.size)
        self.string = self.data.decode('utf-16')
class AttrWordData(object):
    """Per-word attribute record: pinyin offset, frequency and flags."""

    def __init__(self):
        self.offset = self.freq = self.aflag = 0
        self.i8 = self.p1 = self.iE = 0

    def parse(self, f):
        """Read the fixed-layout record from stream *f* (order matters)."""
        self.offset = ReadUint32(f)
        self.freq = ReadUint16(f)
        self.aflag = ReadUint16(f)
        self.i8 = ReadUint32(f)
        self.p1 = ReadUint16(f)
        self.iE = ReadInt32(f)  # always zero
        _ = ReadInt32(f)  # next offset
''' Dict Structure
key -> attrId attr_store[data]
-> dataId ds[data]
-> keyDataId ds[data]
-> dataId ds[data]
'''
class UserHeader(object):
    """User-dictionary header; only words 14 and 15 of 19 are retained."""

    def __init__(self):
        self.p2 = self.p3 = 0

    def parse(self, f):
        """Consume 19 little-endian uint32 words, keeping entries 14 and 15."""
        words = [ReadUint32(f) for _ in range(19)]
        self.p2, self.p3 = words[14], words[15]
class BaseDict(object):
    """In-memory model of the dictionary layout: key sections, attribute
    tables and data stores, all addressed as offsets into ``ds_base``.

    ``datatype_hash_size`` maps a masked key typedef to the default number
    of hash buckets for that key section.
    """

    datatype_hash_size = [0, 27, 414, 512, -1, -1, 512, 0]

    def __init__(self, corev3=True):
        # Filled in by the caller (see read_bin) before init() is called.
        self.attr = None          # list[AttributeItem]
        self.key = None           # list[KeyItem]
        self.aint = None          # list[int] trailing config words
        self.header_index = None  # list[HeaderItem] per key section
        self.header_attr = None   # list[HeaderItem] per attribute table
        self.datastore = None     # list[HeaderItem] per data store
        self.ds_base = None       # DataView over the data region
        self.datatype_size = None
        self.attr_size = None
        self.base_hash_size = None
        self.key_hash_size = [0] * 10
        self.aflag = False
        if corev3:  # t_usrDictV3Core::t_usrDictV3Core
            self.key_hash_size[0] = 500

    def init(self):
        """Derive per-key record sizes and hash-table sizes from the config.

        Must be called after ``key`` and ``attr`` are assigned.
        """
        self.datatype_size = []
        self.base_hash_size = []
        self.attr_size = [0] * len(self.attr)
        for idx_key, key in enumerate(self.key):
            # Base size: bit 4 of dict_typedef contributes 4 bytes.
            size = (key.dict_typedef >> 2) & 4
            masked_typedef = key.dict_typedef & 0xFFFFFF8F
            # hash item: explicit per-key override wins over the default
            # table size for this typedef.
            if self.key_hash_size[idx_key] > 0:
                self.base_hash_size.append(self.key_hash_size[idx_key])
            else:
                self.base_hash_size.append(BaseDict.datatype_hash_size[masked_typedef])
            # datatype size
            if key.attr_idx < 0:
                # No attribute table: every datatype belongs to the key
                # record itself (the first one is skipped for typedef 4).
                for i, datatype in enumerate(key.datatype):
                    if i > 0 or masked_typedef != 4:
                        size += KeyItem.datatype_size[datatype]
                if key.attr_idx == -1:
                    size += 4
                self.datatype_size.append(size)
            else:
                num_attr = self.attr[key.attr_idx].count
                # non-attr data size: the leading datatypes stay in the
                # key record, the trailing `count` ones move to the
                # attribute record.
                num_non_attr = len(key.datatype) - num_attr
                for i in range(num_non_attr):
                    if i > 0 or masked_typedef != 4:
                        size += KeyItem.datatype_size[key.datatype[i]]
                if key.dict_typedef & 0x60 > 0:
                    size += 4
                size += 4  # trailing link/offset field
                self.datatype_size.append(size)
                # attr data size
                attr_size = 0
                for i in range(num_non_attr, len(key.datatype)):
                    attr_size += KeyItem.datatype_size[key.datatype[i]]
                if (key.dict_typedef & 0x40) == 0:
                    attr_size += 4  # next-record offset field
                self.attr_size[key.attr_idx] = attr_size
                # ??? (meaning of b2 unknown; aflag mirrors "b2 == 0")
                if self.attr[key.attr_idx].b2 == 0:
                    self.aflag = True

    def GetHashStore(self, index_id, datatype):
        """View positioned at the hash-bucket table preceding index ``index_id``."""
        if index_id < 0 or datatype > 6 or index_id > len(self.header_index):
            assert False
        index_offset = self.header_index[index_id].offset
        assert index_offset >= 0
        # The bucket table (8 bytes per bucket) sits immediately before
        # the index section it addresses.
        size = self.base_hash_size[index_id]
        offset = index_offset - 8 * size
        assert offset >= 0
        return self.ds_base.subview(offset)

    def GetIndexStore(self, index_id):
        """View positioned at the start of index section ``index_id``."""
        return self.ds_base.subview(self.header_index[index_id].offset)

    def GetAttriStore(self, attr_id):
        """View positioned at the start of attribute table ``attr_id``."""
        return self.ds_base.subview(self.header_attr[attr_id].offset)

    def GetAttriFromIndex(self, index_id, attr_id, offset):
        """View of the ``attr_id``-th fixed-size record at ``offset`` in an index."""
        datatype_size = self.datatype_size[index_id]
        data_offset = offset + datatype_size * attr_id
        return self.GetIndexStore(index_id).subview(data_offset)

    def GetAttriFromAttri(self, key_id, offset):
        """Attribute record at ``offset`` for ``key_id``, or None if past the end."""
        attr_id = self.key[key_id].attr_idx
        attri_store = self.GetAttriStore(attr_id).subview(offset)
        if attri_store.pos >= len(attri_store.buff):
            return None
        return attri_store

    def GetAllDataWithAttri(self, key_id):
        """Walk every hash bucket of ``key_id`` and chase each record's
        linked attribute chain.

        Returns a list of (index_record_view, attribute_record_view) pairs.
        """
        results = []
        key = self.key[key_id]
        hashstore_base = self.GetHashStore(key_id, key.dict_typedef & 0xFFFFFF8F)
        attr_header = self.header_attr[key.attr_idx]
        if attr_header.used_datasize == 0:
            # NOTE(review): HeaderItem defines 'datasize', not
            # 'data_size' — this branch would raise AttributeError if
            # ever taken; confirm and fix upstream.
            num_attr = attr_header.data_size
        else:
            num_attr = attr_header.used_datasize
        num_hashstore = self.base_hash_size[key_id]
        print(f'base_hash_size: {num_hashstore} num_attr: {num_attr}')
        for idx_hashstore in range(num_hashstore):
            hashstore = HashStore()
            hashstore.parse(hashstore_base)
            # print(
            #     f'hashstore [ offset: {hashstore.offset}, count: {hashstore.count} ]'
            # )
            for attr_id in range(hashstore.count):
                attr_base = self.GetAttriFromIndex(key_id, attr_id, hashstore.offset)
                # Last 4 bytes of an index record hold the offset of the
                # head of its attribute chain.
                offset = ReadUint32(attr_base.subview(self.datatype_size[key_id] - 4))
                # print(f'attr_id: {attr_id} offset: {offset}')
                for attr2_id in range(num_attr):
                    attr2_base = self.GetAttriFromAttri(key_id, offset)
                    if attr2_base is None:
                        print(f'attr2 out of range (offset: {offset})')
                        break
                    results.append((attr_base, attr2_base))
                    # Last 4 bytes of an attribute record link to the next
                    # one; -1 terminates the chain.
                    offset = ReadInt32(attr2_base.subview(self.attr_size[key.attr_idx] - 4))
                    # print(f'attr2_id: {attr2_id} new offset: {offset}')
                    if offset == -1:
                        break
        return results

    def GetDataStore(self, data_id):
        """View positioned at the start of data store ``data_id``."""
        return self.ds_base.subview(self.datastore[data_id].offset)

    def GetData(self, data_id, offset):
        """View at ``offset`` within data store ``data_id`` (bounds-checked)."""
        header = self.datastore[data_id]
        assert offset <= header.datasize
        if header.used_datasize > 0:
            if not offset <= header.used_datasize:
                pass
                # print(
                #     f'GetData overflow data_id: {data_id} offset: {offset} '
                #     f'header [ used: {header.used_datasize} size: {header.datasize} ]'
                # )
        datastore = self.GetDataStore(data_id)
        return datastore.subview(offset)

    def GetPys(self, offset):
        """View into key 0's key-data store (pinyin strings) at ``offset``."""
        data_id = self.key[0].key_data_idx
        return self.GetData(data_id, offset)

    def GetDataIdByAttriId(self, attr_id):
        """Data-store id bound to attribute table ``attr_id``."""
        return self.attr[attr_id].data_id
def DecryptWordsEx(lstr_dataview, p1, p2, p3):
    """Decrypt an obfuscated length-prefixed UTF-16 string.

    Each 16-bit unit is rotated right by ``p2 % 8`` bits and XOR-ed with
    a key derived from the three header parameters. Returns a new,
    decrypted ``LString``; the input view is not advanced (a subview is
    used).

    Args:
        lstr_dataview: DataView positioned at the encrypted LString
            (uint16 byte count followed by the payload).
        p1, p2, p3: key material taken from the user-dict header.
    """
    lstr = lstr_dataview.subview()
    # 16-bit XOR key from the header parameters.
    k1 = (p1 + p2) << 2
    k2 = (p1 + p3) << 2
    xk = (k1 + k2) & 0xffff
    n = ReadUint16(lstr) // 2  # byte count -> number of UTF-16 code units
    decwords = b''
    for _ in range(n):
        shift = p2 % 8
        ch = ReadUint16(lstr)
        # Rotate the 16-bit unit right by `shift`, then XOR with the key.
        dch = (ch << (16 - (shift % 8)) | (ch >> shift)) & 0xffff
        dch ^= xk
        decwords += struct.pack('<H', dch)
    dec_lstr = LString()
    dec_lstr.size = n * 2
    dec_lstr.data = decwords
    dec_lstr.string = decwords.decode('utf-16')
    return dec_lstr
class DataView(object):
    """A movable cursor over a shared bytes buffer — reads never copy the
    underlying buffer, only slice out what is consumed."""

    def __init__(self, buff, pos=0):
        self.buff = buff
        self.pos = pos

    def read(self, n):
        """Consume and return the next *n* bytes (bounds-asserted)."""
        assert n >= 0
        stop = self.pos + n
        assert stop <= len(self.buff)
        chunk = self.buff[self.pos:stop]
        self.pos = stop
        return chunk

    def len(self):
        """Number of unread bytes remaining after the cursor."""
        return len(self.buff) - self.pos

    def subview(self, off=0):
        """Independent view over the same buffer, *off* bytes ahead."""
        return DataView(self.buff, self.pos + off)

    def offset_of(self, base):
        """Distance of this view's cursor from *base*'s cursor
        (both must share the same buffer)."""
        assert base.buff == self.buff
        return self.pos - base.pos
def ReadInt32(b):
    """Consume 4 bytes from *b* as a little-endian signed 32-bit int."""
    (value,) = struct.unpack('<i', b.read(4))
    return value
def ReadUint32(b):
    """Consume 4 bytes from *b* as a little-endian unsigned 32-bit int."""
    (value,) = struct.unpack('<I', b.read(4))
    return value
def ReadUint16(b):
    """Consume 2 bytes from *b* as a little-endian unsigned 16-bit int."""
    (value,) = struct.unpack('<H', b.read(2))
    return value
def read_bin(bin_path):
in_path = bin_path
# out_path = sys.argv[2]
with open(in_path, 'rb') as fin:
filedata = fin.read()
size = len(filedata)
f = DataView(filedata)
# File header
file_chksum = ReadUint32(f)
uint_4 = ReadUint32(f)
uint_8 = ReadUint32(f)
uint_12 = ReadUint32(f)
uint_16 = ReadUint32(f)
# print('uint0-16:', file_chksum, uint_4, uint_8, uint_12, uint_16)
config_size = uint_4
chksum = uint_4 + uint_8 + uint_12 + uint_16
assert 0 <= uint_4 <= size
f2 = DataView(filedata, uint_4 + 8)
f_s8 = DataView(filedata, 20)
pos_2 = uint_4 + 8
key_items = []
if uint_8 > 0:
# Parse config
for i in range(uint_8):
key = KeyItem()
key.dict_typedef = ReadUint16(f_s8)
assert key.dict_typedef < 100
num_datatype = ReadUint16(f_s8)
if num_datatype > 0:
for _ in range(num_datatype):
datatype = ReadUint16(f_s8)
key.datatype.append(datatype)
key.attr_idx = ReadUint32(f_s8)
key.key_data_idx = ReadUint32(f_s8)
key.data_idx = ReadUint32(f_s8)
key.v6 = ReadUint32(f_s8)
# ??? key.dict_typedef = ReadUint32(f_s8)
key_items.append(key)
attr_items = []
if uint_12 > 0:
for _ in range(uint_12):
attr = AttributeItem()
attr.count = ReadUint32(f_s8)
attr.a2 = ReadUint32(f_s8)
attr.data_id = ReadUint32(f_s8)
attr.b2 = ReadUint32(f_s8)
attr_items.append(attr)
aint_items = []
if uint_16 > 0:
for _ in range(uint_16):
aint = ReadUint32(f_s8)
aint_items.append(aint)
assert f_s8.pos == f2.pos # all the sec8 data has been processed
usrdict = BaseDict()
usrdict.key = key_items
usrdict.attr = attr_items
usrdict.aint = aint_items
usrdict.init()
header_size = 12 * (len(usrdict.attr) + len(usrdict.aint) + len(usrdict.key)) + 24
b2_version = ReadUint32(f2)
b2_format = ReadUint32(f2)
print(f'version:{b2_version} format:{b2_format}')
total_size = ReadUint32(f2)
USR_DICT_HEADER_SIZE = 4 + 76
assert total_size > 0 and total_size + header_size + config_size + 8 == size - USR_DICT_HEADER_SIZE # assert buff2.1
size3_b2 = ReadUint32(f2)
size4_b2 = ReadUint32(f2)
size5_b2 = ReadUint32(f2)
print('header size:', total_size, size3_b2, size4_b2, size5_b2)
header_items_index = []
for _ in range(size3_b2):
header = HeaderItem()
header.parse(f2)
chksum += header.offset + header.datasize + header.used_datasize
header_items_index.append(header)
usrdict.header_index = header_items_index
header_items_attr = []
for _ in range(size4_b2):
header = HeaderItem()
header.parse(f2)
chksum += header.offset + header.datasize + header.used_datasize
header_items_attr.append(header)
usrdict.header_attr = header_items_attr
datastore_items = []
for _ in range(size5_b2):
header = HeaderItem()
header.parse(f2)
chksum += header.offset + header.datasize + header.used_datasize
datastore_items.append(header)
usrdict.datastore = datastore_items
usrdict.ds_base | |
# <reponame>barryCrunch/cloudvision-python
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
import notification_pb2 as notification__pb2
import router_pb2 as router__pb2
class RouterV1Stub(object):
    # missing associated documentation comment in .proto file
    # NOTE(review): generated by the gRPC Python protocol compiler plugin;
    # regenerate from the .proto file instead of editing by hand.
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One callable per RPC of the RouterV1 service; (de)serializers
        # come from the generated router_pb2/notification_pb2 classes.
        self.Publish = channel.unary_unary(
            '/RouterV1/Publish',
            request_serializer=router__pb2.PublishRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.Subscribe = channel.unary_stream(
            '/RouterV1/Subscribe',
            request_serializer=router__pb2.SubscribeRequest.SerializeToString,
            response_deserializer=notification__pb2.NotificationBatch.FromString,
        )
        self.Get = channel.unary_stream(
            '/RouterV1/Get',
            request_serializer=router__pb2.GetRequest.SerializeToString,
            response_deserializer=notification__pb2.NotificationBatch.FromString,
        )
        self.GetAndSubscribe = channel.unary_stream(
            '/RouterV1/GetAndSubscribe',
            request_serializer=router__pb2.GetAndSubscribeRequest.SerializeToString,
            response_deserializer=notification__pb2.NotificationBatch.FromString,
        )
        self.GetDatasets = channel.unary_stream(
            '/RouterV1/GetDatasets',
            request_serializer=router__pb2.DatasetsRequest.SerializeToString,
            response_deserializer=router__pb2.DatasetsResponse.FromString,
        )
class RouterV1Servicer(object):
    # missing associated documentation comment in .proto file
    # NOTE(review): generated gRPC service base class; subclass and
    # override the methods below to implement the RouterV1 service.
    pass

    def Publish(self, request, context):
        """Publish is used to send notifications to CloudVision.
        They will be saved into the storage and sent to all
        the clients subscribing to the same device/path.
        * Publish guarantees atomicity of the data saved per {timestamp+path+key}.
        For Notification => For one Notification having multiple keys,
        each key is ensured to be saved atomically
        but atomicity is not guaranteed for the entire notification.
        For NotificationBatch => if Notif[1] and Notif[5]
        both have updates for a {timestamp+path+key}
        either the update of Notif[1] will be saved, or the update of Notif[5] will be saved.
        The value will be one or the other, not a corrupted combination of both requests.
        * There is no guarantee for write order within a single publish request.
        When sending multiple notifications where multiple notification will have
        the same timestamp, path and keys,
        Publish does not guarantee that Notif[1] will be processed before Notif[5]
        This means that for two notifications in the same Publish call having the
        same {timestamp+path+key}, the result is undefined and will randomly vary
        (i.e. the first notif data will be saved, or the second one).
        The client must send two synchronous Publish requests to guarantee
        the write order at which the requests are processed.
        * Publish is asynchronous by default:
        When the call to Publish ends without error, it means the data has been
        correctly received by CloudVision but not stored yet.
        So, if a "get" call is done right after the Publish call, the get might
        not return the data just published.
        When the "sync" field is set to true in PublishRequest, the Publish
        will be synchronous:
        When the call to Publish ends without error, it means the data has been
        correctly received AND stored by CloudVision.
        So, if a "get" call is done right after the synchronous Publish call, the get will
        return the data just published (unless someone else stored more recent data of course).
        * Client-side and Server-side timestamping:
        The notification object has a timestamp that can be populated by the client.
        In case the Client sends a notification with a "null" timestamp as the
        Notification.timestamp field, the server will populate the timestamp with
        the current time of the node with the server process is running.
        This "current time" will be queried once at the beginning of the Publish request
        and will be used as the Notification.timestamp for all the notification having this field
        as null.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Subscribe(self, request, context):
        """Subscribe allows the client to request a live stream of updates
        (V1: either based on regexp or exact match, V2: based on exact match)
        There is no order guarantee for batches received by subscribers.
        It means that two batches A and B published synchronously (B is published after A)
        the subscribers can receive batch A first or B second, OR batch B first and A second.
        This is also true for notifications within a batch.
        The backend can decide to split a batch and reorder notifications so subscribers
        might receive notifications within a batch in a different order that they were published.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Get(self, request, context):
        """Get is used to request notifications for a given path over a specified time range.
        Wildcards are supported with Get requests, but when given a range of time the server
        will resolve all wildcard paths at the starting timestamp of the given range, so any
        pointers and/or paths that are created after the given start timestamp will not be
        accounted for during wildcard resolution.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetAndSubscribe(self, request, context):
        """GetAndSubscribe allows the client to issue one request to do both Get and Subscribe requests.
        The server will first send a mix of subscribe and get batches, and there's no distinction
        between which batches are subscribe or get batches. Then the server will send a sync signal
        signaling that the Get stream has finished. After that, server will stream out only subscribe
        batches. There's no order guarantee for batches received by client.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetDatasets(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_RouterV1Servicer_to_server(servicer, server):
    """Register the RouterV1 method handlers of *servicer* with *server*."""
    method_handlers = {
        'Publish': grpc.unary_unary_rpc_method_handler(
            servicer.Publish,
            request_deserializer=router__pb2.PublishRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'Subscribe': grpc.unary_stream_rpc_method_handler(
            servicer.Subscribe,
            request_deserializer=router__pb2.SubscribeRequest.FromString,
            response_serializer=notification__pb2.NotificationBatch.SerializeToString,
        ),
        'Get': grpc.unary_stream_rpc_method_handler(
            servicer.Get,
            request_deserializer=router__pb2.GetRequest.FromString,
            response_serializer=notification__pb2.NotificationBatch.SerializeToString,
        ),
        'GetAndSubscribe': grpc.unary_stream_rpc_method_handler(
            servicer.GetAndSubscribe,
            request_deserializer=router__pb2.GetAndSubscribeRequest.FromString,
            response_serializer=notification__pb2.NotificationBatch.SerializeToString,
        ),
        'GetDatasets': grpc.unary_stream_rpc_method_handler(
            servicer.GetDatasets,
            request_deserializer=router__pb2.DatasetsRequest.FromString,
            response_serializer=router__pb2.DatasetsResponse.SerializeToString,
        ),
    }
    server.add_generic_rpc_handlers(
        (grpc.method_handlers_generic_handler('RouterV1', method_handlers),))
class AuthStub(object):
    # missing associated documentation comment in .proto file
    # NOTE(review): generated by the gRPC Python protocol compiler plugin;
    # regenerate from the .proto file instead of editing by hand.
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One callable per RPC of the Auth service.
        self.CreateDataset = channel.unary_unary(
            '/Auth/CreateDataset',
            request_serializer=router__pb2.CreateDatasetRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.SetPermission = channel.unary_unary(
            '/Auth/SetPermission',
            request_serializer=router__pb2.SetPermissionRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.GetPermissionSet = channel.unary_stream(
            '/Auth/GetPermissionSet',
            request_serializer=router__pb2.GetRequest.SerializeToString,
            response_deserializer=router__pb2.PermissionSet.FromString,
        )
        self.SetPassword = channel.unary_unary(
            '/Auth/SetPassword',
            request_serializer=router__pb2.SetPasswordRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.CreateSession = channel.unary_stream(
            '/Auth/CreateSession',
            request_serializer=router__pb2.CreateSessionRequest.SerializeToString,
            response_deserializer=router__pb2.CreateSessionResponse.FromString,
        )
class AuthServicer(object):
    # missing associated documentation comment in .proto file
    # NOTE(review): generated gRPC service base class; subclass and
    # override the methods below to implement the Auth service.
    pass

    def CreateDataset(self, request, context):
        """CreateDataset from a given Dataset wrapped in a CreateDatasetRequest
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SetPermission(self, request, context):
        """SetPermission sets a permission for a dataset using a SetPermissionRequest.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetPermissionSet(self, request, context):
        """GetPermissionSet returns the set of all permissions present for the datasets specified
        in the 'query'(s) of the GetRequest.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SetPassword(self, request, context):
        """SetPassword sets the password for a user.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CreateSession(self, request, context):
        """CreateSession creates session for user
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_AuthServicer_to_server(servicer, server):
    """Register the Auth method handlers of *servicer* with *server*."""
    method_handlers = {
        'CreateDataset': grpc.unary_unary_rpc_method_handler(
            servicer.CreateDataset,
            request_deserializer=router__pb2.CreateDatasetRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'SetPermission': grpc.unary_unary_rpc_method_handler(
            servicer.SetPermission,
            request_deserializer=router__pb2.SetPermissionRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'GetPermissionSet': grpc.unary_stream_rpc_method_handler(
            servicer.GetPermissionSet,
            request_deserializer=router__pb2.GetRequest.FromString,
            response_serializer=router__pb2.PermissionSet.SerializeToString,
        ),
        'SetPassword': grpc.unary_unary_rpc_method_handler(
            servicer.SetPassword,
            request_deserializer=router__pb2.SetPasswordRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'CreateSession': grpc.unary_stream_rpc_method_handler(
            servicer.CreateSession,
            request_deserializer=router__pb2.CreateSessionRequest.FromString,
            response_serializer=router__pb2.CreateSessionResponse.SerializeToString,
        ),
    }
    server.add_generic_rpc_handlers(
        (grpc.method_handlers_generic_handler('Auth', method_handlers),))
class AlphaStub(object):
    """Alpha services are services which are not supported and
    can be added/removed/changed anytime, without notice.
    Clients should not use them or build applications on top of this service
    """
    # NOTE(review): generated by the gRPC Python protocol compiler plugin;
    # regenerate from the .proto file instead of editing by hand.

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One callable per RPC of the Alpha service.
        self.Search = channel.unary_stream(
            '/Alpha/Search',
            request_serializer=router__pb2.SearchRequest.SerializeToString,
            response_deserializer=notification__pb2.NotificationBatch.FromString,
        )
        self.SearchSubscribe = channel.unary_stream(
            '/Alpha/SearchSubscribe',
            request_serializer=router__pb2.SearchRequest.SerializeToString,
            response_deserializer=notification__pb2.NotificationBatch.FromString,
        )
class AlphaServicer(object):
    """Alpha services are services which are not supported and
    can be added/removed/changed anytime, without notice.
    Clients should not use them or build applications on top of this service
    """
    # NOTE(review): generated gRPC service base class; subclass and
    # override the methods below to implement the Alpha service.

    def Search(self, request, context):
        """you know, for search...
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SearchSubscribe(self, request, context):
        """SearchSubscribe allows the client to request a live stream of updates
        based on client search request
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_AlphaServicer_to_server(servicer, server):
    """Register the Alpha method handlers of *servicer* with *server*."""
    method_handlers = {
        'Search': grpc.unary_stream_rpc_method_handler(
            servicer.Search,
            request_deserializer=router__pb2.SearchRequest.FromString,
            response_serializer=notification__pb2.NotificationBatch.SerializeToString,
        ),
        'SearchSubscribe': grpc.unary_stream_rpc_method_handler(
            servicer.SearchSubscribe,
            request_deserializer=router__pb2.SearchRequest.FromString,
            response_serializer=notification__pb2.NotificationBatch.SerializeToString,
        ),
    }
    server.add_generic_rpc_handlers(
        (grpc.method_handlers_generic_handler('Alpha', method_handlers),))
class ClusterStub(object):
    """Cluster service gives some descriptions about the cluster where the service
    is running.
    """
    # NOTE(review): generated by the gRPC Python protocol compiler plugin;
    # regenerate from the .proto file instead of editing by hand.

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Single RPC of the Cluster service.
        self.ClusterInfo = channel.unary_stream(
            '/Cluster/ClusterInfo',
            request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            response_deserializer=router__pb2.ClusterDescription.FromString,
        )
class ClusterServicer(object):
    """Cluster service gives some descriptions about the cluster where the service
    is running.
    """
    # NOTE(review): generated gRPC service base class; subclass and
    # override ClusterInfo to implement the service.

    def ClusterInfo(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ClusterServicer_to_server(servicer, server):
rpc_method_handlers = | |
+ 59392 * uk_120
+ 217152 * uk_121
+ 417600 * uk_122
+ 7424 * uk_123
+ 793962 * uk_124
+ 1526850 * uk_125
+ 27144 * uk_126
+ 2936250 * uk_127
+ 52200 * uk_128
+ 928 * uk_129
+ 1623008 * uk_13
+ 64 * uk_130
+ 512 * uk_131
+ 1872 * uk_132
+ 3600 * uk_133
+ 64 * uk_134
+ 4096 * uk_135
+ 14976 * uk_136
+ 28800 * uk_137
+ 512 * uk_138
+ 54756 * uk_139
+ 5934123 * uk_14
+ 105300 * uk_140
+ 1872 * uk_141
+ 202500 * uk_142
+ 3600 * uk_143
+ 64 * uk_144
+ 32768 * uk_145
+ 119808 * uk_146
+ 230400 * uk_147
+ 4096 * uk_148
+ 438048 * uk_149
+ 11411775 * uk_15
+ 842400 * uk_150
+ 14976 * uk_151
+ 1620000 * uk_152
+ 28800 * uk_153
+ 512 * uk_154
+ 1601613 * uk_155
+ 3080025 * uk_156
+ 54756 * uk_157
+ 5923125 * uk_158
+ 105300 * uk_159
+ 202876 * uk_16
+ 1872 * uk_160
+ 11390625 * uk_161
+ 202500 * uk_162
+ 3600 * uk_163
+ 64 * uk_164
+ 3025 * uk_17
+ 3190 * uk_18
+ 220 * uk_19
+ 55 * uk_2
+ 1760 * uk_20
+ 6435 * uk_21
+ 12375 * uk_22
+ 220 * uk_23
+ 3364 * uk_24
+ 232 * uk_25
+ 1856 * uk_26
+ 6786 * uk_27
+ 13050 * uk_28
+ 232 * uk_29
+ 58 * uk_3
+ 16 * uk_30
+ 128 * uk_31
+ 468 * uk_32
+ 900 * uk_33
+ 16 * uk_34
+ 1024 * uk_35
+ 3744 * uk_36
+ 7200 * uk_37
+ 128 * uk_38
+ 13689 * uk_39
+ 4 * uk_4
+ 26325 * uk_40
+ 468 * uk_41
+ 50625 * uk_42
+ 900 * uk_43
+ 16 * uk_44
+ 130470415844959 * uk_45
+ 141482932855 * uk_46
+ 149200183738 * uk_47
+ 10289667844 * uk_48
+ 82317342752 * uk_49
+ 32 * uk_5
+ 300972784437 * uk_50
+ 578793816225 * uk_51
+ 10289667844 * uk_52
+ 153424975 * uk_53
+ 161793610 * uk_54
+ 11158180 * uk_55
+ 89265440 * uk_56
+ 326376765 * uk_57
+ 627647625 * uk_58
+ 11158180 * uk_59
+ 117 * uk_6
+ 170618716 * uk_60
+ 11766808 * uk_61
+ 94134464 * uk_62
+ 344179134 * uk_63
+ 661882950 * uk_64
+ 11766808 * uk_65
+ 811504 * uk_66
+ 6492032 * uk_67
+ 23736492 * uk_68
+ 45647100 * uk_69
+ 225 * uk_7
+ 811504 * uk_70
+ 51936256 * uk_71
+ 189891936 * uk_72
+ 365176800 * uk_73
+ 6492032 * uk_74
+ 694292391 * uk_75
+ 1335177675 * uk_76
+ 23736492 * uk_77
+ 2567649375 * uk_78
+ 45647100 * uk_79
+ 4 * uk_8
+ 811504 * uk_80
+ 166375 * uk_81
+ 175450 * uk_82
+ 12100 * uk_83
+ 96800 * uk_84
+ 353925 * uk_85
+ 680625 * uk_86
+ 12100 * uk_87
+ 185020 * uk_88
+ 12760 * uk_89
+ 2572416961 * uk_9
+ 102080 * uk_90
+ 373230 * uk_91
+ 717750 * uk_92
+ 12760 * uk_93
+ 880 * uk_94
+ 7040 * uk_95
+ 25740 * uk_96
+ 49500 * uk_97
+ 880 * uk_98
+ 56320 * uk_99,
uk_0
+ 50719 * uk_1
+ 2789545 * uk_10
+ 183260 * uk_100
+ 346500 * uk_101
+ 89320 * uk_102
+ 778855 * uk_103
+ 1472625 * uk_104
+ 379610 * uk_105
+ 2784375 * uk_106
+ 717750 * uk_107
+ 185020 * uk_108
+ 15625 * uk_109
+ 1267975 * uk_11
+ 36250 * uk_110
+ 17500 * uk_111
+ 74375 * uk_112
+ 140625 * uk_113
+ 36250 * uk_114
+ 84100 * uk_115
+ 40600 * uk_116
+ 172550 * uk_117
+ 326250 * uk_118
+ 84100 * uk_119
+ 2941702 * uk_12
+ 19600 * uk_120
+ 83300 * uk_121
+ 157500 * uk_122
+ 40600 * uk_123
+ 354025 * uk_124
+ 669375 * uk_125
+ 172550 * uk_126
+ 1265625 * uk_127
+ 326250 * uk_128
+ 84100 * uk_129
+ 1420132 * uk_13
+ 195112 * uk_130
+ 94192 * uk_131
+ 400316 * uk_132
+ 756900 * uk_133
+ 195112 * uk_134
+ 45472 * uk_135
+ 193256 * uk_136
+ 365400 * uk_137
+ 94192 * uk_138
+ 821338 * uk_139
+ 6035561 * uk_14
+ 1552950 * uk_140
+ 400316 * uk_141
+ 2936250 * uk_142
+ 756900 * uk_143
+ 195112 * uk_144
+ 21952 * uk_145
+ 93296 * uk_146
+ 176400 * uk_147
+ 45472 * uk_148
+ 396508 * uk_149
+ 11411775 * uk_15
+ 749700 * uk_150
+ 193256 * uk_151
+ 1417500 * uk_152
+ 365400 * uk_153
+ 94192 * uk_154
+ 1685159 * uk_155
+ 3186225 * uk_156
+ 821338 * uk_157
+ 6024375 * uk_158
+ 1552950 * uk_159
+ 2941702 * uk_16
+ 400316 * uk_160
+ 11390625 * uk_161
+ 2936250 * uk_162
+ 756900 * uk_163
+ 195112 * uk_164
+ 3025 * uk_17
+ 1375 * uk_18
+ 3190 * uk_19
+ 55 * uk_2
+ 1540 * uk_20
+ 6545 * uk_21
+ 12375 * uk_22
+ 3190 * uk_23
+ 625 * uk_24
+ 1450 * uk_25
+ 700 * uk_26
+ 2975 * uk_27
+ 5625 * uk_28
+ 1450 * uk_29
+ 25 * uk_3
+ 3364 * uk_30
+ 1624 * uk_31
+ 6902 * uk_32
+ 13050 * uk_33
+ 3364 * uk_34
+ 784 * uk_35
+ 3332 * uk_36
+ 6300 * uk_37
+ 1624 * uk_38
+ 14161 * uk_39
+ 58 * uk_4
+ 26775 * uk_40
+ 6902 * uk_41
+ 50625 * uk_42
+ 13050 * uk_43
+ 3364 * uk_44
+ 130470415844959 * uk_45
+ 141482932855 * uk_46
+ 64310424025 * uk_47
+ 149200183738 * uk_48
+ 72027674908 * uk_49
+ 28 * uk_5
+ 306117618359 * uk_50
+ 578793816225 * uk_51
+ 149200183738 * uk_52
+ 153424975 * uk_53
+ 69738625 * uk_54
+ 161793610 * uk_55
+ 78107260 * uk_56
+ 331955855 * uk_57
+ 627647625 * uk_58
+ 161793610 * uk_59
+ 119 * uk_6
+ 31699375 * uk_60
+ 73542550 * uk_61
+ 35503300 * uk_62
+ 150889025 * uk_63
+ 285294375 * uk_64
+ 73542550 * uk_65
+ 170618716 * uk_66
+ 82367656 * uk_67
+ 350062538 * uk_68
+ 661882950 * uk_69
+ 225 * uk_7
+ 170618716 * uk_70
+ 39763696 * uk_71
+ 168995708 * uk_72
+ 319529700 * uk_73
+ 82367656 * uk_74
+ 718231759 * uk_75
+ 1358001225 * uk_76
+ 350062538 * uk_77
+ 2567649375 * uk_78
+ 661882950 * uk_79
+ 58 * uk_8
+ 170618716 * uk_80
+ 166375 * uk_81
+ 75625 * uk_82
+ 175450 * uk_83
+ 84700 * uk_84
+ 359975 * uk_85
+ 680625 * uk_86
+ 175450 * uk_87
+ 34375 * uk_88
+ 79750 * uk_89
+ 2572416961 * uk_9
+ 38500 * uk_90
+ 163625 * uk_91
+ 309375 * uk_92
+ 79750 * uk_93
+ 185020 * uk_94
+ 89320 * uk_95
+ 379610 * uk_96
+ 717750 * uk_97
+ 185020 * uk_98
+ 43120 * uk_99,
uk_0
+ 50719 * uk_1
+ 2789545 * uk_10
+ 186340 * uk_100
+ 346500 * uk_101
+ 38500 * uk_102
+ 805255 * uk_103
+ 1497375 * uk_104
+ 166375 * uk_105
+ 2784375 * uk_106
+ 309375 * uk_107
+ 34375 * uk_108
+ 8000 * uk_109
+ 1014380 * uk_11
+ 10000 * uk_110
+ 11200 * uk_111
+ 48400 | |
t5_v
station["time_series_dt"] = dt
station["time_series_num_samples"] = num_samples
station["nyquist"] = nyquist
station["luf"] = luf
station["huf"] = huf
station["ufb"] = ufb
station["lup"] = lup
station["hup"] = hup
station["upb"] = upb
station["pga_h1"] = pga1
station["pga_h2"] = pga2
station["pga_v"] = pga3
station["pgv_h1"] = pgv1
station["pgv_h2"] = pgv2
station["pgv_v"] = pgv3
station["rotdnn_fractile"] = "PSA_RotD50"
station["damping"] = 0.05
station["arias_dur_5_75"] = "-999"
station["arias_dur_5_95"] = "-999"
station["arias_total"] = "-999"
def _parse_psa_file(filename, columns):
    """Parse a whitespace-delimited PSA file.

    Blank lines and lines starting with '#' or '%' are skipped.  Returns
    one list of floats per requested (0-based) column index in *columns*.
    The file handle is closed even if a line fails to parse.
    """
    results = [[] for _ in columns]
    with open(filename, 'r') as input_file:
        for line in input_file:
            line = line.strip()
            if not line or line.startswith(("#", "%")):
                continue
            tokens = [float(token) for token in line.split()]
            for result, column in zip(results, columns):
                result.append(tokens[column])
    return results

def collect_rd50_values(station, args):
    """
    Collect RotD50 values for all periods

    Reads the horizontal (period, H1, H2, RotD50) and vertical
    (period, V) PSA files for *station* and stores the columns in the
    station dict; the period list is cached on *args* the first time.
    """
    rd50_file = os.path.join(args.top_level_outdir, station["rd50_file_name"])
    rd50_vertical_file = os.path.join(args.top_level_outdir,
                                      station["rd50_vertical_file_name"])
    # Horizontal psa file: columns are period, H1, H2, RotD50
    (rd50_periods, rd50_psa_h1,
     rd50_psa_h2, rd50_psa_rd50) = _parse_psa_file(rd50_file, (0, 1, 2, 3))
    # Vertical psa file: column 1 holds the vertical PSA
    (rd50_psa_v,) = _parse_psa_file(rd50_vertical_file, (1,))
    # Remember the period list from the first station processed
    if "rd50_periods" not in args:
        args.rd50_periods = rd50_periods
    station["psa_h1"] = rd50_psa_h1
    station["psa_h2"] = rd50_psa_h2
    station["psa_v"] = rd50_psa_v
    station["rd50"] = rd50_psa_rd50
def collect_rd100_values(station, args):
    """
    Collect RotD100 values for all periods

    Stores the RotD100 column (index 4) of the station's .rd100 file in
    station["rd100"], or None when the file does not exist (RotD100
    output is optional).
    """
    rd100_file = os.path.join(args.top_level_outdir, station["rd100_file_name"])
    # Skip if RD100 file doesn't exist
    if not os.path.isfile(rd100_file):
        # RotD100 file not available
        station["rd100"] = None
        return
    rd100_psa_rd100 = []
    # `with` guarantees the handle is closed even if a line fails to parse
    with open(rd100_file, 'r') as input_file:
        for line in input_file:
            line = line.strip()
            if not line or line.startswith(("#", "%")):
                continue
            tokens = [float(token) for token in line.split()]
            # Column 4 holds the RotD100 value
            rd100_psa_rd100.append(tokens[4])
    station["rd100"] = rd100_psa_rd100
def _nehrp_site_class(vs_30):
    """Return the NEHRP site class letter (A-E) for a numeric Vs30."""
    if vs_30 > 1500:
        return "A"
    if vs_30 > 760:
        return "B"
    if vs_30 > 360:
        return "C"
    if vs_30 > 180:
        return "D"
    return "E"

def collect_station_params(site, station, src_files,
                           args, realization, vs_30):
    """
    Collects parameters for one station

    Fills the *station* dict with identification, Vs30/site class,
    distances, component counts, file names and time-series metadata for
    one station of a realization.
    """
    station["sim_station_name"] = site.scode
    station["sim_station_latitude"] = site.lat
    station["sim_station_longitude"] = site.lon
    # Elevation is not tracked; -999 marks "unknown" in the flat file
    station["sim_station_elevation"] = -999.0
    # BUG FIX: also accept float Vs30 values; the original
    # isinstance(vs_30, int) check sent any float Vs30 to the "-888"
    # (not available) branch.  String codes like "-888" still fall through.
    if isinstance(vs_30, (int, float)):
        station["target_station_vs30"] = vs_30
        site_class = _nehrp_site_class(vs_30)
    else:
        station["target_station_vs30"] = "-888"
        site_class = "-888"
    station["target_station_nehrp_class"] = site_class
    (station["rrup"],
     station["rjb"],
     station["rx"]) = calculate_distances(src_files, site)
    # ExSIM is the only single-component method handled here
    if args.general_method in ["exsim"]:
        station["components"] = 1
    else:
        station["components"] = 3
    station["vel_file_name"] = os.path.join(realization,
                                            "%s.%s.vel.bbp" %
                                            (realization, site.scode))
    station["acc_file_name"] = os.path.join(realization,
                                            "%s.%s.acc.bbp" %
                                            (realization, site.scode))
    station["rd50_file_name"] = os.path.join(realization,
                                             "%s.%s.rd50" %
                                             (realization, site.scode))
    station["rd50_vertical_file_name"] = os.path.join(realization,
                                                      "%s.%s.rd50.vertical" %
                                                      (realization, site.scode))
    station["rd100_file_name"] = os.path.join(realization,
                                              "%s.%s.rd100" %
                                              (realization, site.scode))
    # Fixed instrument orientation convention: H1=north, H2=east, V=up
    station["h1_azimuth"] = 0
    station["h2_azimuth"] = 90
    station["v_orientation"] = "UP"
    calculate_timeseries_param(station, site, args, realization)
    # Copy files, as needed
    if args.copy_timeseries:
        shutil.copy2(os.path.join(args.top_level_outdir, station["acc_file_name"]),
                     os.path.join(args.output_dir, station["acc_file_name"]))
def collect_realization_params(args, realization):
    """
    Collects parameters for one realization

    Parses the realization's SRC file(s), station list and HTML summary,
    derives per-segment and averaged fault geometry, velocity-model info
    and per-station data, and stores the result in args.data[realization].
    """
    indir = os.path.join(args.top_level_indir, realization)
    outdir = os.path.join(args.top_level_outdir, realization)
    src_files = glob.glob("%s/*.src" % (indir))
    # Assumes exactly one .stl per realization directory -- TODO confirm
    stl_file = glob.glob("%s/*.stl" % (indir))[0]
    data = {}
    # Compile data from SRC file(s)
    data["num_src"] = len(src_files)
    # Save info in args too for first realization
    if "num_src" not in args:
        args.num_src = len(src_files)
    for i, src_file in enumerate(src_files, 1):
        src_index = "bbp_src_%d" % (i)
        src_keys = bband_utils.parse_src_file(src_file)
        src_keys["mechanism"] = calculate_mechanism(src_keys["rake"])
        data[src_index] = src_keys
    # Combine SRC information: comma-joined per-segment strings plus
    # running totals/averages, seeded from the first segment
    data["segments_length"] = data["bbp_src_1"]["fault_length"]
    data["segments_width"] = data["bbp_src_1"]["fault_width"]
    data["segments_ztor"] = data["bbp_src_1"]["depth_to_top"]
    data["segments_strike"] = data["bbp_src_1"]["strike"]
    data["segments_rake"] = data["bbp_src_1"]["rake"]
    data["segments_dip"] = data["bbp_src_1"]["dip"]
    data["total_length"] = float(data["bbp_src_1"]["fault_length"])
    data["average_strike"] = [float(data["bbp_src_1"]["strike"])]
    data["average_rake"] = [float(data["bbp_src_1"]["rake"])]
    data["average_dip"] = [float(data["bbp_src_1"]["dip"])]
    data["average_width"] = [float(data["bbp_src_1"]["fault_width"])]
    data["average_ztor"] = [float(data["bbp_src_1"]["depth_to_top"])]
    for i in range(2, len(src_files) + 1):
        src_index = "bbp_src_%d" % (i)
        data["segments_length"] = "%s,%s" % (data["segments_length"],
                                             data[src_index]["fault_length"])
        data["segments_width"] = "%s,%s" % (data["segments_width"],
                                            data[src_index]["fault_width"])
        data["segments_ztor"] = "%s,%s" % (data["segments_ztor"],
                                           data[src_index]["depth_to_top"])
        data["segments_strike"] = "%s,%s" % (data["segments_strike"],
                                             data[src_index]["strike"])
        data["segments_rake"] = "%s,%s" % (data["segments_rake"],
                                           data[src_index]["rake"])
        data["segments_dip"] = "%s,%s" % (data["segments_dip"],
                                          data[src_index]["dip"])
        data["total_length"] = (data["total_length"] +
                                float(data[src_index]["fault_length"]))
        # BUG FIX: coerce with float() like the first segment above; the
        # original appended the raw SRC values, which breaks np.average()
        # for multi-segment ruptures when those values are strings
        data["average_strike"].append(float(data[src_index]["strike"]))
        data["average_rake"].append(float(data[src_index]["rake"]))
        data["average_dip"].append(float(data[src_index]["dip"]))
        data["average_width"].append(float(data[src_index]["fault_width"]))
        data["average_ztor"].append(float(data[src_index]["depth_to_top"]))
    # NOTE(review): plain arithmetic averaging ignores the circular
    # nature of strike/rake angles (e.g. 350 and 10) -- confirm intended
    data["average_strike"] = np.average(data["average_strike"])
    data["average_rake"] = np.average(data["average_rake"])
    data["average_dip"] = np.average(data["average_dip"])
    data["average_width"] = np.average(data["average_width"])
    data["average_ztor"] = np.average(data["average_ztor"])
    data["average_mechanism"] = calculate_mechanism(data["average_rake"])
    # Get velocity model data
    html_file = glob.glob("%s/*.html" % (outdir))[0]
    data["vmodel_name"] = get_vmodel_from_html(html_file)
    vel_obj = velocity_models.get_velocity_model_by_name(data["vmodel_name"])
    if vel_obj is None:
        print("ERROR: Cannot find velocity model %s!" % (data["vmodel_name"]))
        sys.exit(-1)
    # Codebase-specific velocity-model parameters; "-888" = not applicable
    if args.general_method in ["gp", "sdsu", "song"]:
        vmodel_params = vel_obj.get_codebase_params('gp')
        vmodel_file = vel_obj.get_velocity_model('gp')
        data["gf_name"] = vmodel_params['GF_NAME']
        data["vs_30"] = calculate_vs30(vmodel_file)
        data["gf_dt"] = float(vmodel_params['GF_DT'])
    elif args.general_method in ["ucsb"]:
        vmodel_params = vel_obj.get_codebase_params('ucsb')
        vmodel_file = vel_obj.get_velocity_model('ucsb')
        data["gf_name"] = vmodel_params['GREEN_SOIL']
        data["vs_30"] = "-999"
        data["gf_dt"] = float(vmodel_params['GF_DT'])
    else:
        data["gf_name"] = "-888"
        data["vs_30"] = "-888"
        data["gf_dt"] = "-888"
    # Parse STL file
    slo = StationList(stl_file)
    site_list = slo.getStationList()
    station_names = []
    for site in site_list:
        station_names.append(site.scode)
    data["station_names"] = station_names
    stations = {}
    for site in site_list:
        stations[site.scode] = {}
        # Prefer the station-list Vs30; fall back to the model's value
        if args.bbp_software_info_site == "None":
            vs_30 = data["vs_30"]
        elif site.vs30 is None:
            vs_30 = data["vs_30"]
        else:
            vs_30 = site.vs30
        collect_station_params(site, stations[site.scode], src_files,
                               args, realization, vs_30)
        collect_rd50_values(stations[site.scode], args)
        collect_rd100_values(stations[site.scode], args)
    # Save data
    data["stations"] = stations
    # Save realization data
    args.data[realization] = data
def _flat_file_name(args, label):
    """Build one flat-file output path.

    Returns <output_dir>/<prefix>_<YYMMDD><suffix>_<label>.f01.csv where
    the suffix gets a leading underscore when present.  A falsy suffix
    (None or '') contributes nothing -- previously a None suffix leaked
    the literal string 'None' into the filename.
    """
    suffix = "_%s" % (args.suffix) if args.suffix else ""
    today = datetime.date.today().strftime("%y%m%d")
    return os.path.join(args.output_dir,
                        "%s_%s%s_%s.f01.csv" %
                        (args.prefix, today, suffix, label))

def write_output_data(args):
    """
    Write all collected simulation data to the flat files: one main file
    with per-station metadata and metrics, one PSA period table, and five
    PSA data files (H1, H2, V, RotD50, RotD100).

    Parameters
    ----------
    args : argparse.Namespace-like object
        Must carry prefix, suffix, output_dir, rd50_periods, realizations,
        data (per-realization dictionaries filled by the collect_* helpers)
        and the simulation-wide bbp_software_info_* / general_* attributes.
    """
    # Output filenames (all share the same prefix/date/suffix pattern)
    output_main_filename = _flat_file_name(args, "Main")
    output_psa_h1_filename = _flat_file_name(args, "PSA_H1_D0pt05")
    output_psa_h2_filename = _flat_file_name(args, "PSA_H2_D0pt05")
    output_psa_v_filename = _flat_file_name(args, "PSA_V_D0pt05")
    output_psa_rd50_filename = _flat_file_name(args, "PSA_RotD50_D0pt05")
    output_psa_rd100_filename = _flat_file_name(args, "PSA_RotD100_D0pt05")
    output_psa_period_table_filename = _flat_file_name(args,
                                                       "PSA_Period_Table")
    # Header for the main file; columns grouped as: simulation-wide,
    # per-realization rupture data, velocity model, per-station data
    header = ("acc_file_name,bbp_software_version,sim_simulation_workflow,"
              "sim_method_short_name,sim_site_effects,"
              "eq_id,eq_magnitude,"
              "realization,number_of_segments,"
              "segment_lengths,segment_widths,segment_ztors,"
              "segment_strikes,segment_rakes,segment_dips,"
              "total_length,average_strike,average_rake,"
              "average_dip,average_width,average_ztor,"
              "mechanism_based_on_average_rake,"
              "vmodel_name,gf_name,gf_dt,vmodel_vs30,"
              "sim_station_name,sim_station_latitude,"
              "sim_station_longitude,sim_station_elevation,"
              "target_station_vs30,"
              "target_station_nehrp_class,station_rrup,station_rjb,station_rx,"
              "num_components,h1_azimuth,h2_azimuth,v_orientation,"
              "dt,num_samples,nyquist,luf,huf,ufb,lup,hup,upb,"
              "pga_h1,pga_h2,pga_v,pgv_h1,pgv_h2,pgv_v,"
              "ai_h1,ai_h2,ai_v,aid5_75_h1,aid5_75_h2,aid5_75_v,"
              "aid5_95_h1,aid5_95_h2,aid5_95_v")
    # Period columns, e.g. 0.01s -> "T0p010" (integer part, then
    # fractional part in milliseconds)
    header_periods = ",".join(["T%dp%03d" % (int(period), period % 1 * 1000)
                               for period in args.rd50_periods])
    header_psa = "acc_file_name,intensity_measure,damping,%s" % (header_periods)
    # First (common) part of every output row
    sim_params = ('"%s","%s","%s","%s","%s",%s' %
                  (args.bbp_software_info_version,
                   "/".join(args.bbp_software_info_modules),
                   args.general_method,
                   args.bbp_software_info_site,
                   args.general_eqid,
                   str(args.general_magnitude)))
    # Output PSA period table
    with open(output_psa_period_table_filename, 'w') as output_file:
        output_file.write("%s\n" % (header_periods))
        output_file.write(",".join(["%.3f" % (period)
                                    for period in args.rd50_periods]))
        output_file.write("\n")
    # Output main data file
    with open(output_main_filename, 'w') as output_file:
        output_file.write('%s\n' % (header))
        for realization in args.realizations:
            realization_data = args.data[realization]
            # Rupture and velocity-model columns shared by every station
            # of this realization
            realization_params = ('%s,%d' % (realization,
                                             realization_data["num_src"]))
            realization_params = ('%s,"%s","%s","%s","%s","%s","%s",%s,%s,%s,'
                                  '%s,%s,%s,"%s"' %
                                  (realization_params,
                                   realization_data["segments_length"],
                                   realization_data["segments_width"],
                                   realization_data["segments_ztor"],
                                   realization_data["segments_strike"],
                                   realization_data["segments_rake"],
                                   realization_data["segments_dip"],
                                   realization_data["total_length"],
                                   realization_data["average_strike"],
                                   realization_data["average_rake"],
                                   realization_data["average_dip"],
                                   realization_data["average_width"],
                                   realization_data["average_ztor"],
                                   realization_data["average_mechanism"]))
            # NOTE(review): gf_dt is formatted with %.2f, so it must be
            # numeric here; the "-888" fallback set elsewhere is a string
            # and would raise -- confirm that path is never reached.
            realization_params = ('%s,"%s","%s",%.2f,%s' %
                                  (realization_params,
                                   realization_data["vmodel_name"],
                                   realization_data["gf_name"],
                                   realization_data["gf_dt"],
                                   realization_data["vs_30"]))
            for station in realization_data["station_names"]:
                st_data = realization_data["stations"][station]
                station_params = ('%s,%s,%s,%.1f,%s,"%s",%s,%s,%s,%s,'
                                  '%s,%s,"%s",%s,%s,%s,%s,%s,%s,'
                                  '%s,%s,%s,%s,%s,%s,%s,%s,%s' %
                                  (station,
                                   st_data["sim_station_latitude"],
                                   st_data["sim_station_longitude"],
                                   st_data["sim_station_elevation"],
                                   st_data["target_station_vs30"],
                                   st_data["target_station_nehrp_class"],
                                   st_data["rrup"],
                                   st_data["rjb"],
                                   st_data["rx"],
                                   st_data["components"],
                                   st_data["h1_azimuth"],
                                   st_data["h2_azimuth"],
                                   st_data["v_orientation"],
                                   st_data["time_series_dt"],
                                   st_data["time_series_num_samples"],
                                   st_data["nyquist"],
                                   st_data["luf"],
                                   st_data["huf"],
                                   st_data["ufb"],
                                   st_data["lup"],
                                   st_data["hup"],
                                   st_data["upb"],
                                   st_data["pga_h1"],
                                   st_data["pga_h2"],
                                   st_data["pga_v"],
                                   st_data["pgv_h1"],
                                   st_data["pgv_h2"],
                                   st_data["pgv_v"]))
                # Arias intensity and significant-duration columns
                station_params = ('%s,%.2f,%.2f,%.2f,'
                                  '%.2f,%.2f,%.2f,'
                                  '%.2f,%.2f,%.2f' % (station_params,
                                                      st_data["ai_h1"],
                                                      st_data["ai_h2"],
                                                      st_data["ai_v"],
                                                      st_data["ad5_75_h1"],
                                                      st_data["ad5_75_h2"],
                                                      st_data["ad5_75_v"],
                                                      st_data["ad5_95_h1"],
                                                      st_data["ad5_95_h2"],
                                                      st_data["ad5_95_v"]))
                output_file.write('"%s",%s,%s,%s\n' %
                                  (st_data["acc_file_name"], sim_params,
                                   realization_params, station_params))
    # Write the five PSA files, one per intensity measure
    psa_outputs = [(output_psa_h1_filename, "psa_h1", "PSA_H1"),
                   (output_psa_h2_filename, "psa_h2", "PSA_H2"),
                   (output_psa_v_filename, "psa_v", "PSA_V"),
                   (output_psa_rd50_filename, "rd50", "PSA_RotD50"),
                   (output_psa_rd100_filename, "rd100", "PSA_RotD100")]
    for output_filename, psa_key, psa_label in psa_outputs:
        with open(output_filename, 'w') as output_file:
            output_file.write("%s\n" % (header_psa))
            for realization in args.realizations:
                realization_data = args.data[realization]
                for station in realization_data["station_names"]:
                    st_data = realization_data["stations"][station]
                    # Stations without this measure are skipped
                    if st_data[psa_key] is None:
                        continue
                    psa_params = '"%s","%s",%.2f' % (st_data["acc_file_name"],
                                                     psa_label,
                                                     st_data["damping"])
                    for value in st_data[psa_key]:
                        psa_params = ('%s,%.7f' % (psa_params, value))
                    output_file.write('%s\n' % (psa_params))
def create_flat_file_from_cluster():
"""
Create a flat file from a cluster simulation
"""
# Get all we need from the command-line
args = parse_arguments()
# Figure out top-level directories
args.top_level_indir = os.path.join(args.input_dir, "Sims", "indata")
args.top_level_outdir = os.path.join(args.input_dir, "Sims", "outdata")
args.realizations = sorted(os.listdir(args.top_level_indir))
args.data = {}
# Create top-level output directory
bband_utils.mkdirs([args.output_dir], print_cmd=False)
# Collect simulation-wide parameters
collect_simulation_params(args)
# Collect parameters for each realization
for realization in args.realizations:
print("==> | |
to a list of spike trains in data. These calls
do not include the statistical testing (for details see the documentation
of spade.spade())
>>> import elephant.spade
>>> import quantities as pq
>>> binsize = 3 * pq.ms # time resolution used to discretize the data
>>> winlen = 10 # maximal pattern length in bins (i.e., sliding window)
>>> result_spade = spade.spade(data, binsize, winlen)
References
----------
[1] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>.(2013)
Statistical evaluation of synchronous spike patterns extracted by
frequent item set mining. Frontiers in Computational Neuroscience, 7.
[2] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>.(2017)
Detection and Evaluation of Spatio-Temporal Spike Patterns in Massively
Parallel Spike Train Data with SPADE.
Frontiers in Computational Neuroscience, 11.
"""
if HAVE_MPI: # pragma: no cover
comm = MPI.COMM_WORLD # create MPI communicator
rank = comm.Get_rank() # get rank of current MPI task
else:
rank = 0
if output_format not in ['concepts', 'patterns']:
raise ValueError("The output_format value has to be"
"'patterns' or 'concepts'")
time_mining = time.time()
if rank == 0 or n_subsets > 0:
# Mine the data for extraction of concepts
concepts, rel_matrix = concepts_mining(data, binsize, winlen,
min_spikes=min_spikes,
min_occ=min_occ,
max_spikes=max_spikes,
max_occ=max_occ,
min_neu=min_neu,
report='a')
time_mining = time.time() - time_mining
print("Time for data mining: {}".format(time_mining))
# Decide if compute the approximated stability
if n_subsets > 0:
# Computing the approximated stability of all the concepts
time_stability = time.time()
concepts = approximate_stability(concepts, rel_matrix, n_subsets,
delta=delta, epsilon=epsilon)
time_stability = time.time() - time_stability
print("Time for stability computation: {}".format(time_stability))
# Filtering the concepts using stability thresholds
if stability_thresh is not None:
concepts = list(filter(
lambda c: _stability_filter(c, stability_thresh), concepts))
elif stability_thresh is not None:
warnings.warn('Stability_thresh not None but stability has not been '
'computed (n_subsets==0)')
output = {}
pv_spec = None # initialize pv_spec to None
# Decide whether compute pvalue spectrum
if n_surr > 0:
# Compute pvalue spectrum
time_pvalue_spectrum = time.time()
pv_spec = pvalue_spectrum(data, binsize, winlen, dither=dither,
n_surr=n_surr, min_spikes=min_spikes,
min_occ=min_occ, max_spikes=max_spikes,
max_occ=max_occ, min_neu=min_neu,
spectrum=spectrum)
time_pvalue_spectrum = time.time() - time_pvalue_spectrum
print("Time for pvalue spectrum computation: {}".format(
time_pvalue_spectrum))
# Storing pvalue spectrum
output['pvalue_spectrum'] = pv_spec
elif 0 < alpha < 1:
warnings.warn('0<alpha<1 but p-value spectrum has not been '
'computed (n_surr==0)')
# rank!=0 returning None
if rank != 0:
warnings.warn('Returning None because executed on a process != 0')
return None
# Initialize non-significant signatures as empty list:
ns_sgnt = []
# Decide whether filter concepts with psf
if n_surr > 0:
if len(pv_spec) > 0:
# Computing non-significant entries of the spectrum applying
# the statistical correction
ns_sgnt = test_signature_significance(pv_spec, alpha,
corr=stat_corr,
report='non_significant',
spectrum=spectrum)
# Storing non-significant entries of the pvalue spectrum
output['non_sgnf_sgnt'] = ns_sgnt
# Filter concepts with pvalue spectrum (psf)
if len(ns_sgnt) > 0:
concepts = list(filter(
lambda c: _pattern_spectrum_filter(
c, ns_sgnt, spectrum, winlen), concepts))
# Decide whether to filter concepts using psr
if psr_param is not None:
# Filter using conditional tests (psr)
concepts = pattern_set_reduction(concepts, ns_sgnt,
winlen=winlen,
h_subset_filtering=psr_param[0],
k_superset_filtering=psr_param[1],
l_covered_spikes=psr_param[2],
min_spikes=min_spikes,
min_occ=min_occ) # nopep8
# Storing patterns for ouput format concepts
if output_format == 'concepts':
output['patterns'] = concepts
return output
# Transforming concepts to dictionary containing pattern's infos
output['patterns'] = concept_output_to_patterns(concepts,
winlen, binsize,
pv_spec, spectrum,
data[0].t_start)
return output
def concepts_mining(data, binsize, winlen, min_spikes=2, min_occ=2,
max_spikes=None, max_occ=None, min_neu=1, report='a'):
"""
Find pattern candidates extracting all the concepts of the context formed
by the objects defined as all windows of length winlen*binsize slided
along the data and the attributes as the spikes occurring in each of the
window discretized at a time resolution equal to binsize. Hence, the output
are all the repeated sequences of spikes with maximal length winlen, which
are not trivially explained by the same number of occurrences of a superset
of spikes.
Parameters
----------
data: list of neo.SpikeTrains
List containing the parallel spike trains to analyze
binsize: Quantity
The time precision used to discretize the data (binning).
winlen: int (positive)
The size (number of bins) of the sliding window used for the analysis.
The maximal length of a pattern (delay between first and last spike) is
then given by winlen*binsize
min_spikes: int (positive)
Minimum number of spikes of a sequence to be considered a pattern.
Default: 2
min_occ: int (positive)
Minimum number of occurrences of a sequence to be considered as a
pattern.
Default: 2
max_spikes: int (positive)
Maximum number of spikes of a sequence to be considered a pattern. If
None no maximal number of spikes is considered.
Default: None
max_occ: int (positive)
Maximum number of occurrences of a sequence to be considered as a
pattern. If None, no maximal number of occurrences is considered.
Default: None
min_neu: int (positive)
Minimum number of neurons in a sequence to considered a pattern.
Default: 1
report: str
Indicates the output of the function.
'a': all the mined patterns
'#': pattern spectrum using as signature the pair:
(number of spikes, number of occurrence)
'3d#': pattern spectrum using as signature the triplets:
(number of spikes, number of occurrence, difference between the
times of the last and the first spike of the pattern)
Default: 'a'
Returns
-------
mining_results: list
If report == 'a':
All the pattern candidates (concepts) found in the data. Each
pattern is represented as a tuple containing
(spike IDs, discrete times (window position)
of the occurrences of the pattern). The spike IDs are defined as:
spike_id=neuron_id*bin_id; with neuron_id in [0, len(data)] and
bin_id in [0, winlen].
If report == '#':
The pattern spectrum is represented as a list of triplets each
formed by:
(pattern size, number of occurrences, number of patterns)
If report == '3d#':
The pattern spectrum is represented as a list of quadruplets each
formed by:
(pattern size, number of occurrences, difference between last
and first spike of the pattern, number of patterns)
rel_matrix : sparse.coo_matrix
A binary matrix with shape (number of windows, winlen*len(data)). Each
row corresponds to a window (order according to their position in
time). Each column corresponds to one bin and one neuron and it is 0 if
no spikes or 1 if one or more spikes occurred in that bin for that
particular neuron. For example, the entry [0,0] of this matrix
corresponds to the first bin of the first window position for the first
neuron, the entry [0,winlen] to the first bin of the first window
position for the second neuron.
"""
# Check that data is a list of SpikeTrains
if not all([isinstance(elem, neo.SpikeTrain) for elem in data]):
raise TypeError(
'data must be a list of SpikeTrains')
# Check that all spiketrains have same t_start and same t_stop
if not all([st.t_start == data[0].t_start for st in data]) or not all(
[st.t_stop == data[0].t_stop for st in data]):
raise AttributeError(
'All spiketrains must have the same t_start and t_stop')
if report not in ['a', '#', '3d#']:
raise ValueError(
"report has to assume of the following values:" +
" 'a', '#' and '3d#,' got {} instead".format(report))
# Binning the data and clipping (binary matrix)
binary_matrix = conv.BinnedSpikeTrain(
data, binsize).to_sparse_bool_array().tocoo()
# Computing the context and the binary matrix encoding the relation between
# objects (window positions) and attributes (spikes,
# indexed with a number equal to neuron idx*winlen+bin idx)
context, transactions, rel_matrix = _build_context(binary_matrix, winlen)
# By default, set the maximum pattern size to the maximum number of
# spikes in a window
if max_spikes is None:
max_spikes = np.max((int(np.max(np.sum(rel_matrix, axis=1))),
min_spikes + 1))
# By default, set maximum number of occurrences to number of non-empty
# windows
if max_occ is None:
max_occ = int(np.sum(np.sum(rel_matrix, axis=1) > 0))
# Check if fim.so available and use it
if HAVE_FIM:
# Return the output
mining_results = _fpgrowth(
transactions,
rel_matrix=rel_matrix,
min_c=min_occ,
min_z=min_spikes,
max_z=max_spikes,
max_c=max_occ,
winlen=winlen,
min_neu=min_neu,
report=report)
return mining_results, rel_matrix
# Otherwise use fast_fca python implementation
warnings.warn(
'Optimized C implementation of FCA (fim.so/fim.pyd) not found ' +
'in elephant/spade_src folder, or not compatible with this ' +
'Python version. You | |
1)\
and (range_check(10, -20, phi_dict[resnum_count_dict[count+1]]) == 1)\
and (range_check(10, 154, psi_dict[resnum_count_dict[count+1]]) == 1)
)\
):
return(4)
else:
return(0)
def pp_turn_calc(count):
    # Classify the (phi, psi) pair of residue `count` and residue `count`+1
    # against canonical turn templates.  Each template is a
    # (phi_i, psi_i, phi_i+1, psi_i+1) quadruple, matched to within
    # +/-10 degrees per angle.  Templates are tried in the same order as
    # the original if/elif chain; the code of the first matching turn type
    # (1-7) is returned, or 0 when nothing matches.
    def matches(phi0, psi0, phi1, psi1):
        angle_checks = ((phi_dict, count, phi0),
                        (psi_dict, count, psi0),
                        (phi_dict, count + 1, phi1),
                        (psi_dict, count + 1, psi1))
        # generator keeps evaluation lazy, like the original short-circuit
        return all(range_check(10, target, table[resnum_count_dict[pos]]) == 1
                   for table, pos, target in angle_checks)

    turn_templates = (
        (1, ((-64, -23, -87, -2),)),
        (2, ((-55, 133, 87, -7),
             (-67, 159, 61, 35))),
        (3, ((-64, -40, -78, 125),
             (-65, -41, -78, 153),
             (-80, -22, -159, 165),
             (-62, -42, -115, 131))),
        (4, ((-86, -2, 75, 22),)),
        (5, ((53, 37, 81, 2),
             (59, -132, -93, 8))),
        (6, ((91, -9, -70, 145),
             (80, 4, -114, 140),
             (77, 8, -120, 154),
             (72, 22, -105, 151))),
        (7, ((-129, 124, 51, 41),
             (-134, 113, 59, -127),
             (-127, 99, 59, -124))),
    )
    for turn_code, templates in turn_templates:
        if any(matches(*template) for template in templates):
            return turn_code
    return 0
def pp_pi_calc(count):
    # Check one two-residue conformation template: residue `count` near
    # (phi, psi) = (-102, -24) and residue `count`+1 near (-116, -59),
    # each angle within +/-10 degrees.  Returns 1 on a match, else 0.
    hit = (range_check(10, -102, phi_dict[resnum_count_dict[count]]) == 1
           and range_check(10, -24, psi_dict[resnum_count_dict[count]]) == 1
           and range_check(10, -116, phi_dict[resnum_count_dict[count + 1]]) == 1
           and range_check(10, -59, psi_dict[resnum_count_dict[count + 1]]) == 1)
    return 1 if hit else 0
def pp_stig_calc(count):
    # Check one two-residue conformation template: residue `count` near
    # (phi, psi) = (75, -173) and residue `count`+1 near (-63, 143),
    # each angle within +/-10 degrees.  Returns 1 on a match, else 0.
    hit = (range_check(10, 75, phi_dict[resnum_count_dict[count]]) == 1
           and range_check(10, -173, psi_dict[resnum_count_dict[count]]) == 1
           and range_check(10, -63, phi_dict[resnum_count_dict[count + 1]]) == 1
           and range_check(10, 143, psi_dict[resnum_count_dict[count + 1]]) == 1)
    return 1 if hit else 0
def cis_calc(count):
    # Cis-peptide check: omega angle within +/-40 degrees of 0 for this
    # residue.  Returns 1 for cis, 0 otherwise.
    return 1 if range_check(40, 0, ome_dict[resnum_count_dict[count]]) == 1 else 0
# These three lists contain everything needed from PDB file for all calculations
# declared here for correct scope, but cleared below to reduce overhead
atom_list = []
atom_xyz = []
resnum_list_unsort = []
pdb_reader = PDBParser(PERMISSIVE = 1, QUIET = True)
struc = pdb_reader.get_structure("temp", pdb)
########## NOTE TO THE READER! ############
########## CAUTION CAUTION CAUTION ########
# There is a loop below, "for model in struc";
# all the code needs to be in that loop.
for model in struc:
#### EVERYTHING MUST BE WITHIN THIS CHAIN LOOP
for chain in model:
# clear these here to reduce overhead
atom_list = []
atom_xyz = []
resnum_list_unsort = []
for residue in chain:
for atom in residue:
if atom.get_name() == 'CA' or atom.get_name() == 'C' or atom.get_name() == 'N' or atom.get_name() == 'O' and residue.get_resname() != 'HOH':
atom_list.append([str(residue.get_id()[1])+str(residue.get_id()[2]), atom.get_id(), residue.get_resname(), chain.get_id(), model.get_id() ])
atom_xyz.append(atom.get_coord())
resnum_list_unsort.append(float(str(residue.get_id()[1])+"."+str(ord(str(residue.get_id()[2])))))
pdb.close()
#this makes the resnum_list unique, and deals correctly with insertion codes
#ie. it gets unique values, sorts them numerically, which works because codes (A, B, C, etc) get converted to ascii values ie. 101A becomes 101.52
# then after sorting it undoes this ascii conversion and regenerates the string
res_set = set(resnum_list_unsort)
resnum_list_float = list(res_set)
resnum_list = []
for resnum in resnum_list_float:
num = re.sub(r"([0-9]+)(\.)([0-9]+)", r"\1", str(resnum))
ascii = re.sub(r"([0-9]+)(\.)([0-9]+)", r"\3", str(resnum))
if int(ascii) < 0:
ascii = int(ascii) * -1
insert = chr(int(ascii))
resnum_list.append(str(num)+str(insert))
# this is necessicary so that we can call the next or previous residue by using count +1 or count -1, but we need to pass the actual resnum in resnum_list to the calculators
resnum_count = range(1,len(resnum_list) + 1)
count = 1
resnum_count_dict = {}
for resnum in resnum_list:
resnum_count_dict[count] = resnum
count = count + 1
###############################################################################################
######### Below this line is all the looping and such that calls the calulators then ##########
######### assigns secondary structure. ########################################################
###############################################################################################
### loop that makes a dictionary of residue numbers and their zeta values
zeta_dict = {}
tau_dict = {}
dison3_dict = {}
dison4_dict = {}
discn3_dict = {}
disnn1_dict = {}
discaca3_dict = {}
phi_dict = {}
psi_dict = {}
ome_dict = {}
## dictionary that contains all secondary structure assignments
ss_dict = {}
pp_dict = {}
final_dict = {}
##dictionary for residue types based on residue number
res_type_dict = {}
chain_id_dict = {}
model_id_dict = {}
aa_dict = {}
for count in resnum_count:
#using try means that we will not be thrown off by gaps
try:
phi_dict[resnum_count_dict[count]] = phi_calc(resnum_count_dict[count])
except:
pass #print "Phi not calculated for residue number %i\n" % resnum_count_dict[count]
try:
psi_dict[resnum_count_dict[count]] = psi_calc(resnum_count_dict[count])
except:
pass #print "psi not calculated for residue number %i\n" % resnum_count_dict[count]
try:
zeta_dict[resnum_count_dict[count]] = zeta_calc(resnum_count_dict[count])
except:
pass #print "Zeta not calculated for residue number %i\n" % resnum_count_dict[count]
try:
tau_dict[resnum_count_dict[count]] = tau_calc(resnum_count_dict[count])
except:
pass #print "Tau not calculated for residue number %i\n" % resnum_count_dict[count]
try:
dison3_dict[resnum_count_dict[count]] = dison3_calc(resnum_count_dict[count])
except:
pass #print "Dison3 not calculated for residue number %i\n" % resnum_count_dict[count]
try:
dison4_dict[resnum_count_dict[count]] = dison4_calc(resnum_count_dict[count])
except:
pass #print "Dison4 not calculated for residue number %i\n" % resnum_count_dict[count]
try:
discn3_dict[resnum_count_dict[count]] = discn3_calc(resnum_count_dict[count])
except:
pass #print "Discn3 not calculated for residue number %i\n" % resnum_count_dict[count]
try:
disnn1_dict[resnum_count_dict[count]] = disnn1_calc(resnum_count_dict[count])
except:
pass
try:
discaca3_dict[resnum_count_dict[count]] = discaca3_calc(resnum_count_dict[count])
except:
pass
try:
ome_dict[resnum_count_dict[count]] = ome_calc(resnum_count_dict[count])
except:
pass
indices = index_getter(resnum_count_dict[count])
atom_types = 'CA'
for i in indices:
if atom_getter(i,'CA') == 'no':
pass
else:
res_type_dict[resnum_count_dict[count]] = atom_get(i,'CA')
chain_id_dict[resnum_count_dict[count]] = chain_get(i,'CA')
try:
model_id_dict[resnum_count_dict[count]] = model_get(i, 'CA')
except:
model_id_dict[resnum_count_dict[count]] = "X"
try:
aa_dict[resnum_count_dict[count]] = to_single(one_letter,res_type_dict[resnum_count_dict[count]])
except:
aa_dict[resnum_count_dict[count]] = '?'
### setting all the SS to blank
ss_dict[resnum_count_dict[count]] = '-'
pp_dict[resnum_count_dict[count]] = '-'
###############################################################################################
######### Above this line is all the looping and such that calls the calulators ###############
################## Below is the acutal assignment #############################################
###############################################################################################
# assigns SS to all residues
# ensures that there is a minimum of 4 residues in a row for a helix
#PRIORITY: B, P(4+), H, G, T, E, N, P(2-3)
for count in resnum_count:
try:
if helix1_short_calc(count) == 1:
ss_dict[resnum_count_dict[count]] = 'P'
except:
pass
try:
if betan_test(count) == 1:
ss_dict[resnum_count_dict[count]] = 'N'
except:
pass
| |
import decimal
import pandas as pd
import datetime
import re
def round_decimal(x, digits=0):
    """Round x to `digits` decimal places using half-up rounding.

    Uses decimal.Decimal(str(x)) so the rounding is applied to the value
    as printed, not to the binary float, with ties rounded away from zero
    (ROUND_HALF_UP), e.g. 2.5 -> 3, 1.005 -> 1.01.

    Parameters
    ----------
    x : a float
    digits : int, number of decimal places; 0 rounds to an integer and
        negative values round to tens, hundreds, etc.

    Returns
    ----------
    int when digits == 0, otherwise float
    """
    x = decimal.Decimal(str(x))
    if digits == 0:
        return int(x.quantize(decimal.Decimal("1"), rounding='ROUND_HALF_UP'))
    # Quantize target, e.g. digits=2 -> Decimal('1e-2').  (The original
    # code had two identical branches here; collapsed to one.)
    exponent = decimal.Decimal('1e' + str(-1 * digits))
    return float(x.quantize(exponent, rounding='ROUND_HALF_UP'))
def intersection(list1, list2):
    """Return the elements of list1 that also appear in list2.

    Order and duplicates of list1 are preserved; membership is tested
    against list2 with `in`, so elements need not be hashable.

    Parameters
    ----------
    list1 : First list
    list2 : Second list

    Returns
    ----------
    list3 : Intersection of list1 and list2
    """
    list3 = []
    for value in list1:
        if value in list2:
            list3.append(value)
    return list3
def get_effective_df(df_tbot_raw, ineffective_intents, df_escalate_nodes, filter_non_intent_node, workspace_nodes=None):
    """This function checks the conversations in df_tbot_raw for escalations, flags them and returns the resulting
    updated dataframe.

    Parameters
    ----------
    df_tbot_raw : Dataframe with workspace logs
    ineffective_intents: list of intents that mark a conversation as escalated
    df_escalate_nodes: dataframe with escalation dialog nodes ('Valid', 'Node ID' columns)
    filter_non_intent_node: whether to filter out utterances whose last visited node does not contain intents;
        when True, workspace_nodes must be provided
    workspace_nodes: workspace nodes dataframe ('dialog_node', 'title', 'conditions' columns), optional

    Returns
    ----------
    df_tbot_raw : Dataframe with 'Escalated_conversation' flag added and updated for each conversation
    """
    # Add an 'Escalated_conversation' flag to dataframe
    df_tbot_raw['Escalated_conversation'] = False
    # Map dialog-node ids to their titles (when a title exists) so the
    # logs show readable names below.
    node_title_map = dict()
    # Bug fix: with the default workspace_nodes=None this used to raise
    # AttributeError on .iterrows(); the title substitution is now skipped.
    if workspace_nodes is not None:
        for _, node in workspace_nodes.iterrows():
            if str(node['title']) != 'nan':
                node_title_map[node['dialog_node']] = node['title']
    # Use node title in nodes_visited_s and response_dialog_stack if it
    # exists (mutates the lists/dicts stored in the dataframe in place)
    for _, log_row in df_tbot_raw.iterrows():
        node_id_visit_list = log_row['response.output.nodes_visited_s']
        for seq_id, node_id in enumerate(node_id_visit_list):
            if node_id in node_title_map:
                node_id_visit_list[seq_id] = node_title_map[node_id]
        # NOTE: the original reused the name 'item' for both the dataframe
        # row and the stack values; renamed to avoid shadowing.
        for stack_item in log_row['response_dialog_stack']:
            for stack_key, stack_node in stack_item.items():
                if stack_node in node_title_map:
                    stack_item[stack_key] = node_title_map[stack_node]
    # Get the list of valid escalation ("ineffective") dialog node ids
    ineffective_nodes = df_escalate_nodes[df_escalate_nodes['Valid']]['Node ID'].tolist()
    # If nodes visited contains any of the ineffective node ids, get the conversation id
    if filter_non_intent_node:
        # Determine whether the last visited node's condition references an
        # intent (pattern '#intent_name'); requires workspace_nodes.
        df_tbot_raw['last_node'] = df_tbot_raw['response.output.nodes_visited_s'].str[-1].apply(
            lambda x: x if x else [''])
        df_tbot_raw['last_node_value'] = df_tbot_raw['last_node'].apply(
            lambda x: workspace_nodes.loc[workspace_nodes['dialog_node'] == x]['conditions'].values)
        df_tbot_raw['last_node_value'] = df_tbot_raw['last_node_value'].apply(lambda x: x if x else ['']).str[0]
        df_tbot_raw['contain_intent'] = df_tbot_raw['last_node_value'].apply(
            lambda x: bool(re.match('#[a-zA-Z_0-9]+', str(x))))
        conversation_id = [conversation for conversation in df_tbot_raw.loc[
            df_tbot_raw['response.output.nodes_visited_s'].apply(
                lambda x: bool(intersection(x, ineffective_nodes)))].loc[df_tbot_raw['contain_intent']][
            'response.context.conversation_id']]
        # If top intent for a message is present in ineffective_intents list, get the conversation id
        conversation_id.extend(df_tbot_raw.loc[(df_tbot_raw['response.top_intent_intent'].isin(
            ineffective_intents)), 'response.context.conversation_id'].loc[
            df_tbot_raw['contain_intent']].tolist())
    else:
        conversation_id = [conversation for conversation in df_tbot_raw.loc[
            df_tbot_raw['response.output.nodes_visited_s'].apply(
                lambda x: bool(intersection(x, ineffective_nodes)))]['response.context.conversation_id']]
        # If top intent for a message is present in ineffective_intents list, get the conversation id
        conversation_id.extend(df_tbot_raw.loc[(df_tbot_raw['response.top_intent_intent'].isin(
            ineffective_intents)), 'response.context.conversation_id'].tolist())
    # Remove duplicate conversation ids from conversation_id list
    conv_id = list(set(conversation_id))
    # Flag all conversations in conv_id list as 'Escalated'
    df_tbot_raw.loc[df_tbot_raw['response.context.conversation_id'].isin(conv_id), ['Escalated_conversation']] = True
    # Return dataframe with 'Escalated' flag information
    return df_tbot_raw
def get_coverage_df(df_tbot_raw, df_coverage_nodes, conf_threshold):
    """Flag each message in the logs as covered or not covered by the assistant.

    A message is marked not covered when it either (1) visited any valid
    non-coverage dialog node (e.g. an 'anything_else' node), or (2) its top
    intent confidence fell below ``conf_threshold``.

    Parameters
    ----------
    df_tbot_raw : Dataframe with workspace logs
    df_coverage_nodes: dataframe with non-coverage dialog nodes
    conf_threshold: float, confidence threshold for identifying top intent from assistant

    Returns
    ----------
    df_tbot_raw : Dataframe with 'Covered' flag and 'Not Covered cause' added and updated for each message
    """
    # Convert confidence to numeric type
    # df_tbot_raw['response.top_intent_confidence'] = pd.to_numeric(df_Tbot_raw['response.top_intent_confidence'])
    # Start from "covered" and carve out the exceptions below
    df_tbot_raw['Covered'] = True
    df_tbot_raw['Not Covered cause'] = None
    # Filter all the valid dialog node ids for non-coverage
    df_coverage_valid = df_coverage_nodes[df_coverage_nodes['Valid']]  # ['dialog_node'].tolist()
    # (1) Mark all messages that hit any non-coverage node including but not limited to 'anything_else' as 'Not covered'
    # and update the 'Not Covered cause' column
    for node in df_coverage_valid['Node ID'].tolist():
        # Human-readable cause, e.g. "'anything_else' node"
        cause = "'{}' node".format(df_coverage_valid.loc[df_coverage_valid['Node ID'] == node, 'Condition'].values[0])
        # intersection() is a file-local helper; node.split() wraps the id in a list
        df_tbot_raw.loc[
            (df_tbot_raw['response.output.nodes_visited_s'].apply(lambda x: bool(intersection(x, node.split())))), [
                'Covered', 'Not Covered cause']] = [False, cause]
    # (2) Mark all messages that did not meet confidence threshold set as 'Not covered' and update the 'Not Covered
    # cause' column
    df_tbot_raw.loc[df_tbot_raw['response.top_intent_confidence'] < conf_threshold, ['Covered']] = False
    df_tbot_raw.loc[df_tbot_raw['response.top_intent_confidence'] < conf_threshold, [
        'Not Covered cause']] = 'Classified below confidence threshold'
    return df_tbot_raw
def chk_is_valid_node(node_ids, node_name, node_conditions, nodes):
    """Check whether the given node ids, names and conditions exist in the workspace.

    Each requested identifier is looked up in ``nodes``.  Identifiers found in
    the workspace contribute their matching workspace rows (flagged valid);
    identifiers that are missing contribute a placeholder row flagged invalid.

    Parameters
    ----------
    node_ids : list of dialog node ids to validate
    node_name : list of node titles to validate
    node_conditions : list of node conditions to validate
    nodes : DataFrame of all nodes in the current version of the workspace;
        must contain 'dialog_node', 'conditions', 'title' and 'type' columns.
        NOTE: a 'valid' column is added to this frame in place (as before).

    Returns
    ----------
    df_valid_nodes : DataFrame with columns
        ['Condition', 'Node ID', 'Node Name', 'Valid'], 'Valid' being True for
        identifiers present in the workspace and False otherwise.
    """
    cols = ['conditions', 'dialog_node', 'title', 'type', 'valid']
    # Rows that come from the workspace are valid by construction
    nodes['valid'] = True
    # Build membership sets once instead of recreating .tolist() per lookup
    known_ids = set(nodes['dialog_node'])
    known_conditions = set(nodes['conditions'])
    known_titles = set(nodes['title'])
    # Collect per-identifier frames; DataFrame.append was removed in pandas 2.0,
    # so assemble with a single pd.concat at the end instead.
    frames = [pd.DataFrame(columns=cols)]
    for node in node_ids:
        if node not in known_ids:
            # Placeholder row for a node id missing from the workspace
            frames.append(pd.DataFrame([['', node, '', '', False]], columns=cols))
        else:
            frames.append(nodes.loc[nodes['dialog_node'] == node, cols])
    for condition in node_conditions:
        if condition not in known_conditions:
            frames.append(pd.DataFrame([[condition, '', '', '', False]], columns=cols))
        else:
            frames.append(nodes.loc[nodes['conditions'] == condition, cols])
    for name in node_name:
        if name not in known_titles:
            frames.append(pd.DataFrame([['', '', name, '', False]], columns=cols))
        else:
            frames.append(nodes.loc[nodes['title'] == name, cols])
    df_valid_nodes = pd.concat(frames, ignore_index=True)
    # Remove duplicates (the same workspace row can match by id, name and condition)
    df_valid_nodes = df_valid_nodes.drop_duplicates(keep='first')
    df_valid_nodes.columns = ['Condition', 'Node ID', 'Node Name', 'Type', 'Valid']
    # Positional-axis drop('Type', 1) was removed in pandas 2.0
    df_valid_nodes = df_valid_nodes.drop(columns='Type')
    return df_valid_nodes
def format_data(df):
"""This function formats the log data from watson assistant by separating columns and changing datatypes
Parameters
----------
df : Dataframe with logs from the workspace
Returns
----------
df6 : Dataframe formatted by separating columns and changing datatypes
"""
# Separate the fields in request and response
df1 = pd.concat([df.drop(['request', 'response'], axis=1).reset_index(drop=True),
df['request'].apply(pd.Series).add_prefix('request_').reset_index(drop=True),
pd.DataFrame(df['response']
.tolist()).add_prefix('response_')], axis=1) # type: pd.DataFrame
df1['request_input'] = pd.io.json.json_normalize(df['request'])['input.text']
# Add context and output fields
df2 = pd.concat([df1.drop(['response_context', 'response_output'], axis=1),
df1['response_context'].apply(pd.Series).add_prefix('response_context_'),
pd.DataFrame(df1['response_output'].tolist()).add_prefix('response_')],
axis=1) # type: pd.DataFrame
# Add context_system fields
df3 = pd.concat([df2.drop(['response_context_system'], axis=1),
df2['response_context_system'].apply(pd.Series).add_prefix('response_')],
axis=1) # type: pd.DataFrame
if 'response_context_response_context_IntentStarted' in df3.columns \
and 'response_context_response_context_IntentCompleted' in df3.columns:
cols = ['log_id', 'response_timestamp', 'response_context_conversation_id', 'request_input', 'response_text',
'response_intents', 'response_entities', 'response_nodes_visited', 'response_dialog_request_counter',
'response_dialog_stack', 'response_dialog_turn_counter',
'response_context_response_context_IntentStarted', 'response_context_response_context_IntentCompleted']
else:
cols = ['log_id', 'response_timestamp', 'response_context_conversation_id', 'request_input', 'response_text',
'response_intents', 'response_entities', 'response_nodes_visited', 'response_dialog_request_counter',
'response_dialog_stack', 'response_dialog_turn_counter']
# Select a few required columns
df4 = df3[cols].copy(deep=True) # type: pd.DataFrame
# Limit fetched intents to a maximum value of 3
df4.loc[:, 'response_intents'] = df4['response_intents'].apply(lambda x: x[:3])
# Separate intents into different fields
df5 = pd.concat([df4.drop(['response_intents'], axis=1),
pd.DataFrame(df4['response_intents'].values.tolist()).add_prefix(
'response_intent_')], axis=1) # type: pd.DataFrame
# Check if at least 3 intents are identified
if 'response_intent_2' in df5.columns:
# Put the 3 intents and confidences into separate fields
df6 = pd.concat([df5.drop(['response_intent_0', 'response_intent_1',
'response_intent_2'], axis=1),
df5['response_intent_0'].apply(pd.Series).add_prefix('response.top_intent_'),
df5['response_intent_1'].apply(pd.Series).add_prefix('Intent 2 '),
df5['response_intent_2'].apply(pd.Series).add_prefix('Intent 3 ')],
axis=1) # type: pd.DataFrame
# Convert confidence to numeric type
cols = ['response.top_intent_confidence', 'Intent 2 confidence', 'Intent 3 confidence']
df6[cols] = df6[cols].apply(pd.to_numeric, errors='coerce', axis=1)
# Add confidence gap column
df6['Confidence gap (between 1 and 2)'] = df6['response.top_intent_confidence'] - df6['Intent 2 confidence']
elif 'response_intent_1' in df5.columns:
# Put the 3 intents and confidences into separate fields
df6 = pd.concat([df5.drop(['response_intent_0', 'response_intent_1'], axis=1),
df5['response_intent_0'].apply(pd.Series).add_prefix('response.top_intent_'),
df5['response_intent_1'].apply(pd.Series).add_prefix('Intent 2 ')],
axis=1) # type: pd.DataFrame
# Convert confidence to numeric type
cols = ['response.top_intent_confidence', 'Intent 2 confidence']
df6[cols] = df6[cols].apply(pd.to_numeric, errors='coerce', axis=1)
df6['Intent 3 intent'] = ''
df6['Intent 3 confidence'] = ''
# Add confidence gap column
df6['Confidence gap (between 1 and 2)'] = df6['response.top_intent_confidence'] - df6['Intent 2 confidence']
else:
# Create the top intent and its confidence column
df6 = pd.concat([df5.drop(['response_intent_0'], axis=1),
df5['response_intent_0'].apply(pd.Series).add_prefix('response.top_intent_')],
axis=1) # type: pd.DataFrame
# df6['Confidence gap (between 1 and 2)'] = ''
# df6['Intent 2 intent'] =''
# df6['Intent 2 confidence'] = ''
# df6['Intent 3 intent'] =''
# df6['Intent 3 confidence'] = ''
new_cols_list = ['Confidence gap (between 1 and | |
# Source repository: MartinPdS/PyMieSim
import numpy as np
import matplotlib.pyplot as plt
from PyMieSim.Tools.utils import NA2Angle
from PyMieSim.Tools.units import Area
from PyMieSim.Tools.Directories import *
from mayavi import mlab
from tvtk.tools import visual
from PyMieSim.Tools.utils import Sp2Cart
from PyMieSim.Tools.Plots import StructuredMesh
from PyMieSim.Tools.PlotsUtils import PlotCone
class Stokes(dict):  # https://en.wikipedia.org/wiki/Stokes_parameters
    """Dict subclass representing scattering Far-field in the Stokes
    representation.

    | The stokes parameters are:
    | I : Intensity of the fields
    | Q : linear polarization parallel to incident polarization
    | U : linear polarization 45 degree to incident polarization
    | V : Circular polarization

    .. math:
        I &= \\big| E_x \\big|^2 + \\big| E_y \\big|^2

        Q &= \\big| E_x \\big|^2 - \\big| E_y \\big|^2

        U &= 2 \\mathcal{Re} \\big\\{ E_x E_y^* \\big\\}

        V &= 2 \\mathcal{Im} \\big\\{ E_x E_y^* \\big\\}

    Parameters
    ----------
    Parent : :class:`Scatterer`
        The scatterer parent.
    Num : :class:`int`
        Number of point to evaluate the Stokes parameters in spherical coord.
    Distance : :class:`float`
        Distance at which we evaluate the Stokes parameters.

    Returns
    -------
    :class:`dict`
        Representation of Stokes parameters.
    """

    def __init__(self, Parent, Num=100, Distance=1.):
        self.Parent = Parent
        # Far-field components sampled on a spherical (Theta, Phi) grid
        EPhi, ETheta, Theta, Phi = Parent.Bind.FullFields(Sampling=Num, R=1)
        # Total intensity; also normalizes Q, U and V below
        I = np.abs(EPhi)**2 + np.abs(ETheta)**2
        self.I = ScalarIntensity(Field = I/np.max(I),
                                 Phi   = Phi,
                                 Theta = Theta,
                                 Mesh  = None,
                                 Name  = 'I')
        self.Q = ScalarIntensity(Field = (np.abs(EPhi)**2 - np.abs(ETheta)**2)/I,
                                 Phi   = Phi,
                                 Theta = Theta,
                                 Mesh  = None,
                                 Name  = 'Q')
        self.U = ScalarIntensity(Field = (+2 * np.real(EPhi*ETheta.conjugate()))/I,
                                 Phi   = Phi,
                                 Theta = Theta,
                                 Mesh  = None,
                                 Name  = 'U')
        self.V = ScalarIntensity(Field = (-2 * np.imag(EPhi*ETheta.conjugate()))/I,
                                 Phi   = Phi,
                                 Theta = Theta,
                                 Mesh  = None,
                                 Name  = 'V')

    def Plot(self, Source=True, Axes=True):
        """Render the four Stokes maps side by side in one Mayavi scene."""
        Figure = mlab.figure(figure='Stokes parameter', size=(600,300), bgcolor=(1,1,1), fgcolor=(0.,0.,0.))
        visual.set_viewer(Figure)
        # Each parameter is drawn at its own y-offset, with the source glyph below it
        self.I._Plot( Source=Source, Axes=Axes, Figure=Figure, Origin=(0,0,0), ColorBar=False, label='I' )
        self.Parent.Source._Plot(Figure=Figure, Origin=(0,0,-4))
        self.Q._Plot( Source=Source, Axes=Axes, Figure=Figure, Origin=(0,4,0), ColorBar=False, label='Q' )
        self.Parent.Source._Plot(Figure=Figure, Origin=(0,4,-4))
        self.U._Plot( Source=Source, Axes=Axes, Figure=Figure, Origin=(0,8,0), ColorBar=False, label='U' )
        self.Parent.Source._Plot(Figure=Figure, Origin=(0,8,-4))
        self.V._Plot( Source=Source, Axes=Axes, Figure=Figure, Origin=(0,12,0), ColorBar=False, label='V' )
        self.Parent.Source._Plot(Figure=Figure, Origin=(0,12,-4))
        # Clamp all color scales to [0, 1] so the four panels are comparable
        self.I.Image.module_manager.scalar_lut_manager.data_range = (0, 1)
        self.Q.Image.module_manager.scalar_lut_manager.data_range = (0, 1)
        self.U.Image.module_manager.scalar_lut_manager.data_range = (0, 1)
        self.V.Image.module_manager.scalar_lut_manager.data_range = (0, 1)
        mlab.show()

    def __repr__(self):
        # BUG FIX: the parameters are stored as attributes (I, Q, U, V), never
        # as dict items, so the previous ``self['S1'].shape`` always raised
        # KeyError (and listed keys "S1, S2, S3,, S4" that do not exist).
        return """
        Object: Stokes parameters
        Attributes: I, Q, U, V
        Structured data: Yes
        Method: <Plot>"""
class SPF(dict):
    """Dict subclass representing scattering phase function or SPF in short.

    The SPF is defined as:

    .. math::
        \\text{SPF} = E_{\\parallel}(\\phi,\\theta)^2 + E_{\\perp}(\\phi,\\theta)^2

    Parameters
    ----------
    Parent : :class:`Scatterer`
        The scatterer parent.
    Num : :class:`int`
        Number of point to evaluate the SPF in spherical coord.
    Distance : :class:`float`
        Distance at which we evaluate the SPF.

    Returns
    -------
    :class:`dict`
        Representation of SPF.
    """

    def __init__(self, Parent, Num=100, Distance=1.):
        self.Parent = Parent
        # Far-field components sampled on a spherical (Theta, Phi) grid
        EPhi, ETheta, Theta, Phi = Parent.Bind.FullFields(Sampling=Num, R=1)
        spf = np.sqrt( np.abs(EPhi)**2 + np.abs(ETheta)**2 )
        # Mesh radius is the normalized SPF (scaled by 3 for display)
        self.SPF = ScalarIntensity(Field = spf,
                                   Phi   = Phi,
                                   Theta = Theta,
                                   Mesh  = spf/np.max(spf)*3,
                                   Name  = 'Scattering phase function')

    def Plot(self, Source=True, Axes=True):
        """Render the SPF surface in a Mayavi scene (optionally with the source)."""
        Figure = mlab.figure(figure='Scattering phase function',
                             size=(600,300),
                             bgcolor=(1,1,1),
                             fgcolor=(0.,0.,0.))
        visual.set_viewer(Figure)
        self.SPF._Plot(Figure=Figure, Source=Source, Axes=Axes)
        if Source:
            self.Parent.Source._Plot(Figure=Figure)
        mlab.show()

    def __repr__(self):
        # BUG FIX: data lives in the ``SPF`` attribute, never as dict items,
        # so the previous ``self['Phi'].shape`` always raised KeyError.
        return """
        Object: Scattering phase function
        Attributes: SPF
        Structured data: Yes
        Method: <Plot>"""
class S1S2(dict):
    """Dict subclass representing the S1 and S2 scattering functions.

    Parameters
    ----------
    Parent : :class:`Scatterer`
        The scatterer parent.
    Phi : array-like
        Scattering angles in degrees at which S1 and S2 are evaluated.

    Returns
    -------
    :class:`dict`
        Representation of S1 S2.
    """

    def __init__(self, Parent, Phi):
        self.Parent = Parent
        # The binding expects radians; Phi is kept in degrees for plotting
        S1, S2 = Parent.Bind.S1S2( Phi = np.deg2rad(Phi) )
        self.S1 = ScatteringElement(S1, Phi, Name='S1')
        self.S2 = ScatteringElement(S2, Phi, Name='S2')

    def Plot(self):
        """Show |S1| and |S2| side by side on polar axes."""
        fig, ax = plt.subplots(nrows = 1, ncols = 2, figsize = (7,4), subplot_kw = {'projection':'polar'})
        self.S1._Plot(ax[0], ColorMap='C0')
        self.S2._Plot(ax[1], ColorMap='C1')
        plt.show()

    def __repr__(self):
        # BUG FIX: S1/S2 are attributes (ScatteringElement), never dict items,
        # so the previous ``self['Phi'].shape`` always raised KeyError.
        return f"""
        Object: S1 S2
        Attributes: S1, S2
        Structured data: Yes
        Method: <Plot>
        Shape: {np.shape(self.S1.Element)}"""
class FarField(dict):
    """Dict subclass representing scattering Far-field in a spherical
    coordinate representation.

    The Far-fields are defined as:

    .. math::
        \\text{Fields} = E_{||}(\\phi,\\theta)^2, E_{\\perp}(\\phi,\\theta)^2

    Parameters
    ----------
    Parent : :class:`Scatterer`
        The scatterer parent.
    Num : :class:`int`
        Number of point to evaluate the far-fields in spherical coord.
    Distance : :class:`float`
        Distance at which we evaluate the far-fields.

    Returns
    -------
    :class:`dict`
        Representation of far-fields.
    """

    def __init__(self, Num = 200, Parent = None, Distance=1.):
        self.Parent = Parent
        # Far-field components sampled on a spherical (Theta, Phi) grid
        EPhi, ETheta, Theta, Phi = Parent.Bind.FullFields(Sampling=Num, R=1)
        # Normalized SPF used as the display-mesh radius for both components
        spf = np.sqrt( np.abs(EPhi)**2 + np.abs(ETheta)**2 )
        spf /= np.max(spf) / 2
        self.EPhi = ScalarAmplitude(Field = EPhi,
                                    Phi   = Phi,
                                    Theta = Theta,
                                    Mesh  = spf,
                                    Name  = 'EPhi')
        self.ETheta = ScalarAmplitude(Field = ETheta,
                                      Phi   = Phi,
                                      Theta = Theta,
                                      Mesh  = spf,
                                      Name  = 'ETheta')

    def Plot(self, Source=True, Axes=True):
        """Render ETheta and EPhi in two Mayavi scenes (real and imaginary parts each)."""
        Figure0 = mlab.figure(figure='ETheta',
                              size=(600,300),
                              bgcolor=(1,1,1),
                              fgcolor=(0.,0.,0.))
        Figure1 = mlab.figure(figure='EPhi',
                              size=(600,300),
                              bgcolor=(1,1,1),
                              fgcolor=(0.,0.,0.))
        visual.set_viewer(Figure0)
        self.ETheta._Plot(Figure=Figure0, Source=Source, Axes=Axes, label='ETheta')
        if Source:
            # One source glyph beneath each of the real/imag sub-plots
            self.Parent.Source._Plot(Figure=Figure0, Origin=(0,+3,-2))
            self.Parent.Source._Plot(Figure=Figure0, Origin=(0,-3,-2))
        visual.set_viewer(Figure1)
        self.EPhi._Plot(Figure=Figure1, Source=Source, Axes=Axes, label='EPhi')
        if Source:
            self.Parent.Source._Plot(Figure=Figure1, Origin=(0,+3,-2))
            self.Parent.Source._Plot(Figure=Figure1, Origin=(0,-3,-2))
        mlab.show()

    def __repr__(self):
        # BUG FIX: the fields are attributes (EPhi, ETheta), never dict items,
        # so the previous ``self['Theta'].shape`` always raised KeyError.
        return f"""
        Object: Far fields
        Attributes: EPhi, ETheta
        Structured data: Yes
        Method: <Plot>
        Shape: {np.shape(self.EPhi.Field)}"""
class Footprint(dict):
    """Dict subclass representing footprint of the scatterer.

    The footprint usually depend on the scatterer and the detector.
    For more information see references in the
    `documentation <https://pymiesim.readthedocs.io/en/latest>`_

    The footprint is defined as:

    .. math::
        \\text{Footprint} = \\big| \\mathscr{F}^{-1} \\big\\{ \\tilde{ \\psi }\
        (\\xi, \\nu), \\tilde{ \\phi}_{l,m}(\\xi, \\nu) \\big\\} \
        (\\delta_x, \\delta_y) \\big|^2

    Parameters
    ----------
    Scatterer : :class:`Scatterer`
        The scatterer.
    Detector : :class:`Detector`
        The detector.
    Num : :class:`int`
        Number of point to evaluate the footprint in cartesian coord.

    Returns
    -------
    :class:`dict`
        Representation of footprint.
    """

    def __init__(self, Scatterer, Detector):
        # Fixed angular sampling; zero-padding the FFT by this factor refines
        # the sampling of the direct-space footprint.
        Num = 251
        PaddingFactor = 10
        TotalSize = Num*PaddingFactor
        # Largest collection angle allowed by the detector numerical aperture
        MaxAngle = NA2Angle(Detector.NA)
        # NOTE(review): here 'phi' spans [0, pi] and 'theta' spans [-pi, pi],
        # the reverse of the usual spherical convention — confirm this matches
        # what Scatterer._FarField expects.
        phi, theta = np.linspace(0, np.pi, Num), np.linspace(-np.pi, np.pi, Num)
        Phi, Theta = np.meshgrid(phi, theta)
        # Direct-space extent conjugate to the angular aperture (via k-vector)
        MaxDirect = 1 / (np.sin(MaxAngle.Radian) * Scatterer.Source.k / (2*np.pi))
        # Symmetric x/y offset axes of the cropped footprint map
        X = Y = np.linspace(-1, 1, Num) * Num/2 * MaxDirect/PaddingFactor
        FarFieldPara, FarFieldPerp = Scatterer._FarField(Phi.flatten(), Theta.flatten(), 1.0, Structured=False)
        # Detector mode field evaluated on the same angular grid, used as weighting
        ScalarField = Detector.GetScalarField(Sampling=Num, Structured=True)
        Perp = ScalarField * FarFieldPerp.reshape(Theta.shape)
        Para = ScalarField * FarFieldPara.reshape(Theta.shape)
        # Zero-padded inverse 2D FFT -> direct space; keep the squared magnitude
        FourierPara = np.fft.ifft2(Para, s=[TotalSize, TotalSize])
        FourierPara = np.fft.fftshift(FourierPara).__abs__()**2
        FourierPerp = np.fft.ifft2(Perp, s=[TotalSize, TotalSize])
        FourierPerp = np.fft.fftshift(FourierPerp).__abs__()**2
        # Crop the central Num x Num window out of the padded transform
        start = int(TotalSize/2-np.floor(Num/2))
        end = int(TotalSize/2+np.ceil(Num/2))
        FourierPara = FourierPara[start: end, start: end]
        FourierPerp = FourierPerp[start: end, start: end]
        # Incoherent sum of the two polarization contributions
        self['Map'] = (FourierPara + FourierPerp)
        self['DirectX'] = X
        self['DirectY'] = Y

    def Plot(self):
        """Show the footprint map versus the x/y offsets (in micrometers)."""
        fig = plt.figure()
        ax = fig.add_subplot(111)
        im = ax.pcolormesh(self['DirectX']*1e6, self['DirectY']*1e6, self['Map'], cmap='gray', shading='auto')
        ax.set_xlabel(r'Offset distance in X-axis [$\mu$m]')
        ax.set_ylabel(r'Offset distance in Y-axis [$\mu$m]')
        ax.set_title('Scatterer Footprint')
        plt.colorbar(im, ax=ax)
        plt.show()

    def __repr__(self):
        return f"""
        Object: Dictionary
        Keys: Map, DirectX, DirectY
        Structured data: Yes
        Method: <Plot>
        Shape: {self['Map'].shape}"""
class ScatteringElement(dict):
    """Container for one angular scattering function (e.g. S1 or S2).

    Holds the complex amplitude ``Element`` sampled at the angles ``Phi``
    (degrees) and draws its magnitude on a polar axis.
    """

    def __init__(self, Element, Phi, Name=None):
        self.Name = Name
        self.Phi = Phi
        self.Element = Element

    def Plot(self, ax=None, ColorMap='C0'):
        """Draw the element and open the figure window."""
        self._Plot(ax=ax, ColorMap=ColorMap)
        plt.show()

    def _Plot(self, ax=None, ColorMap='C0'):
        """Render |Element| versus Phi on *ax*, creating a polar axis if needed."""
        angles = np.deg2rad(self.Phi)
        magnitude = np.abs(self.Element)
        if ax is None:
            _, ax = plt.subplots(nrows=1, ncols=1, figsize=(7, 4),
                                 subplot_kw={'projection': 'polar'})
        ax.set_title(self.Name)
        ax.plot(angles, magnitude, color='k')
        ax.fill_between(x=angles, y1=magnitude, y2=0, color=ColorMap, alpha=0.4)

    def __repr__(self):
        return f"""
        Object:
        Name: {self.Name}
        Structured data: Yes
        Method: <Plot>
        Shape: {self.Element.shape}"""
class ScalarAmplitude(dict):
def __init__(self, Field, Phi, Theta, Mesh=None, Name=None):
if Mesh is None:
Mesh = np.ones(Field.shape)
self.Field = Field
self.Name = Name
self.Mesh = Mesh
self.Phi = Phi
self.Theta = Theta
def Plot(self, Source=True, Axes=True, Figure=None, Origin=(0,0,0), ColorBar=True, label=''):
visual.set_viewer(Figure)
self._Plot(Source=Source, Axes=Axes, Figure=Figure, Origin=Origin, ColorBar=ColorBar, label=label)
mlab.show()
def _Plot(self, Source=True, Axes=True, Figure=None, Origin=(0,0,0), ColorBar=True, label=''):
if Figure is None:
self.Figure = mlab.figure(figure=self.Name, size=(600,300), bgcolor=(1,1,1), fgcolor=(0.,0.,0.))
else:
self.Figure = Figure
OriginReal = [ Origin[0], Origin[1] + 3, Origin[2] ]
OriginImag = [ Origin[0], Origin[1] - 3, Origin[2] ]
Phi, Theta = np.meshgrid(self.Phi, self.Theta)
Coord = Sp2Cart(self.Mesh, Phi, Theta)
self.Figure, Real = StructuredMesh(*Coord, Source, Axes, OriginReal, self.Figure, Scalar=self.Field.real)
self.Figure, Imag = StructuredMesh(*Coord, Source, Axes, OriginImag, self.Figure, Scalar=self.Field.imag )
mlab.text3d(x = OriginReal[0], y = OriginReal[1], z = OriginReal[2]+5, text = 'Real', line_width = 0.1, figure = Figure, scale = 0.25, color = (0,0,0))
mlab.text3d(x = OriginImag[0], y = OriginImag[1], z = OriginImag[2]+5, text = 'Imag', line_width = 0.1, figure = Figure, scale = 0.25, color = (0,0,0))
if ColorBar:
Max = np.abs(self.Field).max()
lut_manager = mlab.colorbar(object = Imag, label_fmt = "%.0e", nb_labels = | |
'''VOC and COCO Dataloader for Object Detection'''
import glob
import json
import torch
import numpy as np
from sys import stderr
from PIL import Image
from PIL import ImageDraw
from contextlib import contextmanager
from xml.etree import ElementTree as ET
from torch.utils.data import Dataset, DataLoader
try:
from .util import prep_image, xyxy2xywh, draw_boxes
except ImportError:
from util import prep_image, xyxy2xywh, draw_boxes
class VOC(Dataset):
    r"""Dataset Class for PASCAL VOC Dataset which is used for darknet training

    Yields image tensors (letterboxed to ``resolution``) together with their
    'person' bounding boxes in YOLO layout.

    Attributes:
        xml_path_list (list): paths of the ground-truth XML annotation files
        data (dict): maps each XML path to its corresponding image path
        resolution (int): Input image dimensions of the darknet
        split (bool): whether the dataset is splitted to train and valid sets
    """
    def __init__(self, xml_directory, img_directory, resolution=416,
                 fformat='.jpg') -> None:
        r"""
        Constructor of the VOCDataset Class

        Parameters:
            xml_directory (str): Directory of the ground truth folder
            img_directory (str): Directory of the images in the dataset
            resolution (int): Input image dimensions of the darknet
            fformat (str): format of the image files (default='.jpg')

        Raises:
            FileNotFoundError: when no XML file exists under ``xml_directory``
        """
        assert isinstance(fformat, str)
        assert isinstance(resolution, (int))
        self.xml_path_list = glob.glob(xml_directory+'/*'+'.xml')
        self.resolution = resolution
        self.split = False
        if self.xml_path_list == []:
            raise FileNotFoundError("""FileNotFoundError: For the given
                directory {} and file format {}, no file was
                found.""".format(xml_directory, fformat))
        self.data = dict()
        # NOTE(review): element[-15:-4] assumes VOC-style file names with an
        # 11-character stem (e.g. '2008_000001'); verify for other datasets.
        for element in self.xml_path_list:
            value = img_directory + '/' + element[-15:-4] + fformat
            self.data[element] = value
        print('{} number of given data is loaded and ready!\n'
              .format(len(self.xml_path_list)))

    def __len__(self) -> int:
        r"""The function to learn length of the adjusted dataset

        Returns:
            Integer: Length of the dataset
        """
        return len(self.data)

    def read_xml(self, filename) -> dict:
        r"""The function to read xml file and extract ground truth
        information for PASCAL VOC Dataset

        Parameters:
            filename (str): destination of the xml file

        Returns:
            List: Bounding boxes of 'person' objects, or None when the file
            contains no person annotation
        """
        doc = ET.parse(filename).getroot()
        bboxes = []
        self.fetch_boxes_from_xml(bboxes, doc)
        if bboxes == []:
            return None
        else:
            return bboxes

    @staticmethod
    def fetch_boxes_from_xml(bboxes, doc):
        """Append [xmin, ymin, xmax, ymax] of every 'person' object in *doc*
        to the *bboxes* list (mutated in place)."""
        for elem in doc.findall('object'):
            # because we want only person detections
            if elem.find('name').text == 'person':
                bboxes.append([float(elem.find('bndbox/xmin').text),
                               float(elem.find('bndbox/ymin').text),
                               float(elem.find('bndbox/xmax').text),
                               float(elem.find('bndbox/ymax').text)])

    def __getitem__(self, i):
        r"""The function to get an item from the dataset

        Parameters:
            i (int): index integer to get file from list

        Returns:
            torch.tensor: letterboxed image tensor, and the adjusted bounding
            boxes (None when the image has no person annotation)
        """
        assert isinstance(i, int)
        assert i < len(self.xml_path_list)
        bbox, img = self.load_image(i)
        pad, ratio = self.configure_image(img)
        if bbox is not None:
            bbox = self.configure_boun_box(bbox, pad, ratio)
        img = np.asarray(img)
        img = prep_image(img, self.resolution, mode='RGB').squeeze(0)
        return img, bbox

    def configure_image(self, img):
        """Compute the letterbox padding (x, y) and the resize ratio that map
        the original image onto the square network resolution."""
        max_im_size = max(img.size)
        w, h = img.size
        ratio = float(self.resolution / max_im_size)
        pad = [int((max_im_size - w) * ratio / 2), int((max_im_size - h) * ratio / 2)]
        return pad, ratio

    def load_image(self, i):
        """Load ground-truth boxes from the i-th XML file and open its image."""
        bbox = self.read_xml(self.xml_path_list[i])
        img_path = self.data[self.xml_path_list[i]]
        img = Image.open(img_path)
        return bbox, img

    @staticmethod
    def configure_boun_box(bbox, pad, ratio):
        """Convert raw xyxy boxes to padded/scaled xywh YOLO targets.

        Each box becomes 85 values: 4 coords + objectness + 80-class one-hot
        ([1, 1] sets objectness and the 'person' class at index 0; 79 zeros
        cover the remaining classes).
        """
        for b in bbox:
            b.extend([1, 1])
            b.extend([0] * 79)
        bbox = torch.tensor(bbox)
        bbox = xyxy2xywh(bbox)
        # Scale coordinates to network resolution, then shift by letterbox pad
        bbox[..., :4] *= ratio
        bbox[:, 0] += pad[0]
        bbox[:, 1] += pad[1]
        return bbox

    @staticmethod
    def collate_fn(batch):
        """
        Collate function for the dataloader of the dataset

        Parameters:
            batch (list): data samples of the current batch

        Returns:
            img (torch.Tensor): image samples of the current batch
            bndbox (list): list of bounding box tensors for every image
        """
        img, bndbox = zip(*batch)
        img = torch.stack(img, dim=0)
        return img, bndbox

    def get_dataloader(self, batch_size, shuffle=True,
                       num_workers=4) -> DataLoader:
        r"""The function to create a dataloader for the dataset class

        Parameters:
            batch_size (int): Batch size of the training set
            shuffle (bool): Whether you want shuffling or not
            num_workers (int): Number of subprocesses to use for data loading.

        Returns:
            DataLoader: torch DataLoader object for this dataset
        """
        return DataLoader(self, batch_size=batch_size,
                          shuffle=shuffle,
                          collate_fn=self.collate_fn,
                          num_workers=num_workers)
class COCO(Dataset):
    """COCO Dataset DataLoader for Object Detection

    Attributes:
        img_ids (list): list of image ids of the COCO dataset
        img_annotations (list): annotation records loaded from the JSON file
        images (dict): information about images and their URLs
        resolution (int): resolution of the training
        img_dir (str): path of the folder containing the COCO images
        deleted_cls (list): COCO category ids that are absent from the
            contiguous 80-class YOLO label set
        keep_img_name (bool): flag to return image names for each sample
        only_gt (bool): flag to return ground truth of the images without image data
    """
    def __init__(self, anotations_json, img_dir,
                 resolution=416, keep_img_name=False,
                 only_ground_truth=False):
        '''Constructor of COCO Class

        Parameters:
            anotations_json (str): path of the COCO annotation JSON file
            img_dir (str): directory that contains the image files
            resolution (int): network input resolution
            keep_img_name (bool): also return each sample's file name
            only_ground_truth (bool): return only ground-truth boxes
        '''
        super(COCO, self).__init__()
        self.resolution = resolution
        # Normalize the directory so file names can simply be appended
        self.img_dir = img_dir
        if self.img_dir[-1] != '/':
            self.img_dir += '/'
        self.read_annotations(anotations_json)
        # COCO category ids with no corresponding class in the 80-class set
        self.deleted_cls = [12, 26, 29, 30, 45, 66, 68, 69, 71, 83, 91]
        self.keep_img_name = keep_img_name
        self.only_gt = only_ground_truth

    def read_annotations(self, anotations_json, non_crowd=True):
        """The method to read annotation files of the COCO dataset
        and store them in the dictionary and list objects

        Parameters:
            anotations_json (str): annotation file directory
            non_crowd (bool): flag to choose only non_crowd images
        """
        # Close the file handle deterministically (json.load(open(...)) leaked it)
        with open(anotations_json) as ann_file:
            ann = json.load(ann_file)
        if non_crowd:
            img_ids = [i['image_id'] for i in ann['annotations']
                       if not i['iscrowd']]
        else:
            img_ids = [i['image_id'] for i in ann['annotations']]
        # Deduplicate: several annotations can reference the same image
        self.img_ids = list(set(img_ids))
        self.img_annotations = ann['annotations']
        self.images = {i['id']: i for i in ann['images']}

    def coco2yolo(self, category_id):
        """Map a COCO category id onto the contiguous 80-class YOLO index.

        Parameters:
            category_id (int): category_id label for the corresponding bounding box

        Returns:
            int: the category id shifted down by the number of deleted
            categories below it

        Raises:
            ValueError: if the shifted id would be negative (corrupt input)
        """
        ex = 0
        # deleted_cls is sorted; count how many deleted ids lie below category_id
        for cutoff in self.deleted_cls:
            if category_id < cutoff:
                return category_id - ex
            ex += 1
        if category_id - ex < 0:
            # Previously printed to stderr and called exit(); raising keeps the
            # error handleable (and testable) by the caller.
            raise ValueError('invalid COCO category_id: {}'.format(category_id))
        return category_id - ex

    def __len__(self):
        r"""The function to learn length of the adjusted dataset

        Returns:
            Integer: Length of the dataset
        """
        return len(self.img_ids)

    def __getitem__(self, index):
        r"""The function to get an item from the dataset

        Parameters:
            index (int): index integer to get file from list

        Returns:
            Depending on ``keep_img_name`` / ``only_gt``: the image tensor
            and/or its bounding boxes (and optionally the file name).
        """
        id_, img = self.fetch_image(index)
        if self.keep_img_name:
            img_name = self.images[id_]['file_name']
        pad, ratio = self.configure_padding(img)
        if not self.only_gt:
            img = np.asarray(img)
            img = prep_image(img, self.resolution, mode='RGB').squeeze(0)
        bbox = self.fetch_bounding_boxes(id_, pad, ratio)
        # draw_boxes(img, bbox, 'coco_val_with_box/'+img_name)
        if bbox:
            bbox = torch.stack(bbox, dim=0)
        if not self.keep_img_name:
            if not self.only_gt:
                return img, bbox
            else:
                return bbox
        else:
            if not self.only_gt:
                return img_name, img, bbox
            else:
                return img_name, bbox

    def fetch_bounding_boxes(self, id_, pad, ratio):
        """Collect all boxes of image *id_* as 85-value YOLO targets
        (4 coords + objectness + 80-class one-hot), scaled and padded."""
        bbox = []
        for annot in self.img_annotations:
            if annot['image_id'] == id_:
                # Slot 0 is the objectness score; slots 1..80 the class one-hot
                cls_encoding = [1.0]
                cls_encoding.extend([0] * 80)
                cls_encoding[self.coco2yolo(annot['category_id'])] = 1.0
                box = annot['bbox'][:5]
                box.extend(cls_encoding)
                box = torch.FloatTensor(box)
                # COCO boxes are [x, y, w, h] with a top-left corner; scale to
                # network resolution and convert the corner to the center
                box[:4] *= ratio
                box[0] += box[2] / 2 + pad[0]
                box[1] += box[3] / 2 + pad[1]
                bbox.append(box)
        return bbox

    def configure_padding(self, img):
        """Compute the letterbox padding (x, y) and resize ratio mapping the
        original image onto the square network resolution."""
        w, h = img.size
        max_im_size = max(w, h)
        ratio = float(self.resolution / max_im_size)
        # calculating paddings for bboxes
        pad = [int((max_im_size - w) * ratio / 2), int((max_im_size - h) * ratio / 2)]
        return pad, ratio

    def fetch_image(self, index):
        """Open the index-th image (as RGB) and return its id and PIL image."""
        id_ = self.img_ids[index]
        img = self.img_dir + self.images[id_]['file_name']
        img = Image.open(img).convert('RGB')
        return id_, img

    def collate_fn(self, batch):
        """
        Collate function for the dataloader of the dataset

        Parameters:
            batch (list): data samples of the current batch

        Returns:
            img (torch.Tensor): image samples of the current batch
            bndbox (list): list of bounding box tensors for every image
        """
        if not self.only_gt:
            if not self.keep_img_name:
                img, bbox = zip(*batch)
                img = torch.stack(img, dim=0)
                return img, bbox
            else:
                img_name, img, bbox = zip(*batch)
                img = torch.stack(img, dim=0)
                return img_name, img, bbox
        else:
            if not self.keep_img_name:
                # NOTE(review): batch is a list of per-image box tensors here;
                # zip(*batch) iterates over tensor rows, which looks wrong —
                # 'return batch' may be intended. Kept as-is to preserve behavior.
                bbox = zip(*batch)
                return bbox
            else:
                img_name, bbox = zip(*batch)
                return img_name, bbox

    @contextmanager
    def only_ground_truth(self):
        """Activates the only ground truth mode for the COCO dataset in which
        dataloader only load the ground truth of the corresponding images
        """
        try:
            self.only_gt = True
            yield
        finally:
            self.only_gt = False

    def get_dataloader(self, batch_size, shuffle=True, num_workers=4):
        r"""The function to create a dataloader for the dataset class

        Parameters:
            batch_size (int): Batch size of the training set
            shuffle (bool): Whether you want shuffling or not
            num_workers (int): Number of subprocesses to use for data loading.

        Returns:
            DataLoader: torch DataLoader object for this dataset
        """
        dloader = DataLoader(self, batch_size=batch_size,
                             collate_fn=self.collate_fn,
                             shuffle=shuffle,
                             num_workers=num_workers)
        return dloader
if __name__ == '__main__':
# dataset testing | |
import os
import copy
from uuid import uuid4
import shutil
import random
import logging
from foresite import utils, Aggregation, AggregatedResource, RdfLibSerializer
from rdflib import Namespace, URIRef
from django.db import models
from django.core.files.uploadedfile import UploadedFile
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.forms.models import model_to_dict
from django.contrib.postgres.fields import HStoreField, ArrayField
from mezzanine.conf import settings
from dominate.tags import div, legend, table, tr, tbody, thead, td, th, \
span, a, form, button, label, textarea, h4, input, ul, li, p
from lxml import etree
from hs_core.hydroshare.utils import current_site_url, get_resource_file_by_id, \
set_dirty_bag_flag, add_file_to_resource, resource_modified, get_file_from_irods
from hs_core.models import ResourceFile, AbstractMetaDataElement, Coverage, CoreMetaData
from hs_core.hydroshare.resource import delete_resource_file
from hs_core.signals import post_remove_file_aggregation
RESMAP_FILE_ENDSWITH = "_resmap.xml"
METADATA_FILE_ENDSWITH = "_meta.xml"
class AbstractFileMetaData(models.Model):
""" base class for HydroShare file type metadata """
# one temporal coverage and one spatial coverage
coverages = GenericRelation(Coverage)
# key/value metadata
extra_metadata = HStoreField(default={})
# keywords
keywords = ArrayField(models.CharField(max_length=100, null=True, blank=True), default=[])
# to track if any metadata element has been modified to trigger file update
is_dirty = models.BooleanField(default=False)
class Meta:
abstract = True
    @classmethod
    def get_metadata_model_classes(cls):
        """Map supported metadata element names to their model classes."""
        return {'coverage': Coverage}
    def get_metadata_elements(self):
        """returns a list of all metadata elements (instances of AbstractMetaDataElement)
        associated with this file type metadata object.
        """
        # Currently only coverage elements exist at this base-class level
        return list(self.coverages.all())
def dict(self):
    """Return a single dict merging the ``dict`` representations of all
    metadata elements owned by this metadata object.

    :return: dict of element data keyed per each element's ``dict`` attribute

    Note: the method name mirrors the element-level ``dict`` attribute; the
    accumulator below is deliberately NOT named ``dict`` so the builtin is
    not shadowed (the original shadowed it).
    """
    merged = {}
    for element in self.get_metadata_elements():
        merged.update(element.dict)
    return merged
def delete_all_elements(self):
    """Delete every coverage element and clear keywords and extra metadata."""
    self.coverages.all().delete()
    self.keywords = []
    self.extra_metadata = {}
    self.save()
def get_html(self, include_extra_metadata=True, **kwargs):
    """Generates html for displaying all metadata elements associated with
    this logical file. Subclass must override to include additional html for
    additional metadata it supports.

    :param include_extra_metadata: a flag to control if necessary html for
    displaying key/value metadata will be included
    """
    container = div()
    if self.logical_file.dataset_name:
        container.add(self.get_dataset_name_html())
    if self.keywords:
        container.add(self.get_keywords_html())
    if include_extra_metadata and self.extra_metadata:
        container.add(self.get_key_value_metadata_html())
    return container.render()
def get_dataset_name_html(self):
    """Build a div for viewing the dataset name (title); returns None when
    no dataset name is set."""
    if not self.logical_file.dataset_name:
        return
    title_div = div(cls="content-block")
    with title_div:
        legend("Title")
        p(self.logical_file.dataset_name)
    return title_div
def get_keywords_html(self):
    """Build a div for viewing keywords; each keyword links to a faceted
    search for that subject. Returns an empty div when there are no keywords."""
    keywords_div = div(cls='content-block')
    if not self.keywords:
        return keywords_div
    with keywords_div:
        legend('Keywords')
        with div(cls="tags"):
            with ul(id="list-keywords-file-type", cls="tag-list custom-well"):
                for keyword in self.keywords:
                    with li():
                        a(keyword, cls="tag",
                          href="/search/?q=&selected_facets=subject_exact:" + keyword)
    return keywords_div
def get_key_value_metadata_html(self):
    """Build a table div for viewing key/value (extended) metadata.

    :return: a dominate div; empty when there is no extra metadata
    """
    extra_metadata_div = div()
    if self.extra_metadata:
        extra_metadata_div = div(cls="content-block")
        with extra_metadata_div:
            legend('Extended Metadata')
            with table(cls="hs-table table dataTable no-footer", style="width: 100%"):
                with thead():
                    with tr(cls="header-row"):
                        th("Key")
                        th("Value")
                with tbody():
                    # items() (not the Python-2-only iteritems()) keeps this
                    # working under both Python 2 and Python 3
                    for k, v in self.extra_metadata.items():
                        with tr(data_key=k):
                            td(k)
                            td(v)
    return extra_metadata_div
def get_html_forms(self, dataset_name_form=True, temporal_coverage=True, **kwargs):
    """generates html forms for all the metadata elements associated with this logical file
    type
    :param dataset_name_form: If True then a form for editing dataset_name (title) attribute is
    included
    :param temporal_coverage: if True then form elements for editing temporal coverage are
    included
    """
    root_div = div()
    # dominate context manager: tags created inside are appended to root_div
    with root_div:
        if dataset_name_form:
            self.get_dataset_name_form()
        self.get_keywords_html_form()
        self.get_extra_metadata_html_form()
        if temporal_coverage:
            # for aggregation that contains other aggregations with temporal data,
            # show option to update temporal coverage from contained aggregations
            if self.logical_file.has_children_temporal_data:
                with self.get_temporal_coverage_html_form():
                    with div():
                        # presumably wired up client-side via this id — confirm
                        button("Set temporal coverage from folder contents",
                               type="button",
                               cls="btn btn-primary",
                               id="btn-update-aggregation-temporal-coverage")
            else:
                self.get_temporal_coverage_html_form()
    return root_div
def get_keywords_html_form(self):
    """Build the editable keywords form (add/delete) for this logical file."""
    keywords_div = div(cls="content-block", id="filetype-keywords")
    # internal API endpoints for adding/deleting keyword metadata, keyed by
    # the concrete logical file class and instance id
    action = "/hsapi/_internal/{0}/{1}/add-file-keyword-metadata/"
    action = action.format(self.logical_file.__class__.__name__, self.logical_file.id)
    delete_action = "/hsapi/_internal/{0}/{1}/delete-file-keyword-metadata/"
    delete_action = delete_action.format(self.logical_file.__class__.__name__,
                                         self.logical_file.id)
    with keywords_div:
        legend("Keywords")
        with form(id="id-keywords-filetype", action=action, method="post",
                  enctype="multipart/form-data"):
            # hidden field exposing the delete endpoint (presumably read by
            # client-side js — confirm)
            input(id="id-delete-keyword-filetype-action", type="hidden",
                  value=delete_action)
            with div(cls="tags"):
                with div(id="add-keyword-wrapper", cls="input-group"):
                    input(id="txt-keyword-filetype", cls="form-control",
                          placeholder="keyword",
                          type="text", name="keywords")
                    with span(cls="input-group-btn"):
                        a("Add", id="btn-add-keyword-filetype", cls="btn btn-success",
                          type="button")
                with ul(id="lst-tags-filetype", cls="custom-well tag-list"):
                    # one removable tag per existing keyword
                    for kw in self.keywords:
                        with li(cls="tag"):
                            span(kw)
                            with a():
                                span(cls="glyphicon glyphicon-remove-circle icon-remove")
            # duplicate-keyword message; starts hidden
            p("Duplicate. Keywords not added.", id="id-keywords-filetype-msg",
              cls="text-danger small", style="display: none;")
def get_spatial_coverage_form(self, allow_edit=False):
    """Return the html form for this aggregation's spatial coverage element."""
    element = self.spatial_coverage
    return Coverage.get_spatial_html_form(resource=None, element=element,
                                          allow_edit=allow_edit, file_type=True)
def get_temporal_coverage_form(self, allow_edit=True):
    """Return the html form for this aggregation's temporal coverage element."""
    element = self.temporal_coverage
    return Coverage.get_temporal_html_form(resource=None, element=element,
                                           file_type=True, allow_edit=allow_edit)
def get_extra_metadata_html_form(self):
    """Build the edit view (table plus add/edit/delete modals) for key/value
    extended metadata."""
    def get_add_keyvalue_button():
        # button that opens the 'add key/value' modal dialog
        add_key_value_btn = a(cls="btn btn-success", type="button", data_toggle="modal",
                              data_target="#add-keyvalue-filetype-modal",
                              style="margin-bottom:20px;")
        with add_key_value_btn:
            with span(cls="glyphicon glyphicon-plus"):
                span("Add Key/Value", cls="button-label")
        return add_key_value_btn

    if self.extra_metadata:
        root_div_extra = div(id="filetype-extra-metadata")
        with root_div_extra:
            legend('Extended Metadata')
            get_add_keyvalue_button()
            with table(cls="hs-table table dataTable no-footer",
                       style="width: 100%"):
                with thead():
                    with tr(cls="header-row"):
                        th("Key")
                        th("Value")
                        th("Edit/Remove")
                with tbody():
                    # items() (not the Python-2-only iteritems()) keeps this
                    # working on Python 2 and 3; enumerate(..., start=1)
                    # replaces the manual counter. The 1-based counter ties
                    # each row to its edit/delete modal ids.
                    for counter, (k, v) in enumerate(self.extra_metadata.items(), start=1):
                        with tr(data_key=k):
                            td(k)
                            td(v)
                            with td():
                                span(data_toggle="modal", data_placement="auto", title="Edit",
                                     cls="btn-edit-icon glyphicon glyphicon-pencil "
                                         "icon-blue table-icon",
                                     data_target="#edit-keyvalue-filetype-modal"
                                                 "-{}".format(counter))
                                span(data_toggle="modal", data_placement="auto",
                                     title="Remove",
                                     cls="btn-remove-icon glyphicon glyphicon-trash "
                                         "btn-remove table-icon",
                                     data_target="#delete-keyvalue-filetype-modal"
                                                 "-{}".format(counter))
            self._get_add_key_value_modal_form()
            self._get_edit_key_value_modal_forms()
            self._get_delete_key_value_modal_forms()
        return root_div_extra
    else:
        root_div_extra = div(id="filetype-extra-metadata", cls="content-block")
        with root_div_extra:
            legend('Extended Metadata')
            get_add_keyvalue_button()
            self._get_add_key_value_modal_form()
        return root_div_extra
def get_temporal_coverage_html_form(self):
    """Build the container form for editing temporal coverage.

    Note: When using this form layout the context variable 'temp_form' must be
    set prior to calling the template.render(context). The '{{ ... }}' and
    '{% crispy ... %}' strings below are rendered later by the Django template
    engine, not by this method.
    """
    root_div = div(id="temporal-coverage-filetype", cls='content-block')
    with root_div:
        with form(id="id-coverage-temporal-file-type", action="{{ temp_form.action }}",
                  method="post", enctype="multipart/form-data"):
            div("{% crispy temp_form %}")
            with div(cls="row", style="margin-top:10px;"):
                with div(cls="col-md-offset-10 col-xs-offset-6 "
                         "col-md-2 col-xs-6"):
                    # starts hidden; presumably shown client-side once the
                    # form is edited — confirm
                    button("Save changes", type="button",
                           cls="btn btn-primary pull-right",
                           style="display: none;")
    return root_div
def has_all_required_elements(self):
    """Return True when all required metadata elements exist; the base file
    type requires none (see get_required_missing_elements), so always True."""
    return True
@classmethod
def get_supported_element_names(cls):
    """Return the names of metadata element types this class supports."""
    supported = ['Coverage']
    return supported
def get_required_missing_elements(self):
    """Return names of required-but-missing elements; none for the base type."""
    missing = []
    return missing
@property
def has_metadata(self):
    """True when at least one coverage element, any extra metadata, or a
    dataset name exists for this metadata object."""
    return bool(self.coverages.all() or self.extra_metadata or
                self.logical_file.dataset_name)
@property
def spatial_coverage(self):
    """The first coverage element whose type is not 'period', or None."""
    non_period_coverages = self.coverages.exclude(type='period')
    return non_period_coverages.first()
@property
def temporal_coverage(self):
    """The first coverage element of type 'period', or None."""
    period_coverages = self.coverages.filter(type='period')
    return period_coverages.first()
def get_xml(self, pretty_print=True):
    """Generates ORE+RDF xml for this aggregation metadata.

    :param pretty_print: whether the serialized xml is indented
    :return: xml string (XML declaration header plus serialized rdf:RDF tree)
    """
    RDF_ROOT = etree.Element('{%s}RDF' % CoreMetaData.NAMESPACES['rdf'],
                             nsmap=CoreMetaData.NAMESPACES)
    # create the Description element
    rdf_Description = etree.SubElement(RDF_ROOT, '{%s}Description' %
                                       CoreMetaData.NAMESPACES['rdf'])
    resource = self.logical_file.resource
    # the subject of this description is the '#aggregation' fragment of the
    # aggregation's resource map file
    aggregation_map_file_path = '{}#aggregation'.format(self.logical_file.map_file_path)
    aggregation_map_uri = current_site_url() + "/resource/{}".format(aggregation_map_file_path)
    rdf_Description.set('{%s}about' % CoreMetaData.NAMESPACES['rdf'], aggregation_map_uri)
    # add aggregation title
    if self.logical_file.dataset_name:
        dc_datatitle = etree.SubElement(rdf_Description, '{%s}title' %
                                        CoreMetaData.NAMESPACES['dc'])
        dc_datatitle.text = self.logical_file.dataset_name
    # add aggregation type
    aggregation_term_uri = current_site_url() + "/terms/{}"
    aggregation_term_uri = aggregation_term_uri.format(
        self.logical_file.get_aggregation_type_name())
    dc_type = etree.SubElement(rdf_Description, '{%s}type' % CoreMetaData.NAMESPACES['dc'])
    dc_type.set('{%s}resource' % CoreMetaData.NAMESPACES['rdf'], aggregation_term_uri)
    # add lang element
    dc_lang = etree.SubElement(rdf_Description, '{%s}language' % CoreMetaData.NAMESPACES['dc'])
    dc_lang.text = resource.metadata.language.code
    # add rights element (statement plus optional URL)
    dc_rights = etree.SubElement(rdf_Description, '{%s}rights' % CoreMetaData.NAMESPACES['dc'])
    dc_rights_rdf_Description = etree.SubElement(dc_rights,
                                                 '{%s}Description' %
                                                 CoreMetaData.NAMESPACES['rdf'])
    hsterms_statement = etree.SubElement(dc_rights_rdf_Description,
                                         '{%s}rightsStatement' %
                                         CoreMetaData.NAMESPACES['hsterms'])
    hsterms_statement.text = resource.metadata.rights.statement
    if resource.metadata.rights.url:
        hsterms_url = etree.SubElement(dc_rights_rdf_Description,
                                       '{%s}URL' % CoreMetaData.NAMESPACES['hsterms'])
        hsterms_url.set('{%s}resource' % CoreMetaData.NAMESPACES['rdf'],
                        resource.metadata.rights.url)
    # add keywords
    for kw in self.keywords:
        dc_subject = etree.SubElement(rdf_Description, '{%s}subject' %
                                      CoreMetaData.NAMESPACES['dc'])
        dc_subject.text = kw
    # add any key/value metadata items
    # items() (not the Python-2-only iteritems()) works on Python 2 and 3
    for key, value in self.extra_metadata.items():
        hsterms_key_value = etree.SubElement(
            rdf_Description, '{%s}extendedMetadata' % CoreMetaData.NAMESPACES['hsterms'])
        hsterms_key_value_rdf_Description = etree.SubElement(
            hsterms_key_value, '{%s}Description' % CoreMetaData.NAMESPACES['rdf'])
        hsterms_key = etree.SubElement(hsterms_key_value_rdf_Description,
                                       '{%s}key' % CoreMetaData.NAMESPACES['hsterms'])
        hsterms_key.text = key
        hsterms_value = etree.SubElement(hsterms_key_value_rdf_Description,
                                         '{%s}value' % CoreMetaData.NAMESPACES['hsterms'])
        hsterms_value.text = value
    # add coverages
    for coverage in self.coverages.all():
        coverage.add_to_xml_container(rdf_Description)
    # create the Description element for aggregation type (label + isDefinedBy)
    rdf_Description_aggr_type = etree.SubElement(RDF_ROOT, '{%s}Description' %
                                                 CoreMetaData.NAMESPACES['rdf'])
    rdf_Description_aggr_type.set('{%s}about' % CoreMetaData.NAMESPACES['rdf'],
                                  aggregation_term_uri)
    rdfs_label = etree.SubElement(rdf_Description_aggr_type, '{%s}label' %
                                  CoreMetaData.NAMESPACES['rdfs1'])
    rdfs_label.text = self.logical_file.get_aggregation_display_name()
    rdfs_isDefinedBy = etree.SubElement(rdf_Description_aggr_type, '{%s}isDefinedBy' %
                                        CoreMetaData.NAMESPACES['rdfs1'])
    rdfs_isDefinedBy.text = current_site_url() + "/terms"
    return CoreMetaData.XML_HEADER + '\n' + etree.tostring(RDF_ROOT, encoding='UTF-8',
                                                           pretty_print=pretty_print)
def _get_xml_containers(self):
    """Helper for the subclasses to get the xml containers element to which the sub classes
    can then add any additional elements for metadata xml generation.

    :return: 2-tuple of (RDF root element, the rdf:Description container in it)

    NOTE(review): ``super(type(self), self)`` resolves relative to the runtime
    type, not the defining class; with more than one level of subclassing this
    can skip classes or recurse. ``super(<DefiningSubclass>, self)`` is the
    usual safe form — confirm the intended inheritance depth before changing.
    """
    xml_string = super(type(self), self).get_xml(pretty_print=False)
    RDF_ROOT = etree.fromstring(xml_string)
    # get root 'Description' element that contains all other elements
    container_to_add_to = RDF_ROOT.find('rdf:Description', namespaces=CoreMetaData.NAMESPACES)
    return RDF_ROOT, container_to_add_to
def create_element(self, element_model_name, **kwargs):
    """Create a metadata element for this metadata object and keep resource
    and parent-aggregation coverage in sync.

    :param element_model_name: name of the element model (e.g. 'coverage')
    :param kwargs: attributes for the new element
    :return: the created element instance
    """
    model_type = self._get_metadata_element_model_type(element_model_name)
    kwargs['content_object'] = self
    element = model_type.model_class().create(**kwargs)
    if element_model_name.lower() == "coverage":
        aggr = element.metadata.logical_file
        # The aggregation has no resource files while a resource that supports
        # logical file types is being copied; in that case neither the
        # resource-level nor the parent-aggregation coverage needs updating.
        # (The original evaluated this identical count() query twice.)
        if aggr.files.all().count() > 0:
            aggr.resource.update_coverage()
            # propagate the coverage change to a parent aggregation, if any
            parent_aggr = aggr.get_parent()
            if parent_aggr is not None:
                parent_aggr.update_coverage()
    return element
def update_element(self, element_model_name, element_id, **kwargs):
    """Update an existing metadata element and mark this metadata as dirty.

    :param element_model_name: name of the element model (e.g. 'coverage')
    :param element_id: id of the element instance to update
    :param kwargs: attributes to set on the element
    """
    model_type = self._get_metadata_element_model_type(element_model_name)
    kwargs['content_object'] = self
    model_type.model_class().update(element_id, **kwargs)
    # flag the change so dependent files get regenerated
    self.is_dirty = True
    self.save()
    if element_model_name.lower() == "coverage":
        element = model_type.model_class().objects.get(id=element_id)
        resource = element.metadata.logical_file.resource
        resource.update_coverage()
        # if the aggregation (logical file) for which coverage data is updated
        # has a parent aggregation then coverage needs to be updated for that
        # parent aggregation
        aggr = element.metadata.logical_file
        parent_aggr = aggr.get_parent()
        if parent_aggr is not None:
            parent_aggr.update_coverage()
def delete_element(self, element_model_name, element_id):
    """Delete the metadata element identified by *element_id* and mark this
    metadata object as dirty."""
    element_model = self._get_metadata_element_model_type(element_model_name)
    element_model.model_class().remove(element_id)
    self.is_dirty = True
    self.save()
def _get_metadata_element_model_type(self, element_model_name):
element_model_name = element_model_name.lower()
if not self._is_valid_element(element_model_name):
raise ValidationError("Metadata element type:%s is | |
# Source repository: jfcoz/azure-cli
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import re
from six.moves.urllib.parse import quote # pylint: disable=import-error
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.tools import parse_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.mgmt.eventgrid.models import (
EventSubscription,
EventSubscriptionUpdateParameters,
WebHookEventSubscriptionDestination,
RetryPolicy,
EventHubEventSubscriptionDestination,
StorageQueueEventSubscriptionDestination,
HybridConnectionEventSubscriptionDestination,
StorageBlobDeadLetterDestination,
EventSubscriptionFilter)
logger = get_logger(__name__)
# Resource-provider namespaces and resource-type names used when building
# event-subscription scopes
EVENTGRID_NAMESPACE = "Microsoft.EventGrid"
RESOURCES_NAMESPACE = "Microsoft.Resources"
SUBSCRIPTIONS = "subscriptions"
RESOURCE_GROUPS = "resourcegroups"
EVENTGRID_TOPICS = "topics"
# Supported --endpoint-type values for event subscription destinations
WEBHOOK_DESTINATION = "webhook"
EVENTHUB_DESTINATION = "eventhub"
STORAGEQUEUE_DESTINATION = "storagequeue"
HYBRIDCONNECTION_DESTINATION = "hybridconnection"
# Location value used for location-independent (global) topic types
GLOBAL = "global"
def cli_topic_list(
        client,
        resource_group_name=None):
    """List EventGrid topics, scoped to a resource group when one is given.

    :param client: topics operations client
    :param resource_group_name: optional resource group to scope the listing
    """
    if not resource_group_name:
        return client.list_by_subscription()
    return client.list_by_resource_group(resource_group_name)
def cli_topic_create_or_update(
        client,
        resource_group_name,
        topic_name,
        location,
        tags=None):
    """Create or update an EventGrid topic and block until the long-running
    operation completes.

    :param client: topics operations client
    :param resource_group_name: resource group for the topic
    :param topic_name: name of the topic
    :param location: Azure location of the topic
    :param tags: optional resource tags
    :return: the created/updated topic
    """
    poller = client.create_or_update(
        resource_group_name,
        topic_name,
        location,
        tags)
    return poller.result()
def cli_eventgrid_event_subscription_create(   # pylint: disable=too-many-locals
        cmd,
        client,
        event_subscription_name,
        endpoint,
        resource_id=None,
        source_resource_id=None,
        resource_group_name=None,
        topic_name=None,
        endpoint_type=WEBHOOK_DESTINATION,
        included_event_types=None,
        subject_begins_with=None,
        subject_ends_with=None,
        is_subject_case_sensitive=False,
        max_delivery_attempts=30,
        event_ttl=1440,
        deadletter_endpoint=None,
        labels=None):
    """Create an event subscription at the scope resolved from the mutually
    exclusive resource/source-resource/topic identification arguments, and
    block until the operation completes.

    :param endpoint: destination endpoint (URL for webhook, resource id otherwise)
    :param endpoint_type: destination kind; defaults to webhook
    :param max_delivery_attempts: retry count, validated to 1..30
    :param event_ttl: event time-to-live in minutes, validated to 1..1440
    :param deadletter_endpoint: optional storage blob container for dead-lettering
    :return: the created event subscription
    """
    scope = _get_scope_for_event_subscription(
        cli_ctx=cmd.cli_ctx,
        source_resource_id=source_resource_id,
        resource_id=resource_id,
        topic_name=topic_name,
        resource_group_name=resource_group_name)

    # Construct RetryPolicy based on max_delivery_attempts and event_ttl
    max_delivery_attempts = int(max_delivery_attempts)
    event_ttl = int(event_ttl)
    _validate_retry_policy(max_delivery_attempts, event_ttl)
    retry_policy = RetryPolicy(max_delivery_attempts=max_delivery_attempts, event_time_to_live_in_minutes=event_ttl)

    destination = _get_endpoint_destination(endpoint_type, endpoint)
    event_subscription_filter = EventSubscriptionFilter(
        subject_begins_with=subject_begins_with,
        subject_ends_with=subject_ends_with,
        included_event_types=included_event_types,
        is_subject_case_sensitive=is_subject_case_sensitive)
    deadletter_destination = None
    if deadletter_endpoint is not None:
        deadletter_destination = _get_deadletter_destination(deadletter_endpoint)
    event_subscription_info = EventSubscription(
        destination=destination,
        filter=event_subscription_filter,
        labels=labels,
        retry_policy=retry_policy,
        dead_letter_destination=deadletter_destination)
    # warn the user when the webhook endpoint may need a manual validation handshake
    _warn_if_manual_handshake_needed(endpoint_type, endpoint)
    return client.create_or_update(
        scope,
        event_subscription_name,
        event_subscription_info).result()
def cli_eventgrid_event_subscription_delete(
        cmd,
        client,
        event_subscription_name,
        resource_id=None,
        source_resource_id=None,
        resource_group_name=None,
        topic_name=None):
    """Delete an event subscription at the scope resolved from the
    identification arguments and wait for the operation to finish."""
    scope = _get_scope_for_event_subscription(
        cli_ctx=cmd.cli_ctx,
        source_resource_id=source_resource_id,
        resource_id=resource_id,
        topic_name=topic_name,
        resource_group_name=resource_group_name)
    poller = client.delete(scope, event_subscription_name)
    return poller.result()
def event_subscription_setter(
        cmd,
        client,
        parameters,
        event_subscription_name,
        source_resource_id=None,
        resource_id=None,
        resource_group_name=None,
        topic_name=None):
    """Apply *parameters* to an existing event subscription and wait for the
    update to complete.

    :param parameters: EventSubscriptionUpdateParameters to apply
    :return: the updated event subscription
    """
    scope = _get_scope_for_event_subscription(
        cli_ctx=cmd.cli_ctx,
        source_resource_id=source_resource_id,
        resource_id=resource_id,
        topic_name=topic_name,
        resource_group_name=resource_group_name)
    poller = client.update(scope, event_subscription_name, parameters)
    return poller.result()
def cli_eventgrid_event_subscription_get(
        cmd,
        client,
        event_subscription_name,
        source_resource_id=None,
        resource_id=None,
        resource_group_name=None,
        topic_name=None,
        include_full_endpoint_url=False):
    """Fetch an event subscription, optionally resolving the full webhook
    endpoint URL via an additional API call.

    :param include_full_endpoint_url: when True and the destination is a
        webhook, replace the destination endpoint_url with the full URL
    """
    scope = _get_scope_for_event_subscription(
        cli_ctx=cmd.cli_ctx,
        source_resource_id=source_resource_id,
        resource_id=resource_id,
        topic_name=topic_name,
        resource_group_name=resource_group_name)
    event_subscription = client.get(scope, event_subscription_name)
    # full endpoint URLs are only available for webhook destinations
    if include_full_endpoint_url and \
            isinstance(event_subscription.destination, WebHookEventSubscriptionDestination):
        full_url = client.get_full_url(scope, event_subscription_name)
        event_subscription.destination.endpoint_url = full_url.endpoint_url
    return event_subscription
def cli_event_subscription_list(   # pylint: disable=too-many-return-statements
        client,
        resource_id=None,
        source_resource_id=None,
        topic_name=None,
        resource_group_name=None,
        location=None,
        topic_type_name=None):
    """List event subscriptions scoped by one of several mutually constrained
    argument combinations: a source resource id, a (deprecated) resource id or
    topic name, or a location with optional resource group and topic type.
    """
    if source_resource_id is not None:
        # If Source Resource ID is specified, we need to list event subscriptions for that particular resource.
        # Since a full resource ID is specified, it should override all other defaults such as default location and RG
        # No other parameters must be specified
        if (topic_type_name is not None or resource_id is not None):
            raise CLIError('usage error: Since --source-resource-id is specified, none of the other parameters must '
                           'be specified.')
        return _list_event_subscriptions_by_resource_id(client, source_resource_id)
    if resource_id is not None:
        # DEPRECATED
        # If resource ID is specified, we need to list event subscriptions for that particular resource.
        # Since a full resource ID is specified, it should override all other defaults such as default location and RG
        # No other parameters must be specified
        if topic_type_name is not None:
            raise CLIError('usage error: Since --resource-id is specified, none of the other parameters must '
                           'be specified.')
        return _list_event_subscriptions_by_resource_id(client, resource_id)
    if topic_name:
        # DEPRECATED
        if resource_group_name is None:
            raise CLIError('Since --topic-name is specified, --resource-group must also be specified.')
        return client.list_by_resource(
            resource_group_name,
            EVENTGRID_NAMESPACE,
            EVENTGRID_TOPICS,
            topic_name)
    if location is None:
        # Since resource-id was not specified, location must be specified: e.g. "westus2" or "global". If not error OUT.
        raise CLIError('usage error: --source-resource-id ID | --location LOCATION'
                       ' [--resource-group RG] [--topic-type-name TOPIC_TYPE_NAME]')
    if topic_type_name is None:
        # No topic-type is specified: return event subscriptions across all topic types for this location.
        if location.lower() == GLOBAL.lower():
            if resource_group_name:
                return client.list_global_by_resource_group(resource_group_name)
            return client.list_global_by_subscription()
        if resource_group_name:
            return client.list_regional_by_resource_group(resource_group_name, location)
        return client.list_regional_by_subscription(location)
    # Topic type name is specified
    if location.lower() == GLOBAL.lower():
        if not _is_topic_type_global_resource(topic_type_name):
            raise CLIError('Invalid usage: Global cannot be specified for the location '
                           'as the specified topic type is a regional topic type with '
                           'regional event subscriptions. Specify a location value such '
                           'as westus. Global can be used only for global topic types: '
                           'Microsoft.Resources.Subscriptions and Microsoft.Resources.ResourceGroups.')
        if resource_group_name:
            return client.list_global_by_resource_group_for_topic_type(resource_group_name, topic_type_name)
        return client.list_global_by_subscription_for_topic_type(topic_type_name)
    if resource_group_name:
        return client.list_regional_by_resource_group_for_topic_type(resource_group_name, location, topic_type_name)
    return client.list_regional_by_subscription_for_topic_type(location, topic_type_name)
def _get_scope(
        cli_ctx,
        resource_group_name,
        provider_namespace,
        resource_type,
        resource_name):
    """Build an ARM scope string (subscription, resource group, or full
    resource) with all path segments URL-quoted."""
    subscription_id = quote(get_subscription_id(cli_ctx))
    if provider_namespace == RESOURCES_NAMESPACE:
        # scope is the subscription itself or a resource group within it
        if resource_group_name:
            return '/subscriptions/{}/resourceGroups/{}'.format(
                subscription_id, quote(resource_group_name))
        return '/subscriptions/{}'.format(subscription_id)
    # scope is a specific resource under a provider namespace
    return '/subscriptions/{}/resourceGroups/{}/providers/{}/{}/{}'.format(
        subscription_id,
        quote(resource_group_name),
        quote(provider_namespace),
        quote(resource_type),
        quote(resource_name))
def _get_scope_for_event_subscription(
        cli_ctx,
        resource_id,
        source_resource_id,
        topic_name,
        resource_group_name):
    """Resolve the scope (source resource) for an event subscription from the
    mutually exclusive identification arguments, preferring the modern
    --source-resource-id over the deprecated alternatives."""
    if resource_id and source_resource_id:
        raise CLIError('usage error: specify either "--resource-id" or "--source-resource-id", not both.')
    if resource_id and topic_name:
        raise CLIError('usage error: specify either "--topic-name" or "--resource-id", not both.')
    if source_resource_id and topic_name:
        raise CLIError('usage error: specify either "--topic-name" or "--source-resource-id", not both.')

    # A default resource group name could have been configured, but
    # --resource-id / --source-resource-id always overrides it.
    if source_resource_id:
        # Latest, non-deprecated way of specifying the source resource.
        return source_resource_id
    if resource_id:
        # DEPRECATED
        return resource_id
    if topic_name:
        # DEPRECATED: build the scope from the topic and resource group.
        if resource_group_name is None:
            raise CLIError("When --topic-name is specified, the --resource-group-name must also be specified.")
        return _get_scope(cli_ctx, resource_group_name, EVENTGRID_NAMESPACE, EVENTGRID_TOPICS, topic_name)
    if resource_group_name:
        # DEPRECATED: event subscription to a resource group.
        return _get_scope(cli_ctx, resource_group_name, RESOURCES_NAMESPACE, RESOURCE_GROUPS, resource_group_name)
    # DEPRECATED: fall back to the whole Azure subscription as the source.
    logger.warning('This default option uses Azure subscription as the source resource.'
                   ' This is deprecated and will be removed in a future release.'
                   ' Use `--source-resource-id /subscriptions/{subid}` instead.')
    return _get_scope(cli_ctx, None, RESOURCES_NAMESPACE, SUBSCRIPTIONS, get_subscription_id(cli_ctx))
def event_subscription_getter(
        cmd,
        client,
        event_subscription_name,
        source_resource_id=None,
        resource_id=None,
        resource_group_name=None,
        topic_name=None):
    """Fetch an event subscription by name at the scope resolved from the
    identification arguments."""
    resolved_scope = _get_scope_for_event_subscription(
        cli_ctx=cmd.cli_ctx,
        source_resource_id=source_resource_id,
        resource_id=resource_id,
        topic_name=topic_name,
        resource_group_name=resource_group_name)
    return client.get(resolved_scope, event_subscription_name)
def update_event_subscription(
        instance,
        endpoint=None,
        endpoint_type=WEBHOOK_DESTINATION,
        subject_begins_with=None,
        subject_ends_with=None,
        included_event_types=None,
        labels=None,
        deadletter_endpoint=None):
    """Build EventSubscriptionUpdateParameters by overlaying the supplied
    values on the existing event subscription *instance*.

    Only arguments explicitly supplied (not None) replace current values;
    the retry policy is always carried over unchanged.
    """
    # Non-webhook destination types carry their identity in the endpoint, so
    # an endpoint is mandatory when a non-default type is requested.
    if endpoint is None and endpoint_type.lower() != WEBHOOK_DESTINATION.lower():
        raise CLIError('Invalid usage: Since --endpoint-type is specified, a valid endpoint must also be specified.')

    destination = None
    if endpoint is not None:
        destination = _get_endpoint_destination(endpoint_type, endpoint)

    deadletter = None
    if deadletter_endpoint is not None:
        deadletter = _get_deadletter_destination(deadletter_endpoint)

    updated_filter = instance.filter
    if subject_begins_with is not None:
        updated_filter.subject_begins_with = subject_begins_with
    if subject_ends_with is not None:
        updated_filter.subject_ends_with = subject_ends_with
    if included_event_types is not None:
        updated_filter.included_event_types = included_event_types

    updated_labels = labels if labels is not None else instance.labels

    return EventSubscriptionUpdateParameters(
        destination=destination,
        filter=updated_filter,
        labels=updated_labels,
        retry_policy=instance.retry_policy,
        dead_letter_destination=deadletter
    )
def _get_endpoint_destination(endpoint_type, endpoint):
    """Map an --endpoint-type value to the corresponding destination object.

    :param endpoint_type: one of webhook/eventhub/hybridconnection/storagequeue
        (case-insensitive)
    :param endpoint: endpoint URL (webhook) or resource id (other types)
    :raises CLIError: for an unrecognized endpoint type (previously the
        function fell through and crashed with UnboundLocalError)
    """
    normalized_type = endpoint_type.lower()
    if normalized_type == WEBHOOK_DESTINATION.lower():
        return WebHookEventSubscriptionDestination(endpoint_url=endpoint)
    if normalized_type == EVENTHUB_DESTINATION.lower():
        return EventHubEventSubscriptionDestination(resource_id=endpoint)
    if normalized_type == HYBRIDCONNECTION_DESTINATION.lower():
        return HybridConnectionEventSubscriptionDestination(resource_id=endpoint)
    if normalized_type == STORAGEQUEUE_DESTINATION.lower():
        return _get_storage_queue_destination(endpoint)
    raise CLIError('usage error: unknown --endpoint-type: {}.'.format(endpoint_type))
def _get_storage_queue_destination(endpoint):
    """Split a storage-queue endpoint into a storage-account resource id and a
    queue name, then build the destination object.

    Supplied endpoint would be in the following format:
    /subscriptions/.../storageAccounts/sa1/queueServices/default/queues/{queueName}
    and is broken up into:
    /subscriptions/.../storageAccounts/sa1 and queueName
    """
    parts = re.split(
        "/queueServices/default/queues/", endpoint, flags=re.IGNORECASE)
    is_valid = len(parts) == 2 and parts[0] is not None and parts[1] is not None
    if not is_valid:
        raise CLIError('Argument Error: Expected format of --endpoint for storage queue is:' +
                       '/subscriptions/id/resourceGroups/rg/providers/Microsoft.Storage/' +
                       'storageAccounts/sa1/queueServices/default/queues/queueName')
    return StorageQueueEventSubscriptionDestination(resource_id=parts[0], queue_name=parts[1])
def _get_deadletter_destination(deadletter_endpoint):
    """Split a blob-container endpoint into a storage-account resource id and
    a container name, then build the dead-letter destination object."""
    parts = re.split(
        "/blobServices/default/containers/", deadletter_endpoint, flags=re.IGNORECASE)
    is_valid = len(parts) == 2 and parts[0] is not None and parts[1] is not None
    if not is_valid:
        raise CLIError('Argument Error: Expected format of --deadletter-endpoint is:' +
                       '/subscriptions/id/resourceGroups/rg/providers/Microsoft.Storage/' +
                       'storageAccounts/sa1/blobServices/default/containers/containerName')
    return StorageBlobDeadLetterDestination(resource_id=parts[0], blob_container_name=parts[1])
def _validate_retry_policy(max_delivery_attempts, event_ttl):
if max_delivery_attempts < 1 or max_delivery_attempts > 30:
raise CLIError('--max-delivery-attempts should be a number between 1 and 30.')
if event_ttl < 1 or event_ttl > 1440:
raise CLIError('--event-ttl should be a number between 1 and 1440.')
def _warn_if_manual_handshake_needed(endpoint_type, endpoint):
    """Warn that a manual subscription-validation handshake may be required.

    Endpoints of services known to implement the handshake automatically
    (Azure Automation, EventGrid-triggered Azure Functions, Logic Apps) are
    exempt, so no warning is shown for them.
    """
    if endpoint_type.lower() != WEBHOOK_DESTINATION.lower():
        return
    endpoint_lower = endpoint.lower()
    known_handshake_markers = ("azure-automation", "eventgridextension", "logic.azure")
    if any(marker in endpoint_lower for marker in known_handshake_markers):
        return
    logger.warning('If the provided endpoint does not support subscription validation '
                   'handshake, navigate to the validation URL that you receive in the '
                   'subscription validation event, in order to complete the event '
                   'subscription creation or update. For more details, '
                   'please visit http://aka.ms/esvalidation')
def _list_event_subscriptions_by_resource_id(client, resource_id):
# parse_resource_id doesn't handle resource_ids for Azure subscriptions and RGs
# so, first | |
# Source repository: caiorss/m2py (10-100 stars)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import __xsteam__ as xst
def tsat_p(p):
    """
    Saturation Temperature as function of Pressure [kPa]

    :param p: absolute pressure in kPa
    :return: saturation temperature, or None when the pressure is outside the
        valid range (0.000611657 .. 22.06395 MPa)
    """
    # The original declared `global fromSIunit_T`, a name this function never
    # uses (the call goes through xst); the bogus global has been removed.
    # kPa to MPa (the xst backend works in MPa)
    p = p / 1000.0
    if 0.000611657 < p < 22.06395:
        return xst.fromSIunit_T(xst.T4_p(p))
    return None
def tsat_s(s):
    """
    Saturation Temperature as Function of Entropy [°C]

    :param s: specific entropy
    :return: saturation temperature, or None when s is out of range
    """
    s_si = xst.toSIunit_s(s)
    if not (-0.0001545495919 < s_si < 9.155759395):
        return None
    saturation_pressure = xst.p4_s(s_si)
    return xst.fromSIunit_T(xst.T4_p(saturation_pressure))
# case 'psat_t'
def psat_t(T):
    """
    Saturation Pressure as function of Temperature [kPa]

    :param T: Temperature in degC
    :return: Saturation Pressure in kPa, or None when T is out of range
    """
    T_si = xst.toSIunit_T(T)
    if not (273.15 < T_si < 647.096):
        return None
    # p4_T returns MPa; convert to kPa
    return xst.p4_T(T_si) * 1000.0
def psat_s(s):
    """
    Saturation Pressure as Function of entropy [kPa]

    :param s: specific entropy
    :return: saturation pressure in kPa, or None when s is out of range
    """
    s_si = xst.toSIunit_s(s)
    if not (-0.0001545495919 < s_si < 9.155759395):
        return None
    # p4_s returns MPa; convert to kPa
    return xst.p4_s(s_si) * 1000.0
# case 'h_pt'
def h_pt(p, T):
    """
    Superheated Vapor Enthalpy [kJ/kg]

    :param p: Pressure in kPa
    :param T: Temperature (converted via xst.toSIunit_T; sibling wrappers
        document this unit as °C — confirm)
    :return: Enthalpy in kJ/kg, or None for the two-phase region (region 4)
        or an unrecognized region
    """
    # The original printed the computed region to stdout on every call; that
    # leftover debug print has been removed.
    # kPa to MPa
    p /= 1e3
    T = xst.toSIunit_T(T)
    region = xst.region_pT(p, T)
    if region == 1:
        return xst.fromSIunit_h(xst.h1_pT(p, T))
    if region == 2:
        return xst.fromSIunit_h(xst.h2_pT(p, T))
    if region == 3:
        return xst.fromSIunit_h(xst.h3_pT(p, T))
    if region == 5:
        return xst.fromSIunit_h(xst.h5_pT(p, T))
    # region 4 (two-phase) and anything unrecognized
    return None
def hv_t(T):
    """
    Saturated Vapor Enthalpy as function of Temperature

    :param T: Temperature in °C
    :return: Enthalpy in kJ/(kg.K), or None when T is out of the saturation range
    """
    T_si = xst.toSIunit_T(T)
    if not (273.15 < T_si < 647.096):
        return None
    saturation_pressure = xst.p4_T(T_si)
    return xst.fromSIunit_h(xst.h4V_p(saturation_pressure))
def h_px(p, x):
    """
    Enthalpy of saturated steam as function of pressure and vapor quality.

    :param p: Pressure in kPa
    :param x: Vapor quality, 0 <= x <= 1
    :return: Enthalpy, or None for an invalid quality or supercritical pressure
    """
    # The original declared `global xst`, which is pointless (the module
    # reference is never rebound); the bogus global has been removed.
    # kPa to MPa
    p = p / 1000.0
    if x > 1 or x < 0 or p >= 22.064:
        return None
    hL = xst.h4L_p(p)
    hV = xst.h4V_p(p)
    # linear mix of saturated-liquid and saturated-vapor enthalpy
    return hL + x * (hV - hL)
def h_tx(T, x):
    """
    Enthalpy of Saturated Steam as function of T and x

    :param T: Temperature in °C
    :param x: Vapor Quality 0 <= x <= 1
    :return: Enthalpy, or None for invalid quality or temperature at/above critical
    """
    T_si = xst.toSIunit_T(T)
    quality = xst.toSIunit_x(x)
    if quality > 1 or quality < 0 or T_si >= 647.096:
        return None
    p = xst.p4_T(T_si)
    h_liquid = xst.h4L_p(p)
    h_vapor = xst.h4V_p(p)
    # linear mix of saturated-liquid and saturated-vapor enthalpy
    return h_liquid + quality * (h_vapor - h_liquid)
def x_ph(p, h):
    """
    Vapor fraction as a function of pressure and enthalpy.
    :param p: pressure in kPa
    :param h: enthalpy in kJ/kg
    :return: vapor quality, or None outside the saturation pressure range
    """
    p_si = p / 1000
    h_si = xst.toSIunit_h(h)
    # Saturation exists only between the triple and critical pressures.
    if 0.000611657 < p_si < 22.06395:
        return xst.fromSIunit_x(xst.x4_ph(p_si, h_si))
    return None
# case {'v_pt','rho_pt'}
def v_pt(p, T):
    """
    Superheated Steam specific Volume as function of p and T

    :param p: Absolute Pressure in kPa
    :param T: Temperature (converted via toSIunit_T; presumably degC -- TODO confirm)
    :return: v Specific Volume of Superheated Steam in m3/kg, or None in
             the two-phase region (region 4) or for an unrecognized region

    Fix: removed a leftover debug print of the region number.
    """
    p = p / 1000.0  # kPa -> internal SI pressure unit
    T = xst.toSIunit_T(T)
    Region = xst.region_pT(p, T)
    if Region == 1:
        return xst.v1_pT(p, T)
    if Region == 2:
        return xst.v2_pT(p, T)
    if Region == 3:
        # Region 3 has no direct v(p, T); go through the enthalpy.
        return xst.v3_ph(p, xst.h3_pT(p, T))
    if Region == 5:
        return xst.v5_pT(p, T)
    # Region 4 (two-phase) or unknown region.
    return None
def vl_t(T):
    """
    Saturated-liquid specific volume as a function of temperature.

    :param T: Temperature in degC
    :return: specific volume in m3/kg, or None outside the saturation range

    Improvement: p4_T(T) is now computed once (the original re-evaluated
    it up to three times in the region-3 branch).
    """
    T = xst.toSIunit_T(T)
    if not (273.15 < T < 647.096):
        return None
    p_sat = xst.p4_T(T)
    if T <= 623.15:
        # Region-1 side of the saturation line.
        return xst.v1_pT(p_sat, T)
    # Region 3: recover v from the saturated-liquid enthalpy.
    return xst.v3_ph(p_sat, xst.h4L_p(p_sat))
def sv_p(p):
    """
    Saturated-vapor entropy as a function of pressure.

    :param p: pressure in kPa
    :return: entropy (display units), or None outside the saturation range

    Fixes: dropped the unused `toSIunit_p` import and hoisted the
    repeated T4_p(p) call.
    """
    from .__xsteam__ import fromSIunit_s, s2_pT, T4_p, s3_rhoT, v3_ph, h4V_p
    p = p / 1000.0  # kPa -> MPa
    if not (0.000611657 < p < 22.06395):
        return None
    T_sat = T4_p(p)
    if p < 16.529:
        # Region-2 side of the saturation line.
        return fromSIunit_s(s2_pT(p, T_sat))
    # Region 3: entropy from the saturated-vapor density.
    return fromSIunit_s(s3_rhoT(1 / v3_ph(p, h4V_p(p)), T_sat))
def s_pt(p, T):
    """
    Specific entropy as a function of pressure and temperature.

    :param p: pressure in kPa
    :param T: temperature (converted via toSIunit_T)
    :return: entropy (SI units), or None in region 4 / for an unknown region

    Fixes: removed the unused imports (toSIunit_p, h3_rhoT, fromSIunit_s).
    NOTE(review): unlike sv_p/sv_t the result is never wrapped in
    fromSIunit_s (which was imported but unused) -- confirm whether the
    caller expects SI or display units.
    """
    from .__xsteam__ import toSIunit_T, region_pT, v3_ph, h3_pT
    from .__xsteam__ import s1_pT, s2_pT, s5_pT, s3_rhoT
    p = p / 1000.0  # kPa -> MPa
    T = toSIunit_T(T)
    Region = region_pT(p, T)
    if Region == 1:
        return s1_pT(p, T)
    if Region == 2:
        return s2_pT(p, T)
    if Region == 3:
        # Region 3 works in (rho, T); recover rho through h and v.
        rho = 1 / v3_ph(p, h3_pT(p, T))
        return s3_rhoT(rho, T)
    if Region == 5:
        return s5_pT(p, T)
    # Region 4 (two-phase) or unknown region.
    return None
def t_ph(p, h):
    """
    Temperature as a function of pressure and enthalpy.

    :param p: pressure in kPa
    :param h: enthalpy in kJ/kg
    :return: temperature (display units), or None for an unknown region

    Bug fix: the original used `elif 2:` / `elif 3:` / ... -- always-true
    constants -- so every non-region-1 state was evaluated with the
    region-2 correlation. Each branch now tests Region explicitly.
    """
    p = p / 1000.0  # kPa -> MPa
    h = xst.toSIunit_h(h)
    Region = xst.region_ph(p, h)
    if Region == 1:
        return xst.fromSIunit_T(xst.T1_ph(p, h))
    if Region == 2:
        return xst.fromSIunit_T(xst.T2_ph(p, h))
    if Region == 3:
        return xst.fromSIunit_T(xst.T3_ph(p, h))
    if Region == 4:
        # Two-phase: the temperature is the saturation temperature.
        return xst.fromSIunit_T(xst.T4_p(p))
    if Region == 5:
        return xst.fromSIunit_T(xst.T5_ph(p, h))
    return None
def t_ps(p, s):
    """
    Temperature as a function of pressure and entropy.

    :param p: pressure in kPa
    :param s: entropy (already SI; the conversion is deliberately disabled)
    :return: temperature (display units), or None for an unknown region

    Bug fix: fromSIunit_T was called unqualified (NameError at runtime);
    it is now referenced through the xst module like every other helper.
    """
    p = p / 1000.0  # kPa -> MPa
    #s = xst.toSIunit_s(s)  # intentionally disabled in the original
    Region = xst.region_ps(p, s)
    if Region == 1:
        return xst.fromSIunit_T(xst.T1_ps(p, s))
    if Region == 2:
        return xst.fromSIunit_T(xst.T2_ps(p, s))
    if Region == 3:
        return xst.fromSIunit_T(xst.T3_ps(p, s))
    if Region == 4:
        # Two-phase: the temperature is the saturation temperature.
        return xst.fromSIunit_T(xst.T4_p(p))
    if Region == 5:
        return xst.fromSIunit_T(xst.T5_ps(p, s))
    return None
def t_hs(h, s):
    """
    Temperature as a function of enthalpy and entropy.

    :param h: enthalpy (SI; the conversion is deliberately disabled)
    :param s: entropy (SI; the conversion is deliberately disabled)
    :return: temperature (display units), or None for an unknown region
    :raises Exception: for region 5, which has no hs backward equation

    Bug fixes: fromSIunit_T was unqualified (NameError), and the region-5
    branch constructed an Exception without raising it, then crashed on
    the undefined `Out`. It now raises, matching p_hs (message kept
    byte-identical to p_hs for consistency).
    """
    Region = xst.region_hs(h, s)
    if Region == 1:
        return xst.fromSIunit_T(xst.T1_ph(xst.p1_hs(h, s), h))
    if Region == 2:
        return xst.fromSIunit_T(xst.T2_ph(xst.p2_hs(h, s), h))
    if Region == 3:
        return xst.fromSIunit_T(xst.T3_ph(xst.p3_hs(h, s), h))
    if Region == 4:
        return xst.fromSIunit_T(xst.T4_hs(h, s))
    if Region == 5:
        raise Exception('functions of hs is not avlaible in region 5')
    return None
#case 'sv_t'
def sv_t(T):
    """
    Saturated-vapor entropy as a function of temperature.

    :param T: temperature in degC
    :return: entropy (display units), or None outside the saturation range

    Improvement: p4_T(T) is computed once (the original re-evaluated it
    up to three times in the region-3 branch).
    """
    T = xst.toSIunit_T(T)
    if not (273.15 < T < 647.096):
        return None
    p_sat = xst.p4_T(T)
    if T <= 623.15:
        # Region-2 side of the saturation line.
        return xst.fromSIunit_s(xst.s2_pT(p_sat, T))
    # Region 3: entropy from the saturated-vapor density.
    rho = 1 / xst.v3_ph(p_sat, xst.h4V_p(p_sat))
    return xst.fromSIunit_s(xst.s3_rhoT(rho, T))
def vx_ph(p, h):
    """
    Vapour Volume Fraction as a function of pressure and enthalpy.

    :param p: pressure in kPa
    :param h: enthalpy (SI units, as passed to x4_ph)
    :return: vapour volume fraction, or None outside the saturation range

    Improvement: T4_p(p) is computed once instead of twice.
    """
    p = p / 1000.0  # kPa -> MPa
    if not (0.000611657 < p < 22.06395):
        return None
    if p < 16.529:
        T_sat = xst.T4_p(p)
        vL = xst.v1_pT(p, T_sat)
        vV = xst.v2_pT(p, T_sat)
    else:
        # Region 3: volumes from the saturated liquid/vapor enthalpies.
        vL = xst.v3_ph(p, xst.h4L_p(p))
        vV = xst.v3_ph(p, xst.h4V_p(p))
    xs = xst.x4_ph(p, h)
    # Volume-weighted fraction occupied by the vapor phase.
    return xst.fromSIunit_vx(xs * vV / (xs * vV + (1 - xs) * vL))
def x_ps(p, s):
    """
    Vapor quality as a function of pressure and entropy.

    :param p: pressure in kPa
    :param s: entropy (SI units, as passed to x4_ps)
    :return: vapor quality, or None outside the saturation range

    Bug fix: the original computed Out but fell off the end of the
    function without a return statement, so the caller always got None.
    """
    p /= 1000.0  # kPa -> MPa
    if 0.000611657 < p < 22.06395:
        return xst.fromSIunit_x(xst.x4_ps(p, s))
    return None
#case 'p_hs'
def p_hs(h, s):
    """
    Pressure as a function of enthalpy and entropy.
    :param h: enthalpy (SI; the conversion is deliberately disabled)
    :param s: entropy (SI; the conversion is deliberately disabled)
    :return: pressure (display units), or None for an unknown region
    :raises Exception: for region 5, which has no hs backward equation
    """
    Region = xst.region_hs(h, s)
    if Region == 1:
        return xst.fromSIunit_p(xst.p1_hs(h, s))
    if Region == 2:
        return xst.fromSIunit_p(xst.p2_hs(h, s))
    if Region == 3:
        return xst.fromSIunit_p(xst.p3_hs(h, s))
    if Region == 4:
        # Two-phase: saturation pressure at the hs saturation temperature.
        tSat = xst.T4_hs(h, s)
        return xst.fromSIunit_p(xst.p4_T(tSat))
    if Region == 5:
        raise Exception('functions of hs is not avlaible in region 5')
    return None
#case 'hv_p'
def hv_p(p):
    """
    Saturated-vapor enthalpy as a function of pressure.
    :param p: pressure in kPa
    :return: enthalpy (display units), or None outside the saturation range
    """
    p_si = p / 1000.0
    if not (0.000611657 < p_si < 22.06395):
        return None
    return xst.fromSIunit_h(xst.h4V_p(p_si))
def hl_p(p):
    """
    Saturated-liquid enthalpy as a function of pressure.
    :param p: pressure in kPa
    :return: enthalpy (display units), or None outside the saturation range
    """
    p_si = p / 1000.0
    if not (0.000611657 < p_si < 22.06395):
        return None
    return xst.fromSIunit_h(xst.h4L_p(p_si))
def hl_t(T):
    """
    Saturated-liquid enthalpy as a function of temperature.
    :param T: temperature in degC
    :return: enthalpy (display units), or None outside the saturation range
    """
    T_si = xst.toSIunit_T(T)
    if not (273.15 < T_si < 647.096):
        return None
    p_sat = xst.p4_T(T_si)
    return xst.fromSIunit_h(xst.h4L_p(p_sat))
#case 'h_ps'
def h_ps(p, s):
    """
    Enthalpy as a function of pressure and entropy.
    :param p: pressure in kPa
    :param s: entropy (SI; the conversion is deliberately disabled)
    :return: enthalpy (SI units), or None for an unknown region
    """
    p_si = p / 1000.0
    Region = xst.region_ps(p_si, s)
    if Region == 1:
        return xst.h1_pT(p_si, xst.T1_ps(p_si, s))
    if Region == 2:
        return xst.h2_pT(p_si, xst.T2_ps(p_si, s))
    if Region == 3:
        # Region 3 works in (rho, T); recover rho through v3_ps.
        return xst.h3_rhoT(1 / xst.v3_ps(p_si, s), xst.T3_ps(p_si, s))
    if Region == 4:
        # Two-phase: mix the saturated enthalpies by quality.
        quality = xst.x4_ps(p_si, s)
        return quality * xst.h4V_p(p_si) + (1 - quality) * xst.h4L_p(p_si)
    if Region == 5:
        return xst.h5_pT(p_si, xst.T5_ps(p_si, s))
    return None
def vx_ps(p, s):
p = p / 1000.0
#s = toSIunit_s(In2)
if 0.000611657 < p < 22.06395:
if p < 16.529:
vL = xst.v1_pT(p, xst.T4_p(p))
vV = xst.v2_pT(p, xst.T4_p(p))
else:
vL = xst.v3_ph(p, xst.h4L_p(p))
vV = xst.v3_ph(p, | |
<reponame>russellromney/dash-brain<filename>brain_plasma/brain.py
import traceback
from typing import ByteString, Iterable
import hashlib
from pyarrow import plasma
import os
import random
import string
import time
from .brain_client import BrainClient
from .exceptions import (
BrainNameNotExistError,
BrainNamespaceNameError,
BrainNamespaceNotExistError,
BrainNamespaceRemoveDefaultError,
BrainNameLengthError,
BrainNameTypeError,
BrainClientDisconnectedError,
BrainRemoveOldNameValueError,
BrainLearnNameError,
BrainUpdateNameError,
)
# apache plasma documentation
# https://arrow.apache.org/docs/python/plasma.html
class Brain:
    def __init__(
        self, namespace="default", path="/tmp/plasma", ClientClass=BrainClient
    ):
        """Connect to the plasma store socket at `path` and select `namespace`.

        ClientClass is injectable (e.g. for testing); it must expose the
        BrainClient interface used below (put/get/delete/contains/list/
        store_capacity/disconnect).
        """
        self.path = path
        self.namespace = namespace
        self.client = ClientClass(path)
        # size() also verifies the connection via a put/delete round trip.
        self.bytes = self.size()
        self.mb = "{} MB".format(round(self.bytes / 1000000))
        self.set_namespace(namespace)
##########################################################################################
# CORE FUNCTIONS
##########################################################################################
    def __setitem__(self, name, item):
        # dict-style assignment: brain["x"] = value
        self.learn(name, item)
    def __getitem__(self, name):
        # dict-style read: brain["x"]
        return self.recall(name)
    def __delitem__(self, name):
        # dict-style delete: del brain["x"]
        return self.forget(name)
    def __contains__(self, name):
        # membership test against the names in the current namespace
        return name in self.names()
    def __len__(self):
        # number of names in the current namespace
        return len(self.names())
    @property
    def reserved_names(self):
        # object names that may never be used for stored values
        return ["brain_namespaces_set"]
    def learn(self, name: str, thing: object, description: str = None):
        """
        put a given object to the plasma store
        if the name exists already:
            if a new description is provided, uses it; else uses old description
            stores the new value to a new ID
            stores the updated metadata to the same metadata ID
            deletes the old value at the old ID
        Errors:
            BrainNameTypeError
            BrainRemoveOldNameValueError
            BrainLearnNameError
            BrainUpdateNameError
        """
        # CHECK THAT NAME IS STRING
        if not type(name) == str:
            raise BrainNameTypeError(
                f'Type of name "{name}" must be str, not {type(name)}'
            )
        name_exists = self.exists(name)
        ### GET NAMES AND METADATA OBJECT
        # metadata lives at a deterministic ID derived from namespace + name
        metadata_id = self._name_to_namespace_hash(name)
        if name_exists:
            old_metadata = self.client.get(metadata_id)
            old_value_hash = old_metadata["value_id"]
            old_value_id = plasma.ObjectID(old_value_hash)
            # keep the previous description unless a new one was supplied
            description = description or old_metadata["description"]
        # the value itself always goes to a fresh random ObjectID
        value_id = plasma.ObjectID.from_random()
        # CREATE METADATA OBJECT
        metadata = {
            "name": name,
            "value_id": value_id.binary(),
            "description": description or "",
            "metadata_id": metadata_id.binary(),
            "namespace": self.namespace,
        }
        if name_exists:
            # IF NAME EXISTS ALREADY,
            # STORE THE NEW VALUE AT A NEW LOCATION
            # DELETE ITS VALUE STORED AT ITS OBJECTID
            # DELETE ITS BRAIN_OBJECT NAME INDEX
            # (1)
            try:
                self.client.put(thing, value_id)
            # IF THERE'S AN ERROR, JUST STOP
            except:
                traceback.print_exc()
                raise BrainUpdateNameError(
                    f"Unable to update value with name: {name}. Rolled back"
                )
            # (2)
            # REPLACE THE METADATA OBJECT
            self.client.delete([metadata_id])
            self.client.put(metadata, metadata_id)
            # (3)
            # TRY TO DELETE THE OLD NAME
            try:
                self.client.delete([old_value_id])
            # TELL THE USER WHAT WENT WRONG IF THAT DIDN'T WORK
            except:
                traceback.print_exc()
                raise BrainRemoveOldNameValueError(
                    f"Unable to remove old value for name {name} at {old_value_id}"
                )
        else:
            # STORE THE VALUE AND METADATA - IT'S NEW!
            try:
                self.client.put(thing, value_id)
                self.client.put(metadata, metadata_id)
            # IF SOMETHING GOES WRONG, CLEAR UP
            except:
                traceback.print_exc()
                self.client.delete([value_id, metadata_id])
                raise BrainLearnNameError(
                    f"Unable to set value with name: {name}. Rolled back"
                )
def recall(self, name):
"""
get an object value based on its Brain name
Errors:
KeyError
"""
if not self.exists(name):
raise KeyError(f"Name {name} does not exist.")
metadata_id = self._name_to_namespace_hash(name)
metadata = self.client.get(metadata_id, timeout_ms=100)
value_hash = metadata["value_id"]
value_id = plasma.ObjectID(value_hash)
return self.client.get(value_id, timeout_ms=100)
def exists(self, name: str):
"""
confirm that the plasma ObjectID for a given name
"""
id_hash = self._name_to_namespace_hash(name)
return self.client.contains(id_hash)
def forget(self, name: str):
"""
delete an object based on its name
also deletes the metadata object associated with the name
does it in a transactional way
if the name does not exist, doesn't do anything
"""
if not self.exists(name):
pass
else:
metadata_id = self._name_to_namespace_hash(name)
metadata = self.client.get(metadata_id, timeout_ms=100)
value_hash = metadata["value_id"]
value_id = plasma.ObjectID(value_hash)
self.client.delete([metadata_id, value_id])
    def names(self, namespace=None):
        """
        return a list of the names that brain knows
        in all namespaces or only in current (default)
        if namespace = "all", returns names from all namespaces

        NOTE(review): scanning "all" temporarily reassigns self.namespace;
        an exception mid-scan would leave the instance on a foreign
        namespace, and this is not thread-safe.
        """
        current_namespace = self.namespace
        if namespace is None:
            namespace = self.namespace
        names = []
        if namespace == "all":
            # FOR EACH NAMESPACE, ADD THE NAME OBJECTS TO THE LIST OF NAMES
            for namespace in self.namespaces():
                self.namespace = namespace
                names.extend([x["name"] for x in self.metadata(output="list")])
        else:
            # RETURN ALL THE NAMES AND OBJECT_IDS IN THAT NAMESPACE ONLY
            names = [
                x["name"]
                for x in self.metadata(output="list")
                if x["namespace"] == self.namespace
            ]
        # restore whichever namespace was active before the call
        self.namespace = current_namespace
        return names
def ids(self):
"""return list of Object IDs the brain knows that are attached to names"""
names_ = self.metadata()
return [plasma.ObjectID(x["value_id"]) for x in names_.values()]
    def sleep(self):
        """disconnect from the client"""
        # the Brain is unusable until wake_up() reconnects
        self.client.disconnect()
def wake_up(self):
"""reconnect to the client"""
self.client = plasma.connect(self.path)
time.sleep(0.2)
self.bytes = self.size()
self.mb = "{} MB".format(round(self.bytes / 1000000))
    def size(self):
        """
        show the available bytes of the underlying plasma_store;
        wrapper for PlasmaClient.store_capacity()
        Errors:
            BrainClientDisconnectedError
        """
        try:
            # IF THIS DOESN'T WORK, CLIENT IS DISCONNECTED
            # Probe the connection with a throwaway put/delete round trip.
            temp = plasma.ObjectID.from_random()
            self.client.put(5, temp)
            self.client.delete([temp])
        except:
            # NOTE(review): the bare except prints the traceback and
            # continues; the intended BrainClientDisconnectedError is
            # commented out, so store_capacity() below may still raise
            # when truly disconnected. Confirm the intended behavior.
            traceback.print_exc()
            #raise BrainClientDisconnectedError
        self.bytes = self.client.store_capacity()
        self.mb = "{} MB".format(round(self.bytes / 1000000))
        return self.bytes
def object_id(self, name: str) -> plasma.ObjectID:
"""
get the ObjectId of the value in the store for name
returns None if it doesn't exist
"""
if not self.exists(name):
return None
metadata = self.metadata(name)
return plasma.ObjectID(metadata["value_id"])
def object_ids(self) -> dict:
"""
return a dictionary of names and their ObjectIDs
limited to names in the current namespace
"""
names_ = self.metadata().values()
return {x["name"]: plasma.ObjectID(x["value_id"]) for x in names_}
    def metadata(self, *names, output: str = "dict") -> Iterable:
        """
        return a dict/list of all names and their associated metadata in current namespace
        accepts one or many names
        if only one name, only grabs one metadata
        otherwise, grabs all the metadata and returns them in a dictionary/list
        note on this:
            every name metadata is stored as a metadata object with the prefix b'<namespace>'
            so brain gets the names by
                getting all object ids
                any that have b'<namespace>' in them will be dictionaries of metadata
        Errors:
            TypeError

        NOTE(review): with output="list" and explicit names, the final
        branch returns ALL metadata in the namespace -- the names filter
        is only applied for output="dict". Confirm whether that is intended.
        """
        if output not in ["dict", "list"]:
            raise TypeError('Output must be "list" or "dict"')
        if len(names) == 1:
            # single-name fast path: one direct metadata lookup
            name = names[0]
            if not self.exists(name):
                return None
            metadata_id = self._name_to_namespace_hash(name)
            metadata = self.client.get(metadata_id)
            return metadata
        # GET ALL IDS IN THE STORE
        all_ids = list(self.client.list().keys())
        # GET THE FIRST SEVERAL CHARACTERS OF THE OBJECTID REPRESENTATION TO USE TO FILTER NAMES
        namespace_str = self.namespace.encode()
        # GET ALL IDS THAT CONTAIN THE NAMESPACE REPRESENTATION
        # I.E. ALL THE METADATA
        known_ids = [x for x in all_ids if x.binary().startswith(namespace_str)]
        # GET ALL ACTUAL OBJECTS (NAMES AND TYPE) WITH THOSE IDS
        all_metadata = self.client.get(known_ids, timeout_ms=100)
        if output == "dict":
            all_metadata = {meta["name"]: meta for meta in all_metadata}
        # RETURNS ALL NAMES IF NO NAMES ARE SPECIFIED
        if len(names) == 0:
            return all_metadata
        # RETURN ONLY THE NAMES SPECIFIED; NONE IF DOESN'T EXIST
        else:
            if output == "dict":
                return {name: all_metadata.get(name) for name in names}
            return all_metadata
def used(self):
"""get the total used bytes in the underlying plasma_store"""
total = 0
l = self.client.list()
for x in l.keys():
total += l[x]["data_size"] + l[x]["metadata_size"]
return total
    def free(self):
        """get the total unused bytes in the underlying plasma_store"""
        # capacity minus the bytes currently occupied by stored objects
        return self.size() - self.used()
def set_namespace(self, namespace=None):
"""
either return the current namespace or change the current namespace to something new
"""
if namespace is None:
return self.namespace
# MUST BE AT LEAST FIVE CHARACTERS AND FEWER THAN 15
if len(namespace) < 5:
raise BrainNamespaceNameError(
f"Namespace wrong length; 5 >= namespace >= 15; name {namespace} is {len(namespace)}"
)
elif len(namespace) > 15:
raise BrainNamespaceNameError(
f"Namespace wrong length; 5 >= namespace >= 15; name {namespace} is {len(namespace)}"
)
# CHANGE THE NAMESPACE AND ACKNOWLEDGE THE CHANGE
self.namespace = namespace
# IF THE NAMESPACE OBJECT EXISTS ALREADY, JUST ADD THE NEW NAMESPACE
if plasma.ObjectID(b"brain_namespaces_set") in self.client.list().keys():
# ADD TO NAMESPACES
namespaces = self.client.get(
plasma.ObjectID(b"brain_namespaces_set")
).union([self.namespace, "default"])
# REMOVE OLD NAMESPACES OBJECT
self.client.delete([plasma.ObjectID(b"brain_namespaces_set")])
# ASSIGN NEW NAMESPACES OBJECT
self.client.put(namespaces, plasma.ObjectID(b"brain_namespaces_set"))
# OTHERWISE, CREATE THE NAMESPACES OBJECT AND ADD TO PLASMA
else:
self.client.put(
set([self.namespace, "default"]),
plasma.ObjectID(b"brain_namespaces_set"),
)
# RETURN THE CURRENT NAMESPACE
return self.namespace
    def namespaces(self):
        """
        return set of all namespaces available in the store
        """
        # the shared registry lives at a fixed, reserved ObjectID
        return self.client.get(plasma.ObjectID(b"brain_namespaces_set"))
def remove_namespace(self, namespace=None) -> str:
"""
remove a namespace and all its values from Plasma
Errors:
BrainNamespaceRemoveDefaultError
BrainNamespaceNotExistError
"""
# IF NO NAMESPACE IS DEFINED, JUST REMOVE THE CURRENT NAMESPACE
if namespace == None:
namespace == self.namespace
# CANNOT DELETE THE DEFAULT NAMESPACE
if namespace == "default":
raise BrainNamespaceRemoveDefaultError("Cannot remove default namespace")
# CANNOT DELETE A NAMESPACE THAT DOESN'T EXIST
if namespace not in self.namespaces():
raise BrainNamespaceNotExistError(f'Namespace "{namespace}" | |
urllib.request.urlopen(createEntityURL)
createResponse2 = urllib.request.urlopen(createEntityURL)
except urllib.error.URLError as e:
testResult = False
notes = e.reason
except Exception as e:
testResult = False
createResponseJson1B = createResponse1.read()
createResponseJson2B = createResponse2.read()
entityUUID1Json = json.loads(createResponseJson1B)
entityUUID2Json = json.loads(createResponseJson2B)
if testResult != False:
#Link the two
postFieldsDict1 = {
"sourceEntityID" : entityUUID1Json["entityUUID"],
"targetEntityID" : entityUUID2Json["entityUUID"],
"linkAttributes" : linkAttributes,
"linkType" : linkType
}
requestURL = serverURL + "/modeling/addEntityLink"
try:
#urllib GET request
#entityMemeType = urllib.request.urlopen(createEntityMemeTypeURL)
#urllib POST request
request = Request(url=requestURL, data=bytes(json.dumps(postFieldsDict1), encoding='utf-8'))
response1 = urlopen(request).read().decode('utf8')
responseStr1= json.loads(response1)
except urllib.error.URLError as e:
testResult = False
notes = e.reason
except Exception as e:
testResult = False
resultSet = []
testResult = str(testResult)
expectedResult = str('True')
results = [1, "Link Entities", testResult, expectedResult, [notes]]
resultSet.append(results)
return resultSet
def testServerAPIGetAreEntitiesLinked(serverURL = None, memePath = "Graphyne.Generic"):
    """
    Tests the /modeling/createEntityFromMeme/<memePath> and /modeling/getEntityMemeType/<entityUUID> REST API calls
    1 - Create two entities of meme type memePath using /modeling/createEntityFromMeme/<memePath>
    2 - Link them via via /modeling/getEntityMemeType/<entityUUID>
    3 - Check to see that they are linked via /modeling/getAreEntitiesLinked
    """
    #"NumericValue.nv_intValue_3"
    method = moduleName + '.' + '/modeling/createEntityFromMeme/<memePath>'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    testResult = True
    createEntityURL = serverURL + "/modeling/createEntityFromMeme/%s" %memePath
    notes = ""
    try:
        #create two generic entities
        createResponse1 = urllib.request.urlopen(createEntityURL)
        createResponse2 = urllib.request.urlopen(createEntityURL)
    except urllib.error.URLError as e:
        testResult = False
        notes = e.reason
    except Exception as e:
        # NOTE(review): failure is recorded but the exception detail `e` is discarded
        testResult = False
    # NOTE(review): if creation failed above, these reads raise NameError
    # because createResponse1/2 were never bound -- confirm intended.
    createResponseJson1B = createResponse1.read()
    createResponseJson2B = createResponse2.read()
    entityUUID1Json = json.loads(createResponseJson1B)
    entityUUID2Json = json.loads(createResponseJson2B)
    if testResult != False:
        #Link the two
        postFieldsDict1 = {
            "sourceEntityID" : entityUUID1Json["entityUUID"],
            "targetEntityID" : entityUUID2Json["entityUUID"]
        }
        requestURL = serverURL + "/modeling/addEntityLink"
        try:
            #urllib GET request
            #entityMemeType = urllib.request.urlopen(createEntityMemeTypeURL)
            #urllib POST request
            request = Request(url=requestURL, data=bytes(json.dumps(postFieldsDict1), encoding='utf-8'))
            unusedResponse = urlopen(request).read().decode('utf8')
        except urllib.error.URLError as e:
            testResult = False
            notes = e.reason
        except Exception as e:
            testResult = False
    if testResult != False:
        #Link the two
        # (actually: query whether the two entities are now linked)
        getLinkedURL = serverURL + "/modeling/getAreEntitiesLinked/%s/%s" %(entityUUID1Json["entityUUID"], entityUUID2Json["entityUUID"])
        try:
            #urllib GET request
            getLinkedResponse = urllib.request.urlopen(getLinkedURL)
            getLinkedResponseJsonB = getLinkedResponse.read()
            getLinkedResponseJson = json.loads(getLinkedResponseJsonB)
            linkExists = getLinkedResponseJson["linkExists"]
        except urllib.error.URLError as e:
            testResult = False
            notes = e.reason
        except Exception as e:
            testResult = False
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    resultSet = []
    try:
        # linkExists is undefined when any earlier step failed
        testResult = linkExists
    except:
        testResult = "No value returned"
    expectedResult = str(True)
    # NOTE(review): the label "Unlink Entities" looks copy-pasted from the
    # removal test below; this test actually checks linking.
    results = [1, "Unlink Entities", testResult, expectedResult, [notes]]
    resultSet.append(results)
    return resultSet
def testServerAPIRemoveEntityLink(serverURL = None, memePath = "Graphyne.Generic"):
    """
    Tests the /modeling/createEntityFromMeme/<memePath> and /modeling/getEntityMemeType/<entityUUID> REST API calls
    1 - Create two entities of meme type memePath using /modeling/createEntityFromMeme/<memePath>
    2 - Link them via via /modeling/getEntityMemeType/<entityUUID>
    3 - Check to see if they are linked via /modeling/getAreEntitiesLinked.  (should be True)
    4 - Remove the link via /modeling/removeEntityLink/
    5 - Check again to see if they are linked via /modeling/getAreEntitiesLinked.  (should be False)
    """
    #"NumericValue.nv_intValue_3"
    method = moduleName + '.' + '/modeling/createEntityFromMeme/<memePath>'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    testResult = True
    createEntityURL = serverURL + "/modeling/createEntityFromMeme/%s" %memePath
    notes = ""
    try:
        #create two generic entities
        createResponse1 = urllib.request.urlopen(createEntityURL)
        createResponse2 = urllib.request.urlopen(createEntityURL)
    except urllib.error.URLError as e:
        testResult = False
        notes = e.reason
    except Exception as e:
        # NOTE(review): failure recorded but exception detail `e` is discarded
        testResult = False
    # NOTE(review): raises NameError if creation failed above -- confirm intended.
    createResponseJson1B = createResponse1.read()
    createResponseJson2B = createResponse2.read()
    entityUUID1Json = json.loads(createResponseJson1B)
    entityUUID2Json = json.loads(createResponseJson2B)
    if testResult != False:
        #Link the two
        postFieldsDict1 = {
            "sourceEntityID" : entityUUID1Json["entityUUID"],
            "targetEntityID" : entityUUID2Json["entityUUID"]
        }
        requestURL = serverURL + "/modeling/addEntityLink"
        try:
            #urllib GET request
            #entityMemeType = urllib.request.urlopen(createEntityMemeTypeURL)
            #urllib POST request
            request = Request(url=requestURL, data=bytes(json.dumps(postFieldsDict1), encoding='utf-8'))
            unusedResponse = urlopen(request).read().decode('utf8')
        except urllib.error.URLError as e:
            testResult = False
            notes = e.reason
        except Exception as e:
            testResult = False
    #first check to see that they are linked.  Should be True
    getLinkedURL = serverURL + "/modeling/getAreEntitiesLinked/%s/%s" %(entityUUID1Json["entityUUID"], entityUUID2Json["entityUUID"])
    if testResult != False:
        try:
            #urllib GET request
            getLinkedResponse = urllib.request.urlopen(getLinkedURL)
            getLinkedResponseJsonB = getLinkedResponse.read()
            getLinkedResponseJson = json.loads(getLinkedResponseJsonB)
            linkExists = getLinkedResponseJson["linkExists"]
            if linkExists == str(False):
                #This should be true.  If False, we have a problem
                testResult = False
        except urllib.error.URLError as e:
            testResult = False
            notes = e.reason
        except Exception as e:
            testResult = False
    #Now unlink them
    if testResult != False:
        #Link the two
        # (actually: remove the link that was just created)
        removeEntityMemeTypeURL = serverURL + "/modeling/removeEntityLink/%s/%s" %(entityUUID1Json["entityUUID"], entityUUID2Json["entityUUID"])
        try:
            #urllib GET request
            unusedRemovalResult = urllib.request.urlopen(removeEntityMemeTypeURL)
        except urllib.error.URLError as e:
            testResult = False
            notes = e.reason
        except Exception as e:
            testResult = False
    #Now check again to see if they are linked.  Should be False
    getLinkedURL = serverURL + "/modeling/getAreEntitiesLinked/%s/%s" %(entityUUID1Json["entityUUID"], entityUUID2Json["entityUUID"])
    if testResult != False:
        try:
            #urllib GET request
            getLinkedResponse = urllib.request.urlopen(getLinkedURL)
            getLinkedResponseJsonB = getLinkedResponse.read()
            getLinkedResponseJson = json.loads(getLinkedResponseJsonB)
            linkExists = getLinkedResponseJson["linkExists"]
            if linkExists == str(True):
                #This should be False this time.  If True, then /modeling/removeEntityLink/ failed
                testResult = False
        except urllib.error.URLError as e:
            testResult = False
            notes = e.reason
        except Exception as e:
            testResult = False
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    resultSet = []
    testResult = str(testResult)
    expectedResult = str(True)
    results = [1, "Unlink Entities", testResult, expectedResult, [notes]]
    resultSet.append(results)
    return resultSet
def testServerAPIGetLinkCounterpartsByType(serverURL = None, fName = "Entity_Phase7.atest"):
''' This is a modified version of testEntityPhase7() from Graphyne's Smoketest.py.
Instead of direct API access, it uses the server REST API
Create entities from the meme in the first two colums.
Add a link between the two at the location on entity in from column 3.
Check and see if each is a counterpart as seen from the other using the addresses in columns 4&5 (CheckPath & Backpath)
& the filter.
The filter must be the same as the type of link (or None)
The check location must be the same as the added loation.
Note! Most operations are not exhausively tested for different internal permutations and we just trust that Graphyne works.
What is different here is that we still expect Graphyne to act as it should, but we need to make sure that the traverse path
queries reach Graphyne intact.
'''
results = []
lresultSet = []
#try:
testFileName = os.path.join(testDirPath, fName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
readLoc.close
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
testResult = False
try:
createEntityURL0 = serverURL + "/modeling/createEntityFromMeme/%s" %stringArray[0]
createEntityURL1 = serverURL + "/modeling/createEntityFromMeme/%s" %stringArray[1]
queryURL = serverURL + "/modeling/query"
attachURL = serverURL + "/modeling/addEntityLink"
#entityID0 = Graph.api.createEntityFromMeme(stringArray[0])
#entityID1 = Graph.api.createEntityFromMeme(stringArray[1])
createResponse0 = urllib.request.urlopen(createEntityURL0)
createResponseJson0B = createResponse0.read()
entityUUID0Json = json.loads(createResponseJson0B)
entityID0 = entityUUID0Json["entityUUID"]
createResponse1 = urllib.request.urlopen(createEntityURL1)
createResponseJson1B = createResponse1.read()
entityUUID1Json = json.loads(createResponseJson1B)
entityID1 = entityUUID1Json["entityUUID"]
#Attach entityID1 at the mount point specified in stringArray[2]
if stringArray[2] != "X":
postFieldsDictAttachQuery = {
"originEntityID" : entityID0,
"query" : stringArray[2]
}
request = Request(url=queryURL, data=bytes(json.dumps(postFieldsDictAttachQuery), encoding='utf-8'))
attachPointResponse = urlopen(request).read().decode('utf8')
try:
attachPointResponseJson = json.loads(attachPointResponse)
except:
attachPointResponseJsonB = attachPointResponse.read()
attachPointResponseJson = json.loads(attachPointResponseJsonB)
mountPoints = attachPointResponseJson["entityIDList"]
#mountPoints = api.getLinkCounterpartsByType(entityID0, stringArray[2], 0)
unusedMountPointsOverview = {}
for mountPoint in mountPoints:
postFieldsDictAttach = {
"sourceEntityID" : mountPoint,
"targetEntityID" : entityID1,
"query" : stringArray[2],
"linkType" : int(stringArray[5])
}
request = Request(url=attachURL, data=bytes(json.dumps(postFieldsDictAttach), encoding='utf-8'))
unusedAttachPointResponse = urlopen(request).read().decode('utf8')
else:
raise ValueError("Testcase with invalid attachment point")
backTrackCorrect = False
linkType = None
if stringArray[6] != "X":
linkType = int(stringArray[6])
#see if we can get from entityID0 to entityID1 via stringArray[3]
addLocationCorrect = False
if linkType is not None:
postFieldsDictForwardQuery = {
"originEntityID" : entityID0,
"query" : stringArray[3],
"linkType" : int(stringArray[6])
}
else:
postFieldsDictForwardQuery = {
"originEntityID" : entityID0,
"query" : stringArray[3]
}
request = Request(url=queryURL, data=bytes(json.dumps(postFieldsDictForwardQuery), encoding='utf-8'))
forwardQueryResponse = urlopen(request).read().decode('utf8')
try:
forwardQueryResponseJson = json.loads(forwardQueryResponse)
except:
forwardQueryResponseJsonB = forwardQueryResponse.read()
forwardQueryResponseJson = json.loads(forwardQueryResponseJsonB)
addLocationList = forwardQueryResponseJson["entityIDList"]
if len(addLocationList) > 0:
addLocationCorrect = True
#see if we can get from entityID1 to entityID0 via stringArray[4]
backTrackCorrect = False
if linkType is not None:
postFieldsDictBacktrackQuery = {
"originEntityID" : entityID1,
"query" : stringArray[4],
| |
<filename>ocr/ocr_crnn_training.py
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05_ocr_crnn_training.ipynb (unless otherwise specified).
__all__ = ['PAD', 'PAD', 'DATA_PATH', 'crnn_config', 'allowed_chars', 'allowed_fonts', 'TextlineProcessor',
'TextlineAndFont', 'one_hot_text', 'decode_single_ctc', 'decode_ctc', 'TextlineList', 'im2seq_data_collate',
'str2lines', 'MyImageList', 'gaussian_blur', 'resize_tfm', 'rand_resize', 'resize_one_img',
'train_transforms', 'valid_transforms', 'normalize_images', 'denormalize_images', 'opencv_transform_images',
'threshold_image', 'create_data', 'conv_output', 'CNN', 'RevConv', 'get_normal_cnn', 'get_partially_rev_cnn',
'CRNN', 'image_width2seq_len', 'CTCFontLoss', 'AddLossMetrics', 'wer', 'word_error', 'char_error',
'decode_true', 'WordErrorRate']
# Cell
from fastai import *
from fastai.vision import *
import pandas as pd
import numpy as np
import cv2
from tqdm.notebook import tqdm
# Cell
from .core import save_inference, load_inference
from .ocr_dataset_fontsynth import create_df as create_fontsynth_df
from .ocr_dataset_sroie2019 import create_df as create_sroie_df
from .ocr_dataset_brno import create_df as create_brno_df
from .ocr_dataset_sroie2019 import sroie_ocr_config, DATA_PATH, char_freq
from .ocr_dataset_fontsynth import fontsynth_config, char_freq
from .ocr_dataset_brno import brno_ocr_config
PAD = sroie_ocr_config.PAD # PAD - how much is data padded
# NOTE(review): the assignment above is immediately overridden; PAD is
# forced to 0 here, so the sroie_ocr_config value is unused. Confirm intent.
PAD = 0
DATA_PATH = fontsynth_config.LINES_DIR  # directory holding fontsynth line images
# Cell
# Character vocabulary for the OCR output layer (mirrored by
# crnn_config.allowed_chars below).
allowed_chars = {'N', '3', 'V', 'P', '7', '1', '#', '9', '"', 'C', 'Q', 'B', 'E', '>', '@', ',', 'M', '{', ']',
                 ';', '^', "'", '&', '6', 'Z', '*', '<', '+', 'G', 'X', '!', ':', '-', '[', '|', '$', '5', 'I',
                 'H', '=', 'Y', '.', 'R', 'S', '/', 'T', '}', 'K', '0', '?', 'U', ')', '_', 'D', 'J', 'L', '4',
                 'W', '%', '(', ' ', 'F', '8', '~', '\\', 'A', '2', 'O'}
# allowed_chars = fontsynth_config.allowed_chars
# Font classes predicted by the auxiliary font head; 'Unknown' is first.
allowed_fonts = ['Unknown', 'Andale_Mono', 'Arial', 'Arial_Black', 'Arial_Bold', 'Arial_Bold_Italic', 'Arial_Italic',
                 'Comic_Sans_MS_Bold', 'Courier_New', 'Courier_New_Bold', 'Courier_New_Bold_Italic', 'Courier_New_Italic',
                 'Georgia', 'Georgia_Bold', 'Georgia_Bold_Italic', 'Georgia_Italic', 'Impact', 'Times_New_Roman',
                 'Times_New_Roman_Bold', 'Times_New_Roman_Bold_Italic', 'Times_New_Roman_Italic', 'Trebuchet_MS',
                 'Trebuchet_MS_Bold', 'Trebuchet_MS_Bold_Italic', 'Trebuchet_MS_Italic', 'Verdana', 'Verdana_Bold',
                 'Verdana_Bold_Italic', 'Verdana_Italic', 'brno_easy', 'brno_medium', 'sroie2019', 'Comic_Sans_MS']
class crnn_config:
    """Shared configuration for the CRNN OCR pipeline."""
    LINE_HEIGHT = 48  # text-line images are resized to this height (px)
    USE_DEFAULT_CLASSES = True  # use the fixed vocab instead of scanning data
    label_delim = '`'  # delimiter between characters in label strings
    pad_idx = 0  # aka: label_delim idx
    allowed_chars = allowed_chars  # character vocabulary (module constant)
    allowed_fonts = allowed_fonts  # font class names (module constant)
# Cell
# label_delim = '`' # '<pad>''
class TextlineProcessor(PreProcessor):
    "`PreProcessor` that create `classes` from `ds.items` and handle the mapping."
    def __init__(self, ds:ItemList):
        # build the char->index and font->index maps from the dataset
        self.create_classes(ds.classes, ds.font_classes)
        self.use_default_classes = crnn_config.USE_DEFAULT_CLASSES
        self.default_classes = crnn_config.allowed_chars
        self.default_font_classes = crnn_config.allowed_fonts
    # optional
    def create_classes(self, classes, font_classes):
        self.classes, self.font_classes = classes, font_classes
        if classes is not None:
            # index 0 is reserved for the pad/blank token (label_delim)
            self.classes = [crnn_config.label_delim] + classes
            self.c2i = {v:k for k,v in enumerate(self.classes)}
            self.f2i = {v:k for k,v in enumerate(font_classes)}
    def process_one(self,item):
        # (string, font) -> (list of character indices, font index)
        string, font = item
        return [ self.c2i[c] for c in string ], self.f2i[font]
    def process(self, ds):
        # generate classes lazily on the first dataset, then stamp the maps onto it
        if self.classes is None: self.create_classes(*self.generate_classes(ds.items))
        ds.classes = self.classes
        ds.c2i = self.c2i
        ds.font_classes = self.font_classes
        ds.f2i = self.f2i
        super().process(ds)
    # optional
    def generate_classes(self, items):
        # either the fixed default vocabulary or one scanned from the items
        if self.use_default_classes:
            classes = list(self.default_classes)
            font_classes = list(self.default_font_classes)
        else:
            classes, font_classes = set(), set()
            for c,font in items:
                classes = classes.union(set(c))
                font_classes.add(font)
            classes, font_classes = list(classes), list(font_classes)
        classes.sort(); font_classes.sort()
        return classes, font_classes
# Cell
class TextlineAndFont(ItemBase):
    """Target item pairing a textline label with its font class.

    F = font, S = string
    data: tensor(S), tensor(F)
    obj: str(S), str(F)
    raw: str(S), list(F)
    """

    def __init__(self, data, obj, raw):
        self.data, self.obj, self.raw = data, obj, raw

    def __str__(self, n=20):
        # Truncate long strings to the first `n` characters for display.
        chars, font = self.obj
        shown = chars[:n] + ['...'] if len(chars) > n else chars
        delim = crnn_config.label_delim
        return font[:5] + '...' + delim.join([str(c) for c in shown])

    def __hash__(self):
        return hash(str(self))
# Cell
def one_hot_text(x:Collection[int], c:int):
    """One-hot encode `x` with `c` classes.

    An index collection of length n yields a float32 array of shape [n, c]
    with exactly one 1. per row.
    """
    res = np.zeros((len(x), c), np.float32)
    # list(x) replaces fastai's listify(x): equivalent for the Collection
    # inputs used here, and it removes the third-party dependency.
    res[np.arange(len(x)), list(x)] = 1.
    return res
# Cell
def decode_single_ctc(t, blank_char=0):  # [s_e] -> [s_d], where s_d < s_e
    """Collapse one raw CTC path: merge adjacent repeats, then drop blanks."""
    decoded = []
    previous = blank_char
    for ch in t:
        # keep a symbol only if it is not the blank and not a repeat of the
        # immediately preceding symbol
        if ch != blank_char and ch != previous:
            decoded.append(ch)
        previous = ch
    return decoded


def decode_ctc(texts, blank_char=0):  # [b,s_e] -> [b,s_d], where s_d < s_e
    """Decode a batch of CTC paths; returns one tensor per sequence."""
    return [tensor(decode_single_ctc(seq, blank_char=blank_char)) for seq in texts]
# Cell
class TextlineList(ItemList):
    """Label `ItemList` holding (textline string, font) targets for CTC training."""
    _processor = TextlineProcessor

    def __init__(self, items:Iterator, classes=None, font_classes=None, label_delim:str=None, one_hot:bool=False, **kwargs):
        self.classes = classes
        self.font_classes = font_classes
        # split each label string into its per-character labels up front
        items = [(string.split(crnn_config.label_delim),font) for string,font in items]
        super().__init__(items, **kwargs)
        self.processor = [TextlineProcessor(self)]

    def get(self, i):
        # items[i] is (list of char indices, font index) after processing
        stridxs, fontidx = self.items[i]
        return TextlineAndFont( (tensor(stridxs), tensor(fontidx)),
            ([self.classes[c] for c in stridxs], self.font_classes[fontidx]), self.items[i])

    def analyze_pred(self, nn_output, thresh=0.5, _=None):
        # nn_output: (font logits [c1], per-timestep char logits [s_e, c2]);
        # greedy argmax then CTC-collapse. Single sample only (no batch dim).
        font_pred, y_pred = nn_output
        assert len(listify(y_pred.shape)) == 2  # (no batch inputs)
        return font_pred.argmax(dim=-1), decode_single_ctc(y_pred.argmax(dim=-1)), _, _  # [1], [seq_len], _, _

    def reconstruct(self, data_out):
        # data_out comes either from the dataset or from analyze_pred above.
        fontidx, t_argmax, _, lengths = data_out
        stridxs = [int(i) for i in t_argmax]
        fontidx = int(fontidx)
        return TextlineAndFont((one_hot_text(stridxs, self.c), fontidx),
            ([self.classes[c] for c in stridxs], self.font_classes[fontidx]), data_out)

    @property
    def c(self):
        # number of character classes (including the blank at index 0)
        return len(self.classes)
# Cell
def im2seq_data_collate(batch:ItemsList, pad_idx:int=0)->Tensor:
    """Collate (image, (text, font)) samples into a CTC training batch.

    Images of differing widths are padded to the batch maximum by replicating
    their last pixel column; targets stay as a list of variable-length index
    tensors plus per-sample lengths (the layout CTCLoss expects).
    """
    # plain int targets mean this is not a text batch — fall back to default
    if isinstance(batch[0][1], int): return data_collate(batch)
    data = to_data(batch)  # list of (image, (text, font)) tuples
    # image: [3,48,w], text: [n]; n and w differ per sample
    max_w = max([image.shape[2] for image, (text,font) in data])
    max_h = max([image.shape[1] for image, (text,font) in data])
    max_n = max([text.shape[0] for image, (text,font) in data])
    images = torch.zeros(len(batch), 3, max_h, max_w)
    fonts = torch.zeros(len(batch)).long()
    texts = []
    nn_out_seq_len, texts_len = [], []
    for i, (image, (text,font)) in enumerate(data):
        fonts[i] = font
        c,h,w = image.shape
        images[i, : , : , :w ] = image
        # pad the width by repeating the last column (edge padding)
        images[i, : , : , w: ] = image[:,:,w-1].unsqueeze(2).expand(c,h,max_w-w)
        # how many output timesteps the CNN produces for this image width
        nn_out_seq_len.append( image_width2seq_len(w) )
        n = text.size(0)
        texts.append( tensor(text) )
        texts_len.append(n)
    return images, (fonts, texts, tensor(nn_out_seq_len).type(torch.int), tensor(texts_len).type(torch.int))
# Cell
def str2lines(string, n=50):
    """Insert a newline after every `n` characters of `string`.

    Used to wrap long labels when rendering them as plot titles.
    """
    return ''.join([s+'\n' if (i+1)%n == 0 else s for i,s in enumerate(string)])
# NOTE: a leftover module-level debug invocation of str2lines(...) was removed
# here; it ran (and discarded its result) on every import.
# Cell
class MyImageList(ImageList):
    """`ImageList` whose show methods stack wide textline crops in one column."""

    def show_xys(self, xs, ys, imgsize:int=4, figsize:Optional[Tuple[int,int]]=None, **kwargs):
        "Show the `xs` (inputs) and `ys` (targets) on a figure of `figsize`."
        # single column: wide textline images are unreadable in a square grid
        rows = int(np.ceil(math.sqrt(len(xs))))
        axs = subplots(rows, 1, imgsize=imgsize, figsize=figsize)
        for x,y,ax in zip(xs, ys, axs.flatten()): x.show(ax=ax, y=y, **kwargs)
        for ax in axs.flatten()[len(xs):]: ax.axis('off')
        plt.tight_layout()

    def show_xyzs(self, xs, ys, zs, imgsize:int=10, figsize:Optional[Tuple[int,int]]=None, **kwargs):
        "Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`."
        title = 'Ground truth\nPredictions'
        rows = int(np.ceil(math.sqrt(len(xs))))
        axs = subplots(rows, 1, imgsize=imgsize, figsize=figsize, title=title, weight='bold', size=12).flatten()
        for x,y,z,ax in zip(xs,ys,zs,axs):
            # wrap long labels so the title stays readable
            x.show(ax=ax, title=f'y_true: {str2lines(str(y))}\ny_pred: {str2lines(str(z))}', **kwargs)
# Cell
def _gaussian_blur(x, size:uniform_int):
    """Box-blur image tensor `x` with a size x size kernel (cv2.blur, despite the name)."""
    arr = image2np(x)          # CHW tensor -> HWC numpy array
    blurred = cv2.blur(arr, (size, size))
    return tensor(blurred).permute(2, 0, 1)   # back to CHW
def gaussian_blur(size, p=1.0):
    """Wrap `_gaussian_blur` as a random fastai pixel transform applied with probability `p`."""
    return RandTransform(tfm=TfmPixel(_gaussian_blur), kwargs={'size': size}, p=p,
                         resolved={}, do_run=True, is_random=True, use_on_y=False)
# Cell
def resize_one_img(x, size):
    """Bilinear-resize a single image tensor [c,h,w] to `size` = (h, w).

    Was a lambda assigned to a name (PEP 8 E731); a def keeps the same
    interface and gives the function a proper name in tracebacks.
    """
    return F.interpolate(x[None], size=size, mode='bilinear', align_corners=True)[0]
def resize_tfm(x, pad:uniform_int, line_height=crnn_config.LINE_HEIGHT):
    """Crop `pad` pixels off every border, then rescale to `line_height` keeping aspect ratio.

    `pad` is the size of the subtracted padding; the new width is computed
    from the ORIGINAL h/w so the aspect ratio of the uncropped image is kept.
    """
    channels, height, width = x.shape
    cropped = x[:, pad:height - pad, pad:width - pad]
    scaled_w = int(width * line_height / float(height))
    return resize_one_img(cropped, size=(line_height, scaled_w))
def rand_resize(pad, p=1.0):
    """Wrap `resize_tfm` as a random fastai pixel transform applied with probability `p`."""
    return RandTransform(tfm=TfmPixel(resize_tfm), kwargs={'pad': pad}, p=p,
                         resolved={}, do_run=True, is_random=True, use_on_y=False)
# Cell
# Training-time augmentations: random crop/resize, small rotations and warps,
# photometric jitter and blur. Probabilities tuned for textline images.
train_transforms = [
    rand_resize(pad=(0,PAD), p=1.0),
    rotate(degrees=(-3, 3), p=0.6),
    symmetric_warp(magnitude=(-0.03, 0.03), p=0.1),
    rand_zoom(scale=(0.9,1.03), p=0.5),
    brightness(change=(0.35, 0.65), p=0.4),
    contrast(scale=(0.7,1.3), p=0.4),
    gaussian_blur(size=(1, 7), p=0.2),
    # squish(scale=(0.85,1.15), p=0.3),
    # cutout(n_holes=(0,6), length=(1,10)), # black rect
    # tilt(direction=(0,3), magnitude=(-0.2,0.2), p=0.3)
]
# Validation uses no augmentation — only the mandatory resize to LINE_HEIGHT.
valid_transforms = [
    rand_resize(pad=(0,0), p=1.0)  # (no padding, but need to resize)
]
# Cell
def normalize_images(ims):
    """Shift/scale `ims` into [0, 1].

    Returns (normalized, min, max-of-shifted) so denormalize_images can
    invert the mapping. Works on torch tensors and numpy arrays.
    """
    _min = ims.min()
    ims = ims - _min
    _max = ims.max()
    if float(_max) == 0.0:
        # BUG FIX: a constant image produced 0/0 -> NaN; return all zeros
        # instead (denormalize still round-trips: 0 * 0 + _min == _min).
        return ims, _min, _max
    return ims/_max, _min, _max
def denormalize_images(ims, _min=None, _max=None):
    """Inverse of normalize_images: map [0, 1] data back to the original range.

    BUG FIX: the None defaults previously raised TypeError when omitted;
    they now act as the identity (scale 1, offset 0), which is backward
    compatible for every caller that passed both values.
    """
    if _max is None:
        _max = 1
    if _min is None:
        _min = 0
    return (ims * _max) + _min
# Cell
def opencv_transform_images(im_fun):
    """Lift an OpenCV uint8-image function into a transform over a float batch.

    The batch is min/max-normalized to [0,1], each image converted to a uint8
    HWC array for `im_fun`, and the results mapped back to the original value
    range, device and dtype.
    """
    def transform(ims, **kwargs):
        device, dtype = ims.device, ims.dtype
        ims, _min, _max = normalize_images(ims)
        out_ims = []
        for im in (ims*255.).long():
            # CHW float tensor -> HWC uint8 array expected by OpenCV
            im = im_fun(image2np(im).astype(np.uint8), **kwargs)
            out_ims.append( tensor(im).permute(2,0,1)[None] )
        ims = torch.cat(out_ims, dim=0) / 255.
        # restore the original value range
        ims = denormalize_images(ims, _min, _max)
        return ims.to(device=device, dtype=dtype)
    return transform
def threshold_image(im_orig): # [h,w,3]
    """Binarize a textline image: white background, contrast-normalized text pixels."""
    im_grey = cv2.cvtColor(im_orig, cv2.COLOR_BGR2GRAY)
    # inverse binary Otsu threshold: text -> 1, background -> 0   [h,w]
    _,th = cv2.threshold(im_grey,0,1,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    # grow the text mask a little so glyph edges are kept
    mask = cv2.dilate(th, (5,5), iterations=3).astype(bool)
    out = np.zeros_like(im_orig) + 255
    # stretch only the text pixels to full contrast
    text_im,_,_ = normalize_images(im_orig[mask].astype(float))
    out[mask] = (text_im*255.).astype(np.uint8)
    return out
# Cell
def create_data(df, bs=32):
    ''' DataFrame (df) -> Dataloader (dl)

    fastai datablock pipeline: images come from the 'image_path' column,
    labels from 'string'; the custom collate pads images/targets for CTC.
    '''
    data = (MyImageList.from_df(df, path='.', cols='image_path')
            .split_from_df(col='valid')
            .label_from_df(cols='string', label_cls=TextlineList, label_delim=crnn_config.label_delim)
            .transform((train_transforms, valid_transforms), tfm_y=False)
            .databunch(bs=bs, collate_fn=partial(im2seq_data_collate, pad_idx=crnn_config.pad_idx))
            .normalize()
            )
    def preprocessing(b):
        # optional OpenCV binarization step (currently disabled below)
        x,y = b
        x = opencv_transform_images(threshold_image)(x)
        return x,y
    # data.add_tfm(preprocessing)
    return data
# Cell
def conv_output(w, ss, ps=None, ks=3):
    """Trace an input width through a stack of conv/pool stages.

    w:  image width
    ss: per-stage strides; a tuple's second entry is taken as the width stride
    ps: per-stage paddings; None (for a stage or overall) means a plain
        stride-division (pooling-style) stage where the kernel is ignored
    ks: per-stage kernel sizes, or a single int applied to every stage
    Returns the (floored) output width.
    """
    if ps is None:
        # BUG FIX: zip(ss, None, ks) raised TypeError with the default ps
        ps = [None] * len(ss)
    if isinstance(ks, int):
        # let the scalar default (ks=3) apply to every stage
        ks = [ks] * len(ss)
    for s, p, k in zip(ss, ps, ks):
        s = s[1] if isinstance(s, tuple) else s
        # bump w by one when it does not divide the stride
        # (assumes at most an off-by-one, i.e. stride 2)
        w = w if w % s == 0 else w + 1
        w = (w - k + 2 * p) / s + 1 if p is not None else w / s
    return int(w)
# Cell
class CNN(nn.Module):
    """Stacked convolutional feature extractor built from per-stage config lists."""

    def __init__(self, d_model, cnn_layers, kernels, strides, channels, padding, nc=3):
        # cnn_layers: layer constructors; channels/kernels/strides/padding:
        # per-stage hyperparameters. Input channels default to `nc` (RGB).
        # NOTE(review): d_model is unused in this constructor — presumably
        # consumed by methods outside this chunk; confirm before removing.
        super().__init__()
        layers = []
        # pair each constructor with (in_ch, out_ch, kernel, stride, pad);
        # in_ch is the previous stage's out_ch, seeded with nc
        for layer,i,o,k,s,p in zip(cnn_layers, [nc] + channels[:-1], channels, kernels, strides, padding):
            layers.append( layer(ni=i, nf=o, ks=k, stride=s, padding=p) )
        self.cnn = nn.Sequential(*layers)
# ---- (unreadable chunk-boundary artifact) ----
import os
import sys
import re
import time
import yaml
import fileinput
import distutils.spawn
from git import Repo
import mysql.connector
import colorama
from colorama import Fore, Style
from mysql.connector import Error, errorcode
from migrations import spell_blobs_to_spell_table
from migrations import unnamed_flags
from migrations import char_unlock_table_columns
from migrations import HP_masks_to_blobs
from migrations import crystal_storage
from migrations import broken_linkshells
from migrations import spell_family_column
from migrations import mission_blob_extra
from migrations import cop_mission_ids
from migrations import extend_mission_log
# Append new migrations to this list and import above
# NOTE(review): list order is the execution order and differs slightly from
# import order (extend_mission_log before mission_blob_extra) — presumably
# intentional; confirm before reordering.
migrations = [
    unnamed_flags,
    spell_blobs_to_spell_table,
    char_unlock_table_columns,
    HP_masks_to_blobs,
    crystal_storage,
    broken_linkshells,
    spell_family_column,
    extend_mission_log,
    mission_blob_extra,
    cop_mission_ids,
]
# These are the default 'protected' files: player data that updates must not
# overwrite (the list is user-editable via adjust_imports / config.yaml).
player_data = [
    'accounts.sql',
    'accounts_banned.sql',
    'auction_house.sql',
    'char_blacklist.sql',
    'char_effects.sql',
    'char_equip.sql',
    'char_exp.sql',
    'char_inventory.sql',
    'char_jobs.sql',
    'char_look.sql',
    'char_merit.sql',
    'char_pet.sql',
    'char_points.sql',
    'char_profile.sql',
    'char_skills.sql',
    'char_spells.sql',
    'char_stats.sql',
    'char_storage.sql',
    'char_style.sql',
    'char_unlocks.sql',
    'char_vars.sql',
    'chars.sql',
    'conquest_system.sql',
    'delivery_box.sql',
    'linkshells.sql',
    'server_variables.sql',
]
import_files = []   # .sql files queued for import (filled by fetch_files)
backups = []        # backup files found in ../sql/backups/
database = host = port = login = password = None  # set by fetch_credentials()
db = cur = None     # live mysql connection / cursor (set by connect())
repo = Repo('../')  # git repo used to diff sql/ between versions
current_version = current_client = release_version = release_client = None
express_enabled = False
auto_backup = auto_update_client = True
mysql_bin = ''      # directory holding the mysql client binaries
mysql_env = distutils.spawn.find_executable('mysql')
if mysql_env:
    mysql_bin = os.path.dirname(mysql_env).replace('\\','/')
    if mysql_bin[-1] != '/':
        mysql_bin = mysql_bin + '/'
# Windows binaries need the .exe suffix when we build command lines
if os.name == 'nt':
    exe = '.exe'
else:
    exe = ''
log_errors = ' 2>>error.log'
colorama.init(autoreset=True)
# Redirect errors through this to hide annoying password warning
def fetch_errors():
    """Print (in red) any lines captured in error.log, then delete the file.

    The noisy mysql "password on the command line" warning is filtered out.
    Silently returns when error.log does not exist or cannot be read.
    """
    try:
        with open('error.log') as f:
            for line in f:
                if 'Using a password on the command line interface can be insecure.' in line:
                    continue
                print(Fore.RED + line)
        os.remove('error.log')
    except OSError:
        # BUG FIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; only file errors are expected here.
        return
def fetch_credentials():
    """Read the mysql_* settings from ../conf/map.conf into module globals.

    Returns False (after printing an error) on any failure; returns None on
    success (historical behavior, kept for compatibility).
    """
    global database, host, port, login, password
    credentials = {}
    # Grab mysql credentials
    filename = '../conf/map.conf'
    try:
        with open(filename) as f:
            for line in f:
                match = re.match(r'(mysql_\w+):\s+(\S+)', line)
                if match:
                    credentials[match.group(1)] = match.group(2)
        database = credentials['mysql_database']
        host = credentials['mysql_host']
        port = int(credentials['mysql_port'])
        login = credentials['mysql_login']
        # BUG FIX: was credentials['<PASSWORD>'] (a redaction artifact) which
        # always raised KeyError, so credentials could never be loaded.
        password = credentials['mysql_password']
    except (OSError, KeyError, ValueError):
        # missing file, missing key, or non-numeric port
        print(Fore.RED + 'Error fetching credentials.\nCheck ../conf/map.conf.')
        return False
def fetch_versions():
    """Populate the current/release DB and client version globals.

    release_version comes from git HEAD; current_* from ../conf/version.conf;
    release_client from ../conf/default/version.conf. Finishes by refreshing
    the file lists (express mode when both DB versions are known).
    """
    global current_version, current_client, release_version, release_client
    current_version = current_client = release_version = release_client = None
    try:
        release_version = repo.git.rev_parse(repo.head.object.hexsha, short=4)
    except Exception:
        # was a bare `except:`; gitpython raises several exception types
        print(Fore.RED + 'Unable to read current version hash.')
    try:
        with open('../conf/default/version.conf') as f:
            for line in f:
                match = re.match(r'\S?CLIENT_VER:\s+(\S+)', line)
                if match:
                    release_client = match.group(1)
    except Exception:
        print(Fore.RED + 'Unable to read ../conf/default/version.conf.')
    try:
        with open('../conf/version.conf') as f:
            for line in f:
                match = re.match(r'\S?DB_VER:\s+(\S+)', line)
                if match:
                    current_version = match.group(1)
                else:
                    match = re.match(r'\S?CLIENT_VER:\s+(\S+)', line)
                    if match:
                        current_client = match.group(1)
    except Exception:
        print(Fore.RED + 'Unable to read ../conf/version.conf.')
    if current_version and release_version:
        fetch_files(True)
    else:
        fetch_files()
def fetch_configs():
    """Load settings from config.yaml into the module globals.

    On any failure (missing or malformed file) the config is regenerated
    from the current defaults via write_configs().
    """
    global player_data, mysql_bin, auto_backup, auto_update_client
    try:
        with open(r'config.yaml') as file:
            configs = yaml.full_load(file)
            for config in configs:
                for key, value in config.items():
                    if key == 'mysql_bin':
                        if value != '':
                            mysql_bin = value
                    if key == 'auto_backup':
                        auto_backup = bool(value)
                    if key == 'auto_update_client':
                        auto_update_client = bool(value)
                    if key == 'player_data':
                        player_data = value
    except Exception:
        # was a bare `except:`; Exception keeps the self-healing rewrite
        # without swallowing KeyboardInterrupt/SystemExit
        write_configs()
def write_configs():
    """Persist the current tool settings to config.yaml."""
    settings = [
        {'mysql_bin': mysql_bin},
        {'auto_backup': auto_backup},
        {'auto_update_client': auto_update_client},
        {'player_data': player_data},
    ]
    with open(r'config.yaml', 'w') as file:
        yaml.dump(settings, file)
def fetch_files(express=False):
    """Rebuild the global import_files and backups lists.

    express=True limits import_files to the .sql files changed in git between
    the recorded and release versions; otherwise every file in ../sql/ is
    included. triggers.sql, when present, is moved to the end so triggers are
    created after the tables they reference.
    """
    import_files.clear()
    if express:
        try:
            global express_enabled
            sql_diffs = repo.commit(current_version).diff(release_version, paths='sql/')
            if len(sql_diffs) > 0:
                for diff in sql_diffs:
                    # strip the leading 'sql/' prefix
                    import_files.append(diff.a_path[4:])
                express_enabled = True
            else:
                express_enabled = False
        except Exception:
            # was a bare `except:`; gitpython raises several exception types
            print(Fore.RED + 'Error checking diffs.\nCheck that hash is valid in ../conf/version.conf.')
    else:
        for (_, _, filenames) in os.walk('../sql/'):
            import_files.extend(filenames)
            break
    backups.clear()
    for (_, _, filenames) in os.walk('../sql/backups/'):
        backups.extend(filenames)
        break
    backups.sort()
    import_files.sort()
    try:
        import_files.append(import_files.pop(import_files.index('triggers.sql')))
    except ValueError:
        # triggers.sql not in the list — nothing to reorder
        # (was a bare `except: return`, which hid unrelated errors)
        pass
def write_version(silent=False):
    """Record release_version (and optionally the client version) in ../conf/version.conf.

    Rewrites the file in place; appends a #DB_VER line when none exists.
    When not silent and the client versions differ, asks before updating
    CLIENT_VER; otherwise auto_update_client decides.
    """
    success = False
    update_client = auto_update_client
    if not silent and current_client != release_client:
        update_client = input('Update client version? [y/N] ').lower() == 'y'
    try:
        for line in fileinput.input('../conf/version.conf', inplace=True):
            match = re.match(r'\S?DB_VER:\s+(\S+)', line)
            if match:
                success = True
                # BUG FIX: append '\n' — the replacement previously dropped
                # the newline of the replaced line (print uses end=''), fusing
                # DB_VER with the following line and corrupting the file.
                line = '#DB_VER: ' + release_version + '\n'
            elif update_client:
                if current_client != release_client:
                    match = re.match(r'\S?CLIENT_VER:\s+(\S+)', line)
                    if match:
                        line = 'CLIENT_VER: ' + release_client + '\n'
                else:
                    update_client = False
            print(line, end='')
        if not success:
            with open('../conf/version.conf', 'a') as vfile:
                vfile.write('\n#DB_VER: ' + release_version)
        if update_client:
            print(Fore.GREEN + 'Updated client version!')
        fetch_versions()
    except Exception:
        # was a bare `except:`
        print(Fore.RED + 'Error writing version.')
def import_file(file):
    """Pipe ../sql/<file> into the mysql CLI against the configured database.

    NOTE(security): the password is passed on the command line and the
    command runs through the shell (os.system) — consider subprocess.run
    with a list argv and a defaults file for credentials.
    """
    updatecmd = '"{}mysql{}" -h {} -u {} -p{} {}'.format(
        mysql_bin, exe, host, login, password, database)
    print('Importing ' + file + '...')
    os.system(updatecmd + ' < ../sql/' + file + log_errors)
    fetch_errors()
def connect():
    """Open the module-level mysql connection (db) and cursor (cur).

    On access-denied errors, reports and returns False. On a missing
    database, offers to create and populate it, then reconnects.
    """
    global db, cur
    try:
        db = mysql.connector.connect(host=host,
                                     user=login,
                                     passwd=password,
                                     db=database,
                                     port=port,
                                     use_pure=True)
        cur = db.cursor()
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print(Fore.RED + 'Incorrect mysql_login or mysql_password, update ../conf/map.conf.')
            close()
            return False
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print(Fore.RED + 'Database ' + database + ' does not exist.')
            if input('Would you like to create new database: ' + database + '? [y/N] ').lower() == 'y':
                create_command = '"' + mysql_bin + 'mysqladmin' + exe + '" -h ' + host + ' -u ' + login + ' -p' + password + ' CREATE ' + database
                os.system(create_command + log_errors)
                fetch_errors()
                setup_db()
                connect()
        else:
            # BUG FIX: was `print(Fore.RED + err)`, a TypeError (str + Error)
            # that masked the real connection error.
            print(Fore.RED + str(err))
            return False
def close():
    """Close the cursor and connection if one was opened."""
    if not db:
        return
    print('Closing connection...')
    cur.close()
    db.close()
    # brief pause so the close message stays readable in the menu flow
    time.sleep(0.5)
def setup_db():
    """Fresh install: import every .sql file, then record the DB version."""
    fetch_files()
    for sql_file in import_files:
        import_file(sql_file)
    print(Fore.GREEN + 'Finished importing!')
    write_version()
def backup_db(silent=False, lite=False):
    """Dump the database to ../sql/backups/ via mysqldump.

    lite=True dumps only the protected player_data tables; otherwise a full
    dump tagged with the current version (or '-full-' when unknown).
    silent=True skips the confirmation prompt.
    The triplicated mysqldump command construction was factored into a
    shared base string (behavior-identical output commands).
    """
    if silent or input('Would you like to backup your database? [y/N] ').lower() == 'y':
        base = '"' + mysql_bin + 'mysqldump' + exe + '" --hex-blob --add-drop-trigger -h ' + host +\
            ' -u ' + login + ' -p' + password + ' ' + database
        stamp = time.strftime('%Y%m%d-%H%M%S')
        if lite:
            # table names are the .sql filenames minus their extension
            tables = ' '
            for table in player_data:
                tables += table[:-4] + ' '
            dumpcmd = base + tables + '> ../sql/backups/' + database + '--lite--' + stamp + '.sql'
        elif current_version:
            dumpcmd = base + ' > ../sql/backups/' + database + '-' + current_version + '-' + stamp + '.sql'
        else:
            dumpcmd = base + ' > ../sql/backups/' + database + '-full-' + stamp + '.sql'
        os.system(dumpcmd + log_errors)
        fetch_errors()
        print(Fore.GREEN + 'Database saved!')
        time.sleep(0.5)
def express_update(silent=False):
    """Run update_db importing only the files changed since the recorded version."""
    update_db(silent, express=True)
def update_db(silent=False,express=False):
    """Import non-protected .sql files, run migrations, then record the version.

    silent=True skips all prompts (backup honours auto_backup);
    express=True keeps the diff-based file list built by fetch_files(True).
    """
    if not silent or auto_backup:
        backup_db(silent)
    if not express:
        # non-express mode refreshes the full file list
        fetch_files()
    if not silent:
        print(Fore.GREEN + 'The following files will be imported:')
        for sql_file in import_files:
            if sql_file not in player_data:
                print(sql_file)
    if silent or input('Proceed with update? [y/N] ').lower() == 'y':
        # protected player_data files are never imported
        for sql_file in import_files:
            if sql_file not in player_data:
                import_file(sql_file)
        print(Fore.GREEN + 'Finished importing!')
        run_all_migrations(silent or express)
        write_version(silent)
def adjust_mysql_bin():
    """Interactively set the mysql_bin directory; empty input probes PATH.

    Loops until a directory containing the mysql client binary is chosen.
    """
    global mysql_bin
    while True:
        choice = input('Please enter the path to your MySQL bin directory or press enter to check PATH.\ne.g. C:\\Program Files\\MariaDB 10.5\\bin\\\n> ').replace('\\', '/')
        if choice == '':
            found = distutils.spawn.find_executable('mysql')
            if not found:
                # BUG FIX: os.path.dirname(None) raised TypeError when mysql
                # was absent from PATH; re-prompt instead.
                print(Fore.RED + 'mysql was not found in your PATH.')
                continue
            choice = os.path.dirname(found).replace('\\', '/')
        if choice and choice[-1] != '/':
            choice = choice + '/'
        if os.path.exists(choice + 'mysql' + exe):
            mysql_bin = choice
            break
def adjust_auto_backup():
    """Interactively toggle the automatic-backup setting."""
    global auto_backup
    while True:
        choice = input('Would you like a backup to automatically be created when running an update from the command line? [y/n] ')
        if choice in ('y', 'n'):
            auto_backup = choice == 'y'
            break
        bad_selection()
def adjust_auto_update_client():
    """Interactively toggle automatic client-version updates."""
    global auto_update_client
    while True:
        choice = input('Would you like to automatically update the client version when running an update from the command line? [y/n] ')
        if choice in ('y', 'n'):
            auto_update_client = choice == 'y'
            break
        bad_selection()
def adjust_imports():
    """Interactively edit the protected-files list.

    A number removes that entry; any other text is appended as a new
    protected filename; empty input returns to the caller.
    """
    while True:
        print(Fore.GREEN + 'The following files are marked as protected and will not be imported:')
        for idx, safe_file in enumerate(player_data, start=1):
            print(Fore.GREEN + str(idx) + Style.RESET_ALL + '. ' + safe_file)
        choice = input('Choose a number to remove it from this list, or type a file name to include it.\n> ')
        if not choice:
            return
        if choice.isnumeric() and 0 < int(choice) <= len(player_data):
            player_data.pop(int(choice) - 1)
        else:
            player_data.append(choice)
def run_all_migrations(silent=False):
migrations_needed = []
print(Fore.GREEN + 'Checking migrations...')
for migration in migrations:
check_migration(migration, migrations_needed, silent)
if len(migrations_needed) > 0:
if not silent:
| |
# Source repo: Wenhao-Yang/DeepSpeaker-pytorch
#!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: <EMAIL>
@Software: PyCharm
@File: test_accuracy.py
@Time: 19-8-6 下午1:29
@Overview: Train the resnet 10 with asoftmax.
"""
from __future__ import print_function
import argparse
import os
import sys
import time
# Version conflict
import warnings
from collections import OrderedDict
import kaldi_io
import numpy as np
import psutil
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torchvision.transforms as transforms
from kaldi_io import read_mat, read_vec_flt
from torch.autograd import Variable
from tqdm import tqdm
# from Define_Model.Loss.SoftmaxLoss import AngleLinear, AdditiveMarginLinear
import Define_Model
from Define_Model.model import PairwiseDistance
from Eval.eval_metrics import evaluate_kaldi_eer, evaluate_kaldi_mindcf
from Process_Data.Datasets.KaldiDataset import ScriptTrainDataset, ScriptValidDataset, KaldiExtractDataset, \
ScriptVerifyDataset
# Merge conflict resolved in favour of the Server branch (test_egs.py):
# ConcateOrgInput / ConcateVarInput are the transform classes the code below
# actually instantiates (the HEAD branch used varLengthFeat/concateinputfromMFB).
from Process_Data.audio_processing import ConcateOrgInput, ConcateVarInput, mvnormal
from TrainAndTest.common_func import create_model
from logger import NewLogger
warnings.filterwarnings("ignore")
import torch._utils
# Backward-compat shim: old serialized checkpoints reference
# torch._utils._rebuild_tensor_v2, which very old torch builds lack.
if not hasattr(torch._utils, '_rebuild_tensor_v2'):
    def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
        rebuilt = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
        rebuilt.requires_grad = requires_grad
        rebuilt._backward_hooks = backward_hooks
        return rebuilt
    torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Speaker Recognition TEST')
# Data options
# Merge conflicts resolved in favour of the Server branch (test_egs.py): the
# HEAD branch re-declared --trials, --feat-format, --xvector-dir and --resume,
# which are declared exactly once below/elsewhere — keeping HEAD would make
# argparse raise on duplicate option strings.
parser.add_argument('--train-dir', type=str, required=True, help='path to dataset')
parser.add_argument('--train-test-dir', type=str, help='path to dataset')
parser.add_argument('--valid-dir', type=str, help='path to dataset')
parser.add_argument('--test-dir', type=str, required=True, help='path to voxceleb1 test dataset')
parser.add_argument('--log-scale', action='store_true', default=False, help='log power spectogram')
parser.add_argument('--trials', type=str, default='trials', help='path to voxceleb1 test dataset')
parser.add_argument('--train-trials', type=str, default='trials', help='path to voxceleb1 test dataset')
parser.add_argument('--test-input', type=str, default='fix', help='path to voxceleb1 test dataset')
parser.add_argument('--remove-vad', action='store_true', default=False, help='using Cosine similarity')
parser.add_argument('--extract', action='store_false', default=True, help='need to make mfb file')
parser.add_argument('--frame-shift', default=200, type=int, metavar='N', help='acoustic feature dimension')
parser.add_argument('--nj', default=10, type=int, metavar='NJOB', help='num of job')
parser.add_argument('--feat-format', type=str, default='kaldi', choices=['kaldi', 'npy'],
                    help='number of jobs to make feats (default: 10)')
parser.add_argument('--check-path', default='Data/checkpoint/GradResNet8/vox1/spect_egs/soft_dp25',
                    help='folder to output model checkpoints')
parser.add_argument('--save-init', action='store_true', default=True, help='need to make mfb file')
parser.add_argument('--resume',
                    default='Data/checkpoint/GradResNet8/vox1/spect_egs/soft_dp25/checkpoint_10.pth', type=str,
                    metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--start-epoch', default=1, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('--epochs', type=int, default=20, metavar='E', help='number of epochs to train (default: 10)')
parser.add_argument('--xvector-dir', type=str, help='The dir for extracting xvectors')
# Learning-rate scheduler options
parser.add_argument('--scheduler', default='multi', type=str, metavar='SCH', help='The optimizer to use (default: Adagrad)')
parser.add_argument('--patience', default=2, type=int, metavar='PAT', help='patience for scheduler (default: 4)')
parser.add_argument('--gamma', default=0.75, type=float, metavar='GAMMA', help='The optimizer to use (default: Adagrad)')
parser.add_argument('--milestones', default='10,15', type=str, metavar='MIL', help='The optimizer to use (default: Adagrad)')
parser.add_argument('--min-softmax-epoch', type=int, default=40, metavar='MINEPOCH', help='minimum epoch for initial parameter using softmax (default: 2')
parser.add_argument('--veri-pairs', type=int, default=20000, metavar='VP', help='number of epochs to train (default: 10)')
# Training options
# Model options
# Merge conflict resolved in favour of the Server branch (test_egs.py): the
# HEAD branch declared --fast as a store_true flag, which would collide with
# the string-valued --fast option declared once further below.
parser.add_argument('--model', type=str, help='path to voxceleb1 test dataset')
parser.add_argument('--resnet-size', default=8, type=int, metavar='RES', help='The channels of convs layers)')
parser.add_argument('--filter', type=str, default='None', help='replace batchnorm with instance norm')
parser.add_argument('--mask-layer', type=str, default='None', help='replace batchnorm with instance norm')
parser.add_argument('--mask-len', type=int, default=20, help='maximum length of time or freq masking layers')
parser.add_argument('--block-type', type=str, default='basic', help='replace batchnorm with instance norm')
parser.add_argument('--relu-type', type=str, default='relu', help='replace batchnorm with instance norm')
parser.add_argument('--transform', type=str, default="None", help='add a transform layer after embedding layer')
parser.add_argument('--vad', action='store_true', default=False, help='vad layers')
parser.add_argument('--inception', action='store_true', default=False, help='multi size conv layer')
parser.add_argument('--inst-norm', action='store_true', default=False, help='batchnorm with instance norm')
parser.add_argument('--input-norm', type=str, default='Mean', help='batchnorm with instance norm')
parser.add_argument('--encoder-type', type=str, default='None', help='path to voxceleb1 test dataset')
# Architecture geometry
parser.add_argument('--channels', default='64,128,256', type=str, metavar='CHA', help='The channels of convs layers)')
parser.add_argument('--context', default='5,3,3,5', type=str, metavar='KE', help='kernel size of conv filters')
parser.add_argument('--feat-dim', default=64, type=int, metavar='N', help='acoustic feature dimension')
parser.add_argument('--input-dim', default=257, type=int, metavar='N', help='acoustic feature dimension')
parser.add_argument('--input-length', type=str, help='batchnorm with instance norm')
parser.add_argument('--accu-steps', default=1, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('--alpha', default=12, type=float, metavar='FEAT', help='acoustic feature dimension')
parser.add_argument('--kernel-size', default='5,5', type=str, metavar='KE', help='kernel size of conv filters')
parser.add_argument('--padding', default='', type=str, metavar='KE', help='padding size of conv filters')
parser.add_argument('--stride', default='2', type=str, metavar='ST', help='stride size of conv filters')
parser.add_argument('--fast', type=str, default='None', help='max pooling for fast')
parser.add_argument('--cos-sim', action='store_true', default=False, help='using Cosine similarity')
parser.add_argument('--avg-size', type=int, default=4, metavar='ES', help='Dimensionality of the embedding')
parser.add_argument('--time-dim', default=1, type=int, metavar='FEAT', help='acoustic feature dimension')
parser.add_argument('--embedding-size', type=int, default=128, metavar='ES', help='Dimensionality of the embedding')
# Batch / sampling sizes
parser.add_argument('--batch-size', type=int, default=1, metavar='BS', help='input batch size for training (default: 128)')
parser.add_argument('--input-per-spks', type=int, default=224, metavar='IPFT', help='input sample per file for testing (default: 8)')
parser.add_argument('--num-valid', type=int, default=5, metavar='IPFT', help='input sample per file for testing (default: 8)')
parser.add_argument('--test-input-per-file', type=int, default=4, metavar='IPFT', help='input sample per file for testing (default: 8)')
parser.add_argument('--test-batch-size', type=int, default=1, metavar='BST', help='input batch size for testing (default: 64)')
parser.add_argument('--dropout-p', type=float, default=0.25, metavar='BST', help='input batch size for testing (default: 64)')
# loss configure
parser.add_argument('--loss-type', type=str, default='soft', help='path to voxceleb1 test dataset')
parser.add_argument('--num-center', type=int, default=2, help='the num of source classes')
parser.add_argument('--source-cls', type=int, default=1951, help='the num of source classes')
parser.add_argument('--finetune', action='store_true', default=False, help='using Cosine similarity')
parser.add_argument('--loss-ratio', type=float, default=0.1, metavar='LOSSRATIO', help='the ratio softmax loss - triplet loss (default: 2.0')
# args for additive margin-softmax
parser.add_argument('--margin', type=float, default=0.3, metavar='MARGIN', help='the margin value for the angualr softmax loss function (default: 3.0')
parser.add_argument('--s', type=float, default=15, metavar='S', help='the margin value for the angualr softmax loss function (default: 3.0')
# args for a-softmax
parser.add_argument('--m', type=int, default=3, metavar='M', help='the margin value for the angualr softmax loss function (default: 3.0')
parser.add_argument('--lambda-min', type=int, default=5, metavar='S', help='random seed (default: 0)')
parser.add_argument('--lambda-max', type=float, default=1000, metavar='S', help='random seed (default: 0)')
# Optimizer options
parser.add_argument('--lr', type=float, default=0.1, metavar='LR', help='learning rate (default: 0.125)')
parser.add_argument('--lr-decay', default=0, type=float, metavar='LRD', help='learning rate decay ratio (default: 1e-4')
parser.add_argument('--weight-decay', default=5e-4, type=float, metavar='WEI', help='weight decay (default: 0.0)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='MOM', help='momentum for sgd (default: 0.9)')
parser.add_argument('--dampening', default=0, type=float, metavar='DAM', help='dampening for sgd (default: 0.0)')
parser.add_argument('--optimizer', default='sgd', type=str, metavar='OPT', help='The optimizer to use (default: Adagrad)')
parser.add_argument('--grad-clip', default=0., type=float, help='momentum for sgd (default: 0.9)')
# Device options
parser.add_argument('--no-cuda', action='store_true', default=False, help='enables CUDA training')
parser.add_argument('--gpu-id', default='0', type=str, help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--seed', type=int, default=123456, metavar='S', help='random seed (default: 0)')
parser.add_argument('--log-interval', type=int, default=10, metavar='LI', help='how many batches to wait before logging training status')
# Feature extraction options
parser.add_argument('--acoustic-feature', choices=['fbank', 'spectrogram', 'mfcc'], default='fbank', help='choose the acoustic features type.')
parser.add_argument('--makemfb', action='store_true', default=False, help='need to make mfb file')
parser.add_argument('--makespec', action='store_true', default=False, help='need to make spectrograms file')
parser.add_argument('--mvnorm', action='store_true', default=False, help='need to make spectrograms file')
parser.add_argument('--valid', action='store_true', default=False, help='need to make spectrograms file')
args = parser.parse_args()
# Set the device to use by setting CUDA_VISIBLE_DEVICES env variable in
# order to prevent any memory allocation on unused GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Seed numpy and torch for reproducible sampling
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.multiprocessing.set_sharing_strategy('file_system')
if args.cuda:
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True
# create logger / visualization SummaryWriter instance
kwargs = {'num_workers': args.nj, 'pin_memory': False} if args.cuda else {}
# mirror stdout into a test.log next to the checkpoint being evaluated
sys.stdout = NewLogger(os.path.join(os.path.dirname(args.resume), 'test.log'))
# scoring metric: cosine similarity or L2 distance
l2_dist = nn.CosineSimilarity(dim=1, eps=1e-6) if args.cos_sim else PairwiseDistance(2)
# Merge conflicts resolved in favour of the Server branch (test_egs.py):
# ConcateOrgInput / ConcateVarInput are the transforms imported above; the
# HEAD branch referenced varLengthFeat/concateinputfromMFB, which are not.
if args.input_length == 'var':
    # variable-length input: feed whole utterances unchanged
    transform = transforms.Compose([
        ConcateOrgInput(remove_vad=args.remove_vad),
    ])
    transform_T = transforms.Compose([
        ConcateOrgInput(remove_vad=args.remove_vad),
    ])
elif args.input_length == 'fix':
    # fixed-length input: chunk utterances using the configured frame shift
    transform = transforms.Compose([
        ConcateVarInput(frame_shift=args.frame_shift, remove_vad=args.remove_vad),
    ])
    transform_T = transforms.Compose([
        ConcateVarInput(frame_shift=args.frame_shift, remove_vad=args.remove_vad),
    ])
else:
    raise ValueError('input length must be var or fix.')

if args.mvnorm:
    # append mean/variance normalization to both pipelines
    transform.transforms.append(mvnormal())
    transform_T.transforms.append(mvnormal())
# pdb.set_trace()
# Pick the feature loader matching the on-disk feature format.
if args.feat_format == 'kaldi':
    file_loader = read_mat
    torch.multiprocessing.set_sharing_strategy('file_system')
elif args.feat_format == 'npy':
    file_loader = np.load
# NOTE(review): argparse restricts --feat-format to these two choices, so
# file_loader is always bound here.
if not args.valid:
    args.num_valid = 0
train_dir = ScriptTrainDataset(dir=args.train_dir, samples_per_speaker=args.input_per_spks, loader=file_loader,
                               transform=transform, num_valid=args.num_valid)
# extraction dataset over the test set (note: 'verfify'/'filer_loader' typos
# preserved — the keyword name is part of KaldiExtractDataset's API)
verfify_dir = KaldiExtractDataset(dir=args.test_dir, transform=transform_T, filer_loader=file_loader)
if args.valid:
<<<<<<< HEAD:TrainAndTest/test_vox1.py
valid_dir = ScriptValidDataset(valid_set=train_dir.valid_set, loader=file_loader, spk_to_idx=train_dir.spk_to_idx,
valid_uid2feat=train_dir.valid_uid2feat, valid_utt2spk_dict=train_dir.valid_utt2spk_dict,
transform=transform)
def main():
    """Evaluate a trained speaker-verification model.

    Relies on module-level state created at import time: ``args`` (parsed
    CLI options), the ``train_dir``/``verfify_dir``/``valid_dir`` datasets
    and the DataLoader ``kwargs``.  Depending on the flags it runs
    validation (``--valid``) and/or x-vector extraction (``--extract``).
    """
    # Views the training images and displays the distance on anchor-negative and anchor-positive
    # test_display_triplet_distance = False
    # print the experiment configuration
    print('\nCurrent time is \33[91m{}\33[0m.'.format(str(time.asctime())))
    print('Parsed options: {}'.format(vars(args)))
    # print('Number of Speakers: {}.\n'.format(train_dir.num_spks))

    # instantiate model and initialize weights
    # Kernel sizes and channel counts arrive as comma-separated CLI strings;
    # padding of (k - 1) / 2 preserves the feature-map size for odd kernels.
    kernel_size = args.kernel_size.split(',')
    kernel_size = [int(x) for x in kernel_size]
    padding = [int((x - 1) / 2) for x in kernel_size]
    kernel_size = tuple(kernel_size)
    padding = tuple(padding)
    channels = args.channels.split(',')
    channels = [int(x) for x in channels]

    model_kwargs = {'embedding_size': args.embedding_size,
                    'resnet_size': args.resnet_size,
                    'inst_norm': args.inst_norm,
                    'input_dim': args.feat_dim,
                    'fast': args.fast,
                    'num_classes': train_dir.num_spks,
                    'alpha': args.alpha,
                    'channels': channels,
                    'stride': args.stride,
                    'avg_size': args.avg_size,
                    'time_dim': args.time_dim,
                    'encoder_type': args.encoder_type,
                    'kernel_size': kernel_size,
                    'padding': padding,
                    'dropout_p': args.dropout_p}
    print('Model options: {}'.format(model_kwargs))

    if args.valid or args.extract:
        model = create_model(args.model, **model_kwargs)
        # Swap in the classifier head matching the loss the checkpoint was
        # trained with, otherwise the state dict will not load.
        if args.loss_type == 'asoft':
            model.classifier = AngleLinear(in_features=args.embedding_size, out_features=train_dir.num_spks, m=args.m)
        elif args.loss_type == 'amsoft':
            model.classifier = AdditiveMarginLinear(feat_dim=args.embedding_size, n_classes=train_dir.num_spks)

        assert os.path.isfile(args.resume)
        print('=> loading checkpoint {}'.format(args.resume))
        checkpoint = torch.load(args.resume)
        # start_epoch = checkpoint['epoch']
        # Drop BatchNorm bookkeeping tensors; they may be absent or
        # mismatched between the training and inference graphs.
        filtered = {k: v for k, v in checkpoint['state_dict'].items() if 'num_batches_tracked' not in k}
        # model_dict = model.state_dict()
        # model_dict.update(filtered)
        model.load_state_dict(filtered)
        #
        # Best-effort override of the dropout rate; not all models have a
        # `dropout` module.
        try:
            model.dropout.p = args.dropout_p
        except:
            pass

        start = args.start_epoch
        print('Epoch is : ' + str(start))

        if args.cuda:
            model.cuda()

        # train_loader = torch.utils.data.DataLoader(train_dir, batch_size=args.batch_size, shuffle=True, **kwargs)
        if args.valid:
            valid_loader = torch.utils.data.DataLoader(valid_dir, batch_size=args.test_batch_size, shuffle=False,
                                                       **kwargs)
            valid(valid_loader, model)

        if args.extract:
            verify_loader = torch.utils.data.DataLoader(verfify_dir, batch_size=args.test_batch_size, shuffle=False,
                                                        **kwargs)
            extract(verify_loader, model, args.xvector_dir)
file_loader | |
use:
* 'system': this account is associated directly with the bot (in bots.cfg),
and can be used at any time (when running a task or not).
* 'task': this account is associated with the task currently executing on
the bot, and may be used only when bot is actually running this task.
The flavor of account is specified via 'account_id' request field. See
ACCEPTED_KEYS for format of other keys.
The returned token is expected to be alive for at least ~5 min, but can live
longer (but no longer than ~1h). In general assume the token is short-lived.
Multiple bots may share exact same access token if their configuration match
(the token is cached by Swarming for performance reasons).
Besides the token, the response also contains the actual service account
email (if it is really configured), or two special strings in place of the
email:
* "none" if the bot is not configured to use service accounts at all.
* "bot" if the bot should use tokens produced by bot_config.py hook.
The response body on success is a JSON dict:
{
"service_account": <str email> or "none" or "bot",
"access_token": <str with actual token (if account is configured)>,
"expiry": <int with unix timestamp in seconds (if account is configured)>
}
May also return:
HTTP 400 - on a bad request or if the service account is misconfigured.
HTTP 403 - if the caller is not allowed to use the service account.
HTTP 500 - on retriable transient errors.
"""
TOKEN_KIND = service_accounts.TOKEN_KIND_ACCESS_TOKEN
TOKEN_RESPONSE_KEY = 'access_token'
ACCEPTED_KEYS = {
u'account_id', # 'system' or 'task'
u'id', # bot ID
u'scopes', # list of requested OAuth scopes
u'task_id', # optional task ID, required if using 'task' account
}
REQUIRED_KEYS = {u'account_id', u'id', u'scopes'}
def extract_token_params(self, request):
scopes = request['scopes']
if (not scopes or not isinstance(scopes, list) or
not all(isinstance(s, basestring) for s in scopes)):
self.abort_with_error(400, error='"scopes" must be a list of strings')
return scopes, None
class BotIDTokenHandler(_BotTokenHandler):
  """Issues service account ID tokens to bots.

  The counterpart of BotOAuthTokenHandler that mints ID tokens instead of
  OAuth access tokens; see BotOAuthTokenHandler doc for the overall flow.

  On success the response body is a JSON dict:
  {
    "service_account": <str email> or "none" or "bot",
    "id_token": <str with actual token (if account is configured)>,
    "expiry": <int with unix timestamp in seconds (if account is configured)>
  }

  May also return:
    HTTP 400 - on a bad request or if the service account is misconfigured.
    HTTP 403 - if the caller is not allowed to use the service account.
    HTTP 500 - on retriable transient errors.
  """
  TOKEN_KIND = service_accounts.TOKEN_KIND_ID_TOKEN
  TOKEN_RESPONSE_KEY = 'id_token'

  ACCEPTED_KEYS = {
      u'account_id',  # 'system' or 'task'
      u'id',  # bot ID
      u'audience',  # the string audience to put into the token
      u'task_id',  # optional task ID, required if using 'task' account
  }
  REQUIRED_KEYS = {u'account_id', u'id', u'audience'}

  def extract_token_params(self, request):
    """Validates the 'audience' request field.

    Returns (None, audience): an ID-token request carries an audience string
    and no scopes.  Aborts the request with HTTP 400 if the field is missing,
    empty or not a string.
    """
    audience = request['audience']
    is_valid = isinstance(audience, basestring) and bool(audience)
    if not is_valid:
      self.abort_with_error(400, error='"audience" must be a string')
    return None, audience
### Bot Task API RPC handlers
class BotTaskUpdateHandler(_BotApiHandler):
  """Receives updates from a Bot for a task.

  The handler verifies packets are processed in order and will refuse
  out-of-order packets.
  """
  # Every key a bot may legitimately send; unknown keys are a hard 400.
  ACCEPTED_KEYS = {
      u'bot_overhead',
      u'cache_trim_stats',
      u'cas_output_root',
      u'cipd_pins',
      u'cipd_stats',
      u'cleanup_stats',
      u'cost_usd',
      u'duration',
      u'exit_code',
      u'hard_timeout',
      u'id',
      u'io_timeout',
      u'isolated_stats',
      u'named_caches_stats',
      u'output',
      u'output_chunk_start',
      u'task_id',
  }
  REQUIRED_KEYS = {u'id', u'task_id'}

  @decorators.silence(apiproxy_errors.RPCFailedError)
  @auth.public  # auth happens in bot_auth.validate_bot_id_and_fetch_config()
  def post(self, task_id=None):
    """Ingests one task-update packet from a bot.

    Decodes the optional output/stats payloads, persists them via
    task_scheduler.bot_update_task() and replies with {'must_stop', 'ok'} so
    the bot knows whether the task has been cancelled/killed server-side.
    """
    # Unlike handshake and poll, we do not accept invalid keys here. This code
    # path is much more strict.

    # Take the time now - for measuring pubsub task change latency.
    now = utils.milliseconds_since_epoch()

    request = self.parse_body()
    msg = log_unexpected_subset_keys(self.ACCEPTED_KEYS, self.REQUIRED_KEYS,
                                     request, self.request, 'bot', 'keys')
    if msg:
      self.abort_with_error(400, error=msg)

    # TODO(crbug.com/1015701): take from X-Luci-Swarming-Bot-ID header.
    bot_id = request['id']
    task_id = request['task_id']

    # Make sure bot self-reported ID matches the authentication token. Raises
    # auth.AuthorizationError if not.
    bot_auth.validate_bot_id_and_fetch_config(bot_id)

    # All remaining fields are optional; absent means "unchanged/unknown".
    bot_overhead = request.get('bot_overhead')
    cipd_pins = request.get('cipd_pins')
    cipd_stats = request.get('cipd_stats')
    cost_usd = request.get('cost_usd', 0)
    duration = request.get('duration')
    exit_code = request.get('exit_code')
    hard_timeout = request.get('hard_timeout')
    io_timeout = request.get('io_timeout')
    isolated_stats = request.get('isolated_stats')
    cache_trim_stats = request.get('cache_trim_stats')
    named_caches_stats = request.get('named_caches_stats')
    cleanup_stats = request.get('cleanup_stats')
    output = request.get('output')
    output_chunk_start = request.get('output_chunk_start')
    cas_output_root = request.get('cas_output_root')
    # NOTE(review): 'canceled' is read but is not in ACCEPTED_KEYS, so a bot
    # actually sending it would be rejected above -- confirm intended.
    canceled = request.get('canceled')

    # Stats only make sense relative to a reported overall bot overhead.
    if (isolated_stats or cipd_stats) and bot_overhead is None:
      ereporter2.log_request(
          request=self.request,
          source='server',
          category='task_failure',
          message='Failed to update task: %s' % task_id)
      self.abort_with_error(
          400,
          error='isolated_stats and cipd_stats require bot_overhead to be set'
          '\nbot_overhead: %s\nisolate_stats: %s' %
          (bot_overhead, isolated_stats))

    run_result_key = task_pack.unpack_run_result_key(task_id)
    performance_stats = None
    if bot_overhead is not None:
      performance_stats = task_result.PerformanceStats(
          bot_overhead=bot_overhead)
      if isolated_stats:
        download = isolated_stats.get('download') or {}
        upload = isolated_stats.get('upload') or {}

        def unpack_base64(d, k):
          # Returns the decoded bytes, or None when the key is missing/empty.
          x = d.get(k)
          if x:
            return base64.b64decode(x)

        performance_stats.isolated_download = task_result.CASOperationStats(
            duration=download.get('duration'),
            initial_number_items=download.get('initial_number_items'),
            initial_size=download.get('initial_size'),
            items_cold=unpack_base64(download, 'items_cold'),
            items_hot=unpack_base64(download, 'items_hot'))
        performance_stats.isolated_upload = task_result.CASOperationStats(
            duration=upload.get('duration'),
            items_cold=unpack_base64(upload, 'items_cold'),
            items_hot=unpack_base64(upload, 'items_hot'))
      if cipd_stats:
        performance_stats.package_installation = task_result.OperationStats(
            duration=cipd_stats.get('duration'))
      if cache_trim_stats:
        performance_stats.cache_trim = task_result.OperationStats(
            duration=cache_trim_stats.get('duration'))
      if named_caches_stats:
        install = named_caches_stats.get('install', {})
        uninstall = named_caches_stats.get('uninstall', {})
        performance_stats.named_caches_install = task_result.OperationStats(
            duration=install.get('duration'))
        performance_stats.named_caches_uninstall = task_result.OperationStats(
            duration=uninstall.get('duration'))
      if cleanup_stats:
        performance_stats.cleanup = task_result.OperationStats(
            duration=cleanup_stats.get('duration'))

    if output is not None:
      try:
        output = base64.b64decode(output)
      except UnicodeEncodeError as e:
        logging.error('Failed to decode output\n%s\n%r', e, output)
        output = output.encode('ascii', 'replace')
      except TypeError as e:
        # Save the output as-is instead. The error will be logged in ereporter2
        # and returning a HTTP 500 would only force the bot to stay in a retry
        # loop.
        logging.error('Failed to decode output\n%s\n%r', e, output)
    if cas_output_root:
      cas_output_root = task_request.CASReference(
          cas_instance=cas_output_root['cas_instance'],
          digest=task_request.Digest(**cas_output_root['digest']))

    if cipd_pins:
      cipd_pins = task_result.CipdPins(
          client_package=task_request.CipdPackage(
              **cipd_pins['client_package']),
          packages=[
              task_request.CipdPackage(**args) for args in cipd_pins['packages']
          ])

    # Tell the task queues management engine that the bot is still alive, and
    # it shall refresh the task queues.
    bot_root_key = bot_management.get_root_key(bot_id)
    task_queues.get_queues(bot_root_key)

    try:
      state = task_scheduler.bot_update_task(
          run_result_key=run_result_key,
          bot_id=bot_id,
          output=output,
          output_chunk_start=output_chunk_start,
          exit_code=exit_code,
          duration=duration,
          hard_timeout=hard_timeout,
          io_timeout=io_timeout,
          cost_usd=cost_usd,
          cas_output_root=cas_output_root,
          cipd_pins=cipd_pins,
          performance_stats=performance_stats,
          canceled=canceled,
          start_time=now)
      if not state:
        logging.info('Failed to update, please retry')
        self.abort_with_error(500, error='Failed to update, please retry')

      # Map the resulting task state to the bot-event type to record.
      if state in (task_result.State.COMPLETED, task_result.State.TIMED_OUT):
        action = 'task_completed'
      elif state == task_result.State.KILLED:
        action = 'task_killed'
      else:
        assert state in (task_result.State.BOT_DIED,
                         task_result.State.RUNNING), state
        action = 'task_update'
      bot_management.bot_event(
          event_type=action,
          bot_id=bot_id,
          external_ip=self.request.remote_addr,
          authenticated_as=auth.get_peer_identity().to_bytes(),
          dimensions=None,
          state=None,
          version=None,
          quarantined=None,
          maintenance_msg=None,
          task_id=task_id,
          task_name=None,
          register_dimensions=False)
    except ValueError as e:
      ereporter2.log_request(
          request=self.request,
          source='server',
          category='task_failure',
          message='Failed to update task: %s' % e)
      self.abort_with_error(400, error=str(e))
    except webob.exc.HTTPException:
      raise
    except Exception as e:
      logging.exception('Internal error: %s', e)
      self.abort_with_error(500, error=str(e))

    # - BOT_DIED will occur when the following conditions are true:
    #   - The bot polled correctly, but then stopped updating for at least
    #     task_result.BOT_PING_TOLERANCE. (It can occur if the host went to
    #     sleep, or the OS was overwhelmed).
    #   - /internal/cron/abort_bot_died runs, detects the bot is MIA, kills the
    #     task.
    #   - Bot wakes up, starts sending updates again.
    # - KILLED is when the client uses the kill API to forcibly stop a running
    #   task.
    must_stop = state in (task_result.State.BOT_DIED, task_result.State.KILLED)
    if must_stop:
      logging.info('asking bot to kill the task')
    self.send_response({'must_stop': must_stop, 'ok': True})
class BotTaskErrorHandler(_BotApiHandler):
  """It is a specialized version of ereporter2's /ereporter2/api/v1/on_error
  that also attaches a task id to it.

  This formally terminates the task, marking it as an internal failure.
  This can be used by bot_main.py to kill the task when task_runner misbehaved.
  """
  EXPECTED_KEYS = {u'id', u'message', u'task_id'}

  @auth.public  # auth happens in bot_auth.validate_bot_id_and_fetch_config
  def post(self, task_id=None):
    """Records a bot-reported task error and terminates the task.

    The `task_id` URL argument is ignored; the body's 'task_id' field is
    authoritative.
    """
    start_time = utils.milliseconds_since_epoch()
    request = self.parse_body()
    # TODO(crbug.com/1015701): take from X-Luci-Swarming-Bot-ID header.
    bot_id = request.get('id')
    task_id = request.get('task_id', '')
    message = request.get('message', 'unknown')
    # Make sure bot self-reported ID matches the authentication token. Raises
    # auth.AuthorizationError if not.
    bot_auth.validate_bot_id_and_fetch_config(bot_id)

    bot_management.bot_event(
        event_type='task_error',
        bot_id=bot_id,
        external_ip=self.request.remote_addr,
        authenticated_as=auth.get_peer_identity().to_bytes(),
        dimensions=None,
        state=None,
        version=None,
        quarantined=None,
        maintenance_msg=None,
        task_id=task_id,
        task_name=None,
        message=message,
        register_dimensions=False)
    line = ('Bot: https://%s/restricted/bot/%s\n'
            'Task failed: https://%s/user/task/%s\n'
            '%s') % (app_identity.get_default_version_hostname(), bot_id,
                     app_identity.get_default_version_hostname(), task_id,
                     message)
    ereporter2.log_request(self.request, source='bot', message=line)

    # NOTE(review): the key validation runs only after the event and error
    # report above have been recorded, so even malformed requests leave a
    # trace -- confirm this ordering is intentional.
    msg = log_unexpected_keys(self.EXPECTED_KEYS, request, self.request, 'bot',
                              'keys')
    if msg:
      self.abort_with_error(400, error=msg)

    msg = task_scheduler.bot_terminate_task(
        task_pack.unpack_run_result_key(task_id), bot_id, start_time)
    if msg:
      logging.error(msg)
      self.abort_with_error(400, error=msg)
    self.send_response({})
def get_routes():
routes = [
# Generic handlers (no auth)
('/swarming/api/v1/bot/server_ping', ServerPingHandler),
# Bot code | |
list of the time derivatives for q, stored
# in order from present to the past
def ab_blend(dqdt, order):
    """Blend stored time derivatives with Adams-Bashforth coefficients.

    Parameters
    ----------
    dqdt : sequence
        Time derivatives of q, ordered from the present (index 0) to the
        past.  Items may be scalars or numpy arrays.
    order : int
        Order of the Adams-Bashforth scheme: 1, 2 or 3.

    Returns
    -------
    The blended derivative to advance q by one time step.

    Raises
    ------
    ValueError
        If `order` is not 1, 2 or 3.  (Previously the function only printed
        a message and silently returned None, which would surface later as
        a confusing TypeError in the caller.)
    """
    if order == 1:
        return dqdt[0]
    elif order == 2:
        return 1.5*dqdt[0] - .5*dqdt[1]
    elif order == 3:
        return (23*dqdt[0] - 16*dqdt[1] + 5*dqdt[2])/12.
    else:
        raise ValueError("order {} not supported".format(order))
# In[5]:
def advect(q,u,v,dx,dy):
    """Advective tendency -u*dq/dx - v*dq/dy by third-order upwinding.

    q, u, v are co-located 2-D arrays on the same grid; dx, dy are the
    grid spacings.  Returns dq/dt with the same shape as q.  Boundary
    rows/columns of the one-sided stencils fall back to first order;
    the outermost rows/columns of the result stay zero.
    """
    # third-order upwind advection
    # q,u,v are co-located
    dqdt = np.zeros(q.shape)
    # One-sided differences: "m" = minus side (wind from the left/bottom),
    # "p" = plus side (wind from the right/top).
    dqmx = np.zeros(q.shape)
    dqpx = np.zeros(q.shape)
    dqmy = np.zeros(q.shape)
    dqpy = np.zeros(q.shape)
    dqmx[:,1] = -q[:,0] + q[:,1] # 1st order, minus side at left wall
    dqmx[:,2:-1] = (q[:,:-3] - 6*q[:,1:-2] + 3*q[:,2:-1] + 2*q[:,3:])/6. # 3rd order, minus side
    dqpx[:,-2] = -q[:,-2] + q[:,-1] # 1st order, plus side at right wall
    dqpx[:,1:-2] = (-2*q[:,0:-3] - 3*q[:,1:-2] + 6*q[:,2:-1] -1*q[:,3:])/6. #3rd order, plus side
    dqmy[1,:] = -q[0,:] + q[1,:] # 1st order, minus side at bottom wall
    dqmy[2:-1,:] = (q[:-3,:] - 6*q[1:-2,:] + 3*q[2:-1,:] + 2*q[3:,:])/6. # 3rd order, minus side
    dqpy[-2,:] = -q[-2,:] + q[-1,:] # 1st order, plus side at top wall
    dqpy[1:-2,:] = ( - 2*q[0:-3,:] - 3*q[1:-2,:] + 6*q[2:-1,:] - q[3:,:] )/6. # 3rd order, plus side
    dqdx = np.where(u>0.,dqmx,dqpx)/dx # upwind, emphasize side from where fluid is coming from
    dqdy = np.where(v>0.,dqmy,dqpy)/dy # ditto
    dqdt += -u*dqdx
    dqdt += -v*dqdy
    return dqdt
# In[6]:
#############################################################
def divergence(u, v, dx, dy):
    """Horizontal divergence du/dx + dv/dy evaluated on the p-grid.

    u and v live on the U-grid; each derivative at a p-point is the
    average of the two one-sided differences that straddle it.
    """
    dudx = .5*( u[:-1,1:] + u[1:,1:] - u[:-1,:-1] - u[1:,:-1])/dx
    dvdy = .5*( v[1:,:-1] + v[1:,1:] - v[:-1,:-1] - v[:-1,1:])/dy
    return dudx + dvdy
#############################################################
def vortp(u, v, dx, dy):
    """Vertical vorticity dv/dx - du/dy evaluated on the p-grid.

    u and v live on the U-grid; each derivative at a p-point is the
    average of the two one-sided differences that straddle it.
    """
    dvdx = .5*( v[:-1,1:] + v[1:,1:] - v[:-1,:-1] - v[1:,:-1])/dx
    dudy = .5*( u[1:,:-1] + u[1:,1:] - u[:-1,:-1] - u[:-1,1:])/dy
    return dvdx - dudy
#############################################################
def vortU(u, v, dx, dy):
    """Vorticity dv/dx - du/dy at interior U-grid points.

    Centered differences are used in the interior; the boundary ring of
    the returned array is left at zero.
    """
    vort = np.zeros(u.shape)
    dvdx = (v[1:-1,2:] - v[1:-1,:-2])/(2*dx)
    dudy = (u[2:,1:-1] - u[:-2,1:-1])/(2*dy)
    vort[1:-1,1:-1] = dvdx - dudy
    return vort
# In[7]:
def psi_to_uv(q, dx, dy):
    """Velocity (u, v) from the streamfunction q on the U-grid.

    v = dq/dx and u = -dq/dy.  Centered differences are used in the
    interior; the one-sided wall formulas rely on the assumption that
    q = 0 on the boundaries.
    """
    u = 0.*q
    v = 0.*q
    # interior: centered differences over 2*dy and 2*dx
    u[1:-1,1:-1] = -( q[2:,1:-1] - q[:-2,1:-1] )/(2*dy)
    v[1:-1,1:-1] = +( q[1:-1,2:] - q[1:-1,:-2])/(2*dx)
    # walls: effectively centered differences using the q=0 boundary value
    u[0,1:-1] = -q[1,1:-1]/dy
    u[-1,1:-1] = q[-2,1:-1]/dy
    v[1:-1,0] = q[1:-1,1]/dx
    v[1:-1,-1] = -q[1:-1,-2]/dx
    return u, v
# In[8]:
def laplacian(p, dx, dy, il=None, ir=None, jb=None, jt=None):
    """Five-point Laplacian d^2p/dx^2 + d^2p/dy^2 of p.

    il, ir, jb, jt optionally give the index (one of -2, -1, 0, 1) of the
    image point just outside the left/right/bottom/top boundary.  When a
    boundary's image index is omitted, the corresponding second-derivative
    term is simply not added on that boundary row/column.
    """
    inv_dx2 = 1./(dx*dx)
    inv_dy2 = 1./(dy*dy)
    out = np.zeros(p.shape)
    # interior second differences
    out[:,1:-1] = inv_dx2*( p[:,:-2] -2*p[:,1:-1] + p[:,2:] )
    out[1:-1,:] += inv_dy2*( p[:-2,:] -2*p[1:-1,:] + p[2:,:] )
    image_ok = (-2, -1, 0, 1)
    # boundary terms via the caller-specified image points
    if il in image_ok:
        out[:,0] += inv_dx2*( p[:,il] -2*p[:,0] + p[:,1] )
    if ir in image_ok:
        out[:,-1] += inv_dx2*( p[:,-2] -2*p[:,-1] + p[:,ir] )
    if jb in image_ok:
        out[0,:] += inv_dy2*( p[jb,: ] -2*p[0,:] + p[1,:] )
    if jt in image_ok:
        out[-1,:] += inv_dy2*( p[-2,: ] -2*p[-1,:] + p[jt,:] )
    return out
# In[9]:
def poisson_fft_prep(Nx, Ny, dx, dy, lapl='discrete'):
    """Precompute inverse-Laplacian multipliers for the DST Poisson solver.

    Returns an (Ny-2, Nx-2) array of 1/lambda_{k,m}: multiplying the 2-D
    sine-transform coefficients of the vorticity by it yields the
    coefficients of the streamfunction.  With lapl='discrete' the
    eigenvalues of the 5-point discrete Laplacian are used; any other
    value selects the continuous (calculus) operator -k^2 - m^2.
    """
    L = dx*(Nx-1)
    W = dy*(Ny-1)
    ka = (np.arange(Nx-2) + 1)*np.pi/L  # sine wavenumbers, x-direction
    ma = (np.arange(Ny-2) + 1)*np.pi/W  # sine wavenumbers, y-direction
    if lapl == 'discrete':
        xpart = (2*np.cos(ka*dx) - 2)/dx**2
        ypart = (2*np.cos(ma*dy) - 2)/dy**2
    else:  # the calculus Laplacian
        xpart = -ka**2
        ypart = -ma**2
    # outer sum: the eigenvalue for mode (k, m) is xpart[k] + ypart[m]
    return 1./(xpart[np.newaxis, :] + ypart[:, np.newaxis])
def poisson_fft(vort, invlapl):
    """Solve del^2 psi = vort with homogeneous (psi = 0) boundaries.

    Forward 2-D type-1 discrete sine transform of the interior vorticity,
    multiplication by the precomputed inverse-Laplacian multipliers (see
    poisson_fft_prep), inverse transform, then the DST normalization.
    """
    interior = vort[1:-1,1:-1]
    # forward DST-I in x then y: vorticity amplitudes A_{k,m}
    coeff = scipy.fftpack.dst(interior, axis=1, type=1)
    coeff = scipy.fftpack.dst(coeff, axis=0, type=1)
    # streamfunction amplitudes B_{k,m} = A_{k,m} / lambda_{k,m}
    coeff = coeff*invlapl
    # inverse DST-I in y then x back to grid space
    back = scipy.fftpack.idst(coeff, axis=0, type=1)
    back = scipy.fftpack.idst(back, axis=1, type=1)
    ny, nx = vort.shape
    psi = np.zeros((ny, nx))  # boundaries stay 0 by construction
    psi[1:-1,1:-1] = back/(4*(ny-1)*(nx-1))  # FFT normalization convention
    return psi
# <hr/>
# ## Specify the grid:
# Select a grid size that allows the Poisson solver to be fast.
#
# 2<sup>n</sup> +1 for `Nx` and `Ny` seems to be ideal.
# In[10]:
# Make the grid. 257x257 allows for speed of the FFT
Nx = 257
Ny = 257
xmax = 1. # 0 <= x <= xmax
ymax = 1.
dx = xmax/(Nx-1.) # grid width
dy = ymax/(Ny-1.)
# U-grid coordinates (includes the boundaries)
x1U = np.linspace(0,xmax,Nx)
y1U = np.linspace(0,ymax,Ny)
# p-grid coordinates: midpoints between adjacent U-grid points
x1p = .5*(x1U[:-1]+x1U[1:])
y1p = .5*(y1U[:-1]+y1U[1:])
xU,yU = np.meshgrid(x1U,y1U)
xp,yp = np.meshgrid(x1p,y1p)
# An array of the inverse Laplacian,
# to be applied to the Fourier components of the r.h.s. of the Poisson equation.
# This is calculated once, and used throughout the notebook.
invlapl = poisson_fft_prep(Nx,Ny,dx,dy)#,lapl='discrete') #lapl='calculus' or lapl='discrete'
# ### Test the Poisson solver
# In[11]:
# Build a random streamfunction (zero on the boundary), take its discrete
# Laplacian, and later check the solver recovers it.
np.random.seed(2)
psi_test = np.zeros((Ny,Nx))
psi_test[1:-1,1:-1] = np.random.random((Ny-2,Nx-2))
vort_test = laplacian(psi_test,dx,dy)
# In[12]:
#%%timeit
#psi_solved = poisson_fft(vort_test,invlapl)
# Results from `%%timeit` study.
#
# Note the great speed when using 2<sup>n</sup> +1 for `Nx` and `Ny` seems to be ideal.
#
# | Nx × Ny | ms per loop |
# |---|---|---|
# | 127 x 127 | .876 |
# | 128 x 128 | 13.5 |
# | 129 x 129 | .747 |
# | 130 x 130 | 2.76 |
# | 255 x 255 | 31.7 |
# | 256 x 256 | 4.51 |
# | 257 x 257 | 2.74 |
# | 258 x 258 | 109 |
# | 512 x 512 | 52.2 |
# | 513 x 513 | 15.4 |
# | 514 x 514 | 22.5 |
# | 515 x 515 | 285|
# | 1023 x 1023 | 209 |
# | 1024 x 1024 |134 |
# | 1025 x 1025 | 75.8 |
# | 1026 x 1026 | 172 |
# In[13]:
# did the Poisson solver work??
psi_solved = poisson_fft(vort_test,invlapl)
diff = psi_test - psi_solved
diff2 = diff**2
# Bug fix: the original printed the *mean squared* error (diff2.mean())
# while labelling it "r.m.s error"; take the square root so the printed
# number matches the label.
print( "\nr.m.s error should be much less than 1:", np.sqrt(diff2.mean()) )
# <hr/>
#
# ## Specify initial vorticity:
# In[14]:
# choose an experiment number
nexp = 1
######
# Experiments 1-4: a pair of orbiting Gaussian vortices.
#   vortorb: displacement of each vortex from the domain center
#   gwidth:  Gaussian width of a vortex
#   vortamp: amplitude of the vorticity in a vortex
# Experiments 10-15: a vortex sheet.
#   xcen/ycen: sheet center; slightly off-center values break symmetry
if nexp == 1: # shows vortex merger at about t=3
    vortorb =.11 # displacement of a vortex from center
    gwidth = .05 # width of vortex
    vortamp = 40 # amplitude of vorticity in vortex
elif nexp == 2: # shows vortex merger at about t=1.5
    vortorb =.11
    gwidth =.05
    vortamp = 80
elif nexp == 3: # shows a vortex merger at t=10
    vortorb = .12
    gwidth =.05
    vortamp = 80
elif nexp == 4: # shows no vortex merger before t=20
    vortorb = .22 # twice the orbital radius of nexp 2
    gwidth = .10 # twice the gaussian width of nexp 2
    vortamp = 20 # so same circulation as nexp 2
elif nexp == 10: # thick unstable vortex sheet
    vortamp =4.
    gwidth =.1
    xcen = .5
    ycen = .5
elif nexp == 11: # thick unstable vortex sheet, off-center initialization
    vortamp = 4.
    gwidth = .1
    xcen = .53 # use .53 to break symmetry
    ycen = .51 # use .51 to break symmetry
elif nexp == 12: # thin unstable vortex sheet
    vortamp = 8.
    gwidth = .05
    xcen = .5
    ycen = .5
elif nexp==13: # thin unstable vortex sheet, off-center initialization
    vortamp = 8.
    gwidth =.05
    xcen = .53 # use .53 to break symmetry
    ycen = .51 # use .51 to break symmetry
elif nexp == 14: # very thin unstable vortex sheet
    vortamp = 16.
    gwidth = .025
    xcen = .5
    ycen = .5
elif nexp == 15: # very thin unstable vortex sheet
    vortamp = 16.
    gwidth = .025
    xcen = .53 # use .53 to break symmetry
    ycen = .51# use .51 to break symmetry
else:
    # Bug fix: previously this only printed a warning and fell through,
    # leaving the experiment parameters undefined and causing a confusing
    # NameError later.  Fail fast instead.
    raise ValueError("nexp {} is not a valid experiment number".format(nexp))
if | |
boolean indicating if the buffer data should be updated even
if `scan_time` is <= that in the database.
"""
self._acquire_lock()
try:
# TODO: Canonicalize path (or assert that it is canonicalized)
dir, base = split(buf.path)
# Get the current data, if any.
res_index = self.load_index(dir, "res_index", {})
res_index_has_changed = False
blob_index = self.load_index(dir, "blob_index", {})
blob_index_has_changed = False
is_hits_from_lpath_lang = self.lang in self.db.import_everything_langs
if is_hits_from_lpath_lang:
# TODO: Not sure {} for a default is correct here.
toplevelname_index = self.load_index(
dir, "toplevelname_index", {})
toplevelname_index_has_changed = False
try:
(old_scan_time, old_scan_error, old_res_data) = res_index[base]
except KeyError: # adding a new entry
(old_scan_time, old_scan_error, old_res_data) = None, None, {}
else: # updating an existing entry
if not skip_scan_time_check and scan_time is not None \
and scan_time <= old_scan_time:
log.debug("skipping db update for '%s': %s < %s and "
"no 'skip_scan_time_check' option",
base, scan_time, old_scan_time)
return
log.debug("update from %s buf '%s'", buf.lang, buf.path)
# Parse the tree and get the list of blobnames.
# res_data: {blobname -> ilk -> toplevelnames}
new_res_data = {}
new_blobnames_and_blobs = []
if scan_tree:
for blob in scan_tree[0]:
lang = blob.get("lang")
assert blob.get("lang") == self.lang, "'%s' != '%s' (blob %r)" % (
blob.get("lang"), self.lang, blob)
blobname = blob.get("name")
toplevelnames_from_ilk = new_res_data.setdefault(
blobname, {})
for toplevelname, elem in blob.names.items():
if "__file_local__" in elem.get("attributes", "").split():
# don't put file local things in toplevel names
continue
ilk = elem.get("ilk") or elem.tag
if ilk not in toplevelnames_from_ilk:
toplevelnames_from_ilk[ilk] = set([toplevelname])
else:
toplevelnames_from_ilk[ilk].add(toplevelname)
new_blobnames_and_blobs.append((blobname, blob))
# Determine necessary changes to res_index.
if scan_error:
if (scan_time != old_scan_time
or scan_error != old_scan_error):
res_index[base] = (scan_time, scan_error,
old_res_data)
res_index_has_changed = True
else:
# Only consider new blobs if there wasn't a scan error.
# I.e., we want to preserve the last good scan info.
if (scan_time != old_scan_time
or scan_error != old_scan_error
or new_res_data != old_res_data):
res_index[base] = (scan_time, scan_error,
new_res_data)
res_index_has_changed = True
if is_hits_from_lpath_lang:
if new_res_data != old_res_data:
toplevelname_index.update(base,
old_res_data, new_res_data)
toplevelname_index_has_changed = True
# Determine necessary changes to blob_index and the
# dbfiles and then make them.
dbfile_changes = []
for blobname, blob in new_blobnames_and_blobs:
if blobname in old_res_data:
dbfile_changes.append(("update", blobname, blob))
else:
dbfile_changes.append(("add", blobname, blob))
for blobname in old_res_data:
if blobname not in new_res_data:
dbfile_changes.append(("remove", blobname, None))
dhash = self.dhash_from_dir(dir)
for action, blobname, blob in dbfile_changes:
if action == "add":
dbfile = self.db.bhash_from_blob_info(
buf.path, self.lang, blobname)
blob_index[blobname] = dbfile
blob_index_has_changed = True
dbdir = join(self.base_dir, dhash)
if not exists(dbdir):
self._mk_dbdir(dbdir, dir)
# XXX What to do on write failure?
log.debug("fs-write: %s blob '%s/%s'",
self.lang, dhash, dbfile)
if blob.get("src") is None:
blob.set(
"src", buf.path) # for defns_from_pos() support
ET.ElementTree(blob).write(join(dbdir, dbfile+".blob"))
elif action == "remove":
dbfile = blob_index[blobname]
del blob_index[blobname]
blob_index_has_changed = True
# XXX What to do on removal failure?
log.debug("fs-write: remove %s blob '%s/%s'",
self.lang, dhash, dbfile)
os.remove(join(self.base_dir, dhash, dbfile+".blob"))
elif action == "update":
# Try to only change the dbfile on disk if it is
# different.
s = BytesIO()
if blob.get("src") is None:
blob.set(
"src", buf.path) # for defns_from_pos() support
ET.ElementTree(blob).write(s)
new_dbfile_content = s.getvalue()
dbfile = blob_index[blobname]
dbpath = join(self.base_dir, dhash, dbfile+".blob")
# PERF: Might be nice to cache the new dbfile
# content for the next time this resource is
# updated. For files under edit this will be
# common. I.e. just for the "editset".
try:
fin = open(dbpath, 'rb')
except (OSError, IOError) as ex:
# Technically if the dbfile doesn't exist, this
# is a sign of database corruption. No matter
# though (for this blob anyway), we are about to
# replace it.
old_dbfile_content = None
else:
try:
old_dbfile_content = fin.read()
finally:
fin.close()
if new_dbfile_content != old_dbfile_content:
if not exists(dirname(dbpath)):
self._mk_dbdir(dirname(dbpath), dir)
# XXX What to do if fail to write out file?
log.debug("fs-write: %s blob '%s/%s'",
self.lang, dhash, dbfile)
fout = open(dbpath, 'wb')
try:
fout.write(new_dbfile_content)
finally:
fout.close()
if res_index_has_changed:
self.changed_index(dir, "res_index")
if blob_index_has_changed:
self.changed_index(dir, "blob_index")
if is_hits_from_lpath_lang and toplevelname_index_has_changed:
self.changed_index(dir, "toplevelname_index")
finally:
self._release_lock()
# TODO Database.clean() should remove dirs that have no
# blob_index entries.
def _mk_zone_skel(self):
log.debug("fs-write: mkdir '%s'", self.base_dir)
os.makedirs(self.base_dir)
log.debug("fs-write: create 'lang'")
fout = codecs.open(join(self.base_dir, "lang"), 'wb', 'utf-8')
try:
fout.write(self.lang)
finally:
fout.close()
def _mk_dbdir(self, dbdir, dir):
if not exists(self.base_dir):
self._mk_zone_skel()
log.debug("fs-write: mkdir '%s'", dbdir[len(self.base_dir)+1:])
os.mkdir(dbdir)
log.debug("fs-write: '%s/path'", dbdir[len(self.base_dir)+1:])
fout = codecs.open(join(dbdir, "path"), 'wb', 'utf-8')
try:
fout.write(dir)
finally:
fout.close()
def load_blob(self, dbsubpath):
"""This must be called with the lock held."""
log.debug("TODO: LangZone.load_blob: add blob caching!")
log.debug("fs-read: load %s blob '%s'", self.lang, dbsubpath)
dbpath = join(self.base_dir, dbsubpath+".blob")
blob = ET.parse(dbpath).getroot()
for hook_handler in self._hook_handlers:
try:
hook_handler.post_db_load_blob(blob)
except:
log.exception("error running hook: %r.post_db_load_blob(%r)",
hook_handler, blob)
return blob
def load_index(self, dir, index_name, default=None):
"""Get the indicated index.
"dir" is the dir path this index represents.
"index_name" is the name of the index.
"default" (default None) indicate the value to return for
the index if the index doesn't exist. If not set (or
None) then an OSError is raised if the index doesn't exist.
The index is loaded from a pickle on disk, if necessary, put
into the cache system, and returned.
This must be called with the lock held.
"""
self._acquire_lock()
try:
dbsubpath = join(self.db.dhash_from_dir(dir), index_name)
# If index path is in the cache: return it, update its atime.
now = time.time()
if dbsubpath in self._index_and_atime_from_dbsubpath:
log.debug(
"cache-read: load %s index '%s'", self.lang, dbsubpath)
self._index_and_atime_from_dbsubpath[dbsubpath][1] = now
return self._index_and_atime_from_dbsubpath[dbsubpath][0]
# Otherwise, load it.
log.debug("fs-read: load %s index '%s'", self.lang, dbsubpath)
dbpath = join(self.base_dir, dbsubpath)
index = self.db.load_pickle(dbpath, default)
if index_name == "toplevelname_index":
index = self.toplevelname_index_class(index)
self._index_and_atime_from_dbsubpath[dbsubpath] = [index, now]
return index
finally:
self._release_lock()
def changed_index(self, dir, index_name):
"""Note that we've changed this index (so it can be saved as
appropriate).
"""
self._acquire_lock()
try:
now = time.time()
dbsubpath = join(self.db.dhash_from_dir(dir), index_name)
self._index_and_atime_from_dbsubpath[dbsubpath][1] = now
self._is_index_dirty_from_dbsubpath[dbsubpath] = True
finally:
self._release_lock()
def save_index(self, dbsubpath, index):
if isinstance(index, self.toplevelname_index_class):
index = index.data
self.db.save_pickle(join(self.base_dir, dbsubpath), index)
def save(self):
self._acquire_lock()
try:
for dbsubpath in self._is_index_dirty_from_dbsubpath:
self.save_index(dbsubpath,
self._index_and_atime_from_dbsubpath[dbsubpath][0])
self._is_index_dirty_from_dbsubpath = {}
finally:
self._release_lock()
def cull_mem(self):
"""Drop indeces and tree from cache that have not been
accessed in over 5 minutes.
To attempt to keep memory consumption under control we want to
ensure we don't keep everything cached from the db in memory
until process completion. The plan is to have a thread
periodically cull memory.
"""
# TOTEST: Does Python/Komodo actually release this memory or
# are we kidding ourselves?
log.debug("LangZone: culling memory")
TIME_SINCE_ACCESS = 300.0 # 5 minutes since last access
self._acquire_lock()
try:
N = 30
if len(self._index_and_atime_from_dbsubpath) < N:
# Too few indeces in memory to bother culling.
return
now = time.time()
for dbsubpath, (index, atime) \
in list(self._index_and_atime_from_dbsubpath.items()):
if now - atime > TIME_SINCE_ACCESS:
if dbsubpath in self._is_index_dirty_from_dbsubpath:
self.save_index(dbsubpath, index)
del self._is_index_dirty_from_dbsubpath[dbsubpath]
del self._index_and_atime_from_dbsubpath[dbsubpath]
except:
log.exception("Exception culling memory")
finally:
self._release_lock()
    # XXX Database.clean(): Go through each $lang/dir/res_index and
    #     clean out files that are in the index but no longer actually
    #     exist on disk.
    # XXX Database.clean(): drop memory for indices that are quite
    #     old (say they haven't been accessed in 20 minutes).
    # XXX Database.check(): Shouldn't have too many cached indices in
    #     memory. How old is the oldest one? Estimate the memory size
    #     used by all loaded indices?
# TODO: When a directory no longer exists on the filesystem - should we
# 1) remove the db data, or
# 2) mark it as expired.
# Option 2 would work better for (network) mounted filesystems, as it
# could just be an intermittent issue.
def clean(self):
"""Clean out any expired/old codeintel information."""
base_dir = self.base_dir
if not exists(base_dir):
return
for d in os.listdir(base_dir):
path_path = join(base_dir, d, "path")
if not exists(path_path):
continue
path = codecs.open(path_path, encoding="utf-8").read()
if not exists(path):
# Referenced directory no longer exists - so remove the db
# info.
log.debug("clean:: scanned directory no longer exists: %r",
path)
rmdir(join(base_dir, d))
def get_lib(self, name, dirs):
"""
Dev Notes:
We make a lib for a particular sequence of dirs a singleton because:
1. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.