class FiniteFieldPointEnumerator(NaiveFinitePointEnumerator):
@cached_method
def multiplicative_generator(self):
return self.ring.multiplicative_generator()
@cached_method
def multiplicative_group_order(self):
return self.ring.multiplicative_generator().multiplicative_order()
@cached_method
def root_generator(self, n):
N = self.multiplicative_group_order()
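# g**(N // gcd(n, N)) has multiplicative order gcd(n, N), so it generates the n-th roots of unity that exist in the field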
k = (N // gcd(n, N))
return (self.multiplicative_generator() ** k)
def _Chow_group_free_generators(self):
result = []
null_space = self.rays().matrix().integer_kernel()
for ker in null_space.basis():
phases = tuple(((self.multiplicative_generator() ** exponent) for exponent in ker))
result.append(phases)
return tuple(sorted(result))
def _Chow_group_torsion_generators(self):
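# torsion in the Chow group is read off the finite quotient saturation(image)/image of the ray-matrix image; a smooth fan contributes none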
if self.fan.is_smooth():
return ()
image = self.rays().column_matrix().image()
torsion = image.saturation().quotient(image)
result = set()
for t in torsion.gens():
t_lift = t.lift()
root = self.root_generator(t.order())
if (root == 1):
continue
phases = tuple(((root ** exponent) for exponent in t_lift))
result.add(phases)
assert (tuple((self.ring.one() for r in self.rays())) not in result)
return tuple(sorted(result))
def log(self, z):
base = self.multiplicative_generator()
return tuple((zi.log(base) for zi in z))
def exp(self, powers):
base = self.multiplicative_generator()
return tuple(((base ** i) for i in powers))
@cached_method
def rescaling_log_generators(self):
free = self._Chow_group_free_generators()
tors = self._Chow_group_torsion_generators()
result = map(self.log, (free + tors))
return tuple(sorted(result))
def cone_points_iter(self):
from sage.matrix.constructor import matrix, block_matrix, identity_matrix
from sage.rings.integer_ring import ZZ
nrays = len(self.rays())
N = self.multiplicative_group_order()
log_generators = self.rescaling_log_generators()
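# append N * identity as extra relations so the integer cokernel below behaves like working in (ZZ/N)^nrays, since ZZ/N is not an integral domain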
log_relations = block_matrix(2, 1, [matrix(ZZ, len(log_generators), nrays, log_generators), (N * identity_matrix(ZZ, nrays))])
for cone in self.cone_iter():
nrays = (self.fan.nrays() + len(self.fan.virtual_rays()))
nonzero_coordinates = [i for i in range(nrays) if (i not in cone.ambient_ray_indices())]
log_relations_nonzero = log_relations.matrix_from_columns(nonzero_coordinates)
image = log_relations_nonzero.image()
cokernel = image.ambient_module().quotient(image)
(yield (cone, nonzero_coordinates, cokernel))
def __iter__(self):
nrays = len(self.rays())
for (cone, nonzero_coordinates, cokernel) in self.cone_points_iter():
zero = ([self.ring.zero()] * nrays)
for v in cokernel:
z_nonzero = self.exp(v.lift())
z = copy(zero)
for (i, value) in zip(nonzero_coordinates, z_nonzero):
z[i] = value
(yield tuple(z))
def cardinality(self):
n = 0
for (cone, nonzero_coordinates, cokernel) in self.cone_points_iter():
n += cokernel.cardinality()
return n
class Blip2Base(BaseModel):
@classmethod
def init_tokenizer(cls, truncation_side='right'):
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', truncation_side=truncation_side)
tokenizer.add_special_tokens({'bos_token': '[DEC]'})
return tokenizer
def maybe_autocast(self, dtype=torch.float16):
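# autocast only applies on non-CPU devices; on CPU, fall back to a no-op context manager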
enable_autocast = (self.device != torch.device('cpu'))
if enable_autocast:
return torch.cuda.amp.autocast(dtype=dtype)
else:
return contextlib.nullcontext()
@classmethod
def init_Qformer(cls, num_query_token, vision_width, cross_attention_freq=2):
encoder_config = BertConfig.from_pretrained('bert-base-uncased')
encoder_config.encoder_width = vision_width
encoder_config.add_cross_attention = True
encoder_config.cross_attention_freq = cross_attention_freq
encoder_config.query_length = num_query_token
Qformer = BertLMHeadModel.from_pretrained('bert-base-uncased', config=encoder_config)
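# learnable query embeddings, initialized from N(0, initializer_range) to match the BERT weight init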
query_tokens = nn.Parameter(torch.zeros(1, num_query_token, encoder_config.hidden_size))
query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)
return (Qformer, query_tokens)
def init_vision_encoder(self, model_name, img_size, drop_path_rate, use_grad_checkpoint, precision):
assert (model_name in ['eva_clip_g', 'eva2_clip_L', 'clip_L']), 'vit model must be eva_clip_g, eva2_clip_L or clip_L'
if (model_name == 'eva_clip_g'):
visual_encoder = create_eva_vit_g(img_size, drop_path_rate, use_grad_checkpoint, precision)
elif (model_name == 'clip_L'):
visual_encoder = create_clip_vit_L(img_size, use_grad_checkpoint, precision)
ln_vision = LayerNorm(visual_encoder.num_features)
self.vit_name = model_name
return (visual_encoder, ln_vision)
def load_from_pretrained(self, url_or_filename):
if is_url(url_or_filename):
cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
checkpoint = torch.load(cached_file, map_location='cpu')
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location='cpu')
else:
raise RuntimeError('checkpoint url or path is invalid')
state_dict = checkpoint['model']
msg = self.load_state_dict(state_dict, strict=False)
logging.info(('load checkpoint from %s' % url_or_filename))
return msg
def get_optimizer_params(self, weight_decay, lr_scale=1):
if (self.vit_name == 'eva_clip_g'):
vit_num_layers = self.visual_encoder.get_num_layer()
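# per-layer lr multipliers lr_scale ** (depth + 1 - i): with lr_scale < 1, earlier ViT layers get smaller learning rates (layer-wise lr decay)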
lr_scales = list(((lr_scale ** ((vit_num_layers + 1) - i)) for i in range((vit_num_layers + 2))))
parameter_group_names = {}
parameter_group_vars = {}
for (name, param) in self.named_parameters():
if (not param.requires_grad):
continue
if ((len(param.shape) == 1) or name.endswith('.bias')):
group_name = 'no_decay'
this_weight_decay = 0.0
else:
group_name = 'decay'
this_weight_decay = weight_decay
if ('visual_encoder' in name):
layer_id = self.visual_encoder.get_num_layer(name.replace('visual_encoder.', ''))
group_name = ('vit_layer_%d_%s' % (layer_id, group_name))
else:
layer_id = None
if (group_name not in parameter_group_names):
if (layer_id is not None):
scale = lr_scales[layer_id]
else:
scale = 1
parameter_group_names[group_name] = {'weight_decay': this_weight_decay, 'params': [], 'lr_scale': scale}
parameter_group_vars[group_name] = {'weight_decay': this_weight_decay, 'params': [], 'lr_scale': scale}
parameter_group_vars[group_name]['params'].append(param)
parameter_group_names[group_name]['params'].append(name)
optim_params = list(parameter_group_vars.values())
return optim_params
else:
return super().get_optimizer_params(weight_decay, lr_scale)
def _lemmatize(self, answers):
def apply(answer):
doc = self.lemmatizer(answer)
words = []
for token in doc:
if (token.pos_ in ['NOUN', 'VERB']):
words.append(token.lemma_)
else:
words.append(token.text)
answer = ' '.join(words)
return answer
return [apply(answer) for answer in answers]
@property
def lemmatizer(self):
if (self._lemmatizer is None):
try:
import spacy
self._lemmatizer = spacy.load('en_core_web_sm')
except ImportError:
logging.error('\n Please install spacy and en_core_web_sm model to apply lemmatization.\n python -m spacy download en_core_web_sm\n OR\n import spacy.cli\n spacy.cli.download("en_core_web_sm")\n ')
exit(1)
return self._lemmatizer
@pytest.mark.parametrize('inspecs', inspecs_params())
@pytest.mark.parametrize('shared', [True, False])
def test_activation(inspecs, shared, nnabla_opts):
fb = FunctionBenchmark(PF.prelu, inspecs, [1], {}, nnabla_opts.ext, nnabla_opts.ext_kwargs)
fb.benchmark()
fb.write(writer=nnabla_opts.function_benchmark_writer)
def read_mask_file(filepath, out):
with open(filepath, 'rb') as f:
dat = zlib.decompress(f.read())
out[:] = np.frombuffer(dat, dtype=bool).reshape((480, 480))
class Hovmoller():
def __init__(self, kwrgs_load: dict=None, slice_dates: tuple=None, event_dates: pd.DatetimeIndex=None, lags_prior: int=None, lags_posterior: int=None, standardize: bool=False, seldates: tuple=None, rollingmeanwindow: int=None, name=None, zoomdim: tuple=None, ignore_overlap_events: bool=False, t_test: bool=True):
self.kwrgs_load = kwrgs_load.copy()
self.slice_dates = slice_dates
self.event_dates = event_dates
self.seldates = seldates
self.standardize = standardize
self.rollingmeanwindow = rollingmeanwindow
self.zoomdim = zoomdim
self.ignore_overlap_events = ignore_overlap_events
self.t_test = t_test
if ((slice_dates is None) and (event_dates is None)):
raise ValueError('No dates to select or slice, please define slice_dates or event_dates')
if (standardize and (seldates is None)):
raise ValueError('Give seldates over which the standard deviation is calculated.')
if (slice_dates is not None):
print('slice dates not supported yet')
if (lags_prior is None):
lags_prior = 10
if (lags_posterior is None):
lags_posterior = 1
self.lags_prior = lags_prior
self.lags_posterior = lags_posterior
self.lags = list(range((- abs(self.lags_prior)), (self.lags_posterior + 1)))
if ((self.rollingmeanwindow is not None) and (self.seldates is None)):
raise Exception('You cannot take a rolling mean over only the event dates; specify the dates over which the rolling mean should be computed, after which the event dates will be selected from the smoothed array')
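# one row of lag-shifted copies of the event dates per lag value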
self.event_lagged = np.array([(event_dates + pd.Timedelta(f'{l}d')) for l in self.lags])
if (self.ignore_overlap_events == False):
if (np.unique(self.event_lagged).size != self.event_lagged.size):
raise Exception('There are overlapping dates when shifting event dates with lags')
self.lag_axes = np.zeros_like(self.event_lagged, dtype=int)
for (i, l) in enumerate(self.lags):
self.lag_axes[i] = np.repeat(l, self.event_dates.size)
self.name = name
self._check_dates()
return
def get_HM_data(self, filepath, dim='latitude'):
self.filepath = filepath
self.dim = dim
if (self.seldates is not None):
self.kwrgs_load['seldates'] = self.seldates_ext
self.ds_seldates = functions_pp.import_ds_timemeanbins(self.filepath, **self.kwrgs_load)
ds_name = self.ds_seldates.name
if (self.rollingmeanwindow is not None):
self.ds = self.ds_seldates.rolling(time=self.rollingmeanwindow).mean()
else:
self.ds = self.ds_seldates
self.std = self.ds.sel(time=self.seldates).std(dim='time')
if (self.t_test == True):
self.ds_all = self.ds.sel(time=self.seldates)
self.ds = self.ds.sel(time=np.concatenate(self.event_lagged))
else:
self.kwrgs_load['seldates'] = np.concatenate(self.event_lagged)
self.ds = functions_pp.import_ds_timemeanbins(self.filepath, **self.kwrgs_load)
ds_name = self.ds.name
if (self.name is None):
self.name = ds_name
if ('units' in list(self.ds.attrs.keys())):
self.units = self.ds.attrs['units']
if self.standardize:
self.units = 'std [-]'
self.ds = (self.ds / self.std)
if (self.event_dates is not None):
self.xarray = self.ds.copy().rename({'time': 'lag'})
self.xarray = self.xarray.assign_coords(lag=np.concatenate(self.lag_axes))
else:
self.xarray = self.ds
if (self.zoomdim is not None):
xarray_w = self.xarray.sel(latitude=slice(self.zoomdim[0], self.zoomdim[1]))
xarray_w = functions_pp.area_weighted(xarray_w)
else:
xarray_w = functions_pp.area_weighted(self.xarray)
xarray_meandim = xarray_w.mean(dim=dim)
self.xr_HM = xarray_meandim.groupby('lag').mean()
if self.t_test:
full = (self.ds_all / self.std).mean(dim=dim)
self.xr_mask = self.xr_HM.astype(bool).copy()
pvals = np.zeros_like(self.xr_mask.values, dtype=float)
for (i, lag) in enumerate(self.xr_mask.lag.values):
sample = xarray_meandim.sel(lag=lag)
(T, p, mask) = Welchs_t_test(sample, full, equal_var=False)
pvals[i] = p
self.xr_mask.values = pvals
def quick_HM_plot(self):
if (hasattr(self, 'xr_HM') == False):
print('first run get_HM_data(filepath)')
else:
self.xr_HM.plot()
def plot_HM(self, main_title_right: str=None, ytickstep=5, lattickstep: int=3, clevels: np.ndarray=None, clim: Union[(str, tuple)]='relaxed', cmap=None, drawbox: list=None, save: bool=False, height_ratios: list=[1, 6], fontsize: int=14, alpha: float=0.05, lag_composite: int=0, fig_path: str=None):
if (self.event_dates is not None):
vtimes = self.xr_HM.lag.values
else:
vtimes = self.xr_HM.time.values.astype('datetime64[ms]').astype('O')
if (cmap is None):
cmap = plt.cm.RdBu_r
fig = plt.figure(figsize=(10, 13))
gs = gridspec.GridSpec(nrows=2, ncols=1, height_ratios=height_ratios, hspace=0.03)
dim = [d for d in self.xr_HM.dims if (d != 'lag')][0]
if (dim == 'longitude'):
x_tick_labels = [u'0E', u'90E', u'180E', u'90W', u'0E']
elif (dim == 'latitude'):
x_ticks = np.unique(np.round(self.xarray.latitude.values.astype(int), (- 1)))
x_tick_labels = [u'{}N'.format(coord) for coord in x_ticks]
if (clevels is None):
class MidpointNormalize(mcolors.Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
mcolors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
(x, y) = ([self.vmin, self.midpoint, self.vmax], [0, 0.5, 1])
return np.ma.masked_array(np.interp(value, x, y))
if (clim == 'relaxed'):
vmin_ = np.nanpercentile(self.xr_HM, 1)
vmax_ = np.nanpercentile(self.xr_HM, 99)
elif (type(clim) == tuple):
(vmin_, vmax_) = clim
else:
vmin_ = (self.xr_HM.min() - 0.01)
vmax_ = (self.xr_HM.max() + 0.01)
vmin = np.round(float(vmin_), decimals=2)
vmax = np.round(float(vmax_), decimals=2)
clevels = np.linspace((- max(abs(vmin), vmax)), max(abs(vmin), vmax), 17)
norm = MidpointNormalize(midpoint=0, vmin=clevels[0], vmax=clevels[(- 1)])
ticksteps = 4
else:
vmin_ = np.nanpercentile(self.xr_HM, 1)
vmax_ = np.nanpercentile(self.xr_HM, 99)
vmin = np.round(float(vmin_), decimals=2)
vmax = np.round(float(vmax_), decimals=2)
clevels = clevels
norm = None
ticksteps = 1
ax1 = fig.add_subplot(gs[(0, 0)], projection=ccrs.PlateCarree(central_longitude=180))
selbox = list(self.kwrgs_load['selbox'])
selbox[1] = (selbox[1] - 0.1)
ax1.add_feature(cfeature.COASTLINE.with_scale('50m'))
ax1.add_feature(cfeature.LAKES.with_scale('50m'), color='black', linewidths=0.5)
xr_events = self.xarray.sel(lag=lag_composite).mean(dim='lag')
lon = xr_events.longitude
if (abs((lon[(- 1)] - 360)) <= (lon[1] - lon[0])):
xr_events = plot_maps.extend_longitude(xr_events)
xr_events.plot.contourf(levels=clevels, cmap=cmap, transform=ccrs.PlateCarree(), ax=ax1, add_colorbar=False)
ax1.set_extent(selbox, ccrs.PlateCarree(central_longitude=180))
y_ticks = np.unique(np.round(xr_events.latitude, decimals=(- 1)))[::lattickstep]
ax1.set_yticks(y_ticks.astype(int))
ax1.set_yticklabels([u'{:.0f}N'.format(l) for l in y_ticks], fontdict={'fontsize': (fontsize - 2)})
ax1.set_ylabel('Latitude', fontdict={'fontsize': fontsize})
ax1.grid(linestyle='dotted', linewidth=2)
if (self.zoomdim is not None):
xmin = float(min(self.xr_HM[dim]))
xmax = float(max(self.xr_HM[dim]))
ax1.hlines(self.zoomdim[0], xmin, xmax, transform=ccrs.PlateCarree())
ax1.hlines(self.zoomdim[1], xmin, xmax, transform=ccrs.PlateCarree())
if (drawbox is not None):
def get_ring(coords):
(west_lon, east_lon, south_lat, north_lat) = coords
lons_sq = [west_lon, west_lon, east_lon, east_lon]
lats_sq = [north_lat, south_lat, south_lat, north_lat]
ring = [LinearRing(list(zip(lons_sq, lats_sq)))]
return ring
ring = get_ring(drawbox)
ax1.add_geometries(ring, ccrs.PlateCarree(), facecolor='none', edgecolor='green', linewidth=2, linestyle='dashed')
title = f'Composite mean of {self.event_dates.size} events at lag={lag_composite}'
plt.title(title, loc='left')
if (main_title_right is not None):
plt.title(main_title_right, loc='right', fontdict={'fontsize': fontsize})
ax2 = fig.add_subplot(gs[(1, 0)])
ax2.invert_yaxis()
ax2.hlines(y=0, xmin=0, xmax=357.5, linewidth=1)
cf = self.xr_HM.plot.contourf(levels=clevels, cmap=cmap, ax=ax2, add_colorbar=False)
self.xr_HM.plot.contour(levels=clevels, colors='k', linewidths=1, ax=ax2)
if self.t_test:
self.xr_mask.plot.contourf(ax=ax2, levels=[0, alpha, 1], hatches=['...', ''], colors='none', add_colorbar=False)
cbar = plt.colorbar(cf, orientation='horizontal', pad=0.04, aspect=50, extendrect=True, norm=norm, ticks=clevels[::ticksteps])
cbar.ax.tick_params(labelsize=fontsize)
if hasattr(self, 'units'):
cbar.set_label(self.units)
ax2.set_xticks([0, 90, 180, 270, 357.5])
ax2.set_xticklabels(x_tick_labels, fontdict={'fontsize': (fontsize - 2)})
ax2.set_xlabel('')
if (self.event_dates is not None):
y_ticks = list(vtimes[::ytickstep])
ax2.set_yticks(y_ticks)
ax2.set_yticklabels(y_ticks, fontdict={'fontsize': fontsize})
ax2.set_ylabel('lag [in days]', fontdict={'fontsize': fontsize})
ax2.grid(linestyle='dotted', linewidth=2)
if (self.name is not None):
plt.title(self.name, loc='left', fontsize=fontsize)
if (self.slice_dates is not None):
plt.title('Time Range: {0:%Y%m%d %HZ} - {1:%Y%m%d %HZ}'.format(vtimes[0], vtimes[(- 1)]), loc='right', fontsize=fontsize)
if (save or (fig_path is not None)):
fname = ('_'.join(np.array(self.kwrgs_load['selbox']).astype(str)) + f'_w{self.rollingmeanwindow}_std{self.standardize}')
fname = ((self.name + '_') + fname)
if (fig_path is None):
fig_path = os.path.join(functions_pp.get_download_path(), fname)
plt.savefig(fig_path, bbox_inches='tight')
def _check_dates(self):
ev_lag = pd.to_datetime(self.event_lagged.flatten())
mde = [int('{:02d}{:02d}'.format(d.month, d.day)) for d in ev_lag]
if (type(self.seldates) is pd.DatetimeIndex):
mds = [int('{:02d}{:02d}'.format(d.month, d.day)) for d in self.seldates]
if (min(mde) < min(mds)):
print(f'An event date minus the max lag {min(mde)} is not in seldates {min(mds)}, adapting start date of seldates')
start_date = f'{self.seldates[0].year}-{pd.to_datetime(ev_lag[np.argmin(mde)]).month}-{pd.to_datetime(ev_lag[np.argmin(mde)]).day}'
else:
start_date = f'{self.seldates[0].year}-{self.seldates[0].month}-{self.seldates[0].day}'
if (max(mde) > max(mds)):
print(f'An event date plus the max lag {max(mde)} is not in seldates {max(mds)}, adapting end date of seldates')
end_date = f'{self.seldates[0].year}-{pd.to_datetime(ev_lag[np.argmax(mde)]).month}-{pd.to_datetime(ev_lag[np.argmax(mde)]).day}'
else:
end_date = f'{self.seldates[0].year}-{self.seldates[(- 1)].month}-{self.seldates[(- 1)].day}'
self.seldates_ext = core_pp.make_dates(pd.date_range(start_date, end_date), np.unique(self.seldates.year))
class GraphTransformerNet(nn.Module):
def __init__(self, net_params):
super().__init__()
num_atom_type = net_params['num_atom_type']
num_bond_type = net_params['num_bond_type']
hidden_dim = net_params['hidden_dim']
num_heads = net_params['n_heads']
out_dim = net_params['out_dim']
in_feat_dropout = net_params['in_feat_dropout']
dropout = net_params['dropout']
n_layers = net_params['n_layers']
self.layer_norm = net_params['layer_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
self.edge_feat = net_params['edge_feat']
self.lap_pos_enc = net_params['lap_pos_enc']
self.wl_pos_enc = net_params['wl_pos_enc']
max_wl_role_index = 37
if self.lap_pos_enc:
pos_enc_dim = net_params['pos_enc_dim']
self.embedding_lap_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
if self.wl_pos_enc:
self.embedding_wl_pos_enc = nn.Embedding(max_wl_role_index, hidden_dim)
self.embedding_h = nn.Embedding(num_atom_type, hidden_dim)
if self.edge_feat:
self.embedding_e = nn.Embedding(num_bond_type, hidden_dim)
else:
self.embedding_e = nn.Linear(1, hidden_dim)
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GraphTransformerLayer(hidden_dim, hidden_dim, num_heads, dropout, self.layer_norm, self.batch_norm, self.residual) for _ in range((n_layers - 1))])
self.layers.append(GraphTransformerLayer(hidden_dim, out_dim, num_heads, dropout, self.layer_norm, self.batch_norm, self.residual))
def forward(self, g, h, e, h_lap_pos_enc=None, h_wl_pos_enc=None):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
if self.lap_pos_enc:
h_lap_pos_enc = self.embedding_lap_pos_enc(h_lap_pos_enc.float())
h = (h + h_lap_pos_enc)
if self.wl_pos_enc:
h_wl_pos_enc = self.embedding_wl_pos_enc(h_wl_pos_enc)
h = (h + h_wl_pos_enc)
e = self.embedding_e(e)
for conv in self.layers:
(h, e) = conv(g, h, e)
g.ndata['h'] = h
g.edata['e'] = e
return (h, e)
def make_sequence_example(inputs, labels, genders):
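# one FloatList Feature per timestep for inputs/labels; genders is stored as a single-entry FeatureList holding the whole vector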
input_features = [tf.train.Feature(float_list=tf.train.FloatList(value=input_)) for input_ in inputs]
label_features = [tf.train.Feature(float_list=tf.train.FloatList(value=label)) for label in labels]
gender_features = [tf.train.Feature(float_list=tf.train.FloatList(value=genders))]
feature_list = {'inputs': tf.train.FeatureList(feature=input_features), 'labels': tf.train.FeatureList(feature=label_features), 'genders': tf.train.FeatureList(feature=gender_features)}
feature_lists = tf.train.FeatureLists(feature_list=feature_list)
return tf.train.SequenceExample(feature_lists=feature_lists)
class IntBinopNode(NumBinopNode):
def c_types_okay(self, type1, type2):
return ((type1.is_int or type1.is_enum) and (type2.is_int or type2.is_enum))
def format_assignments(assignments, num_workers=1, log_every=1000, verbose=False):
clustering_types = sorted(list(assignments[0].keys()))
format_assignment = partial(_format_assignment, clustering_types=clustering_types)
assignments = list(multiprocess(format_assignment, assignments, num_workers, 'formatting assignments samples', log_every=log_every, verbose=verbose))
assignments = np.array(assignments)
return (assignments, clustering_types)
def sim_ball_traj(init_pos=np.zeros(3), init_vel=np.array([(- 1.3), 4.5, 2.2]), lin_air_drag=np.array([0.0, 0.0, 0.0]), quad_air_drag=0.0, bounce_fac=np.array([0.9, 0.9, 0.8]), deltaT=0.005, T=120, max_bounces=None):
x = init_pos
xd = init_vel
obs = []
vel = []
time = []
is_bounce = []
bounce_time = []
bounces_left = (max_bounces if (max_bounces is not None) else 1000000)
for i in range(T):
t = (deltaT * i)
obs.append(x)
vel.append(xd)
time.append(t)
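# acceleration: componentwise linear drag, speed-scaled quadratic drag, and gravity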
a = ((((- lin_air_drag) * xd) - ((quad_air_drag * np.linalg.norm(xd)) * xd)) + np.array([0, 0, (- 9.8)]))
x = ((x + (xd * deltaT)) + ((0.5 * (deltaT ** 2)) * a))
xd = (xd + (deltaT * a))
if ((x[2] < 0.0) and (bounces_left > 0)):
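# bounce: reflect height and vertical velocity about the ground plane, then damp all velocity components by bounce_fac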
is_bounce.append(True)
bounce_time.append(t)
x[2] *= (- 1.0)
xd[2] *= (- 1.0)
xd = (xd * bounce_fac)
bounces_left -= 1
else:
is_bounce.append(False)
return (np.array(time), np.array(obs), np.array(vel), is_bounce, bounce_time)
def _polar_graph(m, q, g, intersection_size=None):
from sage.libs.gap.libgap import libgap
from itertools import combinations
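# V enumerates the projective points (lines of GF(q)^m) as an orbit of g; L is the orbit of the point set of the subspace spanned by the first m//2 basis vectors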
W = libgap.FullRowSpace(libgap.GF(q), m)
B = libgap.Elements(libgap.Basis(W))
V = libgap.Orbit(g, B[0], libgap.OnLines)
gp = libgap.Action(g, V, libgap.OnLines)
s = libgap.Subspace(W, [B[i] for i in range((m // 2))])
sp = [libgap.Elements(libgap.Basis(x))[0] for x in libgap.Elements(s.Subspaces(1))]
h = libgap.Set([libgap.Position(V, x) for x in sp])
L = libgap.Orbit(gp, h, libgap.OnSets)
if (intersection_size is None):
G = Graph()
for x in L:
G.add_edges(combinations(x, 2))
return G
else:
return Graph([L, (lambda i, j: (libgap.Size(libgap.Intersection(i, j)) == intersection_size))], loops=False)
class CategoryEncoder():
def __init__(self, category: List[str]) -> None:
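# deduplicate and sort so that the label <-> index mapping is deterministic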
self.category = list(sorted(set(category)))
def __len__(self) -> int:
return len(self.category)
def encode(self, label: str) -> int:
return self.category.index(label)
def decode(self, index: int) -> str:
return self.category[index]
def convert(name, in_dir, out_dir, resolution, skip_existing):
out_name = f'{name[0]}/{name}'
out_filename = (out_dir / f'{out_name}.json')
if (skip_existing and out_filename.is_file()):
return
music = muspy.read(((in_dir / name[0]) / f'{name}.mid'))
adjust_resolution(music, resolution)
end_time = music.get_end_time()
if ((end_time > ((resolution * 4) * 2000)) or (end_time < ((resolution * 4) * 10))):
return
out_filename.parent.mkdir(exist_ok=True, parents=True)
music.save(out_filename)
return out_name
def validate_strategy_specs(specs: Dict[(str, StrategySpec)]):
for (rid, spec) in specs.items():
if (len(spec) < 1):
raise ValueError(f'Empty spec for runtime_id={rid}')
expected_prob_list = spec.meta_data.get('prob_list', ([(1 / len(spec))] * len(spec)))
if (expected_prob_list is None):
raise ValueError(f'Do not give an empty prob list explicitly for runtime_id={rid}.')
if (not np.isclose(sum(expected_prob_list), 1.0)):
raise ValueError(f'The summation of the prob list for runtime_id={rid} should be close to 1: {expected_prob_list}.')
@pytest.mark.parametrize('func', [ak.str.is_alnum, ak.str.is_alpha, ak.str.is_ascii, ak.str.is_decimal, ak.str.is_digit, ak.str.is_lower, ak.str.is_numeric, ak.str.is_printable, ak.str.is_space, ak.str.is_title, ak.str.is_upper, ak.str.capitalize, ak.str.lower, ak.str.upper, ak.str.reverse, ak.str.swapcase, ak.str.title, ak.str.ltrim_whitespace, ak.str.rtrim_whitespace, ak.str.trim_whitespace, ak.str.split_whitespace])
def test_string_operations_unary(func):
pytest.importorskip('pyarrow')
assert (func([['hello', 'world!'], [], ["it's a beautiful day!"]], highlevel=True).attrs == {})
assert (func([['hello', 'world!'], [], ["it's a beautiful day!"]], highlevel=True, attrs=SOME_ATTRS).attrs is SOME_ATTRS)
array = ak.Array([['hello', 'world!'], [], ["it's a beautiful day!"]], attrs=SOME_ATTRS)
assert (func(array, highlevel=True).attrs is SOME_ATTRS)
assert (func(array, highlevel=True, attrs=OTHER_ATTRS).attrs is OTHER_ATTRS)
class SmoothTriangle(Triangle):
def __init__(self, a, b, c, da, db, dc, color=0):
self._a = a
self._b = b
self._c = c
self._da = da
self._db = db
self._dc = dc
self._color = color
def str(self):
return ('%s %s %s %s %s %s %s' % (self._a, self._b, self._c, self._color, self._da, self._db, self._dc))
def get_normals(self):
return (self._da, self._db, self._dc)
def run_chunks_node(node_rank, cfg):
(args, chunks, num_chunks) = cfg
args.node_rank = node_rank
chunks = chunks[node_rank]
if args.computation.load_async:
run_async(args, chunks, node_rank)
else:
for (i, chunk) in enumerate(chunks):
(num, chunk) = chunk
print('running chunk {}'.format(num))
(res, metas) = run_chunk(args, chunk)
pid = args.parent_pid
name = 'cache_{}_{}_{}'.format(pid, node_rank, i)
if args.save_cache_as_csvs:
_reduce_single_cache(args, name, res, metas)
else:
save_chunk_cache(args, node_rank, i, res, metas)
@test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
def test_multiple_ib_mixed():
x = ti.ndarray(float, (), needs_grad=True)
y = ti.ndarray(float, (), needs_grad=True)
@ti.kernel
def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):
for j in range(2):
for i in range(3):
y[None] += x[None]
for i in range(3):
y[None] += x[None]
for k in range(2):
y[None] += x[None]
for i in range(3):
y[None] += x[None]
x[None] = 1.0
with ti.ad.Tape(y):
compute_y(x, y)
assert (y[None] == 30.0)
assert (x.grad[None] == 30.0)
class GroupNorm(nn.Module):
def __init__(self, num_groups, num_channels, eps=1e-05, affine=True):
super().__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = nn.Parameter(torch.Tensor(num_channels))
self.bias = nn.Parameter(torch.Tensor(num_channels))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.affine:
self.weight.data.fill_(1)
self.bias.data.zero_()
def forward(self, x):
return myF.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
def extra_repr(self):
return '{num_groups}, {num_channels}, eps={eps}, affine={affine}'.format(**self.__dict__)
def run_selection(args, chunk):
(data, metas, node_rank, i) = chunk
print('running chunk {}_{}'.format(node_rank, i))
res = _run_selection(args, data)
save_chunk_cache(args, node_rank, i, res, metas)
def test_pixel_decoder():
base_channels = 64
pixel_decoder_cfg = ConfigDict(dict(type='PixelDecoder', in_channels=[(base_channels * (2 ** i)) for i in range(4)], feat_channels=base_channels, out_channels=base_channels, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='ReLU')))
self = build_plugin_layer(pixel_decoder_cfg)[1]
img_metas = [{}, {}]
feats = [torch.rand((2, (base_channels * (2 ** i)), (4 * (2 ** (3 - i))), (5 * (2 ** (3 - i))))) for i in range(4)]
(mask_feature, memory) = self(feats, img_metas)
assert (memory == feats[(- 1)]).all()
assert (mask_feature.shape == feats[0].shape)
def test_optimize_2d():
with goos.OptimizationPlan() as plan:
x = goos.Variable([[1, 2], [3, 4]])
obj = ((goos.Norm((x - goos.Constant([[3, 2], [(- 4), 2]]))) ** 2) + 3)
goos.opt.scipy_minimize(obj, method='L-BFGS-B')
plan.run()
np.testing.assert_almost_equal(x.get().array, [[3, 2], [(- 4), 2]])
@pytest.mark.parametrize('location, schema', ([(location, OBJECT_SCHEMA) for location in sorted(LOCATION_TO_CONTAINER)] + [('body', EMPTY_OBJECT_SCHEMA), ('body', ARRAY_SCHEMA), ('body', INTEGER_SCHEMA)]))
@given(data=st.data())
@settings(deadline=None, suppress_health_check=SUPPRESSED_HEALTH_CHECKS, max_examples=MAX_EXAMPLES)
def test_top_level_strategy(data, location, schema):
if ((location != 'body') and (schema.get('type') == 'object')):
schema['additionalProperties'] = False
validate_schema(schema)
validator = Draft4Validator(schema)
schema = fast_deepcopy(schema)
instance = data.draw(negative_schema(schema, operation_name='GET /users/', location=location, media_type='application/json', custom_formats=get_default_format_strategies(), generation_config=GenerationConfig()))
assert (not validator.is_valid(instance))
if is_header_location(location):
assert is_valid_header(instance)
def match_filtering(new_turn, ori_turn, sentences):
ori_turn_label_set = set()
for (slot, value) in ori_turn['turn_label']:
ori_turn_label_set.add(((slot + '-') + value))
new_turn_label_set = set()
for (slot, value) in new_turn['turn_label']:
new_turn_label_set.add(((slot + '-') + value))
if (ori_turn_label_set == new_turn_label_set):
return ''
missing_values = []
for (domain_slot, value) in ori_turn['turn_label']:
if ((value in ori_turn['system_transcript']) and (value not in ori_turn['transcript'])):
missing_values.append(value)
value_list = []
best_sent = ''
for (domain_slot, value) in new_turn['turn_label']:
(domain, slot) = domain_slot.split('-')
if (slot == 'parking'):
value = slot
elif (slot == 'internet'):
value = 'wifi'
if (value not in missing_values):
value_list.append(value)
for sent in sentences:
flag = True
for value in value_list:
if (value not in sent):
flag = False
break
if flag:
best_sent = sent
break
return best_sent
class BasicTransform(nn.Module):
def __init__(self, w_in, w_out, stride, norm, activation_class, _params):
super().__init__()
self.a = conv2d(w_in, w_out, 3, stride=stride)
self.a_bn = get_norm(norm, w_out)
self.a_af = activation_class()
self.b = conv2d(w_out, w_out, 3)
self.b_bn = get_norm(norm, w_out)
self.b_bn.final_bn = True
def forward(self, x):
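# relies on nn.Module preserving registration order of children: conv a -> bn -> activation -> conv b -> bn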
for layer in self.children():
x = layer(x)
return x
class Dataset():
def __init__(self, name, path=None, vec=None, args=None):
self.name = name
if ((args is not None) and (path is not None) and hasattr(args, 'data_dir')):
path = os.path.join(args.data_dir, path)
self.vec = (pickle.load(open(path, 'rb')) if (vec is None) else vec)
self.vec.entity_size = len(self.vec.entity2idx)
self.train_data = get_data_from_vec(self.vec, ['train'])
self.test_data = get_data_from_vec(self.vec, ['test'], sort=True)
self.dev_data = get_data_from_vec(self.vec, ['dev'], sort=True)
self.by_class = False
self.save_on_metric = 'accuracy'
self.output_size = self.vec.entity_size
self.trainer_type = 'qa'
self.bsize = 100
if ((args is not None) and hasattr(args, 'output_dir')):
self.basepath = args.output_dir
def display_stats(self):
stats = {}
stats['vocab_size'] = self.vec.vocab_size
stats['embed_size'] = self.vec.word_dim
stats['hidden_size'] = self.vec.hidden_size
if self.by_class:
y = np.unique(np.array(self.train_data.A), return_counts=True)
yt = np.unique(np.array(self.test_data.A), return_counts=True)
stats['train_size'] = list(zip(y[0].tolist(), y[1].tolist()))
stats['test_size'] = list(zip(yt[0].tolist(), yt[1].tolist()))
else:
stats['train_size'] = [('Overall', len(self.train_data.A))]
stats['test_size'] = [('Overall', len(self.test_data.A))]
outdir = 'datastats'
os.makedirs(('graph_outputs/' + outdir), exist_ok=True)
stats.update(self.train_data.get_stats('P'))
json.dump(stats, open((((('graph_outputs/' + outdir) + '/') + self.name) + '.txt'), 'w'))
print(stats)
def config_parser():
parser = configargparse.ArgParser()
parser.add_argument('--config', is_config_file=True, help='config file path')
parser.add_argument('--expname', type=str, help='experiment name')
parser.add_argument('--basedir', type=str, default='./logs/', help='where to store ckpts and logs')
parser.add_argument('--datadir', type=str, default='./data/', help='input data directory')
parser.add_argument('--name_exp', type=str, default='')
parser.add_argument('--with_wandb', action='store_true')
parser.add_argument('--project_name', type=str, default='Re-ReND')
parser.add_argument('--create_mesh', action='store_true')
parser.add_argument('--resolution', type=int, default=512)
parser.add_argument('--num_comp', type=int, default=12)
parser.add_argument('--threshold', type=float, default=0)
parser.add_argument('--from_file', type=str, default=None)
parser.add_argument('--level_set', type=float, default=0.0)
parser.add_argument('--b_min', nargs='+', help='bounding box minimum corner')
parser.add_argument('--b_max', nargs='+', help='bounding box maximum corner')
parser.add_argument('--divide_data', action='store_true')
parser.add_argument('--netdepth', type=int, default=88, help='layers in network')
parser.add_argument('--netwidth', type=int, default=256, help='channels per layer')
parser.add_argument('--netdepth_d', type=int, default=88, help='layers in network dir')
parser.add_argument('--netwidth_d', type=int, default=256, help='channels per layer dir')
parser.add_argument('--components', type=int, default=32, help='components of the basis')
parser.add_argument('--use_residual', action='store_true')
parser.add_argument('--multires_dir', type=int, default=0, help='log2 of max freq for positional encoding (3D location)')
parser.add_argument('--layerwise_netwidths', type=str, default='')
parser.add_argument('--act', type=str, default='relu', choices=['relu', 'lrelu'], help='main activation func in a network')
parser.add_argument('--trial.ON', action='store_true')
parser.add_argument('--trial.body_arch', type=str, default='mlp', choices=['mlp', 'resmlp'])
parser.add_argument('--trial.res_scale', type=float, default=1.0)
parser.add_argument('--trial.n_learnable', type=int, default=2, help='num of learnable layers')
parser.add_argument('--trial.inact', default='relu', choices=['none', 'relu', 'lrelu'], help='the within activation func in a block')
parser.add_argument('--trial.outact', default='none', choices=['none', 'relu', 'lrelu'], help='the output activation func in a block')
parser.add_argument('--trial.n_block', type=int, default=(- 1), help='num of block in network body')
parser.add_argument('--trial.near', type=float, default=(- 1))
parser.add_argument('--trial.far', type=float, default=(- 1))
parser.add_argument('--hard_ratio', type=str, default='', help='hard rays ratio in a batch; separated by comma')
parser.add_argument('--hard_mul', type=float, default=1, help='hard_mul * batch_size is the size of hard ray pool')
parser.add_argument('--warmup_lr', type=str, default='')
parser.add_argument('--multires', type=int, default=10, help='log2 of max freq for positional encoding (3D location) NERF 2')
parser.add_argument('--n_levels', type=int, default=16, help='levels hash encoding')
parser.add_argument('--n_features_per_level', type=int, default=2, help='features per level for hash encoding')
parser.add_argument('--n_levels_dir', type=int, default=10, help='levels hash encoding')
parser.add_argument('--load_ckpt', type=int, default=(- 1), help='load specific checkpoint')
parser.add_argument('--lrate', type=float, default=0.0005, help='learning rate')
parser.add_argument('--lrate_decay', type=int, default=250, help='exponential learning rate decay (in 1000 steps)')
parser.add_argument('--chunk', type=int, default=(1024 * 32), help='number of rays processed in parallel, decrease if running out of memory')
parser.add_argument('--netchunk', type=int, default=(1024 * 64), help='number of pts sent through network in parallel, decrease if running out of memory')
parser.add_argument('--no_reload', action='store_true', help='do not reload weights from saved ckpt')
parser.add_argument('--dataset_type', type=str, default='blender', help='options: blender / tanks_and_temples')
parser.add_argument('--testskip', type=int, default=8, help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
parser.add_argument('--num_files', type=int, default=4, help='how many files load to cpu memory')
parser.add_argument('--divide', action='store_true', help='from origins and direction to intersected points')
parser.add_argument('--i_print', type=int, default=1000, help='frequency of console printout and metric logging')
parser.add_argument('--i_weights', type=int, default=5000, help='frequency of weight ckpt saving')
parser.add_argument('--i_testset', type=int, default=25000, help='frequency of testset saving')
parser.add_argument('--seed', type=int, default=23)
parser.add_argument('--export_textures', action='store_true')
parser.add_argument('--tri_size', type=int, default=6)
parser.add_argument('--num_sample_elev', type=int, default=1024)
parser.add_argument('--num_sample_azim', type=int, default=1024)
parser.add_argument('--train', action='store_true')
parser.add_argument('--n_iters', type=int, default=200)
parser.add_argument('--load_data_device', type=str, default='cpu')
parser.add_argument('--batch_size', type=int, default=200000)
parser.add_argument('--compute_metrics', action='store_true')
parser.add_argument('--render_only', action='store_true', help='do not optimize, reload weights and render out render_poses path')
return parser
def test_run_diagnostic():
data1 = pd.DataFrame({'col': [1, 2, 3]})
data2 = pd.DataFrame({'col': [2, 1, 3]})
metadata = SingleTableMetadata()
metadata.add_column('col', sdtype='numerical')
DiagnosticReport.generate = Mock(return_value=123)
run_diagnostic(data1, data2, metadata)
DiagnosticReport.generate.assert_called_once_with(data1, data2, metadata.to_dict(), True)
class Enumeration(EntryBase):
def __init__(self, j):
super().__init__(j, 'enumeration')
if ('inc_cases' in j):
self.cases = load_inc_enums()[j['inc_cases']]
else:
self.cases = dict(((Name(name), value) for (name, value) in j['cases'].items()))
def multiplicative_sequence(q, n=None):
from sage.combinat.sf.sf import SymmetricFunctions
from sage.combinat.partition import Partitions
from sage.misc.misc_c import prod
if (n is None):
n = q.degree()
R = q.parent().base_ring()
Sym = SymmetricFunctions(R)
m = Sym.m()
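# coefficient of the monomial symmetric function m_p is the product of q-coefficients over the parts of p; the result is then re-expanded in the elementary basis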
mon_pol = m._from_dict({p: prod((q[i] for i in p)) for k in range((n + 1)) for p in Partitions(k)})
return Sym.e()(mon_pol)
class FactualConsistencyScorer(Scorer):
def __init__(self, align, aggr_type='mean', device='cuda'):
Scorer.__init__(self, align=align, aggr_type=aggr_type, device=device)
def score(self, grounding, hypo, aspect='consistency', remove_stopwords=False):
kwargs = dict(grounding=grounding, hypo=hypo, remove_stopwords=remove_stopwords)
if (aspect == 'consistency'):
return self.score_consistency(**kwargs)
else:
raise NotImplementedError
def score_consistency(self, grounding, hypo, remove_stopwords):
aligner = self._get_aligner('doc_to_summ')
return aligner.get_score(context=grounding, input_text=hypo, remove_stopwords=remove_stopwords)
def create_model():
logger = logging.getLogger(__name__)
start_iter = 0
checkpoints = {}
output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
weights_file = cfg.TRAIN.WEIGHTS
if cfg.TRAIN.AUTO_RESUME:
final_path = os.path.join(output_dir, 'model_final.pkl')
if os.path.exists(final_path):
logger.info('model_final.pkl exists; no need to train!')
return (None, None, None, {'final': final_path}, output_dir)
if cfg.TRAIN.COPY_WEIGHTS:
copyfile(weights_file, os.path.join(output_dir, os.path.basename(weights_file)))
logger.info('Copy {} to {}'.format(weights_file, output_dir))
files = os.listdir(output_dir)
for f in files:
iter_string = re.findall('(?<=model_iter)\\d+(?=\\.pkl)', f)
if (len(iter_string) > 0):
checkpoint_iter = int(iter_string[0])
if (checkpoint_iter > start_iter):
start_iter = (checkpoint_iter + 1)
resume_weights_file = f
if (start_iter > 0):
weights_file = os.path.join(output_dir, resume_weights_file)
logger.info('> Resuming from checkpoint {} at start iter {}'.format(weights_file, start_iter))
logger.info('Building model: {}'.format(cfg.MODEL.TYPE))
model = model_builder.create(cfg.MODEL.TYPE, train=True)
if cfg.MEMONGER:
optimize_memory(model)
workspace.RunNetOnce(model.param_init_net)
return (model, weights_file, start_iter, checkpoints, output_dir)
class GradualStyleEncoder(Module):
def __init__(self, num_layers, mode='ir', input_channels=3, n_styles=18, opts=None):
# n_styles: number of style vectors to predict (18 assumed, matching 1024px StyleGAN2); consumed as self.style_count below
super(GradualStyleEncoder, self).__init__()
assert (num_layers in [50, 100, 152]), 'num_layers should be 50, 100, or 152'
assert (mode in ['ir', 'ir_se']), 'mode should be ir or ir_se'
blocks = get_blocks(num_layers)
if (mode == 'ir'):
unit_module = bottleneck_IR
elif (mode == 'ir_se'):
unit_module = bottleneck_IR_SE
self.input_layer = Sequential(Conv2d(input_channels, 64, (3, 3), 1, 1, bias=False), BatchNorm2d(64), PReLU(64))
modules = []
for block in blocks:
for bottleneck in block:
modules.append(unit_module(bottleneck.in_channel, bottleneck.depth, bottleneck.stride))
self.body = Sequential(*modules)
self.styles = nn.ModuleList()
self.style_count = n_styles
self.coarse_ind = 3
self.middle_ind = 7
for i in range(self.style_count):
if (i < self.coarse_ind):
style = GradualStyleBlock(512, 512, 16)
elif (i < self.middle_ind):
style = GradualStyleBlock(512, 512, 32)
else:
style = GradualStyleBlock(512, 512, 64)
self.styles.append(style)
self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
def _upsample_add(self, x, y):
(_, _, H, W) = y.size()
return (F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y)
def forward(self, x):
x = self.input_layer(x)
latents = []
modulelist = list(self.body._modules.values())
for (i, l) in enumerate(modulelist):
x = l(x)
if (i == 6):
c1 = x
elif (i == 20):
c2 = x
elif (i == 23):
c3 = x
for j in range(self.coarse_ind):
latents.append(self.styles[j](c3))
p2 = self._upsample_add(c3, self.latlayer1(c2))
for j in range(self.coarse_ind, self.middle_ind):
latents.append(self.styles[j](p2))
p1 = self._upsample_add(p2, self.latlayer2(c1))
for j in range(self.middle_ind, self.style_count):
latents.append(self.styles[j](p1))
out = torch.stack(latents, dim=1)
return out
def mvStraight(speed, angle, verbose=0):
vel_msg = Twist()
angular_speed = (((speed * 2) * PI) / 360)
relative_angle = (((angle * 2) * PI) / 360)
vel_msg.linear.x = angular_speed
t0 = rospy.Time.now().to_sec()
current_angle = 0
if (angle == (- 1)):
printv('inf mode : go straight inf until break', verbose)
velocity_publisher.publish(vel_msg)
else:
while (current_angle < relative_angle):
velocity_publisher.publish(vel_msg)
t1 = rospy.Time.now().to_sec()
current_angle = abs((angular_speed * (t1 - t0)))
vel_msg.linear.x = 0
velocity_publisher.publish(vel_msg)
printv('STOP', verbose)
class DinatModel(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch'])
class TFElectraForTokenClassification():
def __init__(self, *args, **kwargs):
requires_tf(self)
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_tf(cls)
def get_cifar_100_just_x_or_y_ds(transform, train, **kw):
just = kw['just']
DATA_DIR = kw.get('DATA_DIR', DEFAULT_DATA_DIR)
just = just.lower()
if (just == 'x'):
ds_X = CIFAR100JustX(root=DATA_DIR, download=DOWNLOAD, train=train, transform=transform)
return ds_X
elif (just == 'y'):
ds_Y = CIFAR100JustY(root=DATA_DIR, download=DOWNLOAD, train=train, transform=transform)
return ds_Y
else:
raise ValueError(f"'just' should be in x,y. Got {just} instead.")
class ToTensor(object):
def __call__(self, sample):
img = np.array(sample['image']).astype(np.float32).transpose((2, 0, 1))
mask = np.expand_dims(np.array(sample['label']).astype(np.float32), (- 1)).transpose((2, 0, 1))
img = torch.from_numpy(img).float()
mask = torch.from_numpy(mask).float()
return {'image': img, 'label': mask}
def get_gatk_bin():
bin = get_param(['software', 'gatk3_jar'], 'GenomeAnalysisTK.jar')
if (bin[(- 4):] == '.jar'):
bin = ('java -XX:ParallelGCThreads={threads} -XX:+UseParallelGC -XX:-UsePerfData -Xms{resources.memory}m -Xmx{resources.memory}m -jar ' + bin)
return bin
class Refvg(object):
def __init__(self, split, model_method):
self._dataset = 'refvg'
self._imageset = 'vg'
self._split = split
self._ref_db = Refer(opt['data_root'], self._dataset, split)
if (model_method == 'sgmn'):
self._ref_sg = self._load_sg()
self._ref_sg_seq = self._load_sg_seq()
else:
self._ref_sg = None
self._ref_sg_seq = None
self._sent_ids = self._ref_db.get_sentIds()
self._image_ids = self._ref_db.get_imgIds(self._sent_ids)
roidb = Roidb(self._imageset, model_method)
self._rois_db = {}
self.max_num_box = 0
for img_id in self._image_ids:
assert (img_id in roidb.roidb)
self._rois_db[img_id] = roidb.roidb[img_id].copy()
self.max_num_box = max(self.max_num_box, int(self._rois_db[img_id]['num_objs']))
self._h5_files = roidb.h5_files
self._h5_lrel_files = roidb.h5_lrel_files
# these accessors are used as attributes elsewhere (e.g. self.image_ids, self.id_to_path), so expose them as properties
@property
def sent_ids(self):
return self._sent_ids
@property
def ref_db(self):
return self._ref_db
@property
def image_ids(self):
return self._image_ids
@property
def rois_db(self):
return self._rois_db
@property
def h5_files(self):
return self._h5_files
@property
def h5_lrel_files(self):
return self._h5_lrel_files
@property
def ref_sg(self):
return self._ref_sg
@property
def ref_sg_seq(self):
return self._ref_sg_seq
@property
def id_to_path(self):
path = {}
for img_id in self.image_ids:
file_name = (str(img_id) + '.jpg')
image_path = (osp.join(opt['data_root'], 'images/') + file_name)
path[img_id] = image_path
return path
def get_imgIds(self, sent_ids):
return self._ref_db.get_imgIds(sent_ids)
def _load_sg(self):
sgs = {}
sg_file_path = osp.join(opt['data_root'], self._dataset, (self._split + '_sgs.json'))
data = json.load(open(sg_file_path, 'r'))
for key in list(data.keys()):
sgs[key] = data[key]
return sgs
def _load_sg_seq(self):
sg_seqs = {}
sg_seq_file_path = osp.join(opt['data_root'], self._dataset, (self._split + '_sg_seqs.json'))
data = json.load(open(sg_seq_file_path, 'r'))
for key in list(data.keys()):
sg_seqs[key] = data[key]
return sg_seqs
def load_dictionary(self, pad_at_first=True):
dict_file = osp.join(opt['data_root'], 'word_embedding', 'vocabulary_72700.txt')
with io.open(dict_file, encoding='utf-8') as f:
words = [w.strip() for w in f.readlines()]
if (pad_at_first and (words[0] != '<pad>')):
raise Exception('The first word needs to be <pad> in the word list.')
vocab_dict = {words[n]: n for n in range(len(words))}
return vocab_dict
def get_img_path(self, id):
return self.id_to_path[id]
def get_sent(self, sent_id):
return self.ref_db.load_sent(sent_id)
def _remove_if_exists(path):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
def get_model(args):
print(f'Creating model: {args.model}')
model = create_model(args.model, pretrained=False, drop_path_rate=args.drop_path, drop_block_rate=None, decoder_depth=args.decoder_depth, use_cls_token=args.use_cls_token, num_frames=args.num_frames, target_feature_dim=args.distillation_target_dim, target_video_feature_dim=args.video_distillation_target_dim, feat_decoder_embed_dim=args.feat_decoder_embed_dim, feat_decoder_num_heads=args.feat_decoder_num_heads, use_checkpoint=args.use_checkpoint, tubelet_size=args.tubelet_size)
return model
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('language', help='Language to download')
parser.add_argument('--output', default='oscar_dump', help='Path for saving files')
parser.add_argument('--no_xz', dest='xz', default=True, action='store_false', help="Don't xz the files - default is to compress while writing")
parser.add_argument('--prefix', default='oscar_dump', help='Prefix to use for the pieces of the dataset')
parser.add_argument('--version', choices=['2019', '2023'], default='2023', help='Which version of the Oscar dataset to download')
args = parser.parse_args()
args.language = lang_to_langcode(args.language)
return args
class Lgplvm(Gplvm):
name = 'Lgplvm'
def __init__(self, n: int, m: int, d: int, n_samples: int, lat_dist: Rdist, lprior: Lprior, Bayesian=True, Y=None, learn_neuron_scale=False, ard=False, learn_scale=None, sigma=None, C=None):
obs = (Bfa(n, d, Y=Y, learn_neuron_scale=learn_neuron_scale, ard=ard, learn_scale=learn_scale) if Bayesian else Fa(n, d, Y=Y, sigma=sigma, C=C))
super().__init__(obs, lat_dist, lprior, n, m, n_samples)
def apply_random_mask(message_bits, input_key, sample_seed_prefix, input_nonce):
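# derive a deterministic pseudorandom bitmask from the key and nonce, then XOR it onto the payload bits (stream-cipher-style masking)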
mask_generator = DRBG(input_key, (sample_seed_prefix + input_nonce))
mask_bits = mask_generator.generate_bits(len(message_bits))
masked_message_bits = deepcopy(message_bits)
for b in range(0, len(message_bits)):
masked_message_bits[b] = (message_bits[b] ^ mask_bits[b])
return masked_message_bits
class BayesBiNN(Optimizer):
def __init__(self, model, train_set_size, lr=1e-09, betas=0.0, prior_lamda=None, num_samples=5, lamda_init=10, lamda_std=0, temperature=1, reweight=1):
if (lr <= 0.0):
raise ValueError('Invalid learning rate: {}'.format(lr))
if ((prior_lamda is not None) and (not torch.is_tensor(prior_lamda))):
raise ValueError('Invalid prior mu value (from previous task): {}'.format(prior_lamda))
if (not (0.0 <= betas < 1.0)):
raise ValueError('Invalid beta parameter at index 0: {}'.format(betas))
if (train_set_size < 1):
raise ValueError('Invalid number of training data points: {}'.format(train_set_size))
defaults = dict(lr=lr, beta=betas, prior_lamda=prior_lamda, num_samples=num_samples, train_set_size=train_set_size, temperature=temperature, reweight=reweight)
super(BayesBiNN, self).__init__(model.parameters(), defaults)
self.train_modules = []
self.set_train_modules(model)
defaults = self.defaults
parameters = self.param_groups[0]['params']
self.param_groups[0]['lr'] = lr
device = parameters[0].device
p = parameters_to_vector(self.param_groups[0]['params'])
mixtures_coeff = torch.randint_like(p, 2)
self.state['lamda'] = ((mixtures_coeff * (lamda_init + (np.sqrt(lamda_std) * torch.randn_like(p)))) + ((1 - mixtures_coeff) * ((- lamda_init) + (np.sqrt(lamda_std) * torch.randn_like(p)))))
self.state['mu'] = torch.tanh(self.state['lamda'])
self.state['momentum'] = torch.zeros_like(p, device=device)
if torch.is_tensor(defaults['prior_lamda']):
self.state['prior_lamda'] = defaults['prior_lamda'].to(device)
else:
self.state['prior_lamda'] = torch.zeros_like(p, device=device)
self.state['step'] = 0
self.state['temperature'] = temperature
self.state['reweight'] = reweight
def set_train_modules(self, module):
if (len(list(module.children())) == 0):
if (len(list(module.parameters())) != 0):
self.train_modules.append(module)
else:
for child in list(module.children()):
self.set_train_modules(child)
def step(self, closure):
if (closure is None):
raise RuntimeError('For now, BayesBiNN only supports that the model/loss can be reevaluated inside the step function')
self.state['step'] += 1
defaults = self.defaults
parameters = self.param_groups[0]['params']
lr = self.param_groups[0]['lr']
momentum_beta = defaults['beta']
momentum = self.state['momentum']
mu = self.state['mu']
lamda = self.state['lamda']
temperature = defaults['temperature']
reweight = self.state['reweight']
grad_hat = torch.zeros_like(lamda)
loss_list = []
pred_list = []
if (defaults['num_samples'] <= 0):
w_vector = torch.tanh(self.state['lamda'])
vector_to_parameters(w_vector, parameters)
(loss, preds) = closure()
pred_list.append(preds)
linear_grad = torch.autograd.grad(loss, parameters)
loss_list.append(loss.detach())
grad = parameters_to_vector(linear_grad).detach()
grad_hat = (defaults['train_set_size'] * grad)
else:
for _ in range(defaults['num_samples']):
raw_noise = torch.rand_like(mu)
rou_vector = ((torch.log((raw_noise / (1 - raw_noise))) / 2) + self.state['lamda'])
w_vector = torch.tanh((rou_vector / temperature))
vector_to_parameters(w_vector, parameters)
(loss, preds) = closure()
pred_list.append(preds)
linear_grad = torch.autograd.grad(loss, parameters)
loss_list.append(loss.detach())
grad = parameters_to_vector(linear_grad).detach()
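# reparameterization gradient scale: d tanh(rou/T)/d rou = (1 - w^2)/T, normalized by (1 - mu^2); the small epsilons guard against division by zero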
scale = ((((1 - (w_vector * w_vector)) + 1e-10) / temperature) / ((1 - (self.state['mu'] * self.state['mu'])) + 1e-10))
grad_hat.add_((scale * grad))
grad_hat = grad_hat.mul((defaults['train_set_size'] / defaults['num_samples']))
self.state['momentum'] = ((momentum_beta * self.state['momentum']) + ((1 - momentum_beta) * (grad_hat + (reweight * (self.state['lamda'] - self.state['prior_lamda'])))))
loss = torch.mean(torch.stack(loss_list))
bias_correction1 = (1 - (momentum_beta ** self.state['step']))
self.state['lamda'] = (self.state['lamda'] - ((self.param_groups[0]['lr'] * self.state['momentum']) / bias_correction1))
self.state['mu'] = torch.tanh(lamda)
return (loss, pred_list)
def get_distribution_params(self):
mu = self.state['mu'].clone().detach()
precision = (mu * (1 - mu))
return (mu, precision)
def get_mc_predictions(self, forward_function, inputs, ret_numpy=False, raw_noises=None, *args, **kwargs):
parameters = self.param_groups[0]['params']
predictions = []
if (raw_noises is None):
raw_noises = []
mean_vector = torch.where((self.state['mu'] <= 0), torch.zeros_like(self.state['mu']), torch.ones_like(self.state['mu']))
raw_noises.append(mean_vector)
for raw_noise in raw_noises:
vector_to_parameters(((2 * raw_noise) - 1), parameters)
outputs = forward_function(inputs, *args, **kwargs)
if ret_numpy:
outputs = outputs.data.cpu().numpy()
predictions.append(outputs)
return predictions
def _format_custom_logs(keys=None, raw_log=None, _type=REWARD):
keys = (keys or [])
raw_log = (raw_log or {})
log = {}
if keys:
for key in keys:
if (key in raw_log):
log[key] = raw_log[key]
else:
log = raw_log
log[TYPE] = _type
return _format_log(log)
def generate_json(folder_path, split):
yaml_file = read_file((((folder_path + '/') + split) + '.yaml'))
translations_file = read_file((((folder_path + '/') + split) + '.fra'))
assert (len(yaml_file) == len(translations_file))
output_json = dict()
for i in range(len(yaml_file)):
content = yaml_file[i]
utt_id = content.split(', wav: ')[1].split('}')[0]
output_json[utt_id] = dict()
output_json[utt_id]['path'] = (((folder_path.replace('/txt', '/wav') + '/') + utt_id) + '.wav')
output_json[utt_id]['trans'] = translations_file[i]
output_json[utt_id]['duration'] = content.split('{duration: ')[1].split(',')[0]
return output_json
def split_entities(entity_list, add_fraction):
print((('splitting for additional ' + str(add_fraction)) + ' entities'))
num_entities = len(entity_list)
num_new_entities = np.round((num_entities * add_fraction))
entity_splits_dict = {}
for entity in entity_list:
entity_splits_dict[tuple(entity[:(- 1)])] = [entity]
entity_mho_list = [MaxHeapObj(entity) for entity in entity_list]
heapq.heapify(entity_mho_list)
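# greedily pop the largest entity (max-heap via MaxHeapObj), halve it, and push the halves back until the target count is reached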
while (len(entity_mho_list) < (num_entities + num_new_entities)):
largest_entity_mho = heapq.heappop(entity_mho_list)
new_entities = halve(largest_entity_mho)
for new_entity in new_entities:
heapq.heappush(entity_mho_list, new_entity)
entity = largest_entity_mho.get_entity()
entity_splits_dict[tuple(entity[:(- 1)])].remove(entity)
for new_entity in new_entities:
entity_splits_dict[tuple(entity[:(- 1)])].append(new_entity.get_entity())
grouped_entity_list = [entity_splits_dict[tuple(entity[:(- 1)])] for entity in entity_list]
return grouped_entity_list
def load_ply_normal(filename, point_num):
plydata = PlyData.read(filename)
pc = plydata['normal'].data[:point_num]
pc_array = np.array([[x, y, z] for (x, y, z) in pc])
return pc_array
class Polynomial_padic_capped_relative_dense(Polynomial_generic_cdv, Polynomial_padic):
def __init__(self, parent, x=None, check=True, is_gen=False, construct=False, absprec=infinity, relprec=infinity):
Polynomial.__init__(self, parent, is_gen=is_gen)
self._polygon = None
parentbr = parent.base_ring()
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
if construct:
(self._poly, self._valbase, self._relprecs, self._normalized, self._valaddeds, self._list) = x
return
elif is_gen:
self._poly = PolynomialRing(ZZ, parent.variable_name()).gen()
self._valbase = 0
self._valaddeds = [infinity, 0]
self._relprecs = [infinity, parentbr.precision_cap()]
self._normalized = True
self._list = None
return
if isinstance(x, ZZX):
x = Polynomial_integer_dense(PolynomialRing(ZZ, parent.variable_name()), x, construct=True)
elif (isinstance(x, FractionFieldElement) and (x.denominator() == 1)):
x = x.numerator()
if isinstance(x, Polynomial):
if (x.parent() is self.parent()):
if ((absprec is not infinity) or (relprec is not infinity)):
x._normalize()
self._poly = x._poly
self._valbase = x._valbase
self._valaddeds = x._valaddeds
self._relprecs = x._relprecs
self._normalized = x._normalized
self._list = x._list
if ((absprec is not infinity) or (relprec is not infinity)):
self._adjust_prec_info(absprec, relprec)
return
elif (x.base_ring() is ZZ):
self._poly = PolynomialRing(ZZ, parent.variable_name())(x)
self._valbase = Integer(0)
p = parentbr.prime()
self._relprecs = [(c.valuation(p) + parentbr.precision_cap()) for c in x.list()]
self._comp_valaddeds()
self._normalized = ((not self._valaddeds) or (min(self._valaddeds) == 0))
self._list = None
if ((absprec is not infinity) or (relprec is not infinity)):
self._adjust_prec_info(absprec, relprec)
return
else:
x = [parentbr(a) for a in x.list()]
check = False
elif isinstance(x, dict):
zero = parentbr.zero()
n = (max(x) if x else 0)
v = ([zero] * (n + 1))
for (i, z) in x.items():
v[i] = z
x = v
elif isinstance(x, pari_gen):
x = [parentbr(w) for w in x.list()]
check = False
elif (not isinstance(x, list)):
x = [x]
if (not x):
x = [parentbr.zero()]
if check:
x = [parentbr(z) for z in x]
self._list = x
self._valaddeds = [a.valuation() for a in x]
self._valbase = sage.rings.padics.misc.min(self._valaddeds)
if (self._valbase is infinity):
self._valaddeds = []
self._relprecs = []
self._poly = PolynomialRing(ZZ, parent.variable_name())()
self._normalized = True
if ((absprec is not infinity) or (relprec is not infinity)):
self._adjust_prec_info(absprec, relprec)
else:
self._valaddeds = [(c - self._valbase) for c in self._valaddeds]
self._relprecs = [(a.precision_absolute() - self._valbase) for a in x]
self._poly = PolynomialRing(ZZ, parent.variable_name())([(a >> self._valbase) for a in x])
self._normalized = True
if ((absprec is not infinity) or (relprec is not infinity)):
self._adjust_prec_info(absprec, relprec)
def _new_constant_poly(self, a, P):
return self.__class__(P, [a], check=False)
def _normalize(self):
if (not self._normalized):
if (self._valaddeds is None):
self._comp_valaddeds()
val = sage.rings.padics.misc.min(self._valaddeds)
prime_pow = self.base_ring().prime_pow
selflist = self._poly.list()
if (val is infinity):
pass
elif (val != 0):
self._relprecs = [max((prec - val), 0) for prec in self._relprecs]
v = [(Integer(0) if (e is infinity) else ((c // prime_pow(val)) % prime_pow(e))) for (c, e) in zip(selflist, self._relprecs)]
self._poly = self._poly.parent()(v, check=False)
self._valbase += val
self._valaddeds = [(c - val) for c in self._valaddeds]
else:
self._poly = self._poly.parent()([(Integer(0) if (e is infinity) else (c % prime_pow(e))) for (c, e) in zip(selflist, self._relprecs)], check=False)
self._normalized = True
def _reduce_poly(self):
selflist = self._poly.list()
prime_pow = self.base_ring().prime_pow
self._poly = self._poly.parent()([(Integer(0) if (e is infinity) else (c % prime_pow(e))) for (c, e) in zip(selflist, self._relprecs)], check=False)
def __reduce__(self):
return (make_padic_poly, (self.parent(), (self._poly, self._valbase, self._relprecs, self._normalized, self._valaddeds, self._list), 0))
def _comp_list(self):
if ((self.degree() == (- 1)) and (self._valbase == infinity)):
self._list = []
polylist = self._poly.list()
polylen = len(polylist)
self._list = ([(self.base_ring()(polylist[i], absprec=self._relprecs[i]) << self._valbase) for i in range(polylen)] + [self.base_ring()(0, absprec=(self._relprecs[i] + self._valbase)) for i in range(polylen, len(self._relprecs))])
while (self._list and self._list[(- 1)]._is_exact_zero()):
self._list.pop()
def _comp_valaddeds(self):
self._valaddeds = []
prime = self.parent().base_ring().prime()
for (i, pli) in enumerate(self._poly.list()):
tmp = pli.valuation(prime)
if ((tmp is infinity) or (tmp > self._relprecs[i])):
self._valaddeds.append(self._relprecs[i])
else:
self._valaddeds.append(tmp)
for i in range((self._poly.degree() + 1), len(self._relprecs)):
self._valaddeds.append(self._relprecs[i])
def _adjust_prec_info(self, absprec=infinity, relprec=infinity):
return
def _getprecpoly(self, n):
one = Integer(1)
return self._poly.parent()([(0 if (c is infinity) else (one << (n * c))) for c in self._relprecs])
def _getvalpoly(self, n):
one = Integer(1)
if (self._valaddeds is None):
self._comp_valaddeds()
return self._poly.parent()(([(0 if (c is infinity) else (one << (n * c))) for c in self._valaddeds] + [(0 if (c is infinity) else (one << (n * c))) for c in self._relprecs[len(self._valaddeds):]]))
def list(self, copy=True):
if (self._list is None):
self._comp_list()
if copy:
return list(self._list)
else:
return self._list
def lift(self):
return (self.base_ring().prime_pow(self._valbase) * self._poly)
def __getitem__(self, n):
d = len(self._relprecs)
if isinstance(n, slice):
(start, stop, step) = (n.start, n.stop, n.step)
if (step is not None):
raise IndexError('polynomial slicing with a step is not defined')
if (start is not None):
raise IndexError('polynomial slicing with a start is not defined')
if ((stop is None) or (stop > d)):
stop = d
values = [self[i] for i in range(stop)]
return self.parent()(values)
try:
n = n.__index__()
except AttributeError:
raise TypeError('list indices must be integers, not {0}'.format(type(n).__name__))
if ((n < 0) or (n >= d)):
return self.base_ring().zero()
if (self._list is not None):
return self._list[n]
return self.base_ring()((self.base_ring().prime_pow(self._valbase) * self._poly[n]), absprec=(self._valbase + self._relprecs[n]))
def _add_(self, right):
selfpoly = self._poly
rightpoly = right._poly
if (self._valbase > right._valbase):
selfpoly = (selfpoly * self.base_ring().prime_pow((self._valbase - right._valbase)))
baseval = right._valbase
elif (self._valbase < right._valbase):
rightpoly = (rightpoly * self.base_ring().prime_pow((right._valbase - self._valbase)))
baseval = self._valbase
else:
baseval = self._valbase
n = max(len(self._relprecs), len(right._relprecs))
relprecs = [min(((a + self._valbase) - baseval), ((b + right._valbase) - baseval)) for (a, b) in zip(_extend_by_infinity(self._relprecs, n), _extend_by_infinity(right._relprecs, n))]
return Polynomial_padic_capped_relative_dense(self.parent(), ((selfpoly + rightpoly), baseval, relprecs, False, None, None), construct=True)
def _sub_(self, right):
selfpoly = self._poly
rightpoly = right._poly
if (self._valbase > right._valbase):
selfpoly = (selfpoly * self.base_ring().prime_pow((self._valbase - right._valbase)))
baseval = right._valbase
elif (self._valbase < right._valbase):
rightpoly = (rightpoly * self.base_ring().prime_pow((right._valbase - self._valbase)))
baseval = self._valbase
else:
baseval = self._valbase
n = max(len(self._relprecs), len(right._relprecs))
relprecs = [min(((a + self._valbase) - baseval), ((b + right._valbase) - baseval)) for (a, b) in zip(_extend_by_infinity(self._relprecs, n), _extend_by_infinity(right._relprecs, n))]
return Polynomial_padic_capped_relative_dense(self.parent(), ((selfpoly - rightpoly), baseval, relprecs, False, None, None), construct=True)
def _mul_(self, right):
self._normalize()
right._normalize()
zzpoly = (self._poly * right._poly)
if ((not self._relprecs) or (not right._relprecs)):
return self.parent()(0)
n = (Integer(((len(self._relprecs) + len(right._relprecs)) - 1)).exact_log(2) + 1)
precpoly1 = (self._getprecpoly(n) * right._getvalpoly(n))
precpoly2 = (self._getvalpoly(n) * right._getprecpoly(n))
tn = (Integer(1) << n)
preclist = [min(a.valuation(tn), b.valuation(tn)) for (a, b) in zip(precpoly1.list(), precpoly2.list())]
answer = Polynomial_padic_capped_relative_dense(self.parent(), (zzpoly, (self._valbase + right._valbase), preclist, False, None, None), construct=True)
answer._reduce_poly()
return answer
def _lmul_(self, right):
return self._rmul_(right)
def _rmul_(self, left):
if (self._valaddeds is None):
self._comp_valaddeds()
if (left != 0):
(val, unit) = left.val_unit()
left_rprec = left.precision_relative()
relprecs = [min((left_rprec + self._valaddeds[i]), self._relprecs[i]) for i in range(len(self._relprecs))]
elif left._is_exact_zero():
return Polynomial_padic_capped_relative_dense(self.parent(), [])
else:
return Polynomial_padic_capped_relative_dense(self.parent(), (self._poly.parent()(0), (self._valbase + left.valuation()), self._valaddeds, False, self._valaddeds, None), construct=True)
return Polynomial_padic_capped_relative_dense(self.parent(), (self._poly._rmul_(unit), (self._valbase + val), relprecs, False, self._valaddeds, None), construct=True)
def _neg_(self):
return Polynomial_padic_capped_relative_dense(self.parent(), ((- self._poly), self._valbase, self._relprecs, False, self._valaddeds, None), construct=True)
def lshift_coeffs(self, shift, no_list=False):
if (shift < 0):
return self.rshift_coeffs((- shift), no_list)
if (no_list or (self._list is None)):
return Polynomial_padic_capped_relative_dense(self.parent(), (self._poly, (self._valbase + shift), self._relprecs, False, self._valaddeds, None), construct=True)
else:
return Polynomial_padic_capped_relative_dense(self.parent(), (self._poly, (self._valbase + shift), self._relprecs, False, self._valaddeds, [c.__lshift__(shift) for c in self._list]), construct=True)
def rshift_coeffs(self, shift, no_list=False):
if (shift < 0):
return self.lshift_coeffs((- shift), no_list)
if (self.base_ring().is_field() or (shift <= self._valbase)):
if (no_list or (self._list is None)):
return Polynomial_padic_capped_relative_dense(self.parent(), (self._poly, (self._valbase - shift), self._relprecs, self._normalized, self._valaddeds, None), construct=True)
else:
return Polynomial_padic_capped_relative_dense(self.parent(), (self._poly, (self._valbase - shift), self._relprecs, self._normalized, self._valaddeds, [c.__rshift__(shift) for c in self._list]), construct=True)
else:
shift = (shift - self._valbase)
fdiv = self.base_ring().prime_pow(shift)
return Polynomial_padic_capped_relative_dense(self.parent(), ((self._poly // fdiv), 0, [(0 if (a <= shift) else (a - shift)) for a in self._relprecs], False, None, None), construct=True)
def _unsafe_mutate(self, n, value):
n = int(n)
value = self.base_ring()(value)
if self.is_gen():
raise ValueError('cannot modify generator')
if (n < 0):
raise IndexError('n must be >= 0')
if (self._valbase is infinity):
if value._is_exact_zero():
return
self._valbase = value.valuation()
if (value != 0):
self._poly._unsafe_mutate(n, value.unit_part().lift())
self._relprecs = (([infinity] * n) + [value.precision_relative()])
else:
self._relprecs = (([infinity] * n) + [0])
self._valaddeds = (([infinity] * n) + [0])
zero = self.base_ring()(0)
self._list = (([zero] * n) + [value])
self._normalized = True
elif (value.valuation() >= self._valbase):
if (value != 0):
self._poly._unsafe_mutate(n, value.__rshift__(self._valbase).lift())
else:
self._poly._unsafe_mutate(n, 0)
if (n < len(self._relprecs)):
self._relprecs[n] = (value.precision_absolute() - self._valbase)
if (self._valaddeds is not None):
self._valaddeds[n] = (value.valuation() - self._valbase)
if (self._list is not None):
self._list[n] = value
else:
self._relprecs.extend(([infinity] * (n - len(self._relprecs))) + [(value.precision_absolute() - self._valbase)])
if (self._valaddeds is not None):
self._valaddeds.extend(([infinity] * (n - len(self._valaddeds))) + [(value.valuation() - self._valbase)])
if (self._list is not None):
zero = self.base_ring()(0)
self._list.extend(([zero] * (n - len(self._list))) + [value])
else:
basediff = (self._valbase - value.valuation())
self._valbase = value.valuation()
if (self._valaddeds is not None):
self._valaddeds = [(c + basediff) for c in self._valaddeds]
self._poly = (self._poly * self.base_ring().prime_pow(basediff))
if (value != 0):
self._poly._unsafe_mutate(n, value.unit_part().lift())
else:
self._poly._unsafe_mutate(n, 0)
if (n < len(self._relprecs)):
self._relprecs[n] = value.precision_relative()
else:
self._relprecs.extend((([infinity] * (n - len(self._relprecs))) + [value.precision_relative()]))
self._normalized = False
if (self._list is not None):
if (n < len(self._list)):
self._list[n] = value
else:
zero = self.base_ring()(0)
self._list.extend((([zero] * (n - len(self._list))) + [value]))
def __pari__(self, variable=None):
if (variable is None):
variable = self.parent().variable_name()
return pari(self.list()).Polrev(variable)
def __copy__(self):
return Polynomial_padic_capped_relative_dense(self.parent(), (copy.copy(self._poly), self._valbase, copy.copy(self._relprecs), self._normalized, copy.copy(self._valaddeds), copy.copy(self._list)), construct=True)
def degree(self, secure=False):
self._normalize()
deg = Integer(self._poly.degree())
if (secure and (deg < self.prec_degree())):
raise PrecisionError('the leading coefficient is indistinguishable from 0')
return deg
def prec_degree(self):
return (len(self._relprecs) - 1)
def precision_absolute(self, n=None):
if (n is None):
return [(c + self._valbase) for c in self._relprecs]
return (self._relprecs[n] + self._valbase)
def precision_relative(self, n=None):
if (n is None):
self._normalize()
return copy.copy(self._relprecs)
n = int(n)
if ((n < 0) or (n >= len(self._relprecs)) or (self._relprecs[n] is infinity)):
return Integer(0)
if (self._valaddeds is None):
return (self._relprecs[n] - self._poly[n].valuation(self.base_ring().prime()))
else:
return (self._relprecs[n] - self._valaddeds[n])
def valuation_of_coefficient(self, n=None):
if (self._valaddeds is None):
self._comp_valaddeds()
if (n is None):
self._normalize()
return [(c + self._valbase) for c in self._valaddeds]
n = int(n)
if ((n < 0) or (n >= len(self._relprecs))):
return infinity
return (self._valbase + self._valaddeds[n])
def valuation(self, val_of_var=None):
if (val_of_var is None):
return self._poly.valuation()
if (self._valaddeds is None):
self._comp_valaddeds()
return (self._valbase + min([(self._valaddeds[i] + (val_of_var * i)) for i in range(len(self._valaddeds))]))
def reverse(self, degree=None):
n = (self._poly.degree() if (degree is None) else degree)
zzlist = (self._poly.list()[:(n + 1)] + ([0] * (n - self._poly.degree())))
zzlist.reverse()
relprec = (self._relprecs[:(n + 1)] + ([infinity] * (n - self.prec_degree())))
relprec.reverse()
if (self._valaddeds is None):
valadded = None
else:
valadded = (self._valaddeds[:(n + 1)] + ([infinity] * (n - self.prec_degree())))
valadded.reverse()
if (self._list is None):
L = None
else:
L = (self._list[:(n + 1)] + ([self.base_ring()(0)] * (n - self.prec_degree())))
L.reverse()
return Polynomial_padic_capped_relative_dense(self.parent(), (self._poly.parent()(zzlist), self._valbase, relprec, self._normalized, valadded, L), construct=True)
def rescale(self, a):
negval = False
try:
a = self.base_ring()(a)
except ValueError as msg:
if (str(msg) == 'element has negative valuation.'):
negval = True
else:
raise
if negval:
return self.parent().base_extend(self.base_ring().fraction_field())(self).rescale(a)
if (self.base_ring().is_field() and (a.valuation() < 0)):
D = self.prec_degree()
return ((a ** D) * self.reverse(D).rescale((~ a)).reverse(D))
aval = a.valuation()
arprec = a.precision_relative()
if (self._valaddeds is None):
self._comp_valaddeds()
valadded = [(self._valaddeds[i] + (aval * i)) for i in range(len(self._valaddeds))]
relprec = [(infinity if (self._relprecs[i] is infinity) else ((min((self._relprecs[i] - self._valaddeds[i]), arprec) + (aval * i)) + self._valaddeds[i])) for i in range(len(self._relprecs))]
relprec[0] = self._relprecs[0]
if (a == 0):
zzpoly = self._poly.parent()(0)
else:
zzpoly = self._poly.rescale(Integer(a))
return Polynomial_padic_capped_relative_dense(self.parent(), (zzpoly, self._valbase, relprec, False, valadded, None), construct=True)
def quo_rem(self, right, secure=False):
return self._quo_rem_list(right, secure=secure)
def _quo_rem_naive(self, right):
K = self.base_ring().fraction_field()
f = self.base_extend(K)
g = right.base_extend(K)
if (g == 0):
raise ZeroDivisionError('cannot divide by a polynomial indistinguishable from 0')
x = f.parent().gen()
quo = f.parent()(0)
while (f.degree() >= g.degree()):
a = (f.leading_coefficient() / g.leading_coefficient())
quo = (quo + (a * (x ** (f.degree() - g.degree()))))
f = (f - ((a * (x ** (f.degree() - g.degree()))) * g))
return (quo, f)
def _quo_rem_list(self, right, secure):
if right.is_zero():
raise ZeroDivisionError('cannot divide by a polynomial indistinguishable from 0')
a = self.list()
da = (len(a) - 1)
b = right.list()
db = right.degree(secure=secure)
inv = (~ b[db])
q = []
for i in range(da, (db - 1), (- 1)):
c = (inv * a[i])
q.append(c)
for j in range(db):
a[((j + i) - db)] -= (c * b[j])
q.reverse()
K = self.base_ring().fraction_field()
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
parent = PolynomialRing(K, name=self.parent().variable_name())
return (parent(q), parent(a[:db]))
def disc(self):
return self.discriminant()
def newton_polygon(self):
if (self._valaddeds is None):
self._comp_valaddeds()
from sage.geometry.newton_polygon import NewtonPolygon
valbase = self._valbase
polygon = NewtonPolygon([(x, (val + valbase)) for (x, val) in enumerate(self._valaddeds)])
polygon_prec = NewtonPolygon([(x, (val + valbase)) for (x, val) in enumerate(self._relprecs)])
vertices = polygon.vertices(copy=False)
vertices_prec = polygon_prec.vertices(copy=False)
if (vertices[0][0] > vertices_prec[0][0]):
raise PrecisionError('The constant coefficient has not enough precision')
if (vertices[(- 1)][0] < vertices_prec[(- 1)][0]):
raise PrecisionError('The leading coefficient has not enough precision')
for (x, y) in vertices:
if (polygon_prec(x) <= y):
raise PrecisionError(('The coefficient of %s^%s has not enough precision' % (self.parent().variable_name(), x)))
return polygon
def is_eisenstein(self, secure=False):
deg = self.degree()
if (secure and (self.prec_degree() > deg)):
raise PrecisionError('The degree of the polynomial is not determined')
if (self._valaddeds is None):
self._comp_valaddeds()
compval = (1 - self._valbase)
valaddeds = self._valaddeds
relprecs = self._relprecs
if (relprecs[0] <= compval):
if (valaddeds[0] < relprecs[0]):
return False
raise PrecisionError('Not enough precision on the constant coefficient')
elif (valaddeds[0] != compval):
return False
for i in range(1, deg):
if (relprecs[i] < compval):
if (valaddeds[i] < relprecs[i]):
return False
if secure:
if (i == 1):
raise PrecisionError(('Not enough precision on the coefficient of %s' % self.variable_name()))
else:
raise PrecisionError(('Not enough precision on the coefficient of %s^%s' % (self.variable_name(), i)))
elif (valaddeds[i] < compval):
return False
if (valaddeds[deg] != (- self._valbase)):
return False
return True
def newton_slopes(self, repetition=True):
polygon = self.newton_polygon()
return [(- s) for s in polygon.slopes(repetition=repetition)]
def factor_mod(self):
self._normalize()
if (self._valbase < 0):
raise ValueError('Polynomial does not have integral coefficients')
elif (self._valbase > 0):
raise ValueError('Factorization of the zero polynomial not defined')
elif (min(self._relprecs) <= 0):
raise PrecisionError('Polynomial is not known to high enough precision')
return self._poly.factor_mod(self.base_ring().prime()) |
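# Hedged usage sketch for the capped-relative p-adic polynomial class above
# (assumes a Sage environment; the calls shown are standard Sage API):
from sage.all import Qp, PolynomialRing

K = Qp(5, prec=10)
R = PolynomialRing(K, 'x')
x = R.gen()
f = x**2 + 5*x + 25
print(f.newton_polygon())                # built from coefficient valuations 2, 1, 0
print(f.is_eisenstein())                 # False: the constant term has valuation 2
print((x**2 + 5*x + 5).is_eisenstein())  # True: valuations 1, 1, 0 |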
class CvtIntermediate(nn.Module):
def __init__(self, embed_dim, mlp_ratio):
super().__init__()
self.dense = nn.Linear(embed_dim, int((embed_dim * mlp_ratio)))
self.activation = nn.GELU()
def forward(self, hidden_state):
hidden_state = self.dense(hidden_state)
hidden_state = self.activation(hidden_state)
return hidden_state |
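# Quick shape check for CvtIntermediate above (example dims are assumptions):
import torch

block = CvtIntermediate(embed_dim=64, mlp_ratio=4)
hidden = torch.randn(2, 196, 64)       # (batch, tokens, embed_dim)
out = block(hidden)
assert out.shape == (2, 196, 256)      # int(embed_dim * mlp_ratio) |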
def copy_dory_subset():
testdata = relative_file('data/dory-subset.fa')
shutil.copyfile(testdata, 'dory-subset.fa')
testdata = relative_file('data/dory-subset.fq')
shutil.copyfile(testdata, 'dory-subset.fq') |
class SNLIBertPipe(MatchingBertPipe):
def process_from_file(self, paths=None):
data_bundle = SNLILoader().load(paths)
return self.process(data_bundle) |
def sharp_switch(extr, primary, *params):
primary = primary.strip()
found = False
default = None
rvalue = None
lvalue = ''
for param in params:
pair = param.split('=', 1)
lvalue = extr.expand(pair[0].strip())
rvalue = None
if (len(pair) > 1):
rvalue = extr.expand(pair[1].strip())
if (found or (primary in [v.strip() for v in lvalue.split('|')])):
return rvalue
elif (lvalue == '#default'):
default = rvalue
rvalue = None
elif (lvalue == primary):
found = True
if (rvalue is not None):
return lvalue
elif (default is not None):
return default
return '' |
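# sharp_switch above mirrors MediaWiki's {{#switch:}} parser function; a
# hedged demo with a stub expander (the real extr object is assumed to
# expose an expand() method that resolves templates):
class _StubExtractor:
    def expand(self, text):
        return text   # no template expansion in this sketch

extr = _StubExtractor()
assert sharp_switch(extr, 'b', 'a=1', 'b=2', '#default=9') == '2'   # direct match
assert sharp_switch(extr, 'z', 'a=1', 'b=2', '#default=9') == '9'   # falls back to #default |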
class BaseCommandParser:
def __init__(self):
self.__attr_names = [name for (name, _, _) in self._desc_]
last_desc = self._desc_[0]
for d in self._desc_:
if (last_desc[1] < d[1]):
last_desc = d
def parse(self, buf, max_num):
desc = self._desc_
cmd_list = []
for i in range(max_num):
cmd = CMDWrapper()
cmd.mlir_cmd = None
for d in desc:
(name, bit_begin, bit_len) = d
byte_begin = (bit_begin // 8)
byte_end = (((bit_begin + bit_len) + 7) // 8)
bit_offset = (bit_begin % 8)
byte_slice = buf[byte_begin:byte_end]
total_val = 0
for v in byte_slice[::(- 1)]:
total_val = ((total_val << 8) | v)
total_val = (total_val >> bit_offset)
total_val = (total_val & ((1 << bit_len) - 1))
cmd.__dict__[name] = total_val
cmd.type = self.__class__._type_func_(cmd)
cmd.mem_records = self.__class__._mem_record_func_(cmd)
cmd.dep_id = (- 1)
cmd_list.append(cmd)
buf = buf[self.command_byte_len():]
cmd.alg_ops = 0
cmd.arch_ops = 0
return cmd_list
def attr_names(self):
return self.__attr_names
def command_byte_len(self):
return self._byte_len_ |
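# parse() above decodes little-endian bit-fields from a raw command buffer;
# the same slicing arithmetic as a standalone sketch:
def extract_bits(buf, bit_begin, bit_len):
    byte_begin = bit_begin // 8
    byte_end = (bit_begin + bit_len + 7) // 8
    total_val = 0
    for v in buf[byte_begin:byte_end][::-1]:   # accumulate most-significant byte first
        total_val = (total_val << 8) | v
    total_val >>= bit_begin % 8
    return total_val & ((1 << bit_len) - 1)

# 0xAB = 0b10101011: bits 0..3 are 0b1011; bits 4..11 span into the next byte
assert extract_bits(bytes([0xAB, 0xCD]), 0, 4) == 0b1011
assert extract_bits(bytes([0xAB, 0xCD]), 4, 8) == 0xDA |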
class TestDeterministicIntentParser(FixtureTest):
def setUp(self):
super(TestDeterministicIntentParser, self).setUp()
slots_dataset_stream = io.StringIO('\n---\ntype: intent\nname: dummy_intent_1\nslots:\n - name: dummy_slot_name\n entity: dummy_entity_1\n - name: dummy_slot_name2\n entity: dummy_entity_2\n - name: startTime\n entity: snips/datetime\nutterances:\n - >\n This is a [dummy_slot_name](dummy_1) query with another \n [dummy_slot_name2](dummy_2) [startTime](at 10p.m.) or \n [startTime](tomorrow)\n - "This is a [dummy_slot_name](dummy_1) "\n - "[startTime](tomorrow evening) there is a [dummy_slot_name](dummy_1)"\n \n---\ntype: entity\nname: dummy_entity_1\nautomatically_extensible: no\nvalues:\n- [dummy_a, dummy 2a, dummy a, 2 dummy a]\n- [dummy_b, dummy b, dummy_bb, dummy_b]\n- dummy d\n\n---\ntype: entity\nname: dummy_entity_2\nautomatically_extensible: no\nvalues:\n- [dummy_c, 3p.m., dummy_cc, dummy c]')
self.slots_dataset = Dataset.from_yaml_files('en', [slots_dataset_stream]).json
def test_should_parse_intent(self):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: intent1\nutterances:\n - foo bar baz\n\n---\ntype: intent\nname: intent2\nutterances:\n - foo bar ban')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
parser = DeterministicIntentParser().fit(dataset)
text = 'foo bar ban'
parsing = parser.parse(text)
probability = 1.0
expected_intent = intent_classification_result(intent_name='intent2', probability=probability)
self.assertEqual(expected_intent, parsing[RES_INTENT])
def test_should_parse_intent_with_filter(self):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: intent1\nutterances:\n - foo bar baz\n\n---\ntype: intent\nname: intent2\nutterances:\n - foo bar ban')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
parser = DeterministicIntentParser().fit(dataset)
text = 'foo bar ban'
parsing = parser.parse(text, intents=['intent1'])
self.assertEqual(empty_result(text, 1.0), parsing)
def test_should_parse_top_intents(self):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: intent1\nutterances:\n - meeting [time:snips/datetime](today)\n\n---\ntype: intent\nname: intent2\nutterances:\n - meeting tomorrow\n \n---\ntype: intent\nname: intent3\nutterances:\n - "[event_type](call) [time:snips/datetime](at 9pm)"\n\n---\ntype: entity\nname: event_type\nvalues:\n - meeting\n - feedback session')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
parser = DeterministicIntentParser().fit(dataset)
text = 'meeting tomorrow'
results = parser.parse(text, top_n=3)
time_slot = {'entity': 'snips/datetime', 'range': {'end': 16, 'start': 8}, 'slotName': 'time', 'value': 'tomorrow'}
event_slot = {'entity': 'event_type', 'range': {'end': 7, 'start': 0}, 'slotName': 'event_type', 'value': 'meeting'}
weight_intent_1 = (1.0 / 2.0)
weight_intent_2 = 1.0
weight_intent_3 = (1.0 / 3.0)
total_weight = ((weight_intent_1 + weight_intent_2) + weight_intent_3)
proba_intent2 = (weight_intent_2 / total_weight)
proba_intent1 = (weight_intent_1 / total_weight)
proba_intent3 = (weight_intent_3 / total_weight)
expected_results = [extraction_result(intent_classification_result(intent_name='intent2', probability=proba_intent2), slots=[]), extraction_result(intent_classification_result(intent_name='intent1', probability=proba_intent1), slots=[time_slot]), extraction_result(intent_classification_result(intent_name='intent3', probability=proba_intent3), slots=[event_slot, time_slot])]
self.assertEqual(expected_results, results)
@patch('snips_nlu.intent_parser.deterministic_intent_parser.get_stop_words')
def test_should_parse_intent_with_stop_words(self, mock_get_stop_words):
mock_get_stop_words.return_value = {'a', 'hey'}
dataset = self.slots_dataset
config = DeterministicIntentParserConfig(ignore_stop_words=True)
parser = DeterministicIntentParser(config).fit(dataset)
text = 'Hey this is dummy_a query with another dummy_c at 10p.m. or at 12p.m.'
parsing = parser.parse(text)
probability = 1.0
expected_intent = intent_classification_result(intent_name='dummy_intent_1', probability=probability)
self.assertEqual(expected_intent, parsing[RES_INTENT])
def test_should_parse_intent_with_duplicated_slot_names(self):
slots_dataset_stream = io.StringIO('\n---\ntype: intent\nname: math_operation\nslots:\n - name: number\n entity: snips/number\nutterances:\n - what is [number](one) plus [number](one)')
dataset = Dataset.from_yaml_files('en', [slots_dataset_stream]).json
parser = DeterministicIntentParser().fit(dataset)
text = 'what is one plus one'
parsing = parser.parse(text)
probability = 1.0
expected_intent = intent_classification_result(intent_name='math_operation', probability=probability)
expected_slots = [{'entity': 'snips/number', 'range': {'end': 11, 'start': 8}, 'slotName': 'number', 'value': 'one'}, {'entity': 'snips/number', 'range': {'end': 20, 'start': 17}, 'slotName': 'number', 'value': 'one'}]
self.assertDictEqual(expected_intent, parsing[RES_INTENT])
self.assertListEqual(expected_slots, parsing[RES_SLOTS])
def test_should_ignore_completely_ambiguous_utterances(self):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: dummy_intent_1\nutterances:\n - Hello world\n\n---\ntype: intent\nname: dummy_intent_2\nutterances:\n - Hello world')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
parser = DeterministicIntentParser().fit(dataset)
text = 'Hello world'
res = parser.parse(text)
self.assertEqual(empty_result(text, 1.0), res)
def test_should_ignore_very_ambiguous_utterances(self):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: intent_1\nutterances:\n - "[event_type](meeting) tomorrow"\n\n---\ntype: intent\nname: intent_2\nutterances:\n - call [time:snips/datetime](today)\n\n---\ntype: entity\nname: event_type\nvalues:\n - call\n - diner')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
parser = DeterministicIntentParser().fit(dataset)
text = 'call tomorrow'
res = parser.parse(text)
self.assertEqual(empty_result(text, 1.0), res)
def test_should_parse_slightly_ambiguous_utterances(self):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: intent_1\nutterances:\n - call tomorrow\n\n---\ntype: intent\nname: intent_2\nutterances:\n - call [time:snips/datetime](today)')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
parser = DeterministicIntentParser().fit(dataset)
text = 'call tomorrow'
res = parser.parse(text)
expected_intent = intent_classification_result(intent_name='intent_1', probability=(2.0 / 3.0))
expected_result = parsing_result(text, expected_intent, [])
self.assertEqual(expected_result, res)
def test_should_not_parse_when_not_fitted(self):
parser = DeterministicIntentParser()
self.assertFalse(parser.fitted)
with self.assertRaises(NotTrained):
parser.parse('foobar')
def test_should_parse_intent_after_deserialization(self):
dataset = self.slots_dataset
shared = self.get_shared_data(dataset)
parser = DeterministicIntentParser(**shared).fit(dataset)
parser.persist(self.tmp_file_path)
deserialized_parser = DeterministicIntentParser.from_path(self.tmp_file_path, **shared)
text = 'this is a dummy_a query with another dummy_c at 10p.m. or at 12p.m.'
parsing = deserialized_parser.parse(text)
probability = 1.0
expected_intent = intent_classification_result(intent_name='dummy_intent_1', probability=probability)
self.assertEqual(expected_intent, parsing[RES_INTENT])
def test_should_parse_slots(self):
dataset = self.slots_dataset
parser = DeterministicIntentParser().fit(dataset)
texts = [('this is a dummy a query with another dummy_c at 10p.m. or at 12p.m.', [unresolved_slot(match_range=(10, 17), value='dummy a', entity='dummy_entity_1', slot_name='dummy_slot_name'), unresolved_slot(match_range=(37, 44), value='dummy_c', entity='dummy_entity_2', slot_name='dummy_slot_name2'), unresolved_slot(match_range=(45, 54), value='at 10p.m.', entity='snips/datetime', slot_name='startTime'), unresolved_slot(match_range=(58, 67), value='at 12p.m.', entity='snips/datetime', slot_name='startTime')]), ('this, is,, a, dummy a query with another dummy_c at 10pm or at 12p.m.', [unresolved_slot(match_range=(14, 21), value='dummy a', entity='dummy_entity_1', slot_name='dummy_slot_name'), unresolved_slot(match_range=(41, 48), value='dummy_c', entity='dummy_entity_2', slot_name='dummy_slot_name2'), unresolved_slot(match_range=(49, 56), value='at 10pm', entity='snips/datetime', slot_name='startTime'), unresolved_slot(match_range=(60, 69), value='at 12p.m.', entity='snips/datetime', slot_name='startTime')]), ('this is a dummy b', [unresolved_slot(match_range=(10, 17), value='dummy b', entity='dummy_entity_1', slot_name='dummy_slot_name')]), (' this is a dummy b ', [unresolved_slot(match_range=(11, 18), value='dummy b', entity='dummy_entity_1', slot_name='dummy_slot_name')]), (' at 8am there is a dummy a', [unresolved_slot(match_range=(1, 7), value='at 8am', entity='snips/datetime', slot_name='startTime'), unresolved_slot(match_range=(21, 29), value='dummy a', entity='dummy_entity_1', slot_name='dummy_slot_name')])]
for (text, expected_slots) in texts:
parsing = parser.parse(text)
self.assertListEqual(expected_slots, parsing[RES_SLOTS])
def test_should_parse_stop_words_slots(self):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: search\nutterances:\n - search\n - search [search_object](this)\n - search [search_object](a cat)\n \n---\ntype: entity\nname: search_object\nvalues:\n - [this thing, that]\n ')
resources = deepcopy(self.get_resources('en'))
resources[STOP_WORDS] = {'a', 'this', 'that'}
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
parser_config = DeterministicIntentParserConfig(ignore_stop_words=True)
parser = DeterministicIntentParser(config=parser_config, resources=resources)
parser.fit(dataset)
res_1 = parser.parse('search this')
res_2 = parser.parse('search that')
expected_intent = intent_classification_result(intent_name='search', probability=1.0)
expected_slots_1 = [unresolved_slot(match_range=(7, 11), value='this', entity='search_object', slot_name='search_object')]
expected_slots_2 = [unresolved_slot(match_range=(7, 11), value='that', entity='search_object', slot_name='search_object')]
self.assertEqual(expected_intent, res_1[RES_INTENT])
self.assertEqual(expected_intent, res_2[RES_INTENT])
self.assertListEqual(expected_slots_1, res_1[RES_SLOTS])
self.assertListEqual(expected_slots_2, res_2[RES_SLOTS])
def test_should_get_intents(self):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: greeting1\nutterances:\n - Hello John\n\n---\ntype: intent\nname: greeting2\nutterances:\n - Hello [name](John)\n\n---\ntype: intent\nname: greeting3\nutterances:\n - "[greeting](Hello) [name](John)"\n ')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
parser = DeterministicIntentParser().fit(dataset)
top_intents = parser.get_intents('Hello John')
expected_intents = [{RES_INTENT_NAME: 'greeting1', RES_PROBA: (1.0 / ((1.0 + (1.0 / 2.0)) + (1.0 / 3.0)))}, {RES_INTENT_NAME: 'greeting2', RES_PROBA: ((1.0 / 2.0) / ((1.0 + (1.0 / 2.0)) + (1.0 / 3.0)))}, {RES_INTENT_NAME: 'greeting3', RES_PROBA: ((1.0 / 3.0) / ((1.0 + (1.0 / 2.0)) + (1.0 / 3.0)))}, {RES_INTENT_NAME: None, RES_PROBA: 0.0}]
def sorting_key(intent_res):
if (intent_res[RES_INTENT_NAME] is None):
return 'null'
return intent_res[RES_INTENT_NAME]
sorted_expected_intents = sorted(expected_intents, key=sorting_key)
sorted_intents = sorted(top_intents, key=sorting_key)
self.assertEqual(expected_intents[0], top_intents[0])
self.assertListEqual(sorted_expected_intents, sorted_intents)
def test_should_get_slots(self):
slots_dataset_stream = io.StringIO('\n---\ntype: intent\nname: greeting1\nutterances:\n - Hello [name1](John)\n\n---\ntype: intent\nname: greeting2\nutterances:\n - Hello [name2](Thomas)\n \n---\ntype: intent\nname: goodbye\nutterances:\n - Goodbye [name](Eric)')
dataset = Dataset.from_yaml_files('en', [slots_dataset_stream]).json
parser = DeterministicIntentParser().fit(dataset)
slots_greeting1 = parser.get_slots('Hello John', 'greeting1')
slots_greeting2 = parser.get_slots('Hello Thomas', 'greeting2')
slots_goodbye = parser.get_slots('Goodbye Eric', 'greeting1')
self.assertEqual(1, len(slots_greeting1))
self.assertEqual(1, len(slots_greeting2))
self.assertEqual(0, len(slots_goodbye))
self.assertEqual('John', slots_greeting1[0][RES_VALUE])
self.assertEqual('name1', slots_greeting1[0][RES_ENTITY])
self.assertEqual('Thomas', slots_greeting2[0][RES_VALUE])
self.assertEqual('name2', slots_greeting2[0][RES_ENTITY])
def test_should_get_no_slots_with_none_intent(self):
slots_dataset_stream = io.StringIO('\n---\ntype: intent\nname: greeting\nutterances:\n - Hello [name](John)')
dataset = Dataset.from_yaml_files('en', [slots_dataset_stream]).json
parser = DeterministicIntentParser().fit(dataset)
slots = parser.get_slots('Hello John', None)
self.assertListEqual([], slots)
def test_get_slots_should_raise_with_unknown_intent(self):
slots_dataset_stream = io.StringIO('\n---\ntype: intent\nname: greeting1\nutterances:\n - Hello [name1](John)\n\n---\ntype: intent\nname: goodbye\nutterances:\n - Goodbye [name](Eric)')
dataset = Dataset.from_yaml_files('en', [slots_dataset_stream]).json
parser = DeterministicIntentParser().fit(dataset)
with self.assertRaises(IntentNotFoundError):
parser.get_slots('Hello John', 'greeting3')
def test_should_parse_slots_after_deserialization(self):
dataset = self.slots_dataset
shared = self.get_shared_data(dataset)
parser = DeterministicIntentParser(**shared).fit(dataset)
parser.persist(self.tmp_file_path)
deserialized_parser = DeterministicIntentParser.from_path(self.tmp_file_path, **shared)
texts = [('this is a dummy a query with another dummy_c at 10p.m. or at 12p.m.', [unresolved_slot(match_range=(10, 17), value='dummy a', entity='dummy_entity_1', slot_name='dummy_slot_name'), unresolved_slot(match_range=(37, 44), value='dummy_c', entity='dummy_entity_2', slot_name='dummy_slot_name2'), unresolved_slot(match_range=(45, 54), value='at 10p.m.', entity='snips/datetime', slot_name='startTime'), unresolved_slot(match_range=(58, 67), value='at 12p.m.', entity='snips/datetime', slot_name='startTime')]), ('this, is,, a, dummy a query with another dummy_c at 10pm or at 12p.m.', [unresolved_slot(match_range=(14, 21), value='dummy a', entity='dummy_entity_1', slot_name='dummy_slot_name'), unresolved_slot(match_range=(41, 48), value='dummy_c', entity='dummy_entity_2', slot_name='dummy_slot_name2'), unresolved_slot(match_range=(49, 56), value='at 10pm', entity='snips/datetime', slot_name='startTime'), unresolved_slot(match_range=(60, 69), value='at 12p.m.', entity='snips/datetime', slot_name='startTime')]), ('this is a dummy b', [unresolved_slot(match_range=(10, 17), value='dummy b', entity='dummy_entity_1', slot_name='dummy_slot_name')]), (' this is a dummy b ', [unresolved_slot(match_range=(11, 18), value='dummy b', entity='dummy_entity_1', slot_name='dummy_slot_name')])]
for (text, expected_slots) in texts:
parsing = deserialized_parser.parse(text)
self.assertListEqual(expected_slots, parsing[RES_SLOTS])
def test_should_be_serializable_into_bytearray(self):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: MakeTea\nutterances:\n- make me [number_of_cups:snips/number](one) cup of tea\n- i want [number_of_cups] cups of tea please\n- can you prepare [number_of_cups] cup of tea ?\n\n---\ntype: intent\nname: MakeCoffee\nutterances:\n- make me [number_of_cups:snips/number](two) cups of coffee\n- brew [number_of_cups] cups of coffee\n- can you prepare [number_of_cups] cup of coffee')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
shared = self.get_shared_data(dataset)
intent_parser = DeterministicIntentParser(**shared).fit(dataset)
intent_parser_bytes = intent_parser.to_byte_array()
loaded_intent_parser = DeterministicIntentParser.from_byte_array(intent_parser_bytes, **shared)
result = loaded_intent_parser.parse('make me two cups of coffee')
self.assertEqual('MakeCoffee', result[RES_INTENT][RES_INTENT_NAME])
def test_should_parse_naughty_strings(self):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: my_intent\nutterances:\n- this is [slot1:entity1](my first entity)\n- this is [slot2:entity2](second_entity)')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
naughty_strings_path = ((TEST_PATH / 'resources') / 'naughty_strings.txt')
with naughty_strings_path.open(encoding='utf8') as f:
naughty_strings = [line.strip('\n') for line in f.readlines()]
parser = DeterministicIntentParser().fit(dataset)
for s in naughty_strings:
with self.fail_if_exception('Exception raised'):
parser.parse(s)
def test_should_fit_with_naughty_strings_no_tags(self):
naughty_strings_path = ((TEST_PATH / 'resources') / 'naughty_strings.txt')
with naughty_strings_path.open(encoding='utf8') as f:
naughty_strings = [line.strip('\n') for line in f.readlines()]
utterances = [{DATA: [{TEXT: naughty_string}]} for naughty_string in naughty_strings]
naughty_dataset = {'intents': {'naughty_intent': {'utterances': utterances}}, 'entities': dict(), 'language': 'en'}
with self.fail_if_exception('Exception raised'):
DeterministicIntentParser().fit(naughty_dataset)
def test_should_fit_and_parse_with_non_ascii_tags(self):
inputs = [('string%s' % i) for i in range(10)]
utterances = [{DATA: [{TEXT: string, ENTITY: 'non_ascii_entity', SLOT_NAME: 'non_ascii_slot'}]} for string in inputs]
naughty_dataset = {'intents': {'naughty_intent': {'utterances': utterances}}, 'entities': {'non_ascii_entity': {'use_synonyms': False, 'automatically_extensible': True, 'matching_strictness': 1.0, 'data': []}}, 'language': 'en'}
with self.fail_if_exception('Exception raised'):
parser = DeterministicIntentParser().fit(naughty_dataset)
parsing = parser.parse('string0')
expected_slot = {'entity': 'non_ascii_entity', 'range': {'start': 0, 'end': 7}, 'slotName': u'non_ascii_slot', 'value': u'string0'}
intent_name = parsing[RES_INTENT][RES_INTENT_NAME]
self.assertEqual('naughty_intent', intent_name)
self.assertListEqual([expected_slot], parsing[RES_SLOTS])
def test_should_be_serializable_before_fitting(self):
config = DeterministicIntentParserConfig(max_queries=42, max_pattern_length=43, ignore_stop_words=True)
parser = DeterministicIntentParser(config=config)
parser.persist(self.tmp_file_path)
expected_dict = {'config': {'unit_name': 'deterministic_intent_parser', 'max_queries': 42, 'max_pattern_length': 43, 'ignore_stop_words': True}, 'language_code': None, 'group_names_to_slot_names': None, 'patterns': None, 'slot_names_to_entities': None, 'stop_words_whitelist': None}
metadata = {'unit_name': 'deterministic_intent_parser'}
self.assertJsonContent((self.tmp_file_path / 'metadata.json'), metadata)
self.assertJsonContent((self.tmp_file_path / 'intent_parser.json'), expected_dict)
@patch('snips_nlu.intent_parser.deterministic_intent_parser.get_stop_words')
def test_should_be_serializable(self, mock_get_stop_words):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: searchFlight\nslots:\n - name: origin\n entity: city\n - name: destination\n entity: city\nutterances:\n - find me a flight from [origin](Paris) to [destination](New York)\n - I need a flight to [destination](Berlin)\n\n---\ntype: entity\nname: city\nvalues:\n - london\n - [new york, big apple]\n - [paris, city of lights]\n ')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
mock_get_stop_words.return_value = {'a', 'me'}
config = DeterministicIntentParserConfig(max_queries=42, max_pattern_length=100, ignore_stop_words=True)
parser = DeterministicIntentParser(config=config).fit(dataset)
parser.persist(self.tmp_file_path)
expected_dict = {'config': {'unit_name': 'deterministic_intent_parser', 'max_queries': 42, 'max_pattern_length': 100, 'ignore_stop_words': True}, 'language_code': 'en', 'group_names_to_slot_names': {'group0': 'destination', 'group1': 'origin'}, 'patterns': {'searchFlight': ['^\\s*find\\s*flight\\s*from\\s*(?P<group1>%CITY%)\\s*to\\s*(?P<group0>%CITY%)\\s*$', '^\\s*i\\s*need\\s*flight\\s*to\\s*(?P<group0>%CITY%)\\s*$']}, 'slot_names_to_entities': {'searchFlight': {'destination': 'city', 'origin': 'city'}}, 'stop_words_whitelist': dict()}
metadata = {'unit_name': 'deterministic_intent_parser'}
self.assertJsonContent((self.tmp_file_path / 'metadata.json'), metadata)
self.assertJsonContent((self.tmp_file_path / 'intent_parser.json'), expected_dict)
def test_should_be_deserializable_without_stop_words(self):
parser_dict = {'config': {'max_queries': 42, 'max_pattern_length': 43}, 'language_code': 'en', 'group_names_to_slot_names': {'hello_group': 'hello_slot', 'world_group': 'world_slot'}, 'patterns': {'my_intent': ['(?P<hello_group>hello?)', '(?P<world_group>world$)']}, 'slot_names_to_entities': {'my_intent': {'hello_slot': 'hello_entity', 'world_slot': 'world_entity'}}}
self.tmp_file_path.mkdir()
metadata = {'unit_name': 'deterministic_intent_parser'}
self.writeJsonContent((self.tmp_file_path / 'intent_parser.json'), parser_dict)
self.writeJsonContent((self.tmp_file_path / 'metadata.json'), metadata)
parser = DeterministicIntentParser.from_path(self.tmp_file_path)
patterns = {'my_intent': ['(?P<hello_group>hello?)', '(?P<world_group>world$)']}
group_names_to_slot_names = {'hello_group': 'hello_slot', 'world_group': 'world_slot'}
slot_names_to_entities = {'my_intent': {'hello_slot': 'hello_entity', 'world_slot': 'world_entity'}}
config = DeterministicIntentParserConfig(max_queries=42, max_pattern_length=43)
expected_parser = DeterministicIntentParser(config=config)
expected_parser.language = LANGUAGE_EN
expected_parser.group_names_to_slot_names = group_names_to_slot_names
expected_parser.slot_names_to_entities = slot_names_to_entities
expected_parser.patterns = patterns
expected_parser._stop_words_whitelist = dict()
self.assertEqual(parser.to_dict(), expected_parser.to_dict())
def test_should_be_deserializable_with_stop_words(self):
parser_dict = {'config': {'max_queries': 42, 'max_pattern_length': 43}, 'language_code': 'en', 'group_names_to_slot_names': {'hello_group': 'hello_slot', 'world_group': 'world_slot'}, 'patterns': {'my_intent': ['(?P<hello_group>hello?)', '(?P<world_group>world$)']}, 'slot_names_to_entities': {'my_intent': {'hello_slot': 'hello_entity', 'world_slot': 'world_entity'}}, 'stop_words_whitelist': {'my_intent': ['this', 'that']}}
self.tmp_file_path.mkdir()
metadata = {'unit_name': 'deterministic_intent_parser'}
self.writeJsonContent((self.tmp_file_path / 'intent_parser.json'), parser_dict)
self.writeJsonContent((self.tmp_file_path / 'metadata.json'), metadata)
parser = DeterministicIntentParser.from_path(self.tmp_file_path)
patterns = {'my_intent': ['(?P<hello_group>hello?)', '(?P<world_group>world$)']}
group_names_to_slot_names = {'hello_group': 'hello_slot', 'world_group': 'world_slot'}
slot_names_to_entities = {'my_intent': {'hello_slot': 'hello_entity', 'world_slot': 'world_entity'}}
stop_words_whitelist = {'my_intent': {'this', 'that'}}
config = DeterministicIntentParserConfig(max_queries=42, max_pattern_length=43)
expected_parser = DeterministicIntentParser(config=config)
expected_parser.language = LANGUAGE_EN
expected_parser.group_names_to_slot_names = group_names_to_slot_names
expected_parser.slot_names_to_entities = slot_names_to_entities
expected_parser.patterns = patterns
expected_parser._stop_words_whitelist = stop_words_whitelist
self.assertEqual(parser.to_dict(), expected_parser.to_dict())
def test_should_be_deserializable_before_fitting_without_whitelist(self):
parser_dict = {'config': {'max_queries': 42, 'max_pattern_length': 43}, 'language_code': None, 'group_names_to_slot_names': None, 'patterns': None, 'slot_names_to_entities': None}
self.tmp_file_path.mkdir()
metadata = {'unit_name': 'deterministic_intent_parser'}
self.writeJsonContent((self.tmp_file_path / 'intent_parser.json'), parser_dict)
self.writeJsonContent((self.tmp_file_path / 'metadata.json'), metadata)
parser = DeterministicIntentParser.from_path(self.tmp_file_path)
config = DeterministicIntentParserConfig(max_queries=42, max_pattern_length=43)
expected_parser = DeterministicIntentParser(config=config)
self.assertEqual(parser.to_dict(), expected_parser.to_dict())
def test_should_be_deserializable_before_fitting_with_whitelist(self):
parser_dict = {'config': {'max_queries': 42, 'max_pattern_length': 43}, 'language_code': None, 'group_names_to_slot_names': None, 'patterns': None, 'slot_names_to_entities': None, 'stop_words_whitelist': None}
self.tmp_file_path.mkdir()
metadata = {'unit_name': 'deterministic_intent_parser'}
self.writeJsonContent((self.tmp_file_path / 'intent_parser.json'), parser_dict)
self.writeJsonContent((self.tmp_file_path / 'metadata.json'), metadata)
parser = DeterministicIntentParser.from_path(self.tmp_file_path)
config = DeterministicIntentParserConfig(max_queries=42, max_pattern_length=43)
expected_parser = DeterministicIntentParser(config=config)
self.assertEqual(parser.to_dict(), expected_parser.to_dict())
def test_should_deduplicate_overlapping_slots(self):
language = LANGUAGE_EN
slots = [unresolved_slot([0, 3], 'kid', 'e', 's1'), unresolved_slot([4, 8], 'loco', 'e1', 's2'), unresolved_slot([0, 8], 'kid loco', 'e1', 's3'), unresolved_slot([9, 13], 'song', 'e2', 's4')]
deduplicated_slots = _deduplicate_overlapping_slots(slots, language)
expected_slots = [unresolved_slot([0, 8], 'kid loco', 'e1', 's3'), unresolved_slot([9, 13], 'song', 'e2', 's4')]
self.assertSequenceEqual(deduplicated_slots, expected_slots)
def test_should_limit_nb_queries(self):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: my_first_intent\nutterances:\n- this is [slot1:entity1](my first entity)\n- this is [slot2:entity2](my second entity)\n- this is [slot3:entity3](my third entity)\n\n---\ntype: intent\nname: my_second_intent\nutterances:\n- this is [slot4:entity4](my fourth entity)')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
config = DeterministicIntentParserConfig(max_queries=2, max_pattern_length=1000)
parser = DeterministicIntentParser(config=config).fit(dataset)
self.assertEqual(len(parser.regexes_per_intent['my_first_intent']), 2)
self.assertEqual(len(parser.regexes_per_intent['my_second_intent']), 1)
def test_should_limit_patterns_length(self):
dataset_stream = io.StringIO("\n---\ntype: intent\nname: my_first_intent\nutterances:\n- how are you\n- hello how are you?\n- what's up\n\n---\ntype: intent\nname: my_second_intent\nutterances:\n- what is the weather today ?\n- does it rain\n- will it rain tomorrow")
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
config = DeterministicIntentParserConfig(max_queries=1000, max_pattern_length=25, ignore_stop_words=False)
parser = DeterministicIntentParser(config=config).fit(dataset)
self.assertEqual(2, len(parser.regexes_per_intent['my_first_intent']))
self.assertEqual(1, len(parser.regexes_per_intent['my_second_intent']))
def test_should_get_range_shift(self):
ranges_mapping = {(2, 5): {START: 2, END: 4}, (8, 9): {START: 7, END: 11}}
self.assertEqual((- 1), _get_range_shift((6, 7), ranges_mapping))
self.assertEqual(2, _get_range_shift((12, 13), ranges_mapping))
def test_training_should_be_reproducible(self):
random_state = 42
dataset_stream = io.StringIO('\n---\ntype: intent\nname: MakeTea\nutterances:\n- make me a [beverage_temperature:Temperature](hot) cup of tea\n- make me [number_of_cups:snips/number](five) tea cups\n\n---\ntype: intent\nname: MakeCoffee\nutterances:\n- make me [number_of_cups:snips/number](one) cup of coffee please\n- brew [number_of_cups] cups of coffee')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
parser1 = DeterministicIntentParser(random_state=random_state)
parser1.fit(dataset)
parser2 = DeterministicIntentParser(random_state=random_state)
parser2.fit(dataset)
with temp_dir() as tmp_dir:
dir_parser1 = (tmp_dir / 'parser1')
dir_parser2 = (tmp_dir / 'parser2')
parser1.persist(dir_parser1)
parser2.persist(dir_parser2)
hash1 = dirhash(str(dir_parser1), 'sha256')
hash2 = dirhash(str(dir_parser2), 'sha256')
self.assertEqual(hash1, hash2) |
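# The expected probabilities in the tests above follow an apparent
# 1 / (1 + number_of_slots) weighting per matching intent, normalized over
# all matches (see test_should_parse_top_intents); the arithmetic in brief:
weights = {'intent1': 1.0 / 2.0,   # one slot
           'intent2': 1.0,         # no slots
           'intent3': 1.0 / 3.0}   # two slots
total = sum(weights.values())      # 11/6
probas = {name: (w / total) for (name, w) in weights.items()}
assert round(sum(probas.values()), 12) == 1.0 |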
def directed_RNN(layer, recur_size, seq_lengths, bidirectional=True, recur_cell=LSTM, **kwargs):
bilin = kwargs.pop('bilin', False)
if bidirectional:
return BiRNN(layer, recur_size, seq_lengths, recur_cell=recur_cell, bilin=bilin, **kwargs)
else:
return UniRNN(layer, recur_size, seq_lengths, recur_cell=recur_cell, **kwargs) |
def get_nonlinearity_for_embedding():
if (args.nonlinearity_for_embedding == 'relu'):
return tf.nn.relu
if (args.nonlinearity_for_embedding == 'tanh'):
return tf.nn.tanh
raise ValueError(('unsupported nonlinearity_for_embedding: %s' % args.nonlinearity_for_embedding))
class SimilarityClassTypes(UniqueRepresentation, Parent):
def __classcall_private__(cls, n, min=None):
if (min is None):
min = PrimarySimilarityClassType(1, Partition([1]))
if isinstance(min, list):
min = PrimarySimilarityClassType(min[0], min[1])
if (not isinstance(min, PrimarySimilarityClassType)):
raise ValueError('min must be a PrimarySimilarityClassType')
return super().__classcall__(cls, n, min)
def __init__(self, n, min):
Parent.__init__(self, category=FiniteEnumeratedSets())
self._n = n
self._min = min
def _element_constructor_(self, tau):
ret = []
for l in tau:
if isinstance(l, PrimarySimilarityClassType):
ret.append(l)
else:
ret.append(PrimarySimilarityClassType(*l))
return self.element_class(self, ret)
Element = SimilarityClassType
def __iter__(self):
n = self._n
min = self._min
if (n == 0):
(yield self.element_class(self, []))
if (min.size() > n):
return
else:
for PT in chain(PrimarySimilarityClassTypes(min.size(), min=min), *[PrimarySimilarityClassTypes(k) for k in range((min.size() + 1), (n + 1))]):
if (PT.size() == n):
(yield self.element_class(self, [PT]))
else:
for smaller_type in SimilarityClassTypes((n - PT.size()), min=PT):
(yield self.element_class(self, ([PT] + list(smaller_type))))
def size(self):
return self._n
def sum(self, stat, sumover='matrices', invertible=False, q=None):
if (sumover == 'matrices'):
return sum([(tau.statistic(stat, q=q) * tau.number_of_matrices(invertible=invertible, q=q)) for tau in self])
elif (sumover == 'classes'):
return sum([(tau.statistic(stat, q=q) * tau.number_of_classes(invertible=invertible, q=q)) for tau in self])
elif (sumover == 'types'):
return sum([tau.statistic(stat, invertible=invertible, q=q) for tau in self])
else:
raise ValueError(('invalid parameter %s' % sumover)) |
def test_find_dependencies_with_zero_round(tensor_key):
tensor_codec = TensorCodec(NoCompressionPipeline())
(tensor_name, origin, round_number, report, tags) = tensor_key
tensor_key = TensorKey(tensor_name, origin, round_number, report, ('model',))
tensor_key_dependencies = tensor_codec.find_dependencies(tensor_key, True)
assert (len(tensor_key_dependencies) == 0) |
def start_client(args):
init_time_start = time.time()
time.sleep(WAIT_TIME)
args.gpu = [(- 1)]
args.mix_cpu_gpu = False
args.async_update = False
args.rel_part = False
args.strict_rel_part = False
args.soft_rel_part = False
args.valid = False
total_machine = get_machine_count(args.ip_config)
server_namebook = dgl.contrib.read_ip_config(filename=args.ip_config)
machine_id = get_local_machine_id(server_namebook)
(dataset, entity_partition_book, local2global) = get_partition_dataset(args.data_path, args.dataset, machine_id)
n_entities = dataset.n_entities
n_relations = dataset.n_relations
print(('Partition %d n_entities: %d' % (machine_id, n_entities)))
print(('Partition %d n_relations: %d' % (machine_id, n_relations)))
entity_partition_book = F.tensor(entity_partition_book)
relation_partition_book = get_long_tail_partition(dataset.n_relations, total_machine)
relation_partition_book = F.tensor(relation_partition_book)
local2global = F.tensor(local2global)
relation_partition_book.share_memory_()
entity_partition_book.share_memory_()
local2global.share_memory_()
train_data = TrainDataset(dataset, args, ranks=args.num_client)
if (args.neg_sample_size_eval < 0):
args.neg_sample_size_eval = dataset.n_entities
args.batch_size = get_compatible_batch_size(args.batch_size, args.neg_sample_size)
args.batch_size_eval = get_compatible_batch_size(args.batch_size_eval, args.neg_sample_size_eval)
args.num_workers = 8
train_samplers = []
for i in range(args.num_client):
train_sampler_head = train_data.create_sampler(args.batch_size, args.neg_sample_size, args.neg_sample_size, mode='head', num_workers=args.num_workers, shuffle=True, exclude_positive=False, rank=i)
train_sampler_tail = train_data.create_sampler(args.batch_size, args.neg_sample_size, args.neg_sample_size, mode='tail', num_workers=args.num_workers, shuffle=True, exclude_positive=False, rank=i)
train_samplers.append(NewBidirectionalOneShotIterator(train_sampler_head, train_sampler_tail, args.neg_sample_size, args.neg_sample_size, True, n_entities))
dataset = None
model = load_model(args, n_entities, n_relations)
model.share_memory()
print('Total initialize time {:.3f} seconds'.format((time.time() - init_time_start)))
rel_parts = (train_data.rel_parts if (args.strict_rel_part or args.soft_rel_part) else None)
cross_rels = (train_data.cross_rels if args.soft_rel_part else None)
procs = []
for i in range(args.num_client):
proc = mp.Process(target=dist_train_test, args=(args, model, train_samplers[i], entity_partition_book, relation_partition_book, local2global, i, rel_parts, cross_rels))
procs.append(proc)
proc.start()
for proc in procs:
proc.join() |
def dump_conv2d(name='Conv2d_1a_3x3'):
conv_operation = sess.graph.get_operation_by_name((('InceptionResnetV2/' + name) + '/Conv2D'))
weights_tensor = sess.graph.get_tensor_by_name((('InceptionResnetV2/' + name) + '/weights:0'))
weights = weights_tensor.eval()
padding = make_padding(conv_operation.get_attr('padding'), weights_tensor.get_shape())
strides = conv_operation.get_attr('strides')
conv_out = sess.graph.get_operation_by_name((('InceptionResnetV2/' + name) + '/Conv2D')).outputs[0].eval()
beta = sess.graph.get_tensor_by_name((('InceptionResnetV2/' + name) + '/BatchNorm/beta:0')).eval()
mean = sess.graph.get_tensor_by_name((('InceptionResnetV2/' + name) + '/BatchNorm/moving_mean:0')).eval()
var = sess.graph.get_tensor_by_name((('InceptionResnetV2/' + name) + '/BatchNorm/moving_variance:0')).eval()
relu_out = sess.graph.get_operation_by_name((('InceptionResnetV2/' + name) + '/Relu')).outputs[0].eval()
os.system(('mkdir -p dump/InceptionResnetV2/' + name))
h5f = h5py.File((('dump/InceptionResnetV2/' + name) + '.h5'), 'w')
h5f.create_dataset('weights', data=weights)
h5f.create_dataset('strides', data=strides)
h5f.create_dataset('padding', data=padding)
h5f.create_dataset('conv_out', data=conv_out)
h5f.create_dataset('beta', data=beta)
h5f.create_dataset('mean', data=mean)
h5f.create_dataset('var', data=var)
h5f.create_dataset('relu_out', data=relu_out)
h5f.close() |
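# Reading one dump back (path mirrors dump_conv2d's naming; a hedged sketch):
import h5py

with h5py.File('dump/InceptionResnetV2/Conv2d_1a_3x3.h5', 'r') as h5f:
    weights = h5f['weights'][:]    # HWIO kernel as exported by .eval()
    beta, mean, var = h5f['beta'][:], h5f['mean'][:], h5f['var'][:]
    print(weights.shape) |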
@pytest.mark.experimental
@pytest.mark.parametrize('batch_size', BATCH_SIZES)
def test_critic_forward(ddpg_critic_param, batch_size):
(critic, param) = ddpg_critic_param
state_dim = param['state_repr_dim']
action_dim = param['action_emb_dim']
state = torch.rand((batch_size, state_dim))
action = torch.rand((batch_size, action_dim))
out = critic(state, action)
assert (out.shape == (batch_size, 1)), 'Wrong output shape of critic forward' |
def desolve_rk4(de, dvar, ics=None, ivar=None, end_points=None, step=0.1, output='list', **kwds):
if (ics is None):
raise ValueError('No initial conditions, specify with ics=[x0,y0].')
if (output not in ['list', 'plot', 'slope_field']):
raise ValueError("Option output should be 'list', 'plot' or 'slope_field'.")
if (ivar is None):
ivars = de.variables()
ivars = [t for t in ivars if (t != dvar)]
if (len(ivars) != 1):
raise ValueError('Unable to determine independent variable, please specify.')
ivar = ivars[0]
step = abs(step)
def desolve_rk4_inner(de, dvar):
de0 = de._maxima_()
maxima("load('dynamics)")
(lower_bound, upper_bound) = desolve_rk4_determine_bounds(ics, end_points)
(sol_1, sol_2) = ([], [])
if (lower_bound < ics[0]):
cmd = ('rk(%s,%s,%s,[%s,%s,%s,%s]) ' % (de0.str(), ('_SAGE_VAR_' + str(dvar)), str(ics[1]), ('_SAGE_VAR_' + str(ivar)), str(ics[0]), lower_bound, (- step)))
sol_1 = maxima(cmd).sage()
sol_1.pop(0)
sol_1.reverse()
if (upper_bound > ics[0]):
cmd = ('rk(%s,%s,%s,[%s,%s,%s,%s]) ' % (de0.str(), ('_SAGE_VAR_' + str(dvar)), str(ics[1]), ('_SAGE_VAR_' + str(ivar)), str(ics[0]), upper_bound, step))
sol_2 = maxima(cmd).sage()
sol_2.pop(0)
sol = sol_1
sol.extend([[ics[0], ics[1]]])
sol.extend(sol_2)
if (output == 'list'):
return sol
from sage.plot.plot import list_plot
from sage.plot.plot_field import plot_slope_field
R = list_plot(sol, plotjoined=True, **kwds)
if (output == 'plot'):
return R
if (output == 'slope_field'):
XMIN = sol[0][0]
YMIN = sol[0][1]
XMAX = XMIN
YMAX = YMIN
for (s, t) in sol:
if (s > XMAX):
XMAX = s
if (s < XMIN):
XMIN = s
if (t > YMAX):
YMAX = t
if (t < YMIN):
YMIN = t
return (plot_slope_field(de, (ivar, XMIN, XMAX), (dvar, YMIN, YMAX)) + R)
if (not (isinstance(dvar, Expression) and dvar.is_symbol())):
from sage.symbolic.ring import SR
from sage.calculus.all import diff
from sage.symbolic.relation import solve
if (isinstance(de, Expression) and de.is_relational()):
de = (de.lhs() - de.rhs())
de = solve(de, diff(dvar, ivar), solution_dict=True)
if (len(de) != 1):
raise NotImplementedError('Sorry, cannot find explicit formula for right-hand side of the ODE.')
with SR.temp_var() as dummy_dvar:
return desolve_rk4_inner(de[0][diff(dvar, ivar)].subs({dvar: dummy_dvar}), dummy_dvar)
else:
return desolve_rk4_inner(de, dvar) |
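# Usage sketch (hedged, inferred from the signature above; `x`, `y` symbolic):
#   sage: x, y = var('x y')
#   sage: desolve_rk4(x*y, y, ics=[0, 1], ivar=x, end_points=1, step=0.5)
# With output='list' this returns [ivar, dvar] pairs; output='plot' joins them
# into a line plot, and output='slope_field' overlays that plot on the ODE's
# slope field over the solution's bounding box.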
def _propagate_node(dfg_state, node):
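    # Propagates memlets across a scope node. Connectors come in
    # 'IN_<name>'/'OUT_<name>' pairs, so src_conn[4:] strips the 'OUT_' prefix
    # and dst_conn[3:] strips 'IN_', matching each external edge with the
    # internal edge that shares its connector name.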
if isinstance(node, nodes.EntryNode):
internal_edges = [e for e in dfg_state.out_edges(node) if (e.src_conn and e.src_conn.startswith('OUT_'))]
external_edges = [e for e in dfg_state.in_edges(node) if (e.dst_conn and e.dst_conn.startswith('IN_'))]
geticonn = (lambda e: e.src_conn[4:])
geteconn = (lambda e: e.dst_conn[3:])
use_dst = False
else:
internal_edges = [e for e in dfg_state.in_edges(node) if (e.dst_conn and e.dst_conn.startswith('IN_'))]
external_edges = [e for e in dfg_state.out_edges(node) if (e.src_conn and e.src_conn.startswith('OUT_'))]
geticonn = (lambda e: e.dst_conn[3:])
geteconn = (lambda e: e.src_conn[4:])
use_dst = True
for edge in external_edges:
if edge.data.is_empty():
new_memlet = Memlet()
else:
internal_edge = next((e for e in internal_edges if (geticonn(e) == geteconn(edge))))
aligned_memlet = align_memlet(dfg_state, internal_edge, dst=use_dst)
new_memlet = propagate_memlet(dfg_state, aligned_memlet, node, True, connector=geteconn(edge))
edge.data = new_memlet |
def test_list_files(workspace: Workspace, test_directory: Path, agent: Agent):
file_a = workspace.get_path('file_a.txt')
file_b = workspace.get_path('file_b.txt')
with open(file_a, 'w') as f:
f.write('This is file A.')
with open(file_b, 'w') as f:
f.write('This is file B.')
if (not os.path.exists(test_directory)):
os.makedirs(test_directory)
with open(os.path.join(test_directory, file_a.name), 'w') as f:
f.write('This is file A in the subdirectory.')
files = file_ops.list_files(str(workspace.root), agent=agent)
assert (file_a.name in files)
assert (file_b.name in files)
assert (os.path.join(Path(test_directory).name, file_a.name) in files)
os.remove(file_a)
os.remove(file_b)
os.remove(os.path.join(test_directory, file_a.name))
os.rmdir(test_directory)
non_existent_file = 'non_existent_file.txt'
files = file_ops.list_files('', agent=agent)
assert (non_existent_file not in files) |
class ImageLogger(Callback):
def __init__(self, batch_frequency, max_images, clamp=True, increase_log_steps=True, rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False, log_images_kwargs=None):
super().__init__()
self.rescale = rescale
self.batch_freq = batch_frequency
self.max_images = max_images
self.logger_log_images = {pl.loggers.TestTubeLogger: self._testtube}
self.log_steps = [(2 ** n) for n in range((int(np.log2(self.batch_freq)) + 1))]
if (not increase_log_steps):
self.log_steps = [self.batch_freq]
self.clamp = clamp
self.disabled = disabled
self.log_on_batch_idx = log_on_batch_idx
self.log_images_kwargs = (log_images_kwargs if log_images_kwargs else {})
self.log_first_step = log_first_step
@rank_zero_only
def _testtube(self, pl_module, images, batch_idx, split):
for k in images:
grid = torchvision.utils.make_grid(images[k])
grid = ((grid + 1.0) / 2.0)
tag = f'{split}/{k}'
pl_module.logger.experiment.add_image(tag, grid, global_step=pl_module.global_step)
@rank_zero_only
def log_local(self, save_dir, split, images, global_step, current_epoch, batch_idx):
root = os.path.join(save_dir, 'images', split)
for k in images:
grid = torchvision.utils.make_grid(images[k], nrow=4)
if self.rescale:
grid = ((grid + 1.0) / 2.0)
grid = grid.transpose(0, 1).transpose(1, 2).squeeze((- 1))
grid = grid.numpy()
grid = (grid * 255).astype(np.uint8)
filename = '{}_gs-{:06}_e-{:06}_b-{:06}.png'.format(k, global_step, current_epoch, batch_idx)
path = os.path.join(root, filename)
os.makedirs(os.path.split(path)[0], exist_ok=True)
Image.fromarray(grid).save(path)
def log_img(self, pl_module, batch, batch_idx, split='train'):
check_idx = (batch_idx if self.log_on_batch_idx else pl_module.global_step)
if (self.check_frequency(check_idx) and hasattr(pl_module, 'log_images') and callable(pl_module.log_images) and (self.max_images > 0)):
logger = type(pl_module.logger)
is_train = pl_module.training
if is_train:
pl_module.eval()
with torch.no_grad():
images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)
for k in images:
N = min(images[k].shape[0], self.max_images)
images[k] = images[k][:N]
if isinstance(images[k], torch.Tensor):
images[k] = images[k].detach().cpu()
if self.clamp:
images[k] = torch.clamp(images[k], (- 1.0), 1.0)
self.log_local(pl_module.logger.save_dir, split, images, pl_module.global_step, pl_module.current_epoch, batch_idx)
logger_log_images = self.logger_log_images.get(logger, (lambda *args, **kwargs: None))
logger_log_images(pl_module, images, pl_module.global_step, split)
if is_train:
pl_module.train()
def check_frequency(self, check_idx):
if ((((check_idx % self.batch_freq) == 0) or (check_idx in self.log_steps)) and ((check_idx > 0) or self.log_first_step)):
try:
self.log_steps.pop(0)
except IndexError as e:
print(e)
pass
return True
return False
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
if ((not self.disabled) and ((pl_module.global_step > 0) or self.log_first_step)):
self.log_img(pl_module, batch, batch_idx, split='train')
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
if ((not self.disabled) and (pl_module.global_step > 0)):
self.log_img(pl_module, batch, batch_idx, split='val')
if hasattr(pl_module, 'calibrate_grad_norm'):
if ((pl_module.calibrate_grad_norm and ((batch_idx % 25) == 0)) and (batch_idx > 0)):
self.log_gradients(trainer, pl_module, batch_idx=batch_idx) |
class TypedArrayBuilder():
def __init__(self, form):
self.form = form
self.vm = awkward.forth.ForthMachine32('\n input data\n output part0-node0-offsets int64\n output part0-node2-data float64\n output part0-node3-offsets int64\n output part0-node4-data int64\n\n : node4-int64\n {int64} = if\n 0 data seek\n data q-> part0-node4-data\n else\n halt\n then\n ;\n\n : node3-list\n {begin_list} <> if\n halt\n then\n\n 0\n begin\n pause ( always pause before each list item )\n dup {end_list} = if\n drop\n part0-node3-offsets +<- stack\n exit\n else\n node4-int64\n 1+\n then\n again\n ;\n\n : node2-float64\n {float64} = if\n 0 data seek\n data d-> part0-node2-data\n else\n halt\n then\n ;\n\n : node1-record\n node2-float64 pause ( pause after each field item except the last )\n node3-list\n ;\n\n : node0-list\n {begin_list} <> if\n halt\n then\n\n 0\n begin\n pause ( always pause before each list item )\n dup {end_list} = if\n drop\n part0-node0-offsets +<- stack\n exit\n else\n node1-record\n 1+\n then\n again\n ;\n\n 0 part0-node0-offsets <- stack\n 0 part0-node3-offsets <- stack\n\n 0\n begin\n pause ( always pause before each outermost array item )\n node0-list\n 1+\n again\n '.format(int64=0, float64=1, begin_list=2, end_list=3))
self.data = np.empty(8, np.uint8)
self.vm.run({'data': self.data})
def int64(self, x):
self.data.view(np.int64)[0] = x
self.vm.stack_push(0)
self.vm.resume()
def float64(self, x):
self.data.view(np.float64)[0] = x
self.vm.stack_push(1)
self.vm.resume()
def begin_list(self):
self.vm.stack_push(2)
self.vm.resume()
def end_list(self):
self.vm.stack_push(3)
self.vm.resume()
def snapshot(self):
return ak.from_buffers(self.form, self.vm.stack[0], self.vm.outputs)
def debug_step(self):
print('stack: ', self.vm.stack)
for (k, v) in self.vm.outputs.items():
    print((k + ':'), np.asarray(v))
print('array:', self.snapshot())
print() |
class TestRL2Worker(TfGraphTestCase):
def test_rl2_worker(self):
env = GarageEnv(DummyBoxEnv(obs_dim=(1,)))
policy = DummyPolicy(env_spec=env.spec)
worker = RL2Worker(seed=1, max_path_length=100, worker_number=1, n_paths_per_trial=5)
worker.update_agent(policy)
worker.update_env(env)
rollouts = worker.rollout()
assert (rollouts.rewards.shape[0] == 500) |
def tensors(n, min_dim=1, max_dim=4, dtype=np.float32, elements=None, **kwargs):
dims_ = st.lists(dims(**kwargs), min_size=min_dim, max_size=max_dim)
return dims_.flatmap((lambda dims: st.lists(arrays(dims, dtype, elements), min_size=n, max_size=n))) |
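# Usage sketch (hedged; relies on the `dims` and `arrays` strategies defined
# alongside this helper). Draws `n` arrays that all share one random shape:
#   from hypothesis import given
#   @given(inputs=tensors(2, min_dim=2, max_dim=3))
#   def test_elementwise_add(inputs):
#       a, b = inputs
#       assert (a + b).shape == a.shape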
def main():
config = DavisConfig()
config.display()
seq_root = '../prepare/DAVIS_2017/JPEGImages/480p/carousel/'
dataLoader = DataLoader('../prepare/mask_rcnn_result/carousel.json')
obj_id = 0
mht = MHT(config, dataLoader, 'carousel')
for i in range(len(dataLoader.content)):
content_roi = dataLoader.content[i]['rois']
plot_detections(content_roi, i, obj_id, seq_root)
print(len(dataLoader.content))
print(dataLoader.content[0]['rois'].shape)
roi_numbers = mht.iterTracking()
for obj_id in range(len(mht.trackTrees)):
for trackId in range(len(mht.trackTrees[obj_id])):
plot_tracks(mht.trackTrees[obj_id][trackId], trackId, obj_id, seq_root)
data = dataLoader.content
targetPath = 'final_results'
print(roi_numbers)
for i in range(1, len(roi_numbers[0])):
img_path = os.path.join(seq_root, ('%05d.jpg' % i))
img = cv2.imread(img_path)
data_t = data[i]['rois']
for obj_id in range(len(roi_numbers)):
if (roi_numbers[obj_id][i] == (- 1)):
continue
roi = data_t[roi_numbers[obj_id][i]]
cv2.rectangle(img, pt1=(roi[1], roi[0]), pt2=(roi[3], roi[2]), color=(0, 255, 0), thickness=2)
img = cv2.putText(img, str(obj_id), (roi[1], roi[0]), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255))
if (not os.path.exists(targetPath)):
os.makedirs(targetPath)
cv2.imwrite(os.path.join(targetPath, ('%05d.jpg' % i)), img) |
def get_cookie_header(jar, request):
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get('Cookie') |
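# Usage sketch (hedged): MockRequest adapts a prepared request to the
# http.cookiejar interface, letting a stdlib CookieJar compute the Cookie
# header without any network I/O:
#   from http.cookiejar import CookieJar
#   header = get_cookie_header(CookieJar(), prepared_request)  # 'k=v' or None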
@OPTIMIZER_BUILDERS.register_module()
class LayerDecayOptimizerConstructor(DefaultOptimizerConstructor):
def _validate_cfg(self):
if ('custom_keys' in self.paramwise_cfg):
if (not isinstance(self.paramwise_cfg['custom_keys'], dict)):
raise TypeError(f"If specified, custom_keys must be a dict, but got {type(self.paramwise_cfg['custom_keys'])}")
if (self.base_wd is None):
for key in self.paramwise_cfg['custom_keys']:
if ('decay_mult' in self.paramwise_cfg['custom_keys'][key]):
raise ValueError('base_wd should not be None')
if (('bias_decay_mult' in self.paramwise_cfg) or ('norm_decay_mult' in self.paramwise_cfg) or ('dwconv_decay_mult' in self.paramwise_cfg)):
if (self.base_wd is None):
raise ValueError('base_wd should not be None')
def add_params(self, params, module, prefix='', is_dcn_module=None):
parameter_groups = {}
print(self.paramwise_cfg)
if isinstance(self.paramwise_cfg, (list, tuple, set)):
assert (len(self.paramwise_cfg) == 2)
(num_layers, layer_decay_rate) = self.paramwise_cfg
else:
num_layers = self.paramwise_cfg.get('num_layers')
layer_decay_rate = self.paramwise_cfg.get('layer_decay_rate')
num_layers = (num_layers + 2)
print(('Build LayerDecayOptimizerConstructor %f - %d' % (layer_decay_rate, num_layers)))
weight_decay = self.base_wd
for (name, param) in module.named_parameters():
if (not param.requires_grad):
continue
if ((len(param.shape) == 1) or name.endswith('.bias') or name.endswith('_token') or name.endswith('pos_embed')):
group_name = 'no_decay'
this_weight_decay = 0.0
else:
group_name = 'decay'
this_weight_decay = weight_decay
layer_id = get_num_layer_for_vit(name, num_layers)
group_name = ('layer_%d_%s' % (layer_id, group_name))
if (group_name not in parameter_groups):
scale = (layer_decay_rate ** ((num_layers - layer_id) - 1))
parameter_groups[group_name] = {'weight_decay': this_weight_decay, 'params': [], 'param_names': [], 'lr_scale': scale, 'group_name': group_name, 'lr': (scale * self.base_lr)}
parameter_groups[group_name]['params'].append(param)
parameter_groups[group_name]['param_names'].append(name)
(rank, _) = get_dist_info()
if (rank == 0):
to_display = {}
for key in parameter_groups:
to_display[key] = {'param_names': parameter_groups[key]['param_names'], 'lr_scale': parameter_groups[key]['lr_scale'], 'lr': parameter_groups[key]['lr'], 'weight_decay': parameter_groups[key]['weight_decay']}
print(('Param groups = %s' % json.dumps(to_display, indent=2)))
params.extend(parameter_groups.values()) |
def gelu(input_tensor):
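    # Exact GELU: x * Phi(x), with Phi the standard normal CDF written via erf
    # as 0.5 * (1 + erf(x / sqrt(2))), i.e. the non-approximated (no-tanh) form.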
cdf = (0.5 * (1.0 + tf.erf((input_tensor / tf.sqrt(2.0)))))
return (input_tensor * cdf) |
@pytest.fixture(scope='package')
def cfg_train_global() -> DictConfig:
with initialize(version_base='1.2', config_path='../configs'):
cfg = compose(config_name='train.yaml', return_hydra_config=True, overrides=[])
with open_dict(cfg):
cfg.paths.root_dir = str(pyrootutils.find_root())
cfg.trainer.max_epochs = 1
cfg.trainer.limit_train_batches = 0.01
cfg.trainer.limit_val_batches = 0.1
cfg.trainer.limit_test_batches = 0.1
cfg.trainer.accelerator = 'cpu'
cfg.trainer.devices = 1
cfg.datamodule.num_workers = 0
cfg.datamodule.pin_memory = False
cfg.extras.print_config = False
cfg.extras.enforce_tags = False
cfg.logger = None
return cfg |
class EdgeMatcher(edge_matcher.BaseEdgeMatcher):
def __init__(self, source_matcher: BaseNode, target_matcher: BaseNode):
super().__init__(source_matcher, target_matcher)
def apply(self, input_object: Any) -> bool:
if (isinstance(input_object, tuple) and (len(input_object) >= 2)):
return (self.source_matcher.apply(input_object[0]) and self.target_matcher.apply(input_object[1]))
else:
return False |
def writeInfoToFile(log_file, info):
with open(log_file, 'w', encoding='utf-8') as fh:
    fh.write((info + '\r\n'))
    fh.flush() |
def register_Ns3LteSpectrumPhy_methods(root_module, cls):
cls.add_constructor([])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('DoDispose', 'void', [], is_virtual=True)
cls.add_method('SetChannel', 'void', [param('ns3::Ptr< ns3::SpectrumChannel >', 'c')], is_virtual=True)
cls.add_method('SetMobility', 'void', [param('ns3::Ptr< ns3::MobilityModel >', 'm')], is_virtual=True)
cls.add_method('SetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'd')], is_virtual=True)
cls.add_method('GetMobility', 'ns3::Ptr< ns3::MobilityModel >', [], is_virtual=True)
cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True, is_virtual=True)
cls.add_method('GetRxSpectrumModel', 'ns3::Ptr< ns3::SpectrumModel const >', [], is_const=True, is_virtual=True)
cls.add_method('GetRxAntenna', 'ns3::Ptr< ns3::AntennaModel >', [], is_virtual=True)
cls.add_method('StartRx', 'void', [param('ns3::Ptr< ns3::SpectrumSignalParameters >', 'params')], is_virtual=True)
cls.add_method('StartRxData', 'void', [param('ns3::Ptr< ns3::LteSpectrumSignalParametersDataFrame >', 'params')])
cls.add_method('StartRxDlCtrl', 'void', [param('ns3::Ptr< ns3::LteSpectrumSignalParametersDlCtrlFrame >', 'lteDlCtrlRxParams')])
cls.add_method('StartRxUlSrs', 'void', [param('ns3::Ptr< ns3::LteSpectrumSignalParametersUlSrsFrame >', 'lteUlSrsRxParams')])
cls.add_method('SetHarqPhyModule', 'void', [param('ns3::Ptr< ns3::LteHarqPhy >', 'harq')])
cls.add_method('SetTxPowerSpectralDensity', 'void', [param('ns3::Ptr< ns3::SpectrumValue >', 'txPsd')])
cls.add_method('SetNoisePowerSpectralDensity', 'void', [param('ns3::Ptr< ns3::SpectrumValue const >', 'noisePsd')])
cls.add_method('Reset', 'void', [])
cls.add_method('SetAntenna', 'void', [param('ns3::Ptr< ns3::AntennaModel >', 'a')])
cls.add_method('StartTxDataFrame', 'bool', [param('ns3::Ptr< ns3::PacketBurst >', 'pb'), param('std::list< ns3::Ptr< ns3::LteControlMessage > >', 'ctrlMsgList'), param('ns3::Time', 'duration')])
cls.add_method('StartTxDlCtrlFrame', 'bool', [param('std::list< ns3::Ptr< ns3::LteControlMessage > >', 'ctrlMsgList'), param('bool', 'pss')])
cls.add_method('StartTxUlSrsFrame', 'bool', [])
cls.add_method('SetLtePhyRxDataEndErrorCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'c')])
cls.add_method('SetLtePhyRxDataEndOkCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'c')])
cls.add_method('SetLtePhyRxCtrlEndOkCallback', 'void', [param('ns3::Callback< void, std::list< ns3::Ptr< ns3::LteControlMessage > >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'c')])
cls.add_method('SetLtePhyRxCtrlEndErrorCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'c')])
cls.add_method('SetLtePhyRxPssCallback', 'void', [param('ns3::Callback< void, unsigned short, ns3::Ptr< ns3::SpectrumValue >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'c')])
cls.add_method('SetLtePhyDlHarqFeedbackCallback', 'void', [param('ns3::Callback< void, ns3::DlInfoListElement_s, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'c')])
cls.add_method('SetLtePhyUlHarqFeedbackCallback', 'void', [param('ns3::Callback< void, ns3::UlInfoListElement_s, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'c')])
cls.add_method('SetState', 'void', [param('ns3::LteSpectrumPhy::State', 'newState')])
cls.add_method('SetCellId', 'void', [param('uint16_t', 'cellId')])
cls.add_method('SetComponentCarrierId', 'void', [param('uint8_t', 'componentCarrierId')])
cls.add_method('AddRsPowerChunkProcessor', 'void', [param('ns3::Ptr< ns3::LteChunkProcessor >', 'p')])
cls.add_method('AddDataPowerChunkProcessor', 'void', [param('ns3::Ptr< ns3::LteChunkProcessor >', 'p')])
cls.add_method('AddDataSinrChunkProcessor', 'void', [param('ns3::Ptr< ns3::LteChunkProcessor >', 'p')])
cls.add_method('AddInterferenceCtrlChunkProcessor', 'void', [param('ns3::Ptr< ns3::LteChunkProcessor >', 'p')])
cls.add_method('AddInterferenceDataChunkProcessor', 'void', [param('ns3::Ptr< ns3::LteChunkProcessor >', 'p')])
cls.add_method('AddCtrlSinrChunkProcessor', 'void', [param('ns3::Ptr< ns3::LteChunkProcessor >', 'p')])
cls.add_method('AddExpectedTb', 'void', [param('uint16_t', 'rnti'), param('uint8_t', 'ndi'), param('uint16_t', 'size'), param('uint8_t', 'mcs'), param('std::vector< int >', 'map'), param('uint8_t', 'layer'), param('uint8_t', 'harqId'), param('uint8_t', 'rv'), param('bool', 'downlink')])
cls.add_method('UpdateSinrPerceived', 'void', [param('ns3::SpectrumValue const &', 'sinr')])
cls.add_method('SetTransmissionMode', 'void', [param('uint8_t', 'txMode')])
cls.add_method('GetChannel', 'ns3::Ptr< ns3::SpectrumChannel >', [])
cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')])
return |
def get_logger(log_path, name='default'):
logger = logging.getLogger(name)
logger.propagate = False
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
logger.addHandler(sh)
fh = logging.FileHandler(log_path, mode='w')
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger |
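# Usage sketch (hedged): records go both to stdout and to `log_path`, with a
# bare '%(message)s' format; propagate=False keeps the root logger from
# printing duplicates:
#   logger = get_logger('train.log', name='train')
#   logger.info('epoch 1 done')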
def load_splitter(path: str) -> Splitter:
spark = State().session
args = spark.read.json(join(path, 'init_args.json')).first().asDict()
name = args['_splitter_name']
del args['_splitter_name']
splitter = globals()[name]
return splitter(**args) |
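# Usage sketch (hedged): assumes a matching save step wrote the constructor
# arguments plus a '_splitter_name' key to <path>/init_args.json; the class is
# resolved by name from this module's globals and re-instantiated:
#   splitter = load_splitter('/tmp/saved_splitter')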
class ParsedDate():
def __init__(self) -> None:
self.ymd: Dict[(str, int)] = {'year': (- 1), 'month': (- 1), 'day': (- 1)}
self.hms: Dict[(str, int)] = {'hour': (- 1), 'minute': (- 1), 'second': (- 1)}
self.weekday: int = (- 1)
self.tzinfo: Dict[(str, Union[(int, str)])] = {'timezone': '', 'utc_add': '', 'utc_offset_hours': (- 1), 'utc_offset_minutes': (- 1)}
self.valid: str = 'cleaned'
def set_year(self, year: int) -> None:
if (1700 <= year <= 2500):
self.ymd['year'] = year
else:
self.valid = 'unknown'
def set_month(self, month: int) -> None:
if (1 <= month <= 12):
self.ymd['month'] = month
else:
self.valid = 'unknown'
def set_day(self, day: int) -> None:
if (self.ymd['month'] in [1, 3, 5, 7, 8, 10, 12]):
if (1 <= day <= 31):
self.ymd['day'] = day
else:
self.valid = 'unknown'
elif (self.ymd['month'] in [4, 6, 9, 11]):
if (1 <= day <= 30):
self.ymd['day'] = day
else:
self.valid = 'unknown'
elif (self.ymd['month'] in [2]):
if self._is_leap_year():
if (1 <= day <= 29):
self.ymd['day'] = day
else:
self.valid = 'unknown'
elif (1 <= day <= 28):
self.ymd['day'] = day
else:
self.valid = 'unknown'
else:
self.valid = 'unknown'
def set_hour(self, hour: int) -> None:
if (0 <= hour < 24):
self.hms['hour'] = hour
else:
self.valid = 'unknown'
def set_minute(self, minute: int) -> None:
if (0 <= minute < 60):
self.hms['minute'] = minute
else:
self.valid = 'unknown'
def set_second(self, second: int) -> None:
if (0 <= second < 60):
self.hms['second'] = second
else:
self.valid = 'unknown'
def set_tzinfo(self, timezone: str='', utc_add: str='', utc_offset_hours: int=(- 1), utc_offset_minutes: int=(- 1)) -> None:
if (timezone != ''):
if ((timezone in all_timezones) or (timezone in ZONE)):
self.tzinfo['timezone'] = timezone
else:
self.valid = 'unknown'
if (utc_add != ''):
self.tzinfo['utc_add'] = utc_add
if (utc_offset_hours >= 0):
self.tzinfo['utc_offset_hours'] = utc_offset_hours
if (utc_offset_minutes >= 0):
self.tzinfo['utc_offset_minutes'] = utc_offset_minutes
def set_weekday(self, weekday: int) -> None:
if (1 <= weekday <= 7):
self.weekday = weekday
else:
self.valid = 'unknown'
def _is_leap_year(self) -> bool:
if ((self.ymd['year'] % 4) == 0):
if ((self.ymd['year'] % 100) == 0):
return ((self.ymd['year'] % 400) == 0)
else:
return True
return False |
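# Usage sketch (hedged): each setter validates its component and flips `valid`
# to 'unknown' on failure; set_day respects month lengths, including leap-year
# February:
#   pd = ParsedDate()
#   pd.set_year(2020); pd.set_month(2); pd.set_day(29)
#   assert pd.valid == 'cleaned'  # 2020 is a leap year, so Feb 29 is accepted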
class Partition3(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[0]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[1]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[2]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[3]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[4]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5LayerNorm[final_layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]', 'T5ForConditionalGeneration/Linear[lm_head]', 'T5ForConditionalGeneration/CrossEntropyLoss[lm_loss]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:3'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1, 1, 1]
self.lookup = {'l_0': 'decoder.dropout', 'l_1': 'decoder.0', 'l_2': 'decoder.1', 'l_3': 'decoder.2', 'l_4': 'decoder.3', 'l_5': 'decoder.4', 'l_6': 'decoder.5', 'l_7': 'decoder.final_layer_norm', 'l_8': 'decoder.dropout', 'l_9': 'lm_head', 'l_10': 'lm_loss'}
self.to(self.device)
def forward(self, *args):
(decoder_attention_mask, inverted_encoder_attention_mask, lm_labels, x0, x1) = move_tensors(unflatten(args, self.input_structure), self.device)
t_0 = self.l_0(x1)
t_0 = self.l_1(t_0, attention_mask=decoder_attention_mask, position_bias=None, encoder_hidden_states=x0, encoder_attention_mask=inverted_encoder_attention_mask, encoder_decoder_position_bias=None)
t_1 = t_0[0]
t_2 = t_0[1]
t_0 = t_0[2]
t_1 = self.l_2(t_1, attention_mask=decoder_attention_mask, position_bias=t_2, encoder_hidden_states=x0, encoder_attention_mask=inverted_encoder_attention_mask, encoder_decoder_position_bias=t_0)
t_1 = self.l_3(t_1, attention_mask=decoder_attention_mask, position_bias=t_2, encoder_hidden_states=x0, encoder_attention_mask=inverted_encoder_attention_mask, encoder_decoder_position_bias=t_0)
t_1 = self.l_4(t_1, attention_mask=decoder_attention_mask, position_bias=t_2, encoder_hidden_states=x0, encoder_attention_mask=inverted_encoder_attention_mask, encoder_decoder_position_bias=t_0)
t_1 = self.l_5(t_1, attention_mask=decoder_attention_mask, position_bias=t_2, encoder_hidden_states=x0, encoder_attention_mask=inverted_encoder_attention_mask, encoder_decoder_position_bias=t_0)
t_0 = self.l_6(t_1, attention_mask=decoder_attention_mask, position_bias=t_2, encoder_hidden_states=x0, encoder_attention_mask=inverted_encoder_attention_mask, encoder_decoder_position_bias=t_0)
t_0 = self.l_7(t_0)
t_0 = self.l_8(t_0)
t_0 = (t_0 * 0.)
t_0 = self.l_9(t_0)
t_2 = t_0.size((- 1))
t_2 = t_0.view((- 1), t_2)
t_0 = lm_labels.view((- 1))
t_0 = self.l_10(t_2, t_0)
return (t_0,)
def state_dict(self, device=None):
return state_dict(self, device=device)
def load_state_dict(self, state):
return load_state_dict(self, state)
def named_parameters(self, recurse=True):
return named_parameters(self, recurse=recurse)
def named_buffers(self, recurse=True):
return named_buffers(self, recurse=recurse)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs) |
def load_data(config):
print(('-*-' * 10))
print('current data_sign: {}'.format(config.data_sign))
if (config.data_sign == 'conll03'):
data_processor = Conll03Processor()
elif (config.data_sign == 'zh_msra'):
data_processor = MSRAProcessor()
elif (config.data_sign == 'zh_onto'):
data_processor = Onto4ZhProcessor()
elif (config.data_sign == 'en_onto'):
data_processor = Onto5EngProcessor()
elif (config.data_sign == 'genia'):
data_processor = GeniaProcessor()
elif (config.data_sign == 'ace2004'):
data_processor = ACE2004Processor()
elif (config.data_sign == 'ace2005'):
data_processor = ACE2005Processor()
elif (config.data_sign == 'resume'):
data_processor = ResumeZhProcessor()
elif (config.data_sign == 'wiki'):
data_processor = WikiProcessor()
elif (config.data_sign == 'HP'):
data_processor = HPProcessor()
elif (config.data_sign == 'HC'):
data_processor = HCProcessor()
elif (config.data_sign == 'ecommerce'):
data_processor = EcommerceProcessor()
elif (config.data_sign == 'twitter'):
data_processor = TwitterProcessor()
else:
raise ValueError('Please notice that your data_sign does not exist!')
label_list = data_processor.get_labels()
tokenizer = BertTokenizer4Tagger.from_pretrained(config.bert_model, do_lower_case=True)
dataset_loaders = MRCNERDataLoader(config, data_processor, label_list, tokenizer, mode='train', allow_impossible=True)
if (config.data_sign == 'HP'):
train_dataloader = dataset_loaders.get_dataloader(data_sign='train')
else:
train_dataloader = dataset_loaders.get_dataloader(data_sign='train', saved_dir=config.data_dir)
dev_dataloader = dataset_loaders.get_dataloader(data_sign='dev')
test_dataloader = dataset_loaders.get_dataloader(data_sign='test')
num_train_steps = dataset_loaders.get_num_train_epochs()
return (train_dataloader, dev_dataloader, test_dataloader, num_train_steps, label_list) |
def _script_local_optimizer_step(local_optim_rref: RRef[_ScriptLocalOptimizerInterface], autograd_ctx_id: int) -> None:
local_optim = local_optim_rref.local_value()
local_optim.step(autograd_ctx_id) |
def Q8():
E = 'abcdefgh'
CC = {3: ['abfg', 'bcdg', 'defg', 'cdeh', 'aefh', 'abch', 'abed', 'cfgh', 'bcef', 'adgh', 'acdf'], 4: [E]}
M = CircuitClosuresMatroid(groundset=E, circuit_closures=CC)
M.rename(('Q8: ' + repr(M)))
return M |
def resplit_mwt(tokens, pipeline, keep_tokens=True):
if ('tokenize' not in pipeline.processors):
raise ValueError('Need a Pipeline with a valid tokenize processor')
if ('mwt' not in pipeline.processors):
raise ValueError('Need a Pipeline with a valid mwt processor')
tokenize_processor = pipeline.processors['tokenize']
mwt_processor = pipeline.processors['mwt']
fake_text = '\n\n'.join((' '.join(sentence) for sentence in tokens))
batches = TokenizationDataset(tokenize_processor.config, input_text=fake_text, vocab=tokenize_processor.vocab, evaluation=True, dictionary=tokenize_processor.trainer.dictionary)
(all_preds, all_raw) = predict(trainer=tokenize_processor.trainer, data_generator=batches, batch_size=tokenize_processor.trainer.args['batch_size'], max_seqlen=tokenize_processor.config.get('max_seqlen', tokenize_processor.MAX_SEQ_LENGTH_DEFAULT), use_regex_tokens=True, num_workers=tokenize_processor.config.get('num_workers', 0))
if keep_tokens:
for (sentence, pred) in zip(tokens, all_preds):
char_idx = 0
for word in sentence:
if (len(word) > 0):
pred[char_idx:((char_idx + len(word)) - 1)] = 0
if (pred[((char_idx + len(word)) - 1)] == 0):
pred[((char_idx + len(word)) - 1)] = 1
char_idx += (len(word) + 1)
(_, _, document) = decode_predictions(vocab=tokenize_processor.vocab, mwt_dict=None, orig_text=fake_text, all_raw=all_raw, all_preds=all_preds, no_ssplit=True, skip_newline=tokenize_processor.trainer.args['skip_newline'], use_la_ittb_shorthand=(tokenize_processor.trainer.args['shorthand'] == 'la_ittb'))
document = doc.Document(document, fake_text)
mwt_processor.process(document)
return document |
class GenericAccessibleObject(metaclass=abc.ABCMeta):
def __init__(self, owner: (TypeInfo | None)):
self._owner = owner
@abc.abstractmethod
def generated_type(self) -> ProperType:
    """Provides the type generated by this accessible object (assumed abstract; body missing in source)."""
@property
def owner(self) -> (TypeInfo | None):
return self._owner
def is_enum(self) -> bool:
return False
def is_method(self) -> bool:
return False
def is_constructor(self) -> bool:
return False
def is_function(self) -> bool:
return False
def is_field(self) -> bool:
return False
def is_static(self) -> bool:
return False
def get_num_parameters(self) -> int:
return 0
@abc.abstractmethod
def get_dependencies(self, memo: dict[(InferredSignature, dict[(str, ProperType)])]) -> OrderedSet[ProperType]:
    """Provides the types this accessible object depends on (assumed abstract; body missing in source).""" |
def main():
global num_bins, sampling_rate, num_centroids, percent
print('Opening video!')
capture = cv2.VideoCapture(os.path.abspath(os.path.expanduser(sys.argv[1])))
print('Video opened\nChoosing frames')
frames = []
i = 0
while capture.isOpened():
if ((i % sampling_rate) == 0):
capture.set(1, i)  # 1 == cv2.CAP_PROP_POS_FRAMES: seek to frame i before reading
(ret, frame) = capture.read()
if (frame is None):
break
frames.append(np.asarray(frame))
i += 1
frames = np.array(frames)
print('Frames chosen')
print(('Length of video %d' % frames.shape[0]))
features = get_cnn_feat(frames)
print(('Shape of features ' + str(features.shape)))
print('Clustering')
num_centroids = int((((percent * frames.shape[0]) * sampling_rate) / 100))
if (percent == (- 1)):
video_address = sys.argv[1].split('/')
gt_file = (video_address[(len(video_address) - 1)].split('.')[0] + '.mat')
video_address[(len(video_address) - 1)] = gt_file
video_address[(len(video_address) - 2)] = 'GT'
gt_file = '/'.join(video_address)
num_frames = int(scipy.io.loadmat(gt_file).get('user_score').shape[0])
num_centroids = int((0.1 * num_frames))
if (len(frames) < num_centroids):
print('Too few samples to generate such a large summary')
print('Changing to maximum possible centroids')
num_centroids = frames.shape[0]
kmeans = KMeans(n_clusters=num_centroids).fit(features)  # swapped in for GaussianMixture: only KMeans exposes .transform (centroid distances), which the argmin below relies on
print('Done Clustering!')
print('Generating summary frames')
summary_frames = []
features_transform = kmeans.transform(features)
frame_indices = []
for cluster in range(features_transform.shape[1]):
print(('Frame number: %d' % (np.argmin(features_transform.T[cluster]) * sampling_rate)))
frame_indices.append(np.argmin(features_transform.T[cluster]))
frame_indices = sorted(frame_indices)
summary_frames = [frames[i] for i in frame_indices]
print('Generated summary')
if ((len(sys.argv) > 6) and ((int(sys.argv[5]) == 1) or (int(sys.argv[6]) == 1))):
save_keyframes(frame_indices, summary_frames) |
class TokenizeTest(absltest.TestCase):
def test_give_me_a_name(self):
self.assertEqual(['one', 'two', 'three'], tokenize.tokenize('one Two three', None))
self.assertEqual(['one', 'two', 'three'], tokenize.tokenize('one\n Two \nthree', None)) |
def cosine_rampdown_1(current, rampdown_length):
'Cosine rampdown from https://arxiv.org/abs/1610.02242'
assert (0 <= current <= rampdown_length)
return max(0.0, float((0.5 * (np.cos(((np.pi * current) / rampdown_length)) + 1)))) |
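# The ramp decays smoothly from 1 at current=0 to 0 at current=rampdown_length,
# following 0.5 * (cos(pi * current / rampdown_length) + 1).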
def is_type_list(x, type):
if (not isinstance(x, list)):
return False
return all((isinstance(item, type) for item in x)) |
class _DistributedDataParallel(torch.nn.parallel.DistributedDataParallel):
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name) |
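# Usage sketch (hedged): the fallback keeps custom attributes of the wrapped
# model reachable through the DDP wrapper:
#   ddp = _DistributedDataParallel(model, device_ids=[local_rank])
#   ddp.custom_head  # falls through to model.custom_head via __getattr__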
def _check_ip(val: Any, input_format: str, clean: bool) -> Any:
try:
if (val in NULL_VALUES):
return ((None, 'null') if clean else False)
address = ip_address(val)
vers = address.version
if (((vers == 4) and (input_format != 'ipv6')) or ((vers == 6) and (input_format != 'ipv4'))):
return ((address, 'success') if clean else True)
return ((None, 'unknown') if clean else False)
except (TypeError, ValueError):
return ((None, 'unknown') if clean else False) |
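# Usage sketch (hedged): clean=False validates, clean=True returns a
# (value, status) pair for downstream cleaning:
#   _check_ip('1.2.3.4', 'ipv4', clean=False)   # True
#   _check_ip('1.2.3.4', 'ipv4', clean=True)    # (IPv4Address('1.2.3.4'), 'success')
#   _check_ip('nonsense', 'ipv4', clean=True)   # (None, 'unknown')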
class UnetBlock_with_z(nn.Module):
def __init__(self, input_nc, outer_nc, inner_nc, nz=0, submodule=None, outermost=False, innermost=False, norm_layer=None, nl_layer=None, use_dropout=False, upsample='basic', padding_type='zero'):
super(UnetBlock_with_z, self).__init__()
p = 0
downconv = []
if (padding_type == 'reflect'):
downconv += [nn.ReflectionPad2d(1)]
elif (padding_type == 'replicate'):
downconv += [nn.ReplicationPad2d(1)]
elif (padding_type == 'zero'):
p = 1
else:
raise NotImplementedError(('padding [%s] is not implemented' % padding_type))
self.outermost = outermost
self.innermost = innermost
self.nz = nz
input_nc = (input_nc + nz)
downconv += [nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=p)]
downrelu = nn.LeakyReLU(0.2, True)
uprelu = nl_layer()
if outermost:
upconv = upsampleLayer((inner_nc * 2), outer_nc, upsample=upsample, padding_type=padding_type)
down = downconv
up = (([uprelu] + upconv) + [nn.Tanh()])
elif innermost:
upconv = upsampleLayer(inner_nc, outer_nc, upsample=upsample, padding_type=padding_type)
down = ([downrelu] + downconv)
up = ([uprelu] + upconv)
if (norm_layer is not None):
up += [norm_layer(outer_nc)]
else:
upconv = upsampleLayer((inner_nc * 2), outer_nc, upsample=upsample, padding_type=padding_type)
down = ([downrelu] + downconv)
if (norm_layer is not None):
down += [norm_layer(inner_nc)]
up = ([uprelu] + upconv)
if (norm_layer is not None):
up += [norm_layer(outer_nc)]
if use_dropout:
up += [nn.Dropout(0.5)]
self.down = nn.Sequential(*down)
self.submodule = submodule
self.up = nn.Sequential(*up)
def forward(self, x, z):
if (self.nz > 0):
z_img = z.view(z.size(0), z.size(1), 1, 1).expand(z.size(0), z.size(1), x.size(2), x.size(3))
x_and_z = torch.cat([x, z_img], 1)
else:
x_and_z = x
if self.outermost:
x1 = self.down(x_and_z)
x2 = self.submodule(x1, z)
return self.up(x2)
elif self.innermost:
x1 = self.up(self.down(x_and_z))
return torch.cat([x1, x], 1)
else:
x1 = self.down(x_and_z)
x2 = self.submodule(x1, z)
return torch.cat([self.up(x2), x], 1) |
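# Note: every non-outermost block concatenates its input x onto the upsampled
# output (the U-Net skip connection); when nz > 0 the latent z is broadcast to
# x's spatial size and concatenated along the channel dimension first.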
@sympify_method_args
class SageSet(Set):
def __new__(cls, sage_set):
return Basic.__new__(cls, sage_set)
def _sage_(self):
return self._args[0]
def is_empty(self):
return self._sage_().is_empty()
def is_finite_set(self):
return self._sage_().is_finite()
def is_iterable(self):
from sage.categories.enumerated_sets import EnumeratedSets
return (self._sage_() in EnumeratedSets())
def __iter__(self):
for element in self._sage_():
(yield sympify(element))
def _contains(self, element):
if element.is_symbol:
return None
return (element in self._sage_())
def __len__(self):
return len(self._sage_())
def __str__(self):
return f'SageSet({self._sage_()})'
__repr__ = __str__ |
@pytest.mark.operations('success', 'failure', 'unsatisfiable')
def test_junitxml_file(cli, schema_url, hypothesis_max_examples, tmp_path):
xml_path = (tmp_path / 'junit.xml')
cli.run(schema_url, f'--junit-xml={xml_path}', f'--hypothesis-max-examples={(hypothesis_max_examples or 1)}', '--hypothesis-seed=1', '--checks=all')
tree = ElementTree.parse(xml_path)
root = tree.getroot()
assert (root.tag == 'testsuites')
assert (root.attrib['errors'] == '1')
assert (root.attrib['failures'] == '1')
assert (root.attrib['tests'] == '3')
testsuite = root[0]
assert (testsuite.tag == 'testsuite')
assert (testsuite.attrib['name'] == 'schemathesis')
assert (testsuite.attrib['errors'] == '1')
assert (testsuite.attrib['failures'] == '1')
assert (testsuite.attrib['tests'] == '3')
testcases = list(testsuite)
assert (len(testcases) == 3)
assert (testcases[0].tag == 'testcase')
assert (testcases[0].attrib['name'] == 'GET /api/failure')
assert (testcases[0][0].tag == 'failure')
assert (testcases[0][0].attrib['type'] == 'failure')
assert (testcases[0][0].attrib['message'] == '1. Undocumented Content-Type')
assert (testcases[1].attrib['name'] == 'GET /api/success')
assert (testcases[2].attrib['name'] == 'POST /api/unsatisfiable')
assert (testcases[2][0].tag == 'error')
assert (testcases[2][0].attrib['type'] == 'error')
assert ('Failed to generate test cases for this API operation' in testcases[2][0].attrib['message']) |
class TestEnum(JitTestCase):
def test_enum_value_types(self):
global IntEnum
class IntEnum(Enum):
FOO = 1
BAR = 2
global FloatEnum
class FloatEnum(Enum):
FOO = 1.2
BAR = 2.3
global StringEnum
class StringEnum(Enum):
FOO = 'foo as in foo bar'
BAR = 'bar as in foo bar'
@torch.jit.script
def supported_enum_types(a: IntEnum, b: FloatEnum, c: StringEnum):
return (a.name, b.name, c.name)
FileCheck().check('IntEnum').check('FloatEnum').check('StringEnum').run(str(supported_enum_types.graph))
global TensorEnum
class TensorEnum(Enum):
FOO = torch.tensor(0)
BAR = torch.tensor(1)
def unsupported_enum_types(a: TensorEnum):
return a.name
with self.assertRaisesRegex(RuntimeError, "Cannot create Enum with value type 'Tensor'"):
torch.jit.script(unsupported_enum_types)
def test_enum_comp(self):
global Color
class Color(Enum):
RED = 1
GREEN = 2
@torch.jit.script
def enum_comp(x: Color, y: Color) -> bool:
return (x == y)
FileCheck().check('aten::eq').run(str(enum_comp.graph))
self.assertEqual(enum_comp(Color.RED, Color.RED), True)
self.assertEqual(enum_comp(Color.RED, Color.GREEN), False)
def test_enum_comp_diff_classes(self):
global Foo, Bar
class Foo(Enum):
ITEM1 = 1
ITEM2 = 2
class Bar(Enum):
ITEM1 = 1
ITEM2 = 2
@torch.jit.script
def enum_comp(x: Foo) -> bool:
return (x == Bar.ITEM1)
FileCheck().check('prim::Constant').check_same('Bar.ITEM1').check('aten::eq').run(str(enum_comp.graph))
self.assertEqual(enum_comp(Foo.ITEM1), False)
def test_heterogenous_value_type_enum_error(self):
global Color
class Color(Enum):
RED = 1
GREEN = 'green'
def enum_comp(x: Color, y: Color) -> bool:
return (x == y)
with self.assertRaisesRegex(RuntimeError, 'Could not unify type list'):
torch.jit.script(enum_comp)
def test_enum_name(self):
global Color
class Color(Enum):
RED = 1
GREEN = 2
@torch.jit.script
def enum_name(x: Color) -> str:
return x.name
FileCheck().check('Color').check_next('prim::EnumName').check_next('return').run(str(enum_name.graph))
self.assertEqual(enum_name(Color.RED), Color.RED.name)
self.assertEqual(enum_name(Color.GREEN), Color.GREEN.name)
def test_enum_value(self):
global Color
class Color(Enum):
RED = 1
GREEN = 2
@torch.jit.script
def enum_value(x: Color) -> int:
return x.value
FileCheck().check('Color').check_next('prim::EnumValue').check_next('return').run(str(enum_value.graph))
self.assertEqual(enum_value(Color.RED), Color.RED.value)
self.assertEqual(enum_value(Color.GREEN), Color.GREEN.value)
def test_enum_as_const(self):
global Color
class Color(Enum):
RED = 1
GREEN = 2
@torch.jit.script
def enum_const(x: Color) -> bool:
return (x == Color.RED)
FileCheck().check('prim::Constant[value=__torch__.jit.test_enum.Color.RED]').check_next('aten::eq').check_next('return').run(str(enum_const.graph))
self.assertEqual(enum_const(Color.RED), True)
self.assertEqual(enum_const(Color.GREEN), False)
def test_non_existent_enum_value(self):
global Color
class Color(Enum):
RED = 1
GREEN = 2
def enum_const(x: Color) -> bool:
if (x == Color.PURPLE):
return True
else:
return False
with self.assertRaisesRegexWithHighlight(RuntimeError, "has no attribute 'PURPLE'", 'Color.PURPLE'):
torch.jit.script(enum_const)
def test_enum_ivalue_type(self):
global Color
class Color(Enum):
RED = 1
GREEN = 2
@torch.jit.script
def is_color_enum(x: Any):
return isinstance(x, Color)
FileCheck().check('prim::isinstance[types=[Enum<__torch__.jit.test_enum.Color>]]').check_next('return').run(str(is_color_enum.graph))
self.assertEqual(is_color_enum(Color.RED), True)
self.assertEqual(is_color_enum(Color.GREEN), True)
self.assertEqual(is_color_enum(1), False)
def test_closed_over_enum_constant(self):
global Color
class Color(Enum):
RED = 1
GREEN = 2
a = Color
@torch.jit.script
def closed_over_aliased_type():
return a.RED.value
FileCheck().check('prim::Constant[value={}]'.format(a.RED.value)).check_next('return').run(str(closed_over_aliased_type.graph))
self.assertEqual(closed_over_aliased_type(), Color.RED.value)
b = Color.RED
@torch.jit.script
def closed_over_aliased_value():
return b.value
FileCheck().check('prim::Constant[value={}]'.format(b.value)).check_next('return').run(str(closed_over_aliased_value.graph))
self.assertEqual(closed_over_aliased_value(), Color.RED.value)
def test_enum_as_module_attribute(self):
global Color
class Color(Enum):
RED = 1
GREEN = 2
class TestModule(torch.nn.Module):
def __init__(self, e: Color):
super(TestModule, self).__init__()
self.e = e
def forward(self):
return self.e.value
m = TestModule(Color.RED)
scripted = torch.jit.script(m)
FileCheck().check('TestModule').check_next('Color').check_same('prim::GetAttr[name="e"]').check_next('prim::EnumValue').check_next('return').run(str(scripted.graph))
self.assertEqual(scripted(), Color.RED.value)
def test_string_enum_as_module_attribute(self):
global Color
class Color(Enum):
RED = 'red'
GREEN = 'green'
class TestModule(torch.nn.Module):
def __init__(self, e: Color):
super(TestModule, self).__init__()
self.e = e
def forward(self):
return (self.e.name, self.e.value)
m = TestModule(Color.RED)
scripted = torch.jit.script(m)
self.assertEqual(scripted(), (Color.RED.name, Color.RED.value))
def test_enum_return(self):
global Color
class Color(Enum):
RED = 1
GREEN = 2
@torch.jit.script
def return_enum(cond: bool):
if cond:
return Color.RED
else:
return Color.GREEN
self.assertEqual(return_enum(True), Color.RED)
self.assertEqual(return_enum(False), Color.GREEN)
def test_enum_module_return(self):
global Color
class Color(Enum):
RED = 1
GREEN = 2
class TestModule(torch.nn.Module):
def __init__(self, e: Color):
super(TestModule, self).__init__()
self.e = e
def forward(self):
return self.e
m = TestModule(Color.RED)
scripted = torch.jit.script(m)
FileCheck().check('TestModule').check_next('Color').check_same('prim::GetAttr[name="e"]').check_next('return').run(str(scripted.graph))
self.assertEqual(scripted(), Color.RED)
def test_enum_iterate(self):
global Color
class Color(Enum):
RED = 1
GREEN = 2
BLUE = 3
def iterate_enum(x: Color):
res: List[int] = []
for e in Color:
if (e != x):
res.append(e.value)
return res
scripted = torch.jit.script(iterate_enum)
FileCheck().check('Enum<__torch__.jit.test_enum.Color>[]').check_same('Color.RED').check_same('Color.GREEN').check_same('Color.BLUE').run(str(scripted.graph))
self.assertEqual(scripted(Color.RED), [Color.GREEN.value, Color.BLUE.value])
self.assertEqual(scripted(Color.GREEN), [Color.RED.value, Color.BLUE.value]) |
class Local_op(nn.Module):
def __init__(self, in_channels, out_channels):
super(Local_op, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
def forward(self, x):
(b, n, s, d) = x.size()
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
(x, _) = torch.max(x, (- 1), keepdim=False)
return x |
@preprocessor()
def get_text_between(cand):
start = (cand.person1_word_idx[1] + 1)
end = cand.person2_word_idx[0]
cand.text_between = ' '.join(cand.tokens[start:end])
return cand |