code stringlengths 101 5.91M |
|---|
def entropy_loss(logits):
    """Return the negated mean element-wise entropy of softmax(logits).

    Probabilities are clamped below at 1e-16 so the logarithm stays finite.
    Minimizing this loss therefore maximizes the entropy of the predicted
    distribution.
    """
    eps = 1e-16
    p = F.softmax(logits, dim=-1).clamp(min=eps)
    # entropy per element is -p*log(p); the loss is minus its mean
    return -(-p * p.log()).mean()
def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0, ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0, hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12, mxords=5, printmessg=0, tfirst=False):
    """Integrate a system of ODEs with LSODA via the ``_odepack`` wrapper.

    Normalizes the banded-Jacobian bounds, validates that ``t`` is monotone,
    forwards every solver option to ``_odepack.odeint``, and surfaces solver
    diagnostics as ``ODEintWarning`` warnings.
    """
    # -1 tells LSODA the Jacobian is not banded on that side.
    if ml is None:
        ml = -1
    if mu is None:
        mu = -1
    # t must be monotone in either direction; ties are allowed.
    steps = np.diff(t)
    if not ((steps >= 0).all() or (steps <= 0).all()):
        raise ValueError('The values in t must be monotonically increasing or monotonically decreasing; repeated values are allowed.')
    # Copy so the compiled routine cannot mutate caller-owned arrays.
    t = copy(t)
    y0 = copy(y0)
    output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu, full_output, rtol, atol, tcrit, h0, hmax, hmin, ixpr, mxstep, mxhnil, mxordn, mxords, int(bool(tfirst)))
    istate = output[-1]
    if istate < 0:
        warnings.warn(f'{_msgs[istate]} Run with full_output = 1 to get quantitative information.', ODEintWarning, stacklevel=2)
    elif printmessg:
        warnings.warn(_msgs[istate], ODEintWarning, stacklevel=2)
    if full_output:
        output[1]['message'] = _msgs[istate]
    # Drop the istate flag: return the solution alone, or (solution, info).
    output = output[:-1]
    return output[0] if len(output) == 1 else output
def test_online_boosting():
    """Prequential (test-then-train) smoke test of OnlineBoostingClassifier on SEA data."""
    stream = SEAGenerator(1, noise_percentage=0.067, random_state=112)
    nb = NaiveBayes()
    learner = OnlineBoostingClassifier(base_estimator=nb, n_estimators=3, random_state=112)
    first = True
    cnt = 0
    max_samples = 5000
    predictions = []
    wait_samples = 100
    correct_predictions = 0
    while cnt < max_samples:
        X, y = stream.next_sample()
        # Predict every `wait_samples` samples (skipping the very first one),
        # then always train on the sample.
        if (cnt % wait_samples == 0) and (cnt != 0):
            predictions.append(learner.predict(X)[0])
            if y[0] == predictions[-1]:
                correct_predictions += 1
        if first:
            # First call must declare the full set of target classes.
            learner.partial_fit(X, y, classes=stream.target_values)
            first = False
        else:
            learner.partial_fit(X, y)
        cnt += 1
    performance = correct_predictions / len(predictions)
    expected_predictions = [1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1]
    expected_correct_predictions = 45
    # Derive the expected accuracy instead of hard-coding it: the original
    # literal `0.` was truncated and would have failed the isclose check
    # (45 correct out of 49 predictions ~= 0.9184).
    expected_performance = expected_correct_predictions / len(expected_predictions)
    # np.alltrue was removed in NumPy 2.0; np.all is the supported spelling.
    assert np.all(predictions == expected_predictions)
    assert np.isclose(expected_performance, performance)
    assert correct_predictions == expected_correct_predictions
    assert type(learner.predict(X)) == np.ndarray
    assert type(learner.predict_proba(X)) == np.ndarray
    expected_info = 'OnlineBoostingClassifier(base_estimator=NaiveBayes(nominal_attributes=None), drift_detection=True, n_estimators=None, random_state=112)'
    info = ' '.join([line.strip() for line in learner.get_info().split()])
    assert info == expected_info
class NONLocalBlock3D(_NonLocalBlockND):
    """3-D specialization of the generic non-local block (dimension fixed to 3)."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True, return_sim=False):
        # Delegate everything to the N-dimensional implementation.
        super().__init__(in_channels, inter_channels=inter_channels, dimension=3, sub_sample=sub_sample, bn_layer=bn_layer, return_sim=return_sim)
def launch_mpi_driver(driver_path, args, config, partitions, m):
    """Launch the distributed driver through mpirun and install a SIGINT cleanup.

    Args:
        driver_path: path of the driver script distributed to workers.
        args: driver command-line arguments.
        config: parallax config providing resource info and the mpi command.
        partitions: data partition spec forwarded to the workers.
        m: optional machine/cluster manager to shut down on interrupt.

    Returns:
        ([proc], cleanup_fn): the mpirun Popen handle and the installed
        SIGINT handler (also usable directly for cleanup).
    """
    workers = config.resource_info['worker']
    _prepare_workers(workers, driver_path, args, partitions, m is not None)
    mpi_cmd = _get_mpi_cmd(config)
    parallax_log.warning(colored('\n$ %s' % mpi_cmd, 'red'))
    # setsid puts mpirun into its own process group so the whole tree can be
    # killed with one killpg.
    proc = subprocess.Popen(args=mpi_cmd, shell=True, preexec_fn=os.setsid)

    def cleanup_mpi(recv_signal, frame):
        # `is not None` for consistency with the _prepare_workers call above.
        if m is not None:
            m.shutdown()
        try:
            os.killpg(os.getpgid(proc.pid), signal.SIGINT)
        except OSError:
            # Best effort: the process group may already be gone. A bare
            # `except:` here also swallowed KeyboardInterrupt/SystemExit.
            pass

    signal.signal(signal.SIGINT, cleanup_mpi)
    return ([proc], cleanup_mpi)
class SEDataset(Dataset):
    """Speech-enhancement dataset of paired clean/noisy wav slices.

    Wav files are cut into fixed-size windows. The slicing index is computed
    once (in parallel) and cached to ``cache_dir`` as pickles — one global
    ``idx2slice`` index plus one slicing file per utterance — and reloaded on
    later runs. Samples are read lazily from disk in ``__getitem__``.
    """

    def __init__(self, clean_dir, noisy_dir, preemph, cache_dir='.', split='train', slice_size=(2 ** 14), stride=0.5, max_samples=None, do_cache=False, verbose=False, slice_workers=2, preemph_norm=False, random_scale=None):
        super(SEDataset, self).__init__()
        print('Creating {} split out of data in {}'.format(split, clean_dir))
        # Sort so clean/noisy files pair up by filename regardless of
        # filesystem enumeration order (glob gives no ordering guarantee).
        self.clean_names = sorted(glob.glob(os.path.join(clean_dir, '*.wav')))
        self.noisy_names = sorted(glob.glob(os.path.join(noisy_dir, '*.wav')))
        print('Found {} clean names and {} noisy names'.format(len(self.clean_names), len(self.noisy_names)))
        self.slice_workers = slice_workers
        if (len(self.clean_names) != len(self.noisy_names)) or (len(self.clean_names) == 0):
            raise ValueError('No wav data found! Check your data path please')
        if max_samples is not None:
            assert isinstance(max_samples, int), type(max_samples)
            self.clean_names = self.clean_names[:max_samples]
            self.noisy_names = self.noisy_names[:max_samples]
        self.cache_dir = cache_dir
        self.slice_size = slice_size
        self.stride = stride
        self.split = split
        self.verbose = verbose
        self.preemph = preemph
        self.preemph_norm = preemph_norm
        # Avoid the shared-mutable-default pitfall; [1] means "no random
        # amplitude scaling" (see __getitem__).
        self.random_scale = [1] if random_scale is None else random_scale
        cache_path = cache_dir
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)
        idx2slice_path = os.path.join(cache_path, '{}_idx2slice.pkl'.format(split))
        if not os.path.exists(idx2slice_path):
            # First run: compute the slicing index and persist it.
            self.prepare_slicing()
            with open(idx2slice_path, 'wb') as i2s_f:
                pickle.dump(self.idx2slice, i2s_f)
            for (s_i, slicing) in self.slicings.items():
                with open(os.path.join(cache_path, '{}_{}.pkl'.format(split, s_i)), 'wb') as ch_f:
                    pickle.dump(slicing, ch_f)
            self.num_samples = len(self.idx2slice)
            # Per-utterance slicings now live on disk; free the in-memory copy.
            self.slicings = None
        else:
            with open(idx2slice_path, 'rb') as i2s_f:
                self.idx2slice = pickle.load(i2s_f)
            print('Loaded {} idx2slice items'.format(len(self.idx2slice)))

    def read_wav_file(self, wavfilename):
        """Read a wav file and apply pre-emphasis/min-max normalization.

        ``preemph_norm`` selects the order of the two transforms.
        Returns (sample_rate, processed_waveform).
        """
        rate, wav = wavfile.read(wavfilename)
        if self.preemph_norm:
            wav = pre_emphasize(wav, self.preemph)
            wav = normalize_wave_minmax(wav)
        else:
            wav = normalize_wave_minmax(wav)
            wav = pre_emphasize(wav, self.preemph)
        return (rate, wav)

    def read_wavs(self):
        """Record the paired clean/noisy file paths (no audio is loaded)."""
        self.clean_paths = []
        self.noisy_paths = []
        clen = len(self.clean_names)
        nlen = len(self.noisy_names)
        assert clen == nlen, clen
        if self.verbose:
            print('< Reading {} wav files... >'.format(clen))
        beg_t = timeit.default_timer()
        for i, (clean_name, noisy_name) in enumerate(zip(self.clean_names, self.noisy_names), start=1):
            self.clean_paths.append(clean_name)
            self.noisy_paths.append(noisy_name)
        end_t = timeit.default_timer()
        if self.verbose:
            print('> Loaded files in {} s <'.format(end_t - beg_t))

    def read_wavs_and_cache(self):
        """Load the clean/noisy pair cache, building and saving it on a miss."""
        cache_path = os.path.join(self.cache_dir, 'cached_pair.pkl')
        try:
            # Pickles must be read in binary mode; the original text-mode
            # open crashed pickle.load under Python 3.
            with open(cache_path, 'rb') as f_in:
                cache = pickle.load(f_in)
            if self.verbose:
                print('Reading clean and wav pair from ', cache_path)
            self.clean_wavs = cache['clean']
            self.noisy_wavs = cache['noisy']
        except IOError:
            # NOTE(review): read_wavs() populates clean_paths/noisy_paths,
            # not clean_wavs/noisy_wavs as consumed here — confirm upstream
            # intent before relying on this branch.
            self.read_wavs()
            cache = {'noisy': self.noisy_wavs, 'clean': self.clean_wavs}
            if not os.path.exists(self.cache_dir):
                os.makedirs(self.cache_dir)
            with open(cache_path, 'wb') as f_out:
                pickle.dump(cache, f_out)
            if self.verbose:
                print('Cached clean and wav pair into ', cache_path)

    def prepare_slicing(self):
        """Compute window boundaries for every utterance (in parallel).

        Fills ``self.slicings`` (per-utterance slice metadata) and
        ``self.idx2slice`` (flat sample index -> (utterance, slice)).
        """
        slicings = {}
        idx2slice = []
        verbose = self.verbose
        if verbose:
            print('< Slicing all signals with window {} and stride {}... >'.format(self.slice_size, self.stride))
        beg_t = timeit.default_timer()
        pool = mp.Pool(self.slice_workers)
        try:
            clean_args = [(self.clean_names[i], self.slice_size, self.stride) for i in range(len(self.clean_names))]
            c_slices = pool.map(slice_index_helper, clean_args)
            noisy_args = [(self.noisy_names[i], self.slice_size, self.stride) for i in range(len(self.noisy_names))]
            n_slices = pool.map(slice_index_helper, noisy_args)
        finally:
            # The original leaked the worker pool; release it deterministically.
            pool.close()
            pool.join()
        if len(n_slices) != len(c_slices):
            raise ValueError('n_slices and c_slices have different lengths:{} != {}'.format(len(n_slices), len(c_slices)))
        for w_i, (c_slice, n_slice) in enumerate(zip(c_slices, n_slices)):
            c_path = self.clean_names[w_i]
            n_path = self.noisy_names[w_i]
            if w_i not in slicings:
                slicings[w_i] = []
            for t_i, (c_ss, n_ss) in enumerate(zip(c_slice, n_slice)):
                # Drop tail windows shorter than 1024 samples.
                if (c_ss[1] - c_ss[0]) < 1024:
                    continue
                slicings[w_i].append({'c_slice': c_ss, 'n_slice': n_ss, 'c_path': c_path, 'n_path': n_path, 'slice_idx': t_i})
                idx2slice.append((w_i, t_i))
        self.slicings = slicings
        self.idx2slice = idx2slice
        end_t = timeit.default_timer()
        if verbose:
            print('Sliced all signals in {} s'.format(end_t - beg_t))

    def extract_slice(self, index):
        """Load one (clean, noisy) window pair plus optional PESQ/SSNR metadata."""
        s_i, e_i = self.idx2slice[index]
        slice_file = os.path.join(self.cache_dir, '{}_{}.pkl'.format(self.split, s_i))
        with open(slice_file, 'rb') as s_f:
            slice_ = pickle.load(s_f)
            slice_ = slice_[e_i]
            c_slice_, n_slice_ = slice_['c_slice'], slice_['n_slice']
            slice_idx = slice_['slice_idx']
            n_path = slice_['n_path']
            bname = os.path.splitext(os.path.basename(n_path))[0]
            # Optional per-utterance metrics stored next to the noisy wav.
            met_path = os.path.join(os.path.dirname(n_path), bname + '.met')
            ssnr = None
            pesq = None
            if os.path.exists(met_path):
                metrics = json.load(open(met_path, 'r'))
                pesq = metrics['pesq']
                ssnr = metrics['ssnr']
            c_signal = self.read_wav_file(slice_['c_path'])[1]
            n_signal = self.read_wav_file(slice_['n_path'])[1]
            c_slice = c_signal[c_slice_[0]:c_slice_[1]]
            n_slice = n_signal[n_slice_[0]:n_slice_[1]]
            # Trim both windows to the common length...
            if n_slice.shape[0] > c_slice.shape[0]:
                n_slice = n_slice[:c_slice.shape[0]]
            if c_slice.shape[0] > n_slice.shape[0]:
                c_slice = c_slice[:n_slice.shape[0]]
            # ...and zero-pad up to the fixed slice size.
            if c_slice.shape[0] < self.slice_size:
                pad_t = np.zeros((self.slice_size - c_slice.shape[0],))
                c_slice = np.concatenate((c_slice, pad_t))
                n_slice = np.concatenate((n_slice, pad_t))
            return (c_slice, n_slice, pesq, ssnr, slice_idx, bname)

    def __getitem__(self, index):
        """Return [bname, clean, noisy, slice_idx(, pesq)(, ssnr)] tensors."""
        c_slice, n_slice, pesq, ssnr, slice_idx, bname = self.extract_slice(index)
        # Optional random amplitude augmentation.
        rscale = random.choice(self.random_scale)
        if rscale != 1:
            c_slice = rscale * c_slice
            n_slice = rscale * n_slice
        returns = [bname, torch.FloatTensor(c_slice), torch.FloatTensor(n_slice), slice_idx]
        if pesq is not None:
            returns.append(torch.FloatTensor([pesq]))
        if ssnr is not None:
            returns.append(torch.FloatTensor([ssnr]))
        return returns

    def __len__(self):
        return len(self.idx2slice)
def test_arrow_null_struct():
    """A pyarrow struct array containing a null round-trips through handle_arrow."""
    records = [{'x': 1, 'y': 1.1}, None, {'x': 2, 'y': 2.2}, {'x': 3, 'y': 3.3}]
    arr = pyarrow.array(records)
    assert to_list(ak._connect.pyarrow.handle_arrow(arr)) == records
def generate_bad(dims, reduce_dim, libname, reps=1):
    """Generate and compile a CUDA benchmark library for BiasActivationDropout.

    For every ordered pair of input/output layout permutations of ``dims`` a
    C-linkage entry point ``temp_<in>_<out>`` is emitted that copies the
    buffers to the GPU, times ``reps`` kernel launches, and returns the mean
    launch time in microseconds. The generated source is compiled with nvcc
    into the shared library ``libname``.

    Args:
        dims: mapping of dimension name -> extent (each name becomes a C
            struct exposing its extent as ``value``).
        reduce_dim: name of the dimension reduced over (also the bias length).
        libname: path of the shared library to produce; skipped if it exists.
        reps: number of timed kernel repetitions per entry point.
    """
    # Library already built: nothing to do.
    if os.path.exists(libname):
        return
    # Total element count and bias length for the device allocations below.
    size = reduce((lambda x, y: (x * y)), dims.values())
    reduce_size = dims[reduce_dim]
    # One `struct <name> { enum { value = N }; };` per dimension.
    dims_declaration = '\n'.join([('struct %s { enum { value = %d }; };' % (d, dims[d])) for d in dims])
    temp_source = ('\n    #include "blocks.cuh"\n    \n    #include <chrono>\n    \n    ' + dims_declaration)
    # Emit one timed entry point per (input layout, output layout) pair.
    for dims_permutation_in in itertools.permutations(dims):
        for dims_permutation_out in itertools.permutations(dims):
            in_label = ''.join(dims_permutation_in)
            out_label = ''.join(dims_permutation_out)
            in_layout = ', '.join(dims_permutation_in)
            out_layout = ', '.join(dims_permutation_out)
            layouts_declaration = ('\n    using lIN = metal::list<%s>;\n    using lOUT = metal::list<%s>;\n    ' % (in_layout, out_layout))
            func_name = ('temp_%s_%s' % (in_label, out_label))
            temp_source += '\n    extern "C" {{\n    double {func_name}(half* IN, half* OUT, half* BIAS) {{\n    \n    half* gIN = nullptr;\n    half* gOUT = nullptr;\n    half* gBIAS = nullptr;\n    \n    CHECK(cudaMalloc(&gIN, {size} * sizeof(half)));\n    CHECK(cudaMemcpy(gIN, IN, {size} * sizeof(half), cudaMemcpyHostToDevice));\n    \n    CHECK(cudaMalloc(&gBIAS, {reduce_size} * sizeof(half)));\n    CHECK(cudaMemcpy(gBIAS, BIAS, {reduce_size} * sizeof(half), cudaMemcpyHostToDevice));\n    \n    CHECK(cudaMalloc(&gOUT, {size} * sizeof(half)));\n\n    {layouts_declaration}\n    \n    float dropoutProbability = 0.;\n    \n    typedef std::chrono::high_resolution_clock Clock;\n    auto t1 = Clock::now();\n    for (int i = 0; i < {reps}; i++) {{\n    BiasActivationDropout<lIN, lOUT, {reduce_dim}>::run(gIN, gOUT, gBIAS, dropoutProbability, (cudaStream_t)0);\n    CHECK(cudaStreamSynchronize(0));\n    }}\n    auto t2 = Clock::now();\n    \n    CHECK(cudaMemcpy(OUT, gOUT, {size} * sizeof(half), cudaMemcpyDeviceToHost));\n    \n    CHECK(cudaFree(gIN));\n    CHECK(cudaFree(gOUT));\n    CHECK(cudaFree(gBIAS));\n    \n    return std::chrono::duration<double, std::micro>(t2 - t1).count() / {reps};\n    }}\n    }}\n    '.format(layouts_declaration=layouts_declaration, func_name=func_name, size=size, reduce_dim=reduce_dim, reps=reps, reduce_size=reduce_size)
    with open('temp.cu', 'w') as f:
        f.write(temp_source)
    # Compile for sm_61 and sm_70, then link the shared library.
    subprocess.run('nvcc -O3 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -c --compiler-options -fPIC temp.cu -o temp.o'.split(' '))
    subprocess.run('nvcc -shared -o {libname} temp.o'.format(libname=libname).split(' '))
# Restored the '@': the bare `_REGISTRY.register()` call was a no-op whose
# returned decorator was discarded, so the dataset was never registered.
@_REGISTRY.register()
class VideoRecurrentTestDataset(VideoTestDataset):
    """Video test dataset that yields whole sequences (for recurrent models).

    Unlike the per-frame parent class, ``__getitem__`` returns all cached
    LQ/GT frames of one folder at once.
    """

    def __init__(self, opt):
        super(VideoRecurrentTestDataset, self).__init__(opt)
        # Unique folder names define the sequence-level index.
        self.folders = sorted(list(set(self.data_info['folder'])))

    def __getitem__(self, index):
        folder = self.folders[index]
        if self.cache_data:
            imgs_lq = self.imgs_lq[folder]
            imgs_gt = self.imgs_gt[folder]
        else:
            raise NotImplementedError('Without cache_data is not implemented.')
        return {'lq': imgs_lq, 'gt': imgs_gt, 'folder': folder}

    def __len__(self):
        return len(self.folders)
def plot_alignment_to_numpy(alignment, info=None):
    """Render an attention-alignment matrix to a numpy image.

    Args:
        alignment: 2-D array (encoder steps x decoder steps) to display.
        info: optional extra text appended below the x-axis label.

    Returns:
        The rendered figure as a numpy array (via save_figure_to_numpy).
    """
    fig, ax = plt.subplots(figsize=(6, 4))
    im = ax.imshow(alignment, aspect='auto', origin='lower', interpolation='none')
    fig.colorbar(im, ax=ax)
    xlabel = 'Decoder timestep'
    if info is not None:
        xlabel += '\n\n' + info
    # Use the Axes/Figure API consistently instead of pyplot global state.
    ax.set_xlabel(xlabel)
    ax.set_ylabel('Encoder timestep')
    fig.tight_layout()
    fig.canvas.draw()
    data = save_figure_to_numpy(fig)
    # Close THIS figure: plt.close() only closes the current figure, which
    # may be a different one when callers have other figures open.
    plt.close(fig)
    return data
def test_rdn():
    """Smoke-test the RDN backbone: build it and run one optimization step."""
    scale = 4
    model_cfg = dict(type='RDN', in_channels=3, out_channels=3, mid_channels=64, num_blocks=16, upscale_factor=scale)
    model = build_backbone(model_cfg)
    assert model.__class__.__name__ == 'RDN'

    def run_step(net, lq, gt):
        # One forward/backward/update pass; returns the network output.
        optimizer = torch.optim.Adam(net.parameters())
        out = net(lq)
        optimizer.zero_grad()
        nn.L1Loss()(out, gt).backward()
        optimizer.step()
        return out

    inputs = torch.rand(1, 3, 32, 16)
    targets = torch.rand(1, 3, 128, 64)
    output = run_step(model, inputs, targets)
    assert torch.is_tensor(output)
    assert output.shape == targets.shape
    # Repeat the same step on GPU when available.
    if torch.cuda.is_available():
        output = run_step(model.cuda(), inputs.cuda(), targets.cuda())
        assert torch.is_tensor(output)
        assert output.shape == targets.shape
class Scorer(metaclass=ABCMeta):
    """Base class for scorers configured via a 'key=value,key=value' string.

    Values that parse as integers are stored as ints; everything else is
    kept as a stripped string. Subclasses install a reference via
    ``set_reference`` and delegate scoring to it.

    Note: the original declared ``__metaclass__ = ABCMeta``, which is
    Python-2 syntax and has no effect under Python 3; the metaclass is now
    applied with the py3 ``metaclass=`` keyword.
    """

    def __init__(self, argument_string):
        self._reference = None
        self._arguments = {}
        if argument_string:
            for assignment in argument_string.split(','):
                argument, value = assignment.split('=')
                argument = argument.strip()
                value = value.strip()
                try:
                    value = int(value)  # numeric options become ints
                except ValueError:
                    pass  # non-numeric options stay as strings
                self._arguments[argument] = value

    def set_reference(self, reference_tokens):
        """Hook for subclasses; the default implementation stores nothing."""
        pass

    def score(self, hypothesis_tokens):
        # Requires set_reference to have populated self._reference.
        return self._reference.score(hypothesis_tokens)

    def score_matrix(self, hypothesis_matrix):
        return self._reference.score_matrix(hypothesis_matrix)
class WavLMConfig(PretrainedConfig):
    """Configuration for a WavLM model.

    Stores the architecture hyperparameters: the convolutional feature
    extractor (conv_*), transformer encoder (hidden/attention/dropout),
    relative-position buckets, SpecAugment masking, quantizer/contrastive
    pretraining options, CTC, the optional adapter, and the x-vector head.
    Validates that the three conv_* lists describe the same number of layers.
    """
    model_type = 'wavlm'

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-05, feat_extract_norm='group', feat_extract_activation='gelu', conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, num_buckets=320, max_bucket_distance=800, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction='mean', ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        # Convolutional feature extractor.
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        # Relative position embedding buckets.
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        # Transformer encoder.
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        # The three conv_* lists must describe the same number of layers.
        if ((len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers)):
            raise ValueError(f'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        # SpecAugment masking.
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # Quantizer / contrastive-pretraining options.
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # CTC head.
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # Optional adapter module.
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = (output_hidden_size or hidden_size)
        # (A duplicate `self.classifier_proj_size = classifier_proj_size`
        # assignment was removed here; it was already set above.)
        # X-vector head.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    def inputs_to_logits_ratio(self):
        """Total downsampling factor of the feature extractor (product of strides)."""
        # NOTE(review): upstream-style configs expose this as a @property —
        # confirm how callers invoke it before adding the decorator.
        return functools.reduce(operator.mul, self.conv_stride, 1)
class VisualGoalEncoder(nn.Module):
    """Three-layer MLP mapping visual features to a latent goal embedding.

    Optionally L2-normalizes the output embedding along the feature axis.
    """

    def __init__(self, hidden_size: int, latent_goal_features: int, in_features: int, l2_normalize_goal_embeddings: bool, activation_function: str):
        super().__init__()
        self.l2_normalize_output = l2_normalize_goal_embeddings
        # Resolve the activation class by name from torch.nn (e.g. 'ReLU').
        self.act_fn = getattr(nn, activation_function)()
        layers = [
            nn.Linear(in_features=in_features, out_features=hidden_size),
            self.act_fn,
            nn.Linear(in_features=hidden_size, out_features=hidden_size),
            self.act_fn,
            nn.Linear(in_features=hidden_size, out_features=latent_goal_features),
        ]
        self.mlp = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.mlp(x)
        if self.l2_normalize_output:
            out = F.normalize(out, p=2, dim=1)
        return out
# Restored the '@': the bare `_operation` expression on its own line was a
# decorator that lost its prefix, leaving mtimes unwrapped.
@_operation
def mtimes(a: torch.Tensor, b: torch.Tensor, conj_a=False, conj_b=False):
    """Complex matrix multiply for tensors whose last dim holds (real, imag).

    Dispatches to real*complex / complex*real helpers when one operand is
    real, otherwise expands (a0 + i*a1)(b0 + i*b1) with optional conjugation
    of either operand.

    Raises:
        ValueError: when a real operand does not have strictly fewer
            dimensions than the complex one.
    """
    if is_real(a):
        if a.dim() >= b.dim():
            raise ValueError('Incorrect dimensions.')
        return mtimes_real_complex(a, b, conj_b=conj_b)
    if is_real(b):
        if b.dim() >= a.dim():
            raise ValueError('Incorrect dimensions.')
        return mtimes_complex_real(a, b, conj_a=conj_a)
    # Fully complex cases: (real part, imaginary part) per conjugation pattern.
    if (not conj_a) and (not conj_b):
        return complex((torch.matmul(a[..., 0], b[..., 0]) - torch.matmul(a[..., 1], b[..., 1])), (torch.matmul(a[..., 0], b[..., 1]) + torch.matmul(a[..., 1], b[..., 0])))
    if conj_a and (not conj_b):
        return complex((torch.matmul(a[..., 0], b[..., 0]) + torch.matmul(a[..., 1], b[..., 1])), (torch.matmul(a[..., 0], b[..., 1]) - torch.matmul(a[..., 1], b[..., 0])))
    if (not conj_a) and conj_b:
        return complex((torch.matmul(a[..., 0], b[..., 0]) + torch.matmul(a[..., 1], b[..., 1])), (torch.matmul(a[..., 1], b[..., 0]) - torch.matmul(a[..., 0], b[..., 1])))
    if conj_a and conj_b:
        return complex((torch.matmul(a[..., 0], b[..., 0]) - torch.matmul(a[..., 1], b[..., 1])), ((- torch.matmul(a[..., 0], b[..., 1])) - torch.matmul(a[..., 1], b[..., 0])))
class NFM(BaseModel):
    """Neural Factorization Machine.

    Combines a linear model with a DNN applied on top of the Bi-Interaction
    pooling of the sparse feature embeddings.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(128, 128), l2_reg_embedding=1e-05, l2_reg_linear=1e-05, l2_reg_dnn=0, init_std=0.0001, seed=1024, bi_dropout=0, dnn_dropout=0, dnn_activation='relu', task='binary', device='cpu'):
        super(NFM, self).__init__(linear_feature_columns, dnn_feature_columns, l2_reg_linear=l2_reg_linear, l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, task=task, device=device)
        # Dense features bypass the Bi-Interaction layer, so the DNN input is
        # the dense dimension plus one pooled embedding-sized vector.
        dnn_input_dim = self.compute_input_dim(dnn_feature_columns, include_sparse=False) + self.embedding_size
        self.dnn = DNN(dnn_input_dim, dnn_hidden_units, activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=False, init_std=init_std, device=device)
        self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)
        # Regularize DNN weights (batch-norm params excluded) and the output layer.
        self.add_regularization_weight(filter(lambda x: ('weight' in x[0]) and ('bn' not in x[0]), self.dnn.named_parameters()), l2_reg_dnn)
        self.add_regularization_weight(self.dnn_linear.weight, l2_reg_dnn)
        self.bi_pooling = BiInteractionPooling()
        self.bi_dropout = bi_dropout
        if self.bi_dropout > 0:
            self.dropout = nn.Dropout(bi_dropout)
        self.to(device)

    def forward(self, X):
        sparse_embedding_list, dense_value_list = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict)
        linear_logit = self.linear_model(X)
        # Stack the field embeddings and apply Bi-Interaction pooling.
        fm_input = torch.cat(sparse_embedding_list, dim=1)
        bi_out = self.bi_pooling(fm_input)
        if self.bi_dropout:
            bi_out = self.dropout(bi_out)
        dnn_input = combined_dnn_input([bi_out], dense_value_list)
        dnn_logit = self.dnn_linear(self.dnn(dnn_input))
        return self.out(linear_logit + dnn_logit)
class DownsamplerBlock(nn.Module):
    """ERFNet-style downsampler: strided conv and max-pool concatenated, then BN+ReLU.

    The conv contributes (noutput - ninput) channels and the pooled input the
    remaining ninput, so the concatenation has exactly noutput channels at
    half the spatial resolution.
    """

    def __init__(self, ninput, noutput):
        super().__init__()
        self.conv = nn.Conv2d(ninput, noutput - ninput, (3, 3), stride=2, padding=1, bias=True)
        self.pool = nn.MaxPool2d(2, stride=2)
        self.bn = nn.BatchNorm2d(noutput, eps=0.001)

    def forward(self, input):
        branches = [self.conv(input), self.pool(input)]
        return F.relu(self.bn(torch.cat(branches, 1)))
def main():
    """Pregenerate BERT pretraining data from a line-per-sentence corpus.

    Documents in the corpus are separated by blank lines. Each document is
    tokenized and stored in a DocumentDatabase, then `epochs_to_generate`
    shuffled training files are written to `output_dir` (optionally in
    parallel across worker processes).
    """
    parser = ArgumentParser()
    parser.add_argument('--train_corpus', type=Path, required=True)
    parser.add_argument('--output_dir', type=Path, required=True)
    parser.add_argument('--bert_model', type=str, required=True, choices=['bert-base-uncased', 'bert-large-uncased', 'bert-base-cased', 'bert-base-multilingual-uncased', 'bert-base-chinese', 'bert-base-multilingual-cased'])
    parser.add_argument('--do_lower_case', action='store_true')
    parser.add_argument('--do_whole_word_mask', action='store_true', help='Whether to use whole word masking rather than per-WordPiece masking.')
    parser.add_argument('--reduce_memory', action='store_true', help='Reduce memory usage for large datasets by keeping data on disc rather than in memory')
    parser.add_argument('--num_workers', type=int, default=1, help='The number of workers to use to write the files')
    parser.add_argument('--epochs_to_generate', type=int, default=3, help='Number of epochs of data to pregenerate')
    parser.add_argument('--max_seq_len', type=int, default=128)
    parser.add_argument('--short_seq_prob', type=float, default=0.1, help='Probability of making a short sentence as a training example')
    parser.add_argument('--masked_lm_prob', type=float, default=0.15, help='Probability of masking each token for the LM task')
    parser.add_argument('--max_predictions_per_seq', type=int, default=20, help='Maximum number of tokens to mask in each sequence')
    args = parser.parse_args()
    # reduce_memory keeps documents on disk, which is incompatible with
    # sharing the database across worker processes.
    if ((args.num_workers > 1) and args.reduce_memory):
        raise ValueError('Cannot use multiple workers while reducing memory')
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    vocab_list = list(tokenizer.vocab.keys())
    with DocumentDatabase(reduce_memory=args.reduce_memory) as docs:
        with args.train_corpus.open() as f:
            doc = []
            # A blank line terminates the current document.
            for line in tqdm(f, desc='Loading Dataset', unit=' lines'):
                line = line.strip()
                if (line == ''):
                    docs.add_document(doc)
                    doc = []
                else:
                    tokens = tokenizer.tokenize(line)
                    if tokens:
                        doc.append(tokens)
            # Flush a trailing document that was not followed by a blank line.
            if doc:
                docs.add_document(doc)
        # NextSentence sampling requires at least two documents.
        if (len(docs) <= 1):
            exit('ERROR: No document breaks were found in the input file! These are necessary to allow the script to ensure that random NextSentences are not sampled from the same document. Please add blank lines to indicate breaks between documents in your input file. If your dataset does not contain multiple documents, blank lines can be inserted at any natural boundary, such as the ends of chapters, sections or paragraphs.')
        args.output_dir.mkdir(exist_ok=True)
        if (args.num_workers > 1):
            # One worker per epoch file, capped at num_workers.
            writer_workers = Pool(min(args.num_workers, args.epochs_to_generate))
            arguments = [(docs, vocab_list, args, idx) for idx in range(args.epochs_to_generate)]
            writer_workers.starmap(create_training_file, arguments)
        else:
            for epoch in trange(args.epochs_to_generate, desc='Epoch'):
                create_training_file(docs, vocab_list, args, epoch)
class WarmupSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warmup to a constant learning rate.

    During the first `warmup_steps` steps the rate ramps linearly from 0 to
    `learnrate`; afterwards it stays at `learnrate`.
    """

    def __init__(self, learnrate, warmup_steps):
        super().__init__()
        self.learnrate = learnrate
        self.warmup_steps = warmup_steps

    def __call__(self, step):
        ramp = (step * self.learnrate) / float(self.warmup_steps)
        # min(constant, ramp) == ramp while warming up, constant afterwards.
        return tf.math.minimum(self.learnrate, ramp)
class KeyCheckerDict():
    """Dict-like container of per-attribute key specs used to validate objects.

    ``children`` maps an attribute name to a spec object with ``keys``,
    ``important``, ``essential`` and ``warn_empty`` members; each of the
    first three may be either a literal value or a callable taking
    (spec, children) that produces one.
    """

    def __init__(self, children):
        self.children = children

    def __getitem__(self, key):
        return self.children[key]

    def __setitem__(self, key, value):
        self.children[key] = value

    def verify(self, obj):
        """Check every registered attribute of *obj* against its spec."""
        for (k, v) in self.children.items():
            self._verify_prop(getattr(obj, k, None), k, v)

    def _verify_prop(self, obj, obj_name, s):
        """Validate one attribute value *obj* named *obj_name* against spec *s*."""
        # Spec members may be lazy: call them with (spec, children) if callable.
        val = (lambda x: (x(s, self.children) if callable(x) else x))
        if (s.warn_empty and (obj in [None, {}])):
            c_f.LOGGER.warning(('%s is empty' % obj_name))
        if (obj is not None):
            keys = val(s.keys)
            # Every present key must match one of the allowed key patterns.
            for k in obj.keys():
                assert any((pattern.match(k) for pattern in c_f.regex_wrapper(keys))), ('%s keys must be one of %s' % (obj_name, ', '.join(keys)))
            # Missing "important" keys only warn ...
            for imp_key in val(s.important):
                if (not any((c_f.regex_wrapper(imp_key).match(k) for k in obj))):
                    c_f.LOGGER.warning(('%s is missing "%s"' % (obj_name, imp_key)))
            # ... while missing "essential" keys are a hard failure.
            for ess_key in val(s.essential):
                assert any((c_f.regex_wrapper(ess_key).match(k) for k in obj)), ('%s must contain "%s"' % (obj_name, ess_key))
class MLQALanguage():
    """Language-specific tokenization/normalization hooks for MLQA evaluation."""

    def __init__(self, articles_regex_pattern: Optional[re.Pattern] = None):
        # Pattern matching the language's articles (used for answer normalization).
        self.articles_regex_pattern = articles_regex_pattern

    def tokenize(self, text: str):
        """Split *text* on whitespace."""
        return whitespace_tokenize(text)

    @classmethod
    def from_code(cls, code: str):
        """Return the handler for an ISO language code, or a generic instance.

        The original method took ``cls`` as its first argument but lacked the
        @classmethod decorator, so ``MLQALanguage.from_code('en')`` received
        an instance (or failed) instead of the class.
        """
        code_to_language = {'en': English, 'es': Spanish, 'hi': Hindi, 'vi': Vietnamese, 'de': German, 'ar': Arabic, 'zh': Chinese}
        if (code not in code_to_language):
            return MLQALanguage()
        return code_to_language[code]()
class AutoProphet(ICAutoMLForecaster, SeasonalityLayer):
    """AutoML layer for Prophet: grid-searches seasonalities and the seasonality mode."""
    config_class = AutoProphetConfig

    def supports_exog(self):
        # Prophet accepts exogenous regressors.
        # NOTE(review): sibling layers may expose this as a @property — confirm callers.
        return True

    def generate_theta(self, train_data: TimeSeries) -> Iterator:
        """Enumerate candidate (seasonalities, seasonality_mode) combinations."""
        seas = list(super().generate_theta(train_data))
        modes = ['additive', 'multiplicative']
        return iter(GridSearch(param_values=OrderedDict(seas=[seas], seasonality_mode=modes)))

    def set_theta(self, model, theta, train_data: TimeSeries = None):
        """Apply one candidate hyperparameter set to *model* in place."""
        seasonalities, seasonality_mode = theta['seas'], theta['seasonality_mode']
        (seasonalities, _, _) = SeasonalityLayer.evaluate_theta(self, thetas=iter(seasonalities), train_data=train_data)
        SeasonalityLayer.set_theta(self, model=model, theta=seasonalities, train_data=train_data)
        # The mode must be mirrored on both the config and the live model.
        model.base_model.config.seasonality_mode = seasonality_mode
        model.base_model.model.seasonality_mode = seasonality_mode

    def _model_name(self, theta) -> str:
        return f"Prophet({','.join((f'{k}={v}' for (k, v) in theta.items()))})"

    def get_ic(self, model, train_data: pd.DataFrame, train_result: Tuple[(pd.DataFrame, pd.DataFrame)]) -> float:
        """Compute the configured information criterion from in-sample residuals.

        Raises:
            ValueError: for an unsupported information criterion.
        """
        pred, stderr = train_result
        n = len(train_data)
        # Gaussian log-likelihood of the standardized residuals (a scalar).
        log_like = norm.logpdf(((pred.values - train_data.values) / stderr.values)).sum()
        # Count free parameters, excluding the trend parameters.
        n_params = sum((len(v.flatten()) for (k, v) in model.base_model.model.params.items() if (k != 'trend')))
        ic_id = self.config.information_criterion
        if ic_id is InformationCriterion.AIC:
            # log_like is already summed; the original's extra .sum() here was
            # redundant and inconsistent with the other branches.
            return (2 * n_params) - (2 * log_like)
        elif ic_id is InformationCriterion.BIC:
            return (n_params * np.log(n)) - (2 * log_like)
        elif ic_id is InformationCriterion.AICc:
            return ((2 * n_params) - (2 * log_like)) + (((2 * n_params) * (n_params + 1)) / max(1, ((n - n_params) - 1)))
        else:
            raise ValueError(f"{type(self.model).__name__} doesn't support information criterion {ic_id.name}")
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str] = None, basic_blocks: Tuple[Type[nn.Module]] = (), full: bool = False) -> Iterator[Tuple[(nn.Module, str, nn.Module)]]:
    """Yield (sub_module, scope, parent[, terminal]) tuples by walking *module*.

    Recursion stops at leaf modules, at instances of *basic_blocks*, or once
    *depth* reaches zero. With ``full=True`` non-terminal containers are also
    yielded, and every tuple carries a terminal flag as its fourth element.
    """
    if prefix is None:
        prefix = type(module).__name__
    for name, child in module.named_children():
        scope = f'{prefix}/{type(child).__name__}[{name}]'
        is_terminal = (not list(child.children())) or isinstance(child, tuple(basic_blocks)) or depth == 0
        if is_terminal:
            yield (child, scope, module, True) if full else (child, scope, module)
        else:
            if full:
                yield (child, scope, module, False)
            yield from traverse_model(child, depth - 1, scope, basic_blocks, full)
def _iterator_size(size, length=None, alphabet=None):
    """Iterate over tuples of disjoint-support frozensets whose sizes sum to *size*.

    Each yielded tuple has ``length`` components (any length if None); every
    component is a frozenset of strictly increasing integers. When *alphabet*
    is given, only tuples whose elements all come from *alphabet* are yielded.
    """
    if alphabet:
        min_p = min(alphabet)
        max_p = max(alphabet)
        # Compositions of `size` bounded by what the alphabet can supply.
        for alpha in IntegerListsLex(size, length=length, min_part=1, max_part=min(size, sum(alphabet))):
            # One strictly increasing list (min_slope=1) per composition part.
            for p in product(*[IntegerListsLex(a, min_slope=1, min_part=min_p, max_part=min(a, max_p)) for a in alpha]):
                # Keep only tuples entirely contained in the alphabet.
                if frozenset(_concatenate(p)).issubset(frozenset(alphabet)):
                    (yield tuple((frozenset(k) for k in p)))
    else:
        # Unconstrained alphabet: parts range over all positive integers.
        for alpha in IntegerListsLex(size, length=length, min_part=1, max_part=size):
            for p in product(*[IntegerListsLex(a, min_slope=1, min_part=1) for a in alpha]):
                (yield tuple((frozenset(k) for k in p)))
def get_path(g, src, dst):
    """Return (words, deps, deps_n_words) strings describing the shortest
    path between src and dst in graph g.

    Falls back to the reversed direction when no path exists; returns
    NO_PATH when neither direction is connected. Node 'label' attributes
    become the word sequence and edge 'label' attributes the dependency
    sequence.
    """
    try:
        path = nx.shortest_path(g, src, dst)
    except NetworkXNoPath:
        try:
            if verbose:
                print('Warning: trying inverse path.')
            path = nx.shortest_path(g, dst, src)
        except NetworkXNoPath:
            return NO_PATH
    # Label each consecutive (node, edge, node) hop along the path.
    labeled_path = [
        (g.nodes[a]['label'], g[a][b]['label'], g.nodes[b]['label'])
        for a, b in zip(path, path[1:])
    ]
    words = '-->'.join([s for (s, t, d) in labeled_path])
    deps = '-->'.join([t for (s, t, d) in labeled_path])
    deps_n_words = ':'.join([('%s--%s--%s' % (s, t, d)) for (s, t, d) in labeled_path])
    return (words, deps, deps_n_words)
class Timer():
    """Simple stopwatch measuring elapsed CPU time via time.process_time()."""

    def __init__(self):
        # Timing starts immediately on construction.
        self.start = time.process_time()

    def reset(self):
        """Restart the stopwatch from the current moment."""
        self.start = time.process_time()

    def elapsed(self):
        """Return CPU seconds since construction or the last reset()."""
        now = time.process_time()
        return now - self.start
def apply_mutation(base_seq, mut_pos, mut_res, op_type, tokenizer):
    """Apply one point mutation to a sequence at the token level.

    The sequence is round-tripped through the tokenizer and split on spaces;
    ``[1:-1]`` drops the leading/trailing special tokens. ``op_type`` selects
    substitution ('sub'), insertion ('ins'), or deletion ('del') at
    ``mut_pos``; 'sub' and 'ins' place ``mut_res`` there.

    Raises:
        ValueError: for any unrecognized ``op_type``.
    """
    tokens = tokenizer.decode(tokenizer.encode(base_seq)).split(' ')[1:-1]
    if op_type == 'sub':
        mutated = tokens[:mut_pos] + [mut_res] + tokens[mut_pos + 1:]
    elif op_type == 'ins':
        mutated = tokens[:mut_pos] + [mut_res] + tokens[mut_pos:]
    elif op_type == 'del':
        mutated = tokens[:mut_pos] + tokens[mut_pos + 1:]
    else:
        raise ValueError('unsupported operation')
    return ''.join(mutated)
def load_model(config, num_train_steps, label_list, pretrain=None):
    """Build the BertMRCNER_CLUSTER model and its BertAdam optimizer.

    Args:
        config: model/optimizer configuration (learning_rate, warmup, clip_grad).
        num_train_steps: total optimization steps (BertAdam's t_total).
        label_list: unused here; kept for interface compatibility with callers.
        pretrain: optional directory prefix holding 'bert_finetune_model.bin';
            when given, matching weights are loaded into the fresh model.

    Returns:
        (model, optimizer, device, n_gpu) tuple; model is wrapped in
        DataParallel when more than one GPU is visible.
    """
    device = torch.device('cuda')
    n_gpu = torch.cuda.device_count()
    # BUG FIX: the model must be constructed unconditionally — the original
    # only built it inside `if pretrain:`, raising NameError otherwise.
    model = BertMRCNER_CLUSTER(config)
    if pretrain:
        pretrained_dict = torch.load(pretrain + 'bert_finetune_model.bin')
        model_dict = model.state_dict()
        # Keep only weights whose names exist in the freshly built model.
        pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if (k in model_dict)}
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
    model.to(device)
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)
    param_optimizer = list(model.named_parameters())
    # Bias and LayerNorm parameters are conventionally exempt from weight decay.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01},
        {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0},
    ]
    optimizer = BertAdam(optimizer_grouped_parameters, lr=config.learning_rate, warmup=config.warmup, t_total=num_train_steps, max_grad_norm=config.clip_grad)
    return (model, optimizer, device, n_gpu)
def calc_total_sphere_msd(initial_location, initial_rot_matrix, location, rot_matrix):
    """Return the 6x6 outer product of the combined displacement vector.

    The displacement stacks the translational part (location difference) with
    a rotational part u_hat = 0.5 * sum_i (R0 e_i) x (R e_i), where e_i are
    the Cartesian basis vectors and R0/R are the initial/current rotation
    matrices.
    """
    dx = np.array(location) - np.array(initial_location)
    basis = np.eye(3)
    # Rotational displacement accumulated over the three body axes.
    u_hat = 0.5 * sum(
        np.cross(np.inner(initial_rot_matrix, basis[i]), np.inner(rot_matrix, basis[i]))
        for i in range(3)
    )
    displacement = np.concatenate([dx, u_hat])
    return np.outer(displacement, displacement)
class Model(object):
    """Wide residual network 10-class image classifier built as a
    TensorFlow 1.x static graph (placeholders + variable scopes)."""

    def __init__(self, mode, x_input):
        # mode: 'train' or 'eval'; controls batch-norm statistics in _batch_norm.
        self.mode = mode
        # x_input: image batch tensor, NHWC — assumed 3-channel (init_conv uses
        # in_filters=3); TODO confirm expected spatial size from the caller.
        self.x_input = x_input
        self._build_model()

    def add_internal_summaries(self):
        # Intentional no-op hook for subclasses to attach TF summaries.
        pass

    def _stride_arr(self, stride):
        """Map a scalar stride to the 4-D NHWC stride list conv2d expects."""
        return [1, stride, stride, 1]

    def _build_model(self):
        """Assemble the full graph: standardized input, initial conv, three
        residual stages, final BN/ReLU/global-pool, logits, accuracy and
        cross-entropy tensors (exposed as attributes)."""
        assert ((self.mode == 'train') or (self.mode == 'eval'))
        with tf.variable_scope('input'):
            # Integer class labels; shape=None accepts any batch size.
            self.y_input = tf.placeholder(tf.int64, shape=None)
            # Per-image standardization (zero mean, unit variance per sample).
            input_standardized = tf.map_fn((lambda img: tf.image.per_image_standardization(img)), self.x_input)
            x = self._conv('init_conv', input_standardized, 3, 3, 16, self._stride_arr(1))
        # One stride per stage; only a stage's first unit downsamples.
        strides = [1, 2, 2]
        activate_before_residual = [True, False, False]
        res_func = self._residual
        # Stage channel widths (widened-ResNet style).
        filters = [16, 160, 320, 640]
        with tf.variable_scope('unit_1_0'):
            x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]), activate_before_residual[0])
        for i in range(1, 5):
            with tf.variable_scope(('unit_1_%d' % i)):
                x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
        with tf.variable_scope('unit_2_0'):
            x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]), activate_before_residual[1])
        for i in range(1, 5):
            with tf.variable_scope(('unit_2_%d' % i)):
                x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
        with tf.variable_scope('unit_3_0'):
            x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]), activate_before_residual[2])
        for i in range(1, 5):
            with tf.variable_scope(('unit_3_%d' % i)):
                x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
        with tf.variable_scope('unit_last'):
            x = self._batch_norm('final_bn', x)
            x = self._relu(x, 0.1)
            x = self._global_avg_pool(x)
        with tf.variable_scope('logit'):
            # 10 output classes.
            self.pre_softmax = self._fully_connected(x, 10)
        self.predictions = tf.argmax(self.pre_softmax, 1)
        self.correct_prediction = tf.equal(self.predictions, self.y_input)
        self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
        with tf.variable_scope('costs'):
            self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.pre_softmax, labels=self.y_input)
            self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
            self.mean_xent = tf.reduce_mean(self.y_xent)
            self.weight_decay_loss = self._decay()

    def _batch_norm(self, name, x):
        """Batch norm; uses batch statistics in 'train' mode, moving averages otherwise."""
        with tf.name_scope(name):
            return tf.contrib.layers.batch_norm(inputs=x, decay=0.9, center=True, scale=True, activation_fn=None, updates_collections=None, is_training=(self.mode == 'train'))

    def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
        """Residual unit with two 3x3 convs; BN/ReLU placement depends on
        activate_before_residual (pre-activation shared with the shortcut)."""
        if activate_before_residual:
            with tf.variable_scope('shared_activation'):
                x = self._batch_norm('init_bn', x)
                x = self._relu(x, 0.1)
                orig_x = x
        else:
            with tf.variable_scope('residual_only_activation'):
                orig_x = x
                x = self._batch_norm('init_bn', x)
                x = self._relu(x, 0.1)
        with tf.variable_scope('sub1'):
            x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
        with tf.variable_scope('sub2'):
            x = self._batch_norm('bn2', x)
            x = self._relu(x, 0.1)
            x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
        with tf.variable_scope('sub_add'):
            if (in_filter != out_filter):
                # Match the shortcut's shape: average-pool for the spatial
                # downsampling, then zero-pad the channel dimension.
                orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
                orig_x = tf.pad(orig_x, [[0, 0], [0, 0], [0, 0], [((out_filter - in_filter) // 2), ((out_filter - in_filter) // 2)]])
            x += orig_x
        tf.logging.debug('image after unit %s', x.get_shape())
        return x

    def _decay(self):
        """Sum of L2 penalties over all trainable variables named '*DW*'."""
        costs = []
        for var in tf.trainable_variables():
            # NOTE(review): find(...) > 0 skips names where 'DW' starts at
            # index 0 — confirm all kernel names are scoped (scope + '/DW').
            if (var.op.name.find('DW') > 0):
                costs.append(tf.nn.l2_loss(var))
        return tf.add_n(costs)

    def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
        """3x3-style convolution with He (MSRA) weight initialization."""
        with tf.variable_scope(name):
            n = ((filter_size * filter_size) * out_filters)
            kernel = tf.get_variable('DW', [filter_size, filter_size, in_filters, out_filters], tf.float32, initializer=tf.random_normal_initializer(stddev=np.sqrt((2.0 / n))))
            return tf.nn.conv2d(x, kernel, strides, padding='SAME')

    def _relu(self, x, leakiness=0.0):
        """Leaky ReLU: x where x >= 0, leakiness * x otherwise."""
        return tf.where(tf.less(x, 0.0), (leakiness * x), x, name='leaky_relu')

    def _fully_connected(self, x, out_dim):
        """Flatten all non-batch dimensions and apply a dense layer."""
        num_non_batch_dimensions = len(x.shape)
        prod_non_batch_dimensions = 1
        for ii in range((num_non_batch_dimensions - 1)):
            prod_non_batch_dimensions *= int(x.shape[(ii + 1)])
        x = tf.reshape(x, [tf.shape(x)[0], (- 1)])
        w = tf.get_variable('DW', [prod_non_batch_dimensions, out_dim], initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
        b = tf.get_variable('biases', [out_dim], initializer=tf.constant_initializer())
        return tf.nn.xw_plus_b(x, w, b)

    def _global_avg_pool(self, x):
        """Average over spatial dims, reducing NHWC to (batch, channels)."""
        assert (x.get_shape().ndims == 4)
        return tf.reduce_mean(x, [1, 2])
class QDQBertModel(metaclass=DummyObject):
    """Import-time placeholder for QDQBertModel when torch is not installed.

    Any attempted instantiation raises via requires_backends with a message
    telling the user which backend is missing.
    """
    # Backends that must be present for the real implementation to load.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class FaceDataset(Dataset):
    """Face-image dataset backed by plain-text file lists.

    Training mode reads `args.train_list` lines of the form "<path> <class>".
    Evaluation mode concatenates `args.probe_list` and `args.distractor_list`
    (paths only, label fixed to 0). Each item is (image, class) after the
    optional transform.
    """

    def __init__(self, istraining=True, args=None, transform=None):
        self.transform = transform
        self.istraining = istraining
        self.args = args
        self.metas = []
        if istraining:
            lines = self._read_list(args.train_list)
            self.num = len(lines)
            for line in lines:
                path, label = line.rstrip().split()
                self.metas.append((args.train_root + '/' + path, int(label)))
        else:
            # Probe images first, then distractors; all with dummy label 0.
            for line in self._read_list(args.probe_list):
                self.metas.append((args.probe_root + '/' + line.rstrip(), 0))
            for line in self._read_list(args.distractor_list):
                self.metas.append((args.distractor_root + '/' + line.rstrip(), 0))
        self.num = len(self.metas)
        self.initialized = False

    def _read_list(self, list_path):
        """Read a list file, announce it, and return its raw lines."""
        with open(list_path) as f:
            lines = f.readlines()
        print('building dataset from %s' % list_path)
        return lines

    def __len__(self):
        return self.num

    def __getitem__(self, idx):
        path, label = self.metas[idx]
        img = pil_loader(path)
        if self.transform is not None:
            img = self.transform(img)
        return (img, label)
class MBInvertedConvLayer(BasicUnit):
    """Mobile inverted bottleneck convolution (MBConv).

    Optional 1x1 expansion (when expand_ratio > 1) -> depthwise KxK conv ->
    1x1 linear projection, each followed by BatchNorm; ReLU6 after the first
    two stages only.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, expand_ratio=6):
        super(MBInvertedConvLayer, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.expand_ratio = expand_ratio
        if self.expand_ratio > 1:
            feature_dim = round(in_channels * self.expand_ratio)
            self.inverted_bottleneck = nn.Sequential(OrderedDict([
                ('conv', nn.Conv2d(in_channels, feature_dim, 1, 1, 0, bias=False)),
                ('bn', nn.BatchNorm2d(feature_dim)),
                ('relu', nn.ReLU6(inplace=True)),
            ]))
        else:
            feature_dim = in_channels
            self.inverted_bottleneck = None
        pad = get_same_padding(self.kernel_size)
        # Depthwise convolution: groups == channels.
        self.depth_conv = nn.Sequential(OrderedDict([
            ('conv', nn.Conv2d(feature_dim, feature_dim, kernel_size, stride, pad, groups=feature_dim, bias=False)),
            ('bn', nn.BatchNorm2d(feature_dim)),
            ('relu', nn.ReLU6(inplace=True)),
        ]))
        # Linear (no activation) pointwise projection; built in one step
        # instead of the original's two-step OrderedDict reassignment.
        self.point_linear = nn.Sequential(OrderedDict([
            ('conv', nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False)),
            ('bn', nn.BatchNorm2d(out_channels)),
        ]))

    def forward(self, x):
        if self.inverted_bottleneck:
            x = self.inverted_bottleneck(x)
        x = self.depth_conv(x)
        x = self.point_linear(x)
        return x

    def unit_str(self):
        """Short human-readable description, e.g. '3x3_MBConv6'."""
        unit_str = ('%dx%d_MBConv%d' % (self.kernel_size, self.kernel_size, self.expand_ratio))
        return unit_str

    def config(self):
        """Serializable constructor arguments for build_from_config."""
        return {'name': MBInvertedConvLayer.__name__, 'in_channels': self.in_channels, 'out_channels': self.out_channels, 'kernel_size': self.kernel_size, 'stride': self.stride, 'expand_ratio': self.expand_ratio}

    @staticmethod
    def build_from_config(config):
        """Rebuild a layer from a config() dict.

        BUG FIX: declared @staticmethod — the original bare def would receive
        the instance as `config` when called on an object.
        """
        return MBInvertedConvLayer(**config)

    def get_flops(self, x):
        """Return (flops, output) for input x, summing the three conv stages."""
        if self.inverted_bottleneck:
            flop1 = count_conv_flop(self.inverted_bottleneck.conv, x)
            x = self.inverted_bottleneck(x)
        else:
            flop1 = 0
        flop2 = count_conv_flop(self.depth_conv.conv, x)
        x = self.depth_conv(x)
        flop3 = count_conv_flop(self.point_linear.conv, x)
        x = self.point_linear(x)
        return (flop1 + flop2 + flop3, x)

    @staticmethod
    def is_zero_layer():
        """This is a real layer, not a zero/identity placeholder.

        BUG FIX: declared @staticmethod — the original zero-argument def
        raised TypeError when invoked on an instance.
        """
        return False
def rot_y(beta):
    """Return the 3x3 rotation matrix about the y-axis by angle ``beta``.

    ``beta`` is expected to be a (scalar) tensor; the result inherits its
    dtype. Note the result is a fresh tensor, detached from any autograd
    graph.
    """
    c = cos(beta)
    s = sin(beta)
    rows = [[c, 0, s],
            [0, 1, 0],
            [(- s), 0, c]]
    return torch.tensor(rows, dtype=beta.dtype)
def _get_uplift_curve(y_treatment: np.ndarray, y_control: np.ndarray, n_treatment: np.ndarray, n_control: np.ndarray, mode: str):
    """Compute uplift-curve y-values from cumulative treatment/control sums.

    Args:
        y_treatment: cumulative positive outcomes in the treatment group.
        y_control: cumulative positive outcomes in the control group.
        n_treatment: cumulative treatment-group sizes (last entry = total).
        n_control: cumulative control-group sizes (last entry = total).
        mode: one of _available_uplift_modes ('qini', 'cum_gain', 'adj_qini').

    Returns:
        Array of curve values for the chosen mode.
    """
    assert (mode in _available_uplift_modes), "Mode isn't available"
    if mode == 'qini':
        curve_values = (y_treatment / n_treatment[-1]) - (y_control / n_control[-1])
    elif mode == 'cum_gain':
        # BUG FIX: np.nan_to_num's second positional argument is `copy`, not
        # the NaN replacement — passing 0.0 there silently requested an
        # in-place rewrite of the input array. Use the `nan` keyword.
        treatment_target_rate = np.nan_to_num(y_treatment / n_treatment, nan=0.0)
        control_target_rate = np.nan_to_num(y_control / n_control, nan=0.0)
        curve_values = treatment_target_rate - control_target_rate
        n_join = n_treatment + n_control
        curve_values = (curve_values * n_join) / n_join[-1]
    elif mode == 'adj_qini':
        normed_factor = np.nan_to_num(n_treatment / n_control, nan=0.0)
        normed_y_control = y_control * normed_factor
        curve_values = (y_treatment - normed_y_control) / n_treatment[-1]
    return curve_values
def qiskit_info_original(file_name, new_file_name):
    """Load a QASM circuit, print and record its depth and gate counts.

    Writes a '<new_file_name>.txt' report containing the depth and per-gate
    counts, and returns [depth, cx_count].
    """
    original_circ = QuantumCircuit.from_qasm_file(file_name)
    original_depth = original_circ.depth()
    original_gate_count = original_circ.count_ops()
    print('depth =', original_depth)
    print('gate count =', original_gate_count)
    # `with` closes the file; the original's explicit file.close() inside the
    # block was redundant. Also avoid shadowing the builtin name `type`.
    with open(new_file_name + '.txt', 'w') as report:
        report.write('before:\n')
        report.write('depth ' + str(original_depth) + '\n')
        for gate_name, count in original_gate_count.items():
            report.write(gate_name + ' ' + str(count) + '\n')
    # .get avoids a KeyError for circuits that contain no CNOT gates.
    original_cx_count = original_gate_count.get('cx', 0)
    return [original_depth, original_cx_count]
def create_AP_plot(axis, data_to_plot, accept_classes, max_depth):
    """Plot per-depth average-precision (AP) curves for each accepted class.

    Args:
        axis: matplotlib-like axis (set_title/set_ylim/set_ylabel).
        data_to_plot: dict with an 'AP_per_depth' mapping of
            label -> {depth: {'auc': value, ...}}.
        accept_classes: labels to plot.
        max_depth: forwarded to fill_standard_subplot for the x range.

    Raises:
        ValueError: when 'AP_per_depth' is missing from data_to_plot.
    """
    if 'AP_per_depth' not in data_to_plot:
        # Improvement over the original bare ValueError(): include a message.
        raise ValueError("data_to_plot is missing required key 'AP_per_depth'")
    axis.set_title('AP per depth')
    axis.set_ylim([0, 1.01])
    axis.set_ylabel('AP')
    for label in accept_classes:
        aps = data_to_plot['AP_per_depth'][label]
        depths = [float(x) for x in aps.keys()]
        auc_values = [float(v['auc']) for v in aps.values()]
        fill_standard_subplot(axis, depths, auc_values, label, [], max_depth)
def register_model_metadata_from_path(path: str) -> None:
    """Load a YAML model-metadata list from ``path`` and register each entry.

    The file is parsed into a ModelMetadataList via dacite, then every entry
    in its ``models`` field is passed to register_model_metadata.
    """
    with open(path, 'r') as f:
        parsed = yaml.safe_load(f)
    metadata_list = dacite.from_dict(ModelMetadataList, parsed)
    for entry in metadata_list.models:
        register_model_metadata(entry)
class GAEASearchTrial(PyTorchTrial):
    """Determined PyTorchTrial running GAEA architecture search (DARTS-style
    bilevel optimization) on the ECG / satellite / deepsea tasks."""

    def __init__(self, trial_context: PyTorchTrialContext) -> None:
        self.context = trial_context
        self.hparams = AttrDict(trial_context.get_hparams())
        # Tracks epoch boundaries so validation indices are reshuffled once per epoch.
        self.last_epoch = 0
        # Downloads the dataset and also populates self.train_data / self.val_data.
        self.download_directory = self.download_data_from_s3()
        # task -> (n_classes, in_channels)
        dataset_hypers = {'ECG': (4, 1), 'satellite': (24, 1), 'deepsea': (36, 4)}
        (n_classes, in_channels) = dataset_hypers[self.hparams.task]
        if (self.hparams.task == 'deepsea'):
            # deepsea is multi-label, so BCE-with-logits and no top-k accuracy.
            criterion = nn.BCEWithLogitsLoss().cuda()
            self.accuracy = False
        else:
            criterion = nn.CrossEntropyLoss().cuda()
            self.accuracy = True
        self.model = self.context.wrap_model(Network(self.hparams.init_channels, n_classes, self.hparams.layers, criterion, self.hparams.nodes, k=self.hparams.shuffle_factor, in_channels=in_channels))
        total_params = (sum((p.numel() for p in self.model.parameters() if p.requires_grad)) / 1000000.0)
        print('Parameter size in MB: ', total_params)
        # Two optimizers: SGD for weight-sharing parameters, exponentiated
        # gradient (EG, with simplex renormalization) for architecture params.
        self.ws_opt = self.context.wrap_optimizer(torch.optim.SGD(self.model.ws_parameters(), self.hparams.learning_rate, momentum=self.hparams.momentum, weight_decay=self.hparams.weight_decay))
        self.arch_opt = self.context.wrap_optimizer(EG(self.model.arch_parameters(), self.hparams.arch_learning_rate, (lambda p: (p / p.sum(dim=(- 1), keepdim=True)))))
        self.lr_scheduler = self.context.wrap_lr_scheduler(lr_scheduler=CosineAnnealingLR(self.ws_opt, self.hparams.scheduler_epochs, self.hparams.min_learning_rate), step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH)

    def download_data_from_s3(self):
        """Download the task's dataset from S3 into a rank-local tmp directory,
        load the train/val splits, and return the download directory."""
        s3_bucket = self.context.get_data_config()['bucket']
        download_directory = f'/tmp/data-rank{self.context.distributed.get_rank()}'
        # NOTE(review): this client appears unused here — presumably
        # download_from_s3 creates its own; confirm before removing.
        s3 = boto3.client('s3')
        os.makedirs(download_directory, exist_ok=True)
        download_from_s3(s3_bucket, self.hparams.task, download_directory)
        (self.train_data, self.val_data, _) = load_data(self.hparams.task, download_directory, True)
        return download_directory

    def build_training_data_loader(self) -> DataLoader:
        """Wrap the training data in a BilevelDataset (pairs train/val samples
        for the bilevel search) and return its DataLoader."""
        bilevel = BilevelDataset(self.train_data)
        self.train_data = bilevel
        print('Length of bilevel dataset: ', len(bilevel))
        return DataLoader(bilevel, batch_size=self.context.get_per_slot_batch_size(), shuffle=True, num_workers=2)

    def build_validation_data_loader(self) -> DataLoader:
        valset = self.val_data
        return DataLoader(valset, batch_size=self.context.get_per_slot_batch_size(), shuffle=False, num_workers=2)

    def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int) -> Dict[(str, torch.Tensor)]:
        """One bilevel step: update weights on the train split, then (after a
        10-epoch warmup) update architecture parameters on the val split."""
        if (epoch_idx != self.last_epoch):
            # New epoch: reshuffle the validation indices paired by BilevelDataset.
            self.train_data.shuffle_val_inds()
        self.last_epoch = epoch_idx
        (x_train, y_train, x_val, y_val) = batch
        if (self.hparams.task == 'deepsea'):
            # BCEWithLogitsLoss requires float targets.
            y_train = y_train.float()
            y_val = y_val.float()
        # Phase 1: freeze architecture params, train shared weights.
        for a in self.model.arch_parameters():
            a.requires_grad = False
        for w in self.model.ws_parameters():
            w.requires_grad = True
        loss = self.model._loss(x_train, y_train)
        self.context.backward(loss)
        self.context.step_optimizer(optimizer=self.ws_opt, clip_grads=(lambda params: torch.nn.utils.clip_grad_norm_(params, self.context.get_hparam('clip_gradients_l2_norm'))))
        arch_loss = 0.0
        if (epoch_idx > 10):
            # Phase 2: freeze weights, update architecture params on val data.
            for a in self.model.arch_parameters():
                a.requires_grad = True
            for w in self.model.ws_parameters():
                w.requires_grad = False
            arch_loss = self.model._loss(x_val, y_val)
            self.context.backward(arch_loss)
            self.context.step_optimizer(self.arch_opt)
        return {'loss': loss, 'arch_loss': arch_loss}

    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
        """Dispatch to the task-specific evaluation routine."""
        if (self.hparams.task == 'ECG'):
            return self.evaluate_full_dataset_ECG(data_loader)
        elif (self.hparams.task == 'satellite'):
            return self.evaluate_full_dataset_satellite(data_loader)
        elif (self.hparams.task == 'deepsea'):
            return self.evaluate_full_dataset_deepsea(data_loader)
        return None

    def evaluate_full_dataset_ECG(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
        """ECG evaluation: per-sample predictions are majority-voted per
        patient id, then scored with a 4-class macro F1."""
        loss_avg = AverageMeter()
        all_pred_prob = []
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                (input, target) = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.model._loss(input, target)
                loss_avg.update(loss, n)
                all_pred_prob.append(logits.cpu().data.numpy())
        all_pred_prob = np.concatenate(all_pred_prob)
        all_pred = np.argmax(all_pred_prob, axis=1)
        final_pred = []
        final_gt = []
        # Aggregate by patient: majority vote over that patient's samples.
        pid_test = self.val_data.pid
        for i_pid in np.unique(pid_test):
            tmp_pred = all_pred[(pid_test == i_pid)]
            tmp_gt = self.val_data.label[(pid_test == i_pid)]
            final_pred.append(Counter(tmp_pred).most_common(1)[0][0])
            final_gt.append(Counter(tmp_gt).most_common(1)[0][0])
        tmp_report = classification_report(final_gt, final_pred, output_dict=True)
        print(confusion_matrix(final_gt, final_pred))
        # Macro F1 over the four ECG classes.
        f1_score = ((((tmp_report['0']['f1-score'] + tmp_report['1']['f1-score']) + tmp_report['2']['f1-score']) + tmp_report['3']['f1-score']) / 4)
        results = {'loss': loss_avg.avg, 'score': f1_score}
        return results

    def evaluate_full_dataset_satellite(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
        """Satellite evaluation: average loss plus top-1/top-5 accuracy."""
        acc_top1 = AverageMeter()
        acc_top5 = AverageMeter()
        loss_avg = AverageMeter()
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                (input, target) = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.model._loss(input, target)
                (top1, top5) = accuracy(logits, target, topk=(1, 5))
                acc_top1.update(top1.item(), n)
                acc_top5.update(top5.item(), n)
                loss_avg.update(loss, n)
        results = {'loss': loss_avg.avg, 'top1_accuracy': acc_top1.avg, 'top5_accuracy': acc_top5.avg}
        return results

    def evaluate_full_dataset_deepsea(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
        """Deepsea (multi-label) evaluation: mean AP and mean AUC over labels."""
        loss_avg = AverageMeter()
        test_predictions = []
        test_gts = []
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                (input, target) = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.model._loss(input, target.float())
                loss_avg.update(loss, n)
                # Sigmoid converts logits to per-label probabilities.
                logits_sigmoid = torch.sigmoid(logits)
                test_predictions.append(logits_sigmoid.detach().cpu().numpy())
                test_gts.append(target.detach().cpu().numpy())
        test_predictions = np.concatenate(test_predictions).astype(np.float32)
        test_gts = np.concatenate(test_gts).astype(np.int32)
        stats = calculate_stats(test_predictions, test_gts)
        mAP = np.mean([stat['AP'] for stat in stats])
        mAUC = np.mean([stat['auc'] for stat in stats])
        results = {'test_mAUC': mAUC, 'test_mAP': mAP}
        return results

    def build_callbacks(self):
        """Register the genotype-logging callback."""
        return {'genotype': GenotypeCallback(self.context)}
# NOTE(review): the decorator was garbled to a bare "(scope='module')" in the
# extracted source (a syntax error); reconstructed as @pytest.fixture based on
# the remnant and the test-fixture usage pattern — confirm against the repo.
@pytest.fixture(scope='module')
def comments_tree() -> astroid.Module:
    """Parse the comments fixture module into an astroid AST (module-scoped)."""
    module = importlib.import_module('tests.fixtures.cluster.comments')
    return astroid.parse(inspect.getsource(module), path='comments.py')
class ContinuedFraction_infinite(ContinuedFraction_base):
    """A continued fraction with infinitely many partial quotients, backed by
    an infinite word ``w`` of integers (Sage)."""

    def __init__(self, w, value=None, check=True):
        """Initialize from the word ``w`` of partial quotients.

        When ``check`` is True, the first 10 quotients are validated (must be
        integers, and only the first may be zero), and a supplied ``value`` is
        verified numerically against the continued fraction in a 53-bit real
        interval field.
        """
        ContinuedFraction_base.__init__(self)
        self._w = w
        if check:
            # Only a finite prefix can be checked on an infinite word.
            for i in range(10):
                k = w[i]
                if (not isinstance(k, Integer)):
                    try:
                        k = Integer(w[i])
                    except (TypeError, ValueError):
                        raise ValueError('the sequence must consist of integers')
                    # Word entries need conversion: route quotient() through
                    # the Integer-converting variant for this instance.
                    self.quotient = self._Integer_quotient
                if ((not k) and i):
                    raise ValueError('only the first partial quotient can be null')
        if (check and (value is not None)):
            from sage.rings.real_mpfi import RealIntervalField
            # Interval arithmetic: the claimed value's interval must contain
            # the continued fraction's interval.
            R = RealIntervalField(53)
            x = R(value)
            y = R(self)
            if ((x.lower() > y.lower()) or (x.upper() < y.upper())):
                raise ValueError(('value evaluates to %s while the continued fraction evaluates to %s in %s.' % (x, y, R)))
        self._value = value

    def _repr_(self):
        """String form showing the first 20 quotients followed by '...'."""
        return (((('[' + str(self._w[0])) + '; ') + ', '.join(map(str, self._w[1:20]))) + '...]')

    def length(self):
        """An infinite continued fraction has infinitely many quotients."""
        return Infinity

    def quotient(self, n):
        """Return the n-th partial quotient (may be overridden per-instance
        by _Integer_quotient in __init__)."""
        return self._w[n]

    def quotients(self):
        """Return the underlying word of partial quotients."""
        return self._w

    def _Integer_quotient(self, n):
        """quotient() variant that coerces word entries to Sage Integers."""
        return Integer(self._w[n])

    def value(self):
        """Return the real value: the one given at construction if any,
        otherwise a lazy real (RLF) built from self."""
        if (self._value is not None):
            return self._value
        else:
            from sage.rings.real_lazy import RLF
            if (self._w[0] < 0):
                # RLF conversion assumes a nonnegative leading quotient;
                # negate twice via __neg__ to stay in that regime.
                return (- RLF((- self)))
            return RLF(self)

    def __neg__(self):
        """Negate using the classical identity on partial quotients:
        -[a0; a1, a2, ...] = [-a0-1; 1, a1-1, a2, ...] (with the degenerate
        a1 == 1 case merged)."""
        from sage.combinat.words.word import Word
        _w = self._w
        if (_w[1] == 1):
            _w = Word((((- _w[0]) - 1), (_w[2] + 1))).concatenate(Word(_w[3:]))
        else:
            _w = Word((((- _w[0]) - 1), ZZ_1, (_w[1] - 1))).concatenate(Word(_w[2:]))
        return self.__class__(_w)
def louvain_animation(adj_matrix, frames, dark=False, duration=15, filename=None, dpi=None, seed=2):
    """Build a Louvain clustering animation and display (or save) it.

    Constructs an Animation from the adjacency matrix and per-step frames,
    then delegates rendering to its show() method.
    """
    animation = Animation(adj_matrix, frames, seed, dark)
    return animation.show(duration, filename, dpi)
def save_quiver_data(n, up_to=True, types='ClassicalExceptional', verbose=True):
    """Precompute and save quiver mutation-class data (Sage).

    Args:
        n: maximal (or exact) rank to compute.
        up_to: when truthy, save data for every rank 1..n; when falsy, only
            for rank n exactly.
        types: which quiver types to save.
        verbose: forwarded to the per-rank saver.
    """
    from sage.combinat.cluster_algebra_quiver.mutation_type import load_data
    # BUG FIX: the original used `up_to is True` / `up_to is False` identity
    # checks, leaving `ranks` unbound (NameError) for truthy non-bool values
    # such as 1. Plain truthiness covers all cases.
    if up_to:
        ranks = range(1, n + 1)
    else:
        ranks = [n]
    for i in ranks:
        _save_data_dig6(i, types=types, verbose=verbose)
    # Invalidate the cached loader so fresh data is picked up.
    load_data.clear_cache()
def cached_tally_directory(directory, size=10000, cachedir=None, seed=1):
    """Return tally results for ``directory``, caching them as a .npy file.

    The cache filename encodes the directory, sample size, and (when not 1)
    the seed; slashes in the directory are flattened when a cachedir is used.
    A fresh tally is computed and saved on cache miss.
    """
    filename = ('%s_segtally_%d.npy' % (directory, size))
    if (seed != 1):
        filename = ('%d_%s' % (seed, filename))
    if (cachedir is not None):
        filename = os.path.join(cachedir, filename.replace('/', '_'))
    # Cache is trusted except for raw .npz inputs without ground truth.
    if (os.path.isfile(filename) and ((not directory.endswith('.npz')) or ('gt' in directory))):
        return numpy.load(filename)
    if cachedir is not None:
        # BUG FIX: the original called os.makedirs(cachedir, ...) even when
        # cachedir was None, raising TypeError on every uncached call.
        os.makedirs(cachedir, exist_ok=True)
    result = tally_directory(directory, size, seed=seed)
    numpy.save(filename, result)
    return result
def mlp_block(x: tf.Tensor, filters: int, name: str) -> tf.Tensor:
    """Dense -> BatchNorm -> ReLU block; sub-layer names derive from ``name``.

    BatchNorm uses momentum=0.0, i.e. moving statistics track only the most
    recent batch.
    """
    h = layers.Dense(filters, name=f'{name}_dense')(x)
    h = layers.BatchNormalization(momentum=0.0, name=f'{name}_batch_norm')(h)
    h = layers.Activation('relu', name=f'{name}_relu')(h)
    return h
class LukeConfig(PretrainedConfig):
    """Configuration for a LUKE model (transformer with word + entity
    embeddings). All constructor arguments are stored as same-named
    attributes; token ids and extra kwargs are forwarded to PretrainedConfig.
    """
    model_type = 'luke'

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        """Store LUKE hyperparameters; special token ids go to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        # Entity embeddings may use a smaller dimension than hidden_size.
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Toggle for LUKE's entity-aware self-attention mechanism.
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
def fc_prune(model, blob_in, blob_out, dim_in, dim_out, weight_init=None, bias_init=None, mask_init=None, threshold=1e-05, need_compress_rate=False, comp_lb=0.05, **kwargs):
    """Add a prunable fully-connected layer (FC_Prune op) to a Caffe2 model.

    Besides weights and bias, the op carries a pruning mask, a gradient
    accumulator (ag_dw), a mask sequence, a pruning threshold, and a
    compression lower bound. When ``need_compress_rate`` is True the op also
    outputs the achieved compression rate blob.

    Returns the output blob (or (output, compress_rate) pair).
    """
    # Default fillers when none are supplied.
    weight_init = (weight_init if weight_init else ('XavierFill', {}))
    bias_init = (bias_init if bias_init else ('ConstantFill', {}))
    mask_init = (mask_init if mask_init else ('ConstantFill', {}))
    blob_out = (blob_out or model.net.NextName())
    compress_rate = (blob_out + '_compress_rate')
    if model.init_params:
        # Fresh run: create every blob via the init net fillers. The mask
        # starts at 1.0 (nothing pruned); the threshold controls when weights
        # are zeroed, and compress_lb bounds how far pruning may go.
        compress_lb = model.param_init_net.ConstantFill([], (blob_out + '_lb'), shape=[1], value=comp_lb)
        weight = model.param_init_net.__getattr__(weight_init[0])([], (blob_out + '_w'), shape=[dim_out, dim_in], **weight_init[1])
        mask = model.param_init_net.ConstantFill([], (blob_out + '_m'), shape=[dim_out, dim_in], value=1.0)
        ag_dw = model.param_init_net.__getattr__(mask_init[0])([], (blob_out + '_ag_dw'), shape=[dim_out, dim_in], **mask_init[1])
        bias = model.param_init_net.__getattr__(bias_init[0])([], (blob_out + '_b'), shape=[dim_out], **bias_init[1])
        mask_seq = model.param_init_net.__getattr__(mask_init[0])([], (blob_out + '_mask_seq'), shape=[dim_out, dim_in], **mask_init[1])
        thres = model.param_init_net.ConstantFill([], (blob_out + '_thres'), shape=[1], value=threshold)
    else:
        # Reuse existing blobs by scoped reference (e.g. loading a saved model).
        compress_lb = core.ScopedBlobReference((blob_out + '_lb'), model.param_init_net)
        weight = core.ScopedBlobReference((blob_out + '_w'), model.param_init_net)
        bias = core.ScopedBlobReference((blob_out + '_b'), model.param_init_net)
        mask = core.ScopedBlobReference((blob_out + '_m'), model.param_init_net)
        ag_dw = core.ScopedBlobReference((blob_out + '_ag_dw'), model.param_init_net)
        mask_seq = core.ScopedBlobReference((blob_out + '_mask_seq'), model.param_init_net)
        thres = core.ScopedBlobReference((blob_out + '_thres'), model.param_init_net)
    # Only weight and bias are registered as trainable parameters.
    # NOTE(review): mask/ag_dw/mask_seq/thres are not added via AddParameter —
    # presumably FC_Prune manages them internally; confirm.
    model.AddParameter(weight)
    model.AddParameter(bias)
    if need_compress_rate:
        return model.net.FC_Prune([blob_in, weight, mask, bias, ag_dw, mask_seq, thres, compress_lb], [blob_out, compress_rate], **kwargs)
    else:
        return model.net.FC_Prune([blob_in, weight, mask, bias, ag_dw, mask_seq, thres, compress_lb], blob_out, **kwargs)
# NOTE(review): the decorator stack was garbled in the extracted source to
# bare tuples like "('--num_epochs', default=1000)" and a dangling
# "_experiment" (syntax errors). Reconstructed below as the conventional
# @click.command() / @click.option(...) / @wrap_experiment stack matching the
# remnants one-to-one — confirm against the original repository.
@click.command()
@click.option('--num_epochs', default=1000)
@click.option('--num_train_tasks', default=10)
@click.option('--num_test_tasks', default=5)
@click.option('--encoder_hidden_size', default=200)
@click.option('--net_size', default=300)
@click.option('--num_steps_per_epoch', default=4000)
@click.option('--num_initial_steps', default=4000)
@click.option('--num_steps_prior', default=750)
@click.option('--num_extra_rl_steps_posterior', default=750)
@click.option('--batch_size', default=256)
@click.option('--embedding_batch_size', default=64)
@click.option('--embedding_mini_batch_size', default=64)
@click.option('--max_path_length', default=150)
@wrap_experiment
def pearl_metaworld_ml10(ctxt=None, seed=1, num_epochs=1000, num_train_tasks=10, num_test_tasks=5, latent_size=7, encoder_hidden_size=200, net_size=300, meta_batch_size=16, num_steps_per_epoch=4000, num_initial_steps=4000, num_tasks_sample=15, num_steps_prior=750, num_extra_rl_steps_posterior=750, batch_size=256, embedding_batch_size=64, embedding_mini_batch_size=64, max_path_length=150, reward_scale=10.0, use_gpu=False):
    """Train PEARL on the Meta-World ML10 benchmark.

    Builds normalized ML10 train/test env pools, a context-conditioned policy
    with Q/V function approximators, and runs PEARL for num_epochs epochs.
    """
    set_seed(seed)
    encoder_hidden_sizes = (encoder_hidden_size, encoder_hidden_size, encoder_hidden_size)
    # Wrap every ML10 train/test task in a normalized GarageEnv.
    ML_train_envs = [GarageEnv(normalize(mwb.ML10.from_task(task_name))) for task_name in mwb.ML10.get_train_tasks().all_task_names]
    ML_test_envs = [GarageEnv(normalize(mwb.ML10.from_task(task_name))) for task_name in mwb.ML10.get_test_tasks().all_task_names]
    env_sampler = EnvPoolSampler(ML_train_envs)
    env_sampler.grow_pool(num_train_tasks)
    env = env_sampler.sample(num_train_tasks)
    test_env_sampler = EnvPoolSampler(ML_test_envs)
    test_env_sampler.grow_pool(num_test_tasks)
    runner = LocalRunner(ctxt)
    # Observations are augmented with the latent context vector.
    augmented_env = PEARL.augment_env_spec(env[0](), latent_size)
    qf = ContinuousMLPQFunction(env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
    vf_env = PEARL.get_env_spec(env[0](), latent_size, 'vf')
    vf = ContinuousMLPQFunction(env_spec=vf_env, hidden_sizes=[net_size, net_size, net_size])
    inner_policy = TanhGaussianMLPPolicy(env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
    pearl = PEARL(env=env, policy_class=ContextConditionedPolicy, encoder_class=MLPEncoder, inner_policy=inner_policy, qf=qf, vf=vf, num_train_tasks=num_train_tasks, num_test_tasks=num_test_tasks, latent_dim=latent_size, encoder_hidden_sizes=encoder_hidden_sizes, test_env_sampler=test_env_sampler, meta_batch_size=meta_batch_size, num_steps_per_epoch=num_steps_per_epoch, num_initial_steps=num_initial_steps, num_tasks_sample=num_tasks_sample, num_steps_prior=num_steps_prior, num_extra_rl_steps_posterior=num_extra_rl_steps_posterior, batch_size=batch_size, embedding_batch_size=embedding_batch_size, embedding_mini_batch_size=embedding_mini_batch_size, max_path_length=max_path_length, reward_scale=reward_scale)
    set_gpu_mode(use_gpu, gpu_id=0)
    if use_gpu:
        pearl.to()
    runner.setup(algo=pearl, env=env[0](), sampler_cls=LocalSampler, sampler_args=dict(max_path_length=max_path_length), n_workers=1, worker_class=PEARLWorker)
    runner.train(n_epochs=num_epochs, batch_size=batch_size)
def kaiming_normal_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, init_gain: float=0, fan_mode: str='fan_in', init_nonlinearity: str='leaky_relu', verbose: int=0, **kwargs):
    """Initialize ``module`` with Kaiming-normal weights.

    Binds ``torch.nn.init.kaiming_normal_`` with the given gain/fan-mode/
    nonlinearity and delegates the actual parameter walk to
    generic_param_init_fn_. Extra config keys in ``kwargs`` are accepted but
    ignored.
    """
    del kwargs
    if verbose > 1:
        warnings.warn(('Using nn.init.kaiming_normal_ init fn with parameters: ' + f'a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}'))
    init_fn = partial(torch.nn.init.kaiming_normal_, a=init_gain, mode=fan_mode, nonlinearity=init_nonlinearity)
    generic_param_init_fn_(module=module, init_fn_=init_fn, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
def get_profiling_event(postfix, profiler):
    """Return profiler events whose names end with ``postfix``.

    Supports both torch.profiler.profile objects (via .events()) and legacy
    autograd profilers (via .function_events).
    """
    if isinstance(profiler, torch.profiler.profile):
        events = profiler.events()
    else:
        events = profiler.function_events
    return [evt for evt in events if evt.name.endswith(postfix)]
# NOTE(review): the decorator was garbled to a bare "('data.dsprites', 'class')"
# tuple in the extracted source (a syntax error); reconstructed as the task
# registry registration used by this codebase — confirm the registry name.
@Registry.register('data.dsprites', 'class')
class DSpritesData(base.ImageTfdsData):
    """dSprites TFDS dataset wrapper predicting one latent attribute.

    Splits the single TFDS 'train' split into train/val/test (plus small
    800/200 subsets) and optionally coarsens the predicted attribute from its
    original class count down to ``num_classes`` by integer division.
    """

    def __init__(self, predicted_attribute, num_classes=None, data_dir=None):
        dataset_builder = tfds.builder('dsprites:2.*.*', data_dir=data_dir)
        dataset_builder.download_and_prepare()
        info = dataset_builder.info
        if (predicted_attribute not in dataset_builder.info.features):
            raise ValueError('{} is not a valid attribute to predict.'.format(predicted_attribute))
        num_original_classes = info.features[predicted_attribute].num_classes
        if (num_classes is None):
            num_classes = num_original_classes
        if ((not isinstance(num_classes, int)) or (num_classes <= 1) or (num_classes > num_original_classes)):
            raise ValueError('The number of classes should be None or in [2, ..., num_classes].')
        # Factor used to merge adjacent original classes into coarser bins.
        class_division_factor = (float(num_original_classes) / num_classes)
        num_total = dataset_builder.info.splits['train'].num_examples
        num_samples_train = ((TRAIN_SPLIT_PERCENT * num_total) // 100)
        num_samples_val = ((VAL_SPLIT_PERCENT * num_total) // 100)
        num_samples_splits = {'train': num_samples_train, 'val': num_samples_val, 'trainval': (num_samples_val + num_samples_train), 'test': ((num_total - num_samples_val) - num_samples_train), 'train800': 800, 'val200': 200, 'train800val200': 1000}
        # Slice the single TFDS 'train' split into the logical splits above.
        tfds_splits = {'train': 'train[:{}]'.format(num_samples_splits['train']), 'val': 'train[{}:{}]'.format(num_samples_splits['train'], num_samples_splits['trainval']), 'trainval': 'train[:{}]'.format(num_samples_splits['trainval']), 'test': 'train[{}:]'.format(num_samples_splits['trainval']), 'train800': 'train[:800]', 'val200': 'train[{}:{}]'.format(num_samples_splits['train'], (num_samples_splits['train'] + 200)), 'train800val200': 'train[:800]+train[{}:{}]'.format(num_samples_splits['train'], (num_samples_splits['train'] + 200))}

        def preprocess_fn(tensors):
            # Replicate the single channel to RGB and rescale to [0, 255];
            # coarsen the label via floor division by the class factor.
            images = (tf.tile(tensors['image'], [1, 1, 3]) * 255)
            label = tf.cast(tf.math.floordiv(tf.cast(tensors[predicted_attribute], tf.float32), class_division_factor), info.features[predicted_attribute].dtype)
            return dict(image=images, label=label)
        super(DSpritesData, self).__init__(dataset_builder=dataset_builder, tfds_splits=tfds_splits, num_samples_splits=num_samples_splits, num_preprocessing_threads=400, shuffle_buffer_size=10000, base_preprocess_fn=preprocess_fn, num_classes=num_classes)
class NewT5HFLoader(HFLoader):
    """HFLoader specialised for T5 conditional-generation checkpoints."""

    def __init__(self, hf_transformers_model_class=T5ForConditionalGeneration):
        super().__init__(hf_transformers_model_class=hf_transformers_model_class)

    def substitue_state_dict_keys_back_to_original(self, training_state_dict):
        """Map training-time state-dict keys back to the HF T5 naming scheme."""
        # Shallow copy so the caller's dict is left untouched.
        restored = dict(training_state_dict)
        if 'shared_embed_weight' in restored:
            # Fan the tied embedding back out to the three aliases T5 expects.
            tied = restored.pop('shared_embed_weight')
            restored['shared.weight'] = tied
            restored['encoder.embed_tokens.weight'] = tied
            restored['decoder.embed_tokens.weight'] = tied
        return restored
class ContTransformerChain(nn.Module):
    """Composable chain of continuous transforms, invertible as a whole.

    Each element of `tfms` is a factory called on the running output of the
    previous transform, so every transform is fitted to already-transformed data.
    """

    def __init__(self, x, tfms):
        super().__init__()
        self.tfms = []
        for factory in tfms:
            transform = factory(x)
            x = transform.forward(x)
            self.tfms.append(transform)

    def forward(self, x):
        """Apply every transform in order."""
        for transform in self.tfms:
            x = transform.forward(x)
        return x

    def invert(self, x):
        """Undo the chain by inverting each transform in reverse order."""
        for transform in reversed(self.tfms):
            x = transform.invert(x)
        return x.float()
class LRFinder(Callback):
    """Learning-rate range test: ramp the LR linearly over one epoch, track the
    smoothed loss, then restore the model and set the best LR found.

    The sweep runs only during epoch 1; it stops early when the loss becomes
    NaN or exceeds 4x the best smoothed loss seen so far.
    """

    def __init__(self, start_lr=1e-06, end_lr=10):
        super(LRFinder, self).__init__()
        self.start_lr, self.end_lr = start_lr, end_lr
        self.stop = False          # set once the loss diverges
        self.best_loss = 0.0       # 0.0 doubles as "not initialised yet"
        self.best_lr = None
        self.loss_history = []
        self.smooth_value = SmoothValue(0.8)
        self.opt = None
        self.find = None
        self.lr_iter = None        # iterator over the LR schedule, built per sweep

    def lr_gen(self):
        """Return a generator of batch_per_epoch linearly spaced learning rates."""
        scale = (self.end_lr - self.start_lr) / self.batch_per_epoch
        return (self.start_lr + scale * (step + 1) for step in range(self.batch_per_epoch))

    def num_it(self):
        return self.batch_per_epoch

    def on_epoch_begin(self):
        if self.epoch == 1:
            self.opt = self.trainer.optimizer
            self.opt.param_groups[0]['lr'] = self.start_lr
            # Snapshot the weights so the sweep does not affect real training.
            torch.save(self.model.state_dict(), 'tmp')
            self.find = True
            # BUG FIX: the original called next() on the bound method `self.lr_gen`
            # itself (a TypeError at the first batch end). Materialise the
            # generator once here and advance it batch by batch.
            self.lr_iter = self.lr_gen()

    def on_backward_begin(self, loss):
        if self.find:
            if torch.isnan(loss) or self.stop is True:
                self.stop = True
                return
            loss_val = loss.detach().mean().item()
            self.loss_history.append(loss_val)
            self.smooth_value.add_value(loss_val)
            if self.best_loss == 0.0 or self.smooth_value.smooth < self.best_loss:
                self.best_loss = self.smooth_value.smooth
                self.best_lr = self.opt.param_groups[0]['lr']

    def on_batch_end(self, *args):
        if self.find:
            lr = next(self.lr_iter, None)
            # Stop when the schedule is exhausted or the loss has diverged.
            if lr is None or self.stop is True or self.loss_history[-1] > 4 * self.best_loss:
                self.stop = True
                return
            self.opt.param_groups[0]['lr'] = lr

    def on_epoch_end(self):
        if self.epoch == 1:
            self.opt.param_groups[0]['lr'] = self.best_lr
            self.find = False
            # Restore the pre-sweep weights.
            states = torch.load('tmp')
            self.model.load_state_dict(states)
            os.remove('tmp')
            self.pbar.write('Model reset. \nFind best lr={}'.format(self.best_lr))
def data_load_and_process(dataset, classes=(0, 1), feature_reduction='resize256', binary=True):
    """Load MNIST / Fashion-MNIST, relabel for a binary task and reduce features.

    Args:
        dataset: 'mnist' or 'fashion_mnist'.
        classes: pair of digit classes to keep, or 'odd_even' / '>4' for a
            whole-dataset binary split. (Default changed from a list to a tuple
            to avoid the shared-mutable-default pitfall; behaviour is identical.)
        feature_reduction: 'resize256', a pca* variant or an autoencoder* variant.
        binary: if truthy labels are +1/-1, otherwise 1/0.

    Returns:
        (X_train, X_test, Y_train, Y_test)

    Raises:
        ValueError: for an unknown dataset name (previously a NameError).
    """
    if dataset == 'fashion_mnist':
        (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
    elif dataset == 'mnist':
        (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    else:
        raise ValueError("dataset must be 'mnist' or 'fashion_mnist', got {!r}".format(dataset))
    x_train, x_test = x_train[..., np.newaxis] / 255.0, x_test[..., np.newaxis] / 255.0

    def _relabel(labels, positive):
        # Map raw digit labels onto the binary target encoding.
        if binary:
            return [1 if positive(y) else -1 for y in labels]
        return [1 if positive(y) else 0 for y in labels]

    if classes == 'odd_even':
        odd = [1, 3, 5, 7, 9]
        X_train, X_test = x_train, x_test
        Y_train = _relabel(y_train, lambda y: y in odd)
        Y_test = _relabel(y_test, lambda y: y in odd)
    elif classes == '>4':
        greater = [5, 6, 7, 8, 9]
        X_train, X_test = x_train, x_test
        Y_train = _relabel(y_train, lambda y: y in greater)
        Y_test = _relabel(y_test, lambda y: y in greater)
    else:
        # Keep only the two requested classes; classes[0] is the positive class.
        train_filter = np.where((y_train == classes[0]) | (y_train == classes[1]))
        test_filter = np.where((y_test == classes[0]) | (y_test == classes[1]))
        X_train, X_test = x_train[train_filter], x_test[test_filter]
        Y_train = _relabel(y_train[train_filter], lambda y: y == classes[0])
        Y_test = _relabel(y_test[test_filter], lambda y: y == classes[0])

    # NOTE(review): pca32/pca30/pca16/pca12 and autoencoder32/30/16/12 below are
    # presumably module-level collections of accepted spellings defined elsewhere
    # in this file -- confirm.
    if feature_reduction == 'resize256':
        X_train = tf.image.resize(X_train[:], (256, 1)).numpy()
        X_test = tf.image.resize(X_test[:], (256, 1)).numpy()
        X_train, X_test = tf.squeeze(X_train).numpy(), tf.squeeze(X_test).numpy()
        return (X_train, X_test, Y_train, Y_test)
    elif (feature_reduction == 'pca8') or (feature_reduction in pca32) or (feature_reduction in pca30) or (feature_reduction in pca16) or (feature_reduction in pca12):
        # Flatten images to 784 features, then project with PCA.
        X_train = tf.image.resize(X_train[:], (784, 1)).numpy()
        X_test = tf.image.resize(X_test[:], (784, 1)).numpy()
        X_train, X_test = tf.squeeze(X_train), tf.squeeze(X_test)
        if feature_reduction == 'pca8':
            pca = PCA(8)
        elif feature_reduction in pca32:
            pca = PCA(32)
        elif feature_reduction in pca30:
            pca = PCA(30)
        elif feature_reduction in pca16:
            pca = PCA(16)
        elif feature_reduction in pca12:
            pca = PCA(12)
        X_train = pca.fit_transform(X_train)
        X_test = pca.transform(X_test)
        if (feature_reduction == 'pca8') or (feature_reduction == 'pca16-compact') or (feature_reduction in pca30) or (feature_reduction in pca12):
            # Rescale features into [0, pi] (angle-encoding range).
            X_train = (X_train - X_train.min()) * (np.pi / (X_train.max() - X_train.min()))
            X_test = (X_test - X_test.min()) * (np.pi / (X_test.max() - X_test.min()))
        return (X_train, X_test, Y_train, Y_test)
    elif (feature_reduction == 'autoencoder8') or (feature_reduction in autoencoder32) or (feature_reduction in autoencoder30) or (feature_reduction in autoencoder16) or (feature_reduction in autoencoder12):
        if feature_reduction == 'autoencoder8':
            latent_dim = 8
        elif feature_reduction in autoencoder32:
            latent_dim = 32
        elif feature_reduction in autoencoder30:
            latent_dim = 30
        elif feature_reduction in autoencoder16:
            latent_dim = 16
        elif feature_reduction in autoencoder12:
            latent_dim = 12

        class Autoencoder(Model):
            # Single-hidden-layer autoencoder; the encoder output is the reduced feature.
            def __init__(self, latent_dim):
                super(Autoencoder, self).__init__()
                self.latent_dim = latent_dim
                self.encoder = tf.keras.Sequential([layers.Flatten(), layers.Dense(latent_dim, activation='relu')])
                self.decoder = tf.keras.Sequential([layers.Dense(784, activation='sigmoid'), layers.Reshape((28, 28))])

            def call(self, x):
                encoded = self.encoder(x)
                decoded = self.decoder(encoded)
                return decoded

        autoencoder = Autoencoder(latent_dim)
        autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
        autoencoder.fit(X_train, X_train, epochs=10, shuffle=True, validation_data=(X_test, X_test))
        X_train, X_test = autoencoder.encoder(X_train).numpy(), autoencoder.encoder(X_test).numpy()
        if (feature_reduction == 'autoencoder8') or (feature_reduction == 'autoencoder16-compact') or (feature_reduction in autoencoder30) or (feature_reduction in autoencoder12):
            # Same [0, pi] rescaling as the PCA path.
            X_train = (X_train - X_train.min()) * (np.pi / (X_train.max() - X_train.min()))
            X_test = (X_test - X_test.min()) * (np.pi / (X_test.max() - X_test.min()))
        return (X_train, X_test, Y_train, Y_test)
def log_sinkhorn(x: _Array, steps: int, temperature: float, zero_diagonal: bool, noise_rng_key: Optional[_Array]) -> _Array:
    """Sinkhorn normalisation in log space over the last two (square) axes.

    Optionally perturbs the scores with Gumbel noise and masks the diagonal,
    then alternates row/column log-softmax for `steps` iterations.
    """
    assert x.ndim >= 2
    assert x.shape[-1] == x.shape[-2]
    if noise_rng_key is not None:
        # Gumbel(0, 1) samples via the double-log transform of uniforms.
        uniform = jax.random.uniform(noise_rng_key, x.shape)
        x = x + (-jnp.log(-jnp.log(uniform + 1e-12) + 1e-12))
    x /= temperature
    if zero_diagonal:
        # Drive the diagonal towards -inf so self-matches are excluded.
        x = x - 1000000.0 * jnp.eye(x.shape[-1])
    for _ in range(steps):
        x = jax.nn.log_softmax(x, axis=-1)
        x = jax.nn.log_softmax(x, axis=-2)
    return x
def load_tokenizer(mode: str, vocab_file: str=None, vocab_list: List[str]=None, slots_file: str=None) -> Tokenizer:
    """Build a tokenizer of the requested mode from a vocab file or in-memory list."""
    assert ((int((vocab_file is not None)) + int((vocab_list is not None))) <= 1), "For 'vocab_file' and 'vocab_list', at most one argument can be presented"
    with tempfile.NamedTemporaryFile('w') as tmp:
        if vocab_list is not None:
            # Spill the in-memory vocab to a temp file so every loader reads a path.
            tmp.writelines([f'{vocab}\n' for vocab in vocab_list])
            tmp.flush()
            vocab_file = tmp.name
        if slots_file is not None and not mode.endswith('slot'):
            mode = f'{mode}-slot'
        if mode.startswith('bert-'):
            # HF BERT tokenizers are resolved by model name, not by vocab file.
            return BertTokenizer.load_from_file(mode)
        # Lazy dispatch table: only the selected loader's name is resolved.
        loaders = {
            'character': lambda: CharacterTokenizer.load_from_file(vocab_file),
            'character-slot': lambda: CharacterSlotTokenizer.load_from_file(vocab_file, slots_file),
            'subword': lambda: SubwordTokenizer.load_from_file(vocab_file),
            'subword-slot': lambda: SubwordSlotTokenizer.load_from_file(vocab_file, slots_file),
            'word': lambda: WordTokenizer.load_from_file(vocab_file),
            'phoneme': lambda: PhonemeTokenizer.load_from_file(vocab_file),
        }
        if mode in loaders:
            return loaders[mode]()
        raise NotImplementedError('`{}` is not yet supported.'.format(mode))
def generate_webpage(prefix, regex, perrow=1, perpage=None, verbose=False):
    """Collect files under `prefix` matching '|'-separated glob patterns and
    render one or more gallery pages via generate_page."""
    matched = []
    for pattern in regex.split('|'):
        matched.extend(glob(os.path.join(prefix, pattern)))
    matched.sort()
    if verbose:
        print('find {} files'.format(len(matched)))
    num_pages = 1 if perpage is None else math.ceil(len(matched) / perpage)
    if num_pages == 1:
        generate_page(prefix, matched, 0, num_pages=1, perrow=perrow, perpage=perpage, verbose=verbose)
    else:
        # Chunk the sorted file list into perpage-sized pages.
        chunks = [matched[start:start + perpage] for start in range(0, len(matched), perpage)]
        for page_id, page_filenames in enumerate(chunks):
            generate_page(prefix, page_filenames, page_id, num_pages=num_pages, perrow=perrow, perpage=perpage, verbose=verbose)
@pytest.mark.parametrize('knn_methods', knn_methods)
def test_ola(knn_methods):
    """OLA with each knn backend should reproduce the reference accuracy.

    BUG FIX: the decorator line was a dangling `.parametrize(...)` (syntax
    error); restored the full `@pytest.mark.parametrize`.
    """
    pool_classifiers, X_dsel, y_dsel, X_test, y_test = setup_classifiers()
    ola = OLA(pool_classifiers, knn_classifier=knn_methods)
    ola.fit(X_dsel, y_dsel)
    # NOTE(review): an expected score of exactly 0. is implausible for a fitted
    # ensemble and looks like a truncated constant -- confirm the reference
    # value against the original test suite.
    assert np.isclose(ola.score(X_test, y_test), 0.)
class SPEDDataset(Dataset):
    """SPED visual-place-recognition dataset.

    The image list is the concatenation of reference (database) images followed
    by query images, so indices below `num_references` are database entries.
    """

    def __init__(self, input_transform=None):
        self.input_transform = input_transform
        # Precomputed image lists and ground-truth matches shipped with the benchmark.
        self.dbImages = np.load(GT_ROOT + 'SPED/SPED_dbImages.npy')
        self.qImages = np.load(GT_ROOT + 'SPED/SPED_qImages.npy')
        self.ground_truth = np.load(GT_ROOT + 'SPED/SPED_gt.npy', allow_pickle=True)
        self.images = np.concatenate((self.dbImages, self.qImages))
        self.num_references = len(self.dbImages)
        self.num_queries = len(self.qImages)

    def __getitem__(self, index):
        image = Image.open(DATASET_ROOT + self.images[index])
        if self.input_transform:
            image = self.input_transform(image)
        return (image, index)

    def __len__(self):
        return len(self.images)
class Pretrain(Dataset):
    """Prompt-formatted dataset wrapper for T5/T0-style training and evaluation.

    Combines a raw dataset split with promptsource-style templates and a
    tokenizer, and yields encoder/decoder tensors in one of three encodings:
    a single example (`convert_to_features`), a gold/distractor pair for
    unlikelihood training (`convert_to_features_binary`), or one entry per
    answer option for option ranking (`convert_to_features_multiple`).
    The `args.channel` / `args.channel_base` flags swap the input/target
    direction (noisy-channel style training).
    """

    def __init__(self, dataset, tokenizer, type_path, input_length, output_length, args):
        """Resolve the dataset name/config, then fetch prompts and the data split.

        `dataset` may be 'name' or 'name/config' (HuggingFace config syntax).
        """
        self.args = args
        self.tokenizer = tokenizer
        self.type_path = type_path
        self.category = None
        self.whole_dataset = dataset
        self.dataset_name = dataset
        if (len(dataset.split('/')) > 1):
            self.dataset_name = dataset.split('/')[0]
            self.dataset_config_name = dataset.split('/')[1]
        else:
            self.dataset_name = dataset
            self.dataset_config_name = None
        ids_to_answers = None
        (self.dataset, self.prompt, [self.task_prefix, self.input_prefix, self.output_prefix, self.choice_prefix, self.append_choices_to_input]) = dataset_prompt_setting(self.type_path, self.dataset_name, self.dataset_config_name, args)
        print(f'Length of dataset retrieving is.. {len(self.dataset)}')
        self.input_length = input_length
        self.output_length = output_length
        self.ids_to_answers = ids_to_answers

    def __len__(self):
        return len(self.dataset)

    def make_bad_example(self, options, prompt_name, index, answer, example_batch, length=2):
        """Return the template's option list; if the template has none, build a
        list of size `length` from the gold answer plus random distractors.

        NOTE(review): the while-loop reads `answer_dict`, which is only assigned
        when args.answer_dict_dir_path is truthy -- presumably that path is
        always configured when templates lack answer choices; confirm.
        """
        if (type(options) == list):
            return options
        else:
            new_option = [answer]
            if self.args.answer_dict_dir_path:
                with open(os.path.join(self.args.answer_dict_dir_path, (self.whole_dataset + '.json'))) as f:
                    answer_dict = json.load(f)
            while (len(new_option) < length):
                flag = True
                new_option_candidate = random.choice(answer_dict[prompt_name])
                # Reject candidates that duplicate an already chosen option.
                for candidates in new_option:
                    if exact_match_score(new_option_candidate, candidates):
                        flag = False
                if (flag == True):
                    new_option.append(new_option_candidate)
            return new_option

    def convert_to_feature_tokenizer(self, input_, target_, options):
        """Tokenize one (input, target) pair to fixed-length padded tensors."""
        if ('bigscience/T0' in self.args.model_name_or_path):
            # T0 checkpoints are encoded without the extra special tokens.
            source = self.tokenizer.batch_encode_plus([str(input_)], max_length=self.input_length, padding='max_length', truncation=True, return_tensors='pt', add_special_tokens=False)
        else:
            source = self.tokenizer.batch_encode_plus([str(input_)], max_length=self.input_length, padding='max_length', truncation=True, return_tensors='pt')
        targets = self.tokenizer.batch_encode_plus([str(target_)], max_length=self.output_length, padding='max_length', truncation=True, return_tensors='pt')
        data_label = self.whole_dataset
        return (source, targets, data_label, options)

    def convert_to_features_binary(self, example_batch, index):
        """Encode the gold option and one random wrong option (UL training).

        In channel mode the two *sources* are batched and the shared target is a
        single tensor; otherwise the two *targets* are batched and the shared
        source is a single tensor.
        """
        name = prompt_choice(self.whole_dataset, self.type_path, self.prompt, example_batch, self.args)
        prompt = self.prompt[name]
        source_ids_batch = []
        target_ids_batch = []
        src_mask_batch = []
        target_mask_batch = []
        answer = prompt.apply(example_batch)[1]
        old_options = prompt.get_answer_choices_list(example_batch)
        options = self.make_bad_example(old_options, name, index, answer, example_batch, 2)
        good_label = label_(name, options, answer)
        bad_label = random.choice([i for i in range(0, len(options)) if (i != good_label)])
        gold_label = options[good_label]
        black_label = options[bad_label]
        # --- first pass: encode the gold option ---
        if self.args.channel:
            (input, _, instruct) = prompt.my_apply_instruct_helper(example_batch)
            instruct = prompt.dict_to_string(instruct)
            if self.args.channel_base:
                input_ = gold_label
                target_ = f'instruction: {instruct} input: {input}'
            else:
                input_ = f'''input: {input}
output: {gold_label}'''
                target_ = instruct
        else:
            result = prompt.apply(example_batch)
            input_ = result[0]
            target_ = gold_label
        (source, targets, data_label, options) = self.convert_to_feature_tokenizer(input_, target_, options)
        source_ids = source['input_ids'].squeeze()
        target_ids = targets['input_ids'].squeeze()
        src_mask = source['attention_mask'].squeeze()
        target_mask = targets['attention_mask'].squeeze()
        if self.args.channel:
            source_ids_batch.append(source_ids)
            src_mask_batch.append(src_mask)
        else:
            target_ids_batch.append(target_ids)
            target_mask_batch.append(target_mask)
        # --- second pass: encode the distractor option ---
        if self.args.channel:
            (input, _, instruct) = prompt.my_apply_instruct_helper(example_batch)
            instruct = prompt.dict_to_string(instruct)
            if self.args.channel_base:
                input_ = black_label
                target_ = f'instruction: {instruct} input: {input}'
            else:
                input_ = f'''input: {input}
output: {black_label}'''
                target_ = instruct
        else:
            result = prompt.apply(example_batch)
            input_ = result[0]
            target_ = black_label
        (source, targets, data_label, options) = self.convert_to_feature_tokenizer(input_, target_, options)
        source_ids = source['input_ids'].squeeze()
        target_ids = targets['input_ids'].squeeze()
        src_mask = source['attention_mask'].squeeze()
        target_mask = targets['attention_mask'].squeeze()
        if self.args.channel:
            source_ids_batch.append(source_ids)
            src_mask_batch.append(src_mask)
            # The shared side stays a single (non-batched) tensor.
            target_ids_batch = target_ids
            target_mask_batch = target_mask
        else:
            target_ids_batch.append(target_ids)
            target_mask_batch.append(target_mask)
            source_ids_batch = source_ids
            src_mask_batch = src_mask
        return (source_ids_batch, src_mask_batch, target_ids_batch, target_mask_batch, data_label, options, good_label)

    def bigbench_input(self, input, instruct, options):
        """Assemble a BIG-bench style prompt string from the configured prefixes."""
        def choices_string(choices, options, append_choices_to_input):
            # Join the options with the choice prefix, or omit choices entirely.
            if append_choices_to_input:
                choices_string = (choices + choices.join(options))
            else:
                choices_string = ''
            return choices_string
        input_ = f'{self.task_prefix}{self.input_prefix}{input}{choices_string(self.choice_prefix, options, self.append_choices_to_input)}{self.output_prefix}'
        return input_

    def convert_to_features(self, example_batch, index):
        """Encode a single example according to its split (bigbench/eval/train)."""
        options = 0
        label = None
        if (self.type_path == 'bigbench'):
            (instruct, input, target_, options, label) = bigbench_load_example(example_batch, self.prompt)
            input_ = self.bigbench_input(input, instruct, options)
        elif (self.type_path in ['validation', 'test']):
            name = prompt_choice(self.whole_dataset, self.type_path, self.prompt, example_batch, self.args)
            prompt = self.prompt[name]
            result = prompt.apply(example_batch)
            input_ = result[0]
            target_ = result[1]
            if (self.args.mode == 'zerotune'):
                options = None
            # NOTE(review): this unconditionally overwrites the zerotune
            # `options = None` just above -- possibly a missing `else`
            # (cf. the train branch below); confirm intended behaviour.
            options = prompt.get_answer_choices_list(example_batch)
            label = label_(name, options, prompt.apply(example_batch)[1])
        elif (self.type_path == 'train'):
            name = prompt_choice(self.whole_dataset, self.type_path, self.prompt, example_batch, self.args)
            prompt = self.prompt[name]
            result = prompt.apply(example_batch)
            if self.args.channel_base:
                # Channel-base: model the input given the output.
                target_ = result[0]
                input_ = result[1]
            elif self.args.channel:
                (input, _, instruct) = prompt.my_apply_instruct_helper(example_batch)
                new_instruct = prompt.dict_to_string(instruct)
                input_ = f'''input: {input}
output: {result[1]}'''
                target_ = new_instruct
            else:
                input_ = result[0]
                target_ = result[1]
            if (self.args.mode == 'zerotune'):
                options = None
            else:
                options = prompt.get_answer_choices_list(example_batch)
            label = (- 1)
        if (self.args.label_generalization != ''):
            (options, target_, input_) = var_direct(self.args.label_generalization, options, target_, input_)
        (source, targets, data_label, options) = self.convert_to_feature_tokenizer(input_, target_, options)
        return (source, targets, data_label, options, label)

    def convert_to_features_multiple(self, example_batch, index):
        """Encode one (input, target) pair per answer option, for option ranking."""
        source_ids_batch = []
        target_ids_batch = []
        src_mask_batch = []
        target_mask_batch = []
        if (self.type_path == 'bigbench'):
            (bigbench_instruct, bigbench_input, _, options, label) = bigbench_load_example(example_batch, self.prompt)
            for target in options:
                if self.args.channel_base:
                    input_ = target
                    target_ = self.bigbench_input(bigbench_input, bigbench_instruct, options)
                    new_options = options
                else:
                    # Build a sentinel-token instruction dict mirroring the prefixes,
                    # so the instruction itself becomes the generation target.
                    instruct = {}
                    input = ''
                    idx = 0
                    if (self.task_prefix != ''):
                        instruct[f'<extra_id_{idx}>'] = self.task_prefix
                        input += f'<extra_id_{idx}>'
                        idx += 1
                    if (self.input_prefix != ''):
                        instruct[f'<extra_id_{idx}>'] = self.input_prefix
                        input += f'<extra_id_{idx}>'
                        idx += 1
                    input += bigbench_input
                    if (self.append_choices_to_input == True):
                        instruct[f'<extra_id_{idx}>'] = self.choice_prefix
                        # The same sentinel delimits every option.
                        for option in options:
                            input += f'<extra_id_{idx}>{option}'
                        idx += 1
                    if (self.output_prefix != ''):
                        instruct[f'<extra_id_{idx}>'] = self.output_prefix
                        input += f'<extra_id_{idx}>'
                        idx += 1
                    if (self.args.label_generalization != ''):
                        (new_options, new_target, new_instruct) = var(self.args.label_generalization, options, target, instruct)
                    else:
                        new_options = options
                        new_target = target
                        new_instruct = instruct
                    target_ = ''
                    for (key, value) in new_instruct.items():
                        target_ += f'{key} {value}'
                    input_ = f'''input: {input}
output: {new_target}'''
                    print('channel input:\n', input_)
                    print('target:\n', target_)
                (source, targets, data_label, new_options) = self.convert_to_feature_tokenizer(input_, target_, new_options)
                source_ids = source['input_ids'].squeeze()
                target_ids = targets['input_ids'].squeeze()
                src_mask = source['attention_mask'].squeeze()
                target_mask = targets['attention_mask'].squeeze()
                source_ids_batch.append(source_ids)
                target_ids_batch.append(target_ids)
                src_mask_batch.append(src_mask)
                target_mask_batch.append(target_mask)
        else:
            name = prompt_choice(self.whole_dataset, self.type_path, self.prompt, example_batch, self.args)
            prompt = self.prompt[name]
            options = prompt.get_answer_choices_list(example_batch)
            label = label_(name, options, prompt.apply(example_batch)[1])
            for target in prompt.get_answer_choices_list(example_batch):
                if self.args.channel_base:
                    result = prompt.apply(example_batch)
                    input_ = target
                    target_ = result[0]
                    new_options = options
                    if (self.args.label_generalization != ''):
                        (new_options, input_, target_) = var_direct(self.args.label_generalization, options, input_, target_)
                else:
                    (input, _, instruct) = prompt.my_apply_instruct_helper(example_batch)
                    if (self.args.label_generalization != ''):
                        (new_options, new_target, new_instruct) = var(self.args.label_generalization, options, target, instruct)
                    else:
                        new_options = options
                        new_target = target
                        new_instruct = instruct
                    new_instruct = prompt.dict_to_string(new_instruct)
                    input_ = f'''input: {input}
output: {new_target}'''
                    target_ = new_instruct
                    print('channel input is', input_)
                    print('target is', target_)
                (source, targets, data_label, new_options) = self.convert_to_feature_tokenizer(input_, target_, new_options)
                source_ids = source['input_ids'].squeeze()
                target_ids = targets['input_ids'].squeeze()
                src_mask = source['attention_mask'].squeeze()
                target_mask = targets['attention_mask'].squeeze()
                source_ids_batch.append(source_ids)
                target_ids_batch.append(target_ids)
                src_mask_batch.append(src_mask)
                target_mask_batch.append(target_mask)
        return (source_ids_batch, src_mask_batch, target_ids_batch, target_mask_batch, data_label, new_options, label)

    def __getitem__(self, index):
        """Dispatch to the right encoding path based on the channel/UL flags and split."""
        indexed_data = self.dataset[index]
        if (((not self.args.channel) and ((not self.args.ul_loss) or (self.args.ul_loss and (self.type_path == 'validation')))) or ((not self.args.ul_loss) and (self.type_path == 'train') and self.args.channel)):
            # Plain single-example encoding.
            (source, targets, data_label, options, label) = self.convert_to_features(indexed_data, index)
            source_ids = source['input_ids'].squeeze()
            target_ids = targets['input_ids'].squeeze()
            src_mask = source['attention_mask'].squeeze()
            target_mask = targets['attention_mask'].squeeze()
            if (options is not None):
                option_list = options
            else:
                option_list = (- 1)
            return {'source_ids': source_ids, 'source_mask': src_mask, 'target_ids': target_ids, 'target_mask': target_mask, 'data_label': data_label, 'option_list': option_list, 'label': label}
        elif (self.args.ul_loss and (self.type_path == 'train')):
            # Gold/distractor pair for unlikelihood training.
            (source_ids_batch, src_mask_batch, target_ids_batch, target_mask_batch, data_label, options, label) = self.convert_to_features_binary(indexed_data, index)
            if (options != []):
                option_list = options
            else:
                option_list = (- 1)
            if self.args.channel:
                return {'source_ids': torch.stack(source_ids_batch).reshape(1, (- 1)).squeeze(), 'source_mask': torch.stack(src_mask_batch).reshape(1, (- 1)).squeeze(), 'target_ids': target_ids_batch, 'target_mask': target_mask_batch, 'label': label}
            else:
                return {'source_ids': source_ids_batch, 'source_mask': src_mask_batch, 'target_ids': torch.stack(target_ids_batch).reshape(1, (- 1)).squeeze(), 'target_mask': torch.stack(target_mask_batch).reshape(1, (- 1)).squeeze(), 'label': label}
        else:
            # One entry per answer option, stacked along a new leading dim.
            (source_ids_batch, src_mask_batch, target_ids_batch, target_mask_batch, data_label, options, label) = self.convert_to_features_multiple(indexed_data, index)
            if (options != []):
                option_list = options
            else:
                option_list = (- 1)
            return {'source_ids': torch.stack(source_ids_batch), 'source_mask': torch.stack(src_mask_batch), 'target_ids': torch.stack(target_ids_batch), 'target_mask': torch.stack(target_mask_batch), 'data_label': data_label, 'option_list': option_list, 'label': label}
def test_getitem():
    """Indexing an empty awkward array: a tuple index raises, an empty jagged
    index selects nothing."""
    with pytest.raises(IndexError):
        empty[(0,)]
    sliced = ak.highlevel.Array([[]])[0:0]
    assert empty[sliced].to_list() == []
class QDQBertLMHeadModel(metaclass=DummyObject):
    """Import-time placeholder; instantiation raises unless the torch backend
    is available (checked via requires_backends)."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class SqueezeBertModule(metaclass=DummyObject):
    """Import-time placeholder; instantiation raises unless the torch backend
    is available (checked via requires_backends)."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def merge_dicts(dicts_in):
    """Merge a sequence of dicts sharing the same keys into one dict of arrays.

    Args:
        dicts_in: sequence of dicts; every dict must contain (at least) the
            keys of the first one.

    Returns:
        dict mapping each key of the first dict to an np.array of that key's
        values across all inputs, in order. Returns {} for empty input
        (previously an IndexError on dicts_in[0]).
    """
    if not dicts_in:
        return {}
    return {k: np.array([d[k] for d in dicts_in]) for k in dicts_in[0]}
def update_density(pos: ti.types.ndarray(ndim=1), den: ti.types.ndarray(ndim=1), pre: ti.types.ndarray(ndim=1)):
    """SPH density summation and equation-of-state pressure update.

    For each particle i: den[i] = sum_j mass * W(pos[i] - pos[j], h) over all
    particles (O(n^2) all-pairs loop; no neighbour search), then pre[i] is a
    Tait-style pressure clamped at zero to avoid negative (tensile) pressure.

    Relies on module-level simulation constants: particle_num, mass, h,
    pressure_scale, rest_density, gamma, and the smoothing kernel W.
    NOTE(review): presumably decorated with @ti.kernel at the definition site
    (it takes ti ndarray arguments) -- confirm.
    """
    for i in range(particle_num):
        den[i] = 0.0
        for j in range(particle_num):
            R = (pos[i] - pos[j])
            den[i] += (mass * W(R, h))
        pre[i] = (pressure_scale * max((pow((den[i] / rest_density), gamma) - 1), 0))
def main(args):
    """Run `args.num_searches` random hyper-parameter samples, launching the
    training script once per sample with values exported via the environment.

    Args:
        args: namespace with exp_name, search_space_file, num_searches, dry_run.
    """
    global_exp_name = args.exp_name
    # FIX: the original leaked the config file handle (json.load(open(...))).
    with open(args.search_space_file) as config_file:
        search_space_config = json.load(config_file)
    # SECURITY NOTE: eval() maps the config's 'type' string to a hyperopt
    # distribution class -- only safe when the search-space file is trusted.
    hyperparam_space = {k: eval(v['type'])(k, **v['options']) for (k, v) in search_space_config.items()}
    for i in range(args.num_searches):
        new_env = os.environ.copy()
        hyperparam_vals = hyperopt.pyll.stochastic.sample(hyperparam_space)
        # Sampled values are handed to the training script as env variables.
        for (k, v) in hyperparam_vals.items():
            new_env[k] = str(v)
        print(hyperparam_vals)
        exp_name = os.path.join(global_exp_name, ('search_' + str(i)))
        new_env['EXP_NAME'] = exp_name
        cmd = ['bash', 'Rationale_Analysis/commands/model_a_train_script.sh']
        print('Running ', cmd, ' with exp name ', exp_name)
        if not args.dry_run:
            subprocess.run(cmd, check=True, env=new_env)
def get_fixed_dict_and_times_single(exp_fn, checkpoints_eval_fn, checkpoint_every_x_epochs=1, epochs_in_last_checkpoint=None, time_units='hours', subkey=None):
    """Align a checkpoint-metric dict with cumulative per-epoch training times.

    When checkpoints were taken every `checkpoint_every_x_epochs` epochs, the
    checkpoint indices are re-mapped to epoch numbers (the last, possibly
    partial, checkpoint handled specially) and the time list is thinned to one
    entry per checkpoint.

    Returns:
        (checkpoints_dict, times_list) with matching epoch indexing.
    """
    times_list = extract_cumsum_train_times(load_experiment(exp_fn), time_units=time_units)
    checkpoints_dict = extract_values(parse_all_eval_results_dict(checkpoints_eval_fn), subkey=subkey)
    if (checkpoint_every_x_epochs > 1):
        # Re-index all but the last checkpoint from "checkpoint number" to "epoch number".
        gpipe_dict_ = {(k * checkpoint_every_x_epochs): v for (k, v) in list(checkpoints_dict.items())[:(- 1)]}
        if (epochs_in_last_checkpoint is None):
            # Infer how many epochs the final (partial) checkpoint covers.
            epochs_in_last_checkpoint = (len(times_list) % checkpoint_every_x_epochs)
            warnings.warn(f'plot_epochs_vs_accuracy may be inaccurate point for last epoch, infering it: epochs_in_last_checkpoint={epochs_in_last_checkpoint}')
        print(f'epochs_in_last_checkpoint={epochs_in_last_checkpoint}')
        (k, v) = list(checkpoints_dict.items())[(- 1)]
        if (epochs_in_last_checkpoint == 0):
            gpipe_dict_[((k * checkpoint_every_x_epochs) + epochs_in_last_checkpoint)] = v
        else:
            gpipe_dict_[(((k - 1) * checkpoint_every_x_epochs) + epochs_in_last_checkpoint)] = v
        # Keep one timestamp per checkpoint, plus the final partial one if any.
        times_gpipe_ = [times_list[i] for i in range(0, len(times_list), checkpoint_every_x_epochs)]
        if ((len(times_list) % checkpoint_every_x_epochs) > 0):
            times_gpipe_.append(times_list[(- 1)])
        times_list = times_gpipe_
        checkpoints_dict = gpipe_dict_
    return (checkpoints_dict, times_list)
class TestResize():
    """Tests for the Resize observation wrapper."""

    def setup_method(self):
        self.width, self.height = 16, 16
        self.env = DummyDiscrete2DEnv()
        self.env_r = Resize(DummyDiscrete2DEnv(), width=self.width, height=self.height)

    def teardown_method(self):
        self.env.close()
        self.env_r.close()

    def test_resize_invalid_environment_type(self):
        # Non-Box observation spaces must be rejected.
        with pytest.raises(ValueError):
            self.env.observation_space = gym.spaces.Discrete(64)
            Resize(self.env, width=self.width, height=self.height)

    def test_resize_invalid_environment_shape(self):
        # Box spaces must be 2-D images.
        with pytest.raises(ValueError):
            self.env.observation_space = gym.spaces.Box(low=0, high=255, shape=(4,), dtype=np.uint8)
            Resize(self.env, width=self.width, height=self.height)

    def test_resize_output_observation_space(self):
        assert self.env_r.observation_space.shape == (self.width, self.height)

    def test_resize_output_reset(self):
        assert self.env_r.reset().shape == (self.width, self.height)

    def test_resize_output_step(self):
        self.env_r.reset()
        obs_r, _, _, _ = self.env_r.step(1)
        assert obs_r.shape == (self.width, self.height)
def createResolutionCallbackFromEnv(lookup_base):
def lookupInModule(qualified_name, module):
if ('.' in qualified_name):
parts = qualified_name.split('.')
base = parts[0]
remaining_pieces = '.'.join(parts[1:])
module_value = getattr(module, base)
return lookupInModule(remaining_pieces, module_value)
else:
return getattr(module, qualified_name)
def parseNestedExpr(expr, module) -> Tuple[(Any, int)]:
i = 0
while ((i < len(expr)) and (expr[i] not in (',', '[', ']'))):
i += 1
base = lookupInModule(expr[:i].strip(), module)
assert (base is not None), f'Unresolvable type {expr[:i]}'
if ((i == len(expr)) or (expr[i] != '[')):
return (base, i)
assert (expr[i] == '[')
parts = []
while (expr[i] != ']'):
part_len = 0
i += 1
(part, part_len) = parseNestedExpr(expr[i:], module)
parts.append(part)
i += part_len
if (len(parts) > 1):
return (base[tuple(parts)], (i + 1))
else:
return (base[parts[0]], (i + 1))
def parseExpr(expr, module):
try:
(value, len_parsed) = parseNestedExpr(expr, module)
assert (len_parsed == len(expr)), 'whole expression was not parsed, falling back to c++ parser'
return value
except Exception as e:
return None
return (lambda expr: parseExpr(expr, lookup_base)) |
def test_hdbscan_all_points_membership_vectors():
    """With min_cluster_size this large, membership vectors collapse to zeros."""
    model = HDBSCAN(prediction_data=True, min_cluster_size=200).fit(X)
    membership = all_points_membership_vectors(model)
    assert_array_equal(membership, np.zeros(model.prediction_data_.raw_data.shape[0]))
class Group(Storage):
    """Abstract interface for mathematical group elements.

    Subclasses implement identity/compose/inverse; `between` is derived.
    """
    # Bound typevar so subclass operations return their own concrete type.
    GroupT = T.TypeVar('GroupT', bound='Group')

    def identity(cls: T.Type[GroupT]) -> GroupT:
        """Return the identity element of the group.

        NOTE(review): annotated like a classmethod but no @classmethod
        decorator is visible here -- presumably applied by subclasses or
        elsewhere; confirm.
        """
        raise NotImplementedError()

    def compose(self: GroupT, other: GroupT) -> GroupT:
        """Return the group product self * other."""
        raise NotImplementedError()

    def inverse(self: GroupT) -> GroupT:
        """Return the group inverse of self."""
        raise NotImplementedError()

    def between(self: GroupT, b: GroupT) -> GroupT:
        """Return the relative element self^-1 * b."""
        return self.inverse().compose(b)
class HTML():
    """Minimal HTML page builder (via dominate): stores images under
    <web_dir>/images and renders an index.html with image tables.

    Args:
        web_dir: output directory; created (with an images/ subdir) if missing.
        title: document title.
        refresh: if > 0, the page auto-reloads every `refresh` seconds.
    """

    def __init__(self, web_dir, title, refresh=0):
        self.title = title
        self.web_dir = web_dir
        self.img_dir = os.path.join(self.web_dir, 'images')
        os.makedirs(self.web_dir, exist_ok=True)
        os.makedirs(self.img_dir, exist_ok=True)
        self.doc = dominate.document(title=title)
        if refresh > 0:
            with self.doc.head:
                # BUG FIX: the http-equiv attribute was missing, so the meta tag
                # never actually triggered the periodic refresh.
                meta(http_equiv='refresh', content=str(refresh))

    def get_image_dir(self):
        """Return the directory where images should be saved."""
        return self.img_dir

    def add_header(self, str):
        """Append an <h3> header. (Parameter name kept for call compatibility,
        although it shadows the builtin str.)"""
        with self.doc:
            h3(str)

    def add_table(self, border=1):
        """Start a new fixed-layout table and attach it to the document."""
        self.t = table(border=border, style='table-layout: fixed;')
        self.doc.add(self.t)

    def add_images(self, ims, txts, links, width=512):
        """Add one table row of images with captions; image i links to links[i]."""
        self.add_table()
        with self.t:
            with tr():
                for (im, txt, link) in zip(ims, txts, links):
                    with td(style='word-wrap: break-word;', halign='center', valign='top'):
                        with p():
                            with a(href=os.path.join('images', link)):
                                img(style=('width:%dpx' % width), src=os.path.join('images', im))
                            br()
                            p(txt)

    def save(self):
        """Render the document to <web_dir>/index.html."""
        html_file = ('%s/index.html' % self.web_dir)
        # Context manager ensures the handle is closed even if render() raises.
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())
def cli_main():
    """Command-line entry point: build the interactive generation arg parser,
    parse args (including the model architecture), and run main()."""
    parser = options.get_generation_parser(interactive=True)
    args = options.parse_args_and_arch(parser)
    main(args)
class GomiDiff(nn.Module):
    """DiffWave-style diffusion vocoder with FiLM conditioning from the spectrogram.

    NOTE(review): shapes are assumed to be signal (batch, 1, time) and
    spectrogram (batch, mels, frames) -- confirm against the caller. Conv1d
    (capitalised, no nn. prefix) is a project-level wrapper distinct from
    nn.Conv1d.
    """

    def __init__(self, in_channels: int, residual_layers: int, residual_channels: int, dilation_cycle_length: int, num_diffusion_steps: int):
        super().__init__()
        self.dilation_cycle_length = dilation_cycle_length
        self.num_diffusion_steps = num_diffusion_steps
        # One FiLM projection per dilation cycle, producing scale+shift (2*C channels).
        self.film_layers = nn.ModuleList([nn.Conv1d(residual_channels, (2 * residual_channels), 1) for _ in range((residual_layers // dilation_cycle_length))])
        self.input_projection = Conv1d(1, residual_channels, 1)
        self.diffusion_embedding = DiffusionEmbedding(num_diffusion_steps)
        self.spectrogram_upsampler = SpectrogramUpsampler()
        # Dilations repeat cyclically: 1, 2, 4, ... within each cycle.
        self.residual_layers = nn.ModuleList([ResidualBlock(in_channels, residual_channels, (2 ** (i % dilation_cycle_length))) for i in range(residual_layers)])
        self.skip_projection = Conv1d(residual_channels, residual_channels, 1)
        self.output_projection = Conv1d(residual_channels, 1, 1)
        # Zero-init the final layer so the untrained model predicts zero noise.
        nn.init.zeros_(self.output_projection.weight)

    def forward(self, signal, timestep, spectrogram):
        x = self.input_projection(signal)
        x = F.relu(x)
        timestep = self.diffusion_embedding(timestep)
        spectrogram = self.spectrogram_upsampler(spectrogram)
        skip = None
        for (i, layer) in enumerate(self.residual_layers):
            if ((i % self.dilation_cycle_length) == 0):
                # Refresh the FiLM scale/shift at the start of each dilation cycle.
                film = self.film_layers[(i // self.dilation_cycle_length)](spectrogram)
                (scale, shift) = torch.chunk(film, 2, dim=1)
            # Apply the (cycle's) FiLM modulation; 0.01 keeps it near-identity.
            # NOTE(review): flattened source is ambiguous about whether this line
            # sits inside the `if` above (modulation only at cycle starts) or at
            # loop level (every layer, as written here) -- confirm.
            x = (((1 + (0.01 * scale)) * x) + shift)
            (x, skip_connection) = layer(x, timestep, spectrogram)
            skip = (skip_connection if (skip is None) else (skip_connection + skip))
        # Average skip connections (scaled by 1/sqrt(N)) before the output head.
        x = (skip / sqrt(len(self.residual_layers)))
        x = self.skip_projection(x)
        x = F.relu(x)
        x = self.output_projection(x)
        return x
def main():
    """Validate one model, or every pretrained model matching a pattern, and
    print a per-model summary."""
    args = parser.parse_args()
    print(('JAX host: %d / %d' % (jax.host_id(), jax.host_count())))
    print(('JAX devices:\n%s' % '\n'.join((str(d) for d in jax.devices()))), flush=True)
    if get_model_cfg(args.model) is not None:
        # Exact model name: validate just that one.
        validate(args)
    else:
        # Otherwise treat args.model as a glob pattern over pretrained models.
        candidates = list_models(pretrained=True)
        if args.model != 'all':
            candidates = fnmatch.filter(candidates, args.model)
        if not candidates:
            print(f'ERROR: No models found to validate with pattern {args.model}.')
            exit(1)
        print('Validating: ', ', '.join(candidates))
        results = []
        for name in candidates:
            args.model = name
            res = validate(args)
            res.update(dict(model=name))
            results.append(res)
        print('Results:')
        for r in results:
            print(f"Model: {r['model']}, Top1: {r['top1']}, Top5: {r['top5']}")
class DICE(nn.Module):
    """DICE unit: DImension-wise Convolutions for Efficient networks.

    Runs depth-wise convolutions independently along the channel, height and
    width axes, fuses the three response volumes, and projects to
    `channel_out` channels, gated by a squeeze-style attention branch.
    """

    def __init__(self, channel_in, channel_out, height, width, kernel_size=3, dilation=(1, 1, 1), shuffle=True):
        """
        :param channel_in: input channel count
        :param channel_out: output channel count
        :param height: nominal input height used by the height-wise branch
        :param width: nominal input width used by the width-wise branch
        :param kernel_size: depth-wise kernel size shared by all branches
        :param dilation: (channel, height, width) dilations; the default is a
            tuple now — the original used a mutable list default
        :param shuffle: whether to shuffle the concatenated volume before fusing
        """
        super().__init__()
        # Normalize to a list so self.dilation keeps its historical list form
        # (visible in __repr__) while the default stays immutable.
        dilation = list(dilation)
        assert len(dilation) == 3
        base_pad = (kernel_size - 1) // 2
        self.conv_channel = nn.Conv2d(channel_in, channel_in, kernel_size=kernel_size, stride=1, groups=channel_in, padding=base_pad * dilation[0], bias=False, dilation=dilation[0])
        # The height/width branches treat the spatial axis as the channel axis;
        # inputs are transposed accordingly in forward().
        self.conv_width = nn.Conv2d(width, width, kernel_size=kernel_size, stride=1, groups=width, padding=base_pad * dilation[1], bias=False, dilation=dilation[1])
        self.conv_height = nn.Conv2d(height, height, kernel_size=kernel_size, stride=1, groups=height, padding=base_pad * dilation[2], bias=False, dilation=dilation[2])
        self.br_act = BR(3 * channel_in)
        self.weight_avg_layer = CBR(3 * channel_in, channel_in, kSize=1, stride=1, groups=channel_in)
        # Use the largest group count that divides both channel counts.
        groups_proj = math.gcd(channel_in, channel_out)
        self.proj_layer = CBR(channel_in, channel_out, kSize=3, stride=1, groups=groups_proj)
        # Squeeze-and-excite style gate producing per-channel weights in (0, 1).
        self.linear_comb_layer = nn.Sequential(
            nn.AdaptiveAvgPool2d(output_size=1),
            nn.Conv2d(channel_in, channel_in // 4, kernel_size=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel_in // 4, channel_out, kernel_size=1, bias=False),
            nn.Sigmoid(),
        )
        self.vol_shuffle = Shuffle(3)
        self.width = width
        self.height = height
        self.channel_in = channel_in
        self.channel_out = channel_out
        self.shuffle = shuffle
        self.ksize = kernel_size
        self.dilation = dilation

    @staticmethod
    def _match_dim(tensor, dim, target):
        """Resize `tensor` along spatial axis `dim` (2=height, 3=width) to
        `target`: bilinear upsample when growing, adaptive average-pool when
        shrinking, no-op when already the right size."""
        if tensor.size(dim) == target:
            return tensor
        size = (target, tensor.size(3)) if dim == 2 else (tensor.size(2), target)
        if tensor.size(dim) < target:
            return F.interpolate(tensor, mode='bilinear', size=size, align_corners=True)
        return F.adaptive_avg_pool2d(tensor, output_size=size)

    def forward(self, x):
        """Fuse channel-, height- and width-wise depth-wise convolutions of x."""
        bsz, channels, height, width = x.size()
        out_ch_wise = self.conv_channel(x)

        # Height-wise branch: normalize height to the configured size, swap
        # height into the channel position, convolve, swap back, and restore
        # the original input height.
        x_h_wise = self._match_dim(x.clone(), 2, self.height)
        x_h_wise = x_h_wise.transpose(1, 2).contiguous()
        out_h_wise = self.conv_height(x_h_wise).transpose(1, 2).contiguous()
        out_h_wise = self._match_dim(out_h_wise, 2, height)

        # Width-wise branch, symmetric to the height-wise branch.
        x_w_wise = self._match_dim(x.clone(), 3, self.width)
        x_w_wise = x_w_wise.transpose(1, 3).contiguous()
        out_w_wise = self.conv_width(x_w_wise).transpose(1, 3).contiguous()
        out_w_wise = self._match_dim(out_w_wise, 3, width)

        outputs = torch.cat((out_ch_wise, out_h_wise, out_w_wise), 1)
        outputs = self.br_act(outputs)
        if self.shuffle:
            outputs = self.vol_shuffle(outputs)
        outputs = self.weight_avg_layer(outputs)
        linear_wts = self.linear_comb_layer(outputs)
        proj_out = self.proj_layer(outputs)
        # Gate the projection by the learned per-channel weights.
        return proj_out * linear_wts

    def __repr__(self):
        s = '{name}(in_channels={channel_in}, out_channels={channel_out}, kernel_size={ksize}, vol_shuffle={shuffle}, width={width}, height={height}, dilation={dilation})'
        return s.format(name=self.__class__.__name__, **self.__dict__)
def test_categoricals_are_not_preprocessed():
    """CTGAN must leave boolean/categorical columns without transformers,
    both after auto-assignment and after an actual fit."""
    data = pd.DataFrame(data={
        'age': [56, 61, 36, 52, 42],
        'therapy': [True, False, True, False, True],
        'alcohol': ['medium', 'medium', 'low', 'high', 'low'],
    })
    metadata = SingleTableMetadata.load_from_dict({
        'columns': {
            'age': {'sdtype': 'numerical'},
            'therapy': {'sdtype': 'boolean'},
            'alcohol': {'sdtype': 'categorical'},
        }
    })

    # Case 1: transformers assigned without fitting.
    synth1 = CTGANSynthesizer(metadata)
    synth1.auto_assign_transformers(data)
    transformers1 = synth1.get_transformers()
    assert isinstance(transformers1['age'], FloatFormatter)
    assert transformers1['therapy'] == transformers1['alcohol'] is None

    # Case 2: transformers after a (1-epoch) fit.
    synth2 = CTGANSynthesizer(metadata, epochs=1)
    synth2.fit(data)
    transformers2 = synth2.get_transformers()
    assert isinstance(transformers2['age'], FloatFormatter)
    assert transformers2['therapy'] == transformers2['alcohol'] is None
class _FSMTapeCacheDetectAll_(_FSMTapeCache_):
    """Tape cache variant whose word comparison reads the tape forward
    instead of comparing against cached symbols."""

    def compare_to_tape(self, track_number, word):
        """Return True iff the part of `word` beyond the cached prefix can be
        read from track `track_number`."""
        word_iter = iter(word)
        # Skip as many letters of `word` as are already cached on this track.
        for _ in self.cache[track_number]:
            next(word_iter)
        # Each remaining letter requires one successful read from the tape;
        # all() short-circuits on the first failed read.
        return all(self.read(track_number)[0] for _ in word_iter)
def save_args(filename, args):
    """Serialize an argparse namespace to JSON, stringifying Path values
    (which are not JSON-serializable as-is)."""
    serializable = {
        key: str(value) if isinstance(value, pathlib.Path) else value
        for key, value in vars(args).items()
    }
    save_json(filename, serializable)
class DogsCatsSD(DataInterface):
    """Shard-descriptor-backed data interface for the Dogs-vs-Cats dataset.

    Splits the shard's 'train' dataset into train/validation index ranges
    according to `validation_fraction`.
    """

    def __init__(self, validation_fraction=1 / 5, **kwargs):
        """
        :param validation_fraction: fraction of the shard held out for validation
        """
        super().__init__(**kwargs)
        self.validation_fraction = validation_fraction

    # BUG FIX: the getter was declared without `@property` and the setter line
    # was a bare `_descriptor.setter` expression (a NameError at class-creation
    # time); restore the intended property/setter pair.
    @property
    def shard_descriptor(self):
        """The shard descriptor currently attached to this interface."""
        return self._shard_descriptor

    @shard_descriptor.setter
    def shard_descriptor(self, shard_descriptor):
        """Attach a shard descriptor and (re)build the train/validation split."""
        self._shard_descriptor = shard_descriptor
        self._shard_dataset = DogsCatsShardDataset(shard_descriptor.get_dataset('train'))
        # Always hold out at least one sample for validation.
        validation_size = max(1, int(len(self._shard_dataset) * self.validation_fraction))
        self.train_indexes = np.arange(len(self._shard_dataset) - validation_size)
        self.val_indexes = np.arange(len(self._shard_dataset) - validation_size, len(self._shard_dataset))

    def get_train_loader(self, **kwargs):
        """DataLoader over the training indices (random subset sampling)."""
        train_sampler = SubsetRandomSampler(self.train_indexes)
        return DataLoader(self._shard_dataset, num_workers=8, batch_size=self.kwargs['train_bs'], sampler=train_sampler)

    def get_valid_loader(self, **kwargs):
        """DataLoader over the validation indices (random subset sampling)."""
        val_sampler = SubsetRandomSampler(self.val_indexes)
        return DataLoader(self._shard_dataset, num_workers=8, batch_size=self.kwargs['valid_bs'], sampler=val_sampler)

    def get_train_data_size(self):
        """Number of training samples in this shard."""
        return len(self.train_indexes)

    def get_valid_data_size(self):
        """Number of validation samples in this shard."""
        return len(self.val_indexes)
def print_network(net):
    """Print the total number of parameters of a torch module."""
    total = sum(p.numel() for p in net.parameters())
    print('Total number of parameters: %d' % total)
class UtteranceBuilder(BaseUtteranceBuilder):
    """Renders KB scenarios and predicted selection outcomes as display lines."""

    def scene_to_sent(self, variables, vocab):
        """Format a KB scenario tensor as a list of human-readable lines."""
        words = [vocab.to_word(idx) for idx in variables.data.cpu().numpy()]
        lines = ['KB SCENARIO:']
        lines.append(' Book count: {}, value: {}'.format(words[0], words[1]))
        lines.append(' Hat count: {}, value: {}'.format(words[2], words[3]))
        lines.append(' Ball count: {}, value: {}'.format(words[4], words[5]))
        return lines

    def selection_to_sent(self, variables, vocab):
        """Format a predicted outcome tensor as a list of human-readable lines."""
        words = [vocab.to_word(idx) for idx in variables.data.cpu().numpy()]
        return [
            'OUTCOME PRED:',
            ' My book: {}, hat: {}, ball {}'.format(words[0], words[1], words[2]),
            ' Their book: {}, hat: {}, ball: {}'.format(words[3], words[4], words[5]),
        ]

    def _entity_to_str(self, entity_token, kb):
        """Render an entity token by its canonical value."""
        return str(entity_token.canonical.value)
def _dump_loader_info(loader):
    """Yield human-readable description lines for a template loader's public
    configuration (class name plus simple public attributes)."""
    cls = type(loader)
    yield 'class: %s.%s' % (cls.__module__, cls.__name__)
    for key, value in sorted(loader.__dict__.items()):
        if key.startswith('_'):
            # Private attributes are not part of the description.
            continue
        if isinstance(value, (tuple, list)):
            # Only dump sequences made entirely of strings.
            if all(isinstance(elem, (str, text_type)) for elem in value):
                yield '%s:' % key
                for elem in value:
                    yield ' - %s' % elem
            continue
        if isinstance(value, (str, text_type, int, float, bool)):
            yield '%s: %r' % (key, value)
class PairedData(object):
    """Iterator producing paired batches from two data loaders.

    Each loader is restarted when it runs out; iteration stops once both
    loaders have wrapped around, or `max_dataset_size` pairs were produced.
    """

    def __init__(self, data_loader_A, data_loader_B, max_dataset_size, flip):
        self.data_loader_A = data_loader_A
        self.data_loader_B = data_loader_B
        self.stop_A = False
        self.stop_B = False
        self.max_dataset_size = max_dataset_size
        self.flip = flip

    def __iter__(self):
        self.stop_A = False
        self.stop_B = False
        self.data_loader_A_iter = iter(self.data_loader_A)
        self.data_loader_B_iter = iter(self.data_loader_B)
        self.iter = 0
        return self

    def __next__(self):
        try:
            A, A_paths = next(self.data_loader_A_iter)
        except StopIteration:
            # Loader A exhausted: remember that and restart from the top.
            self.stop_A = True
            self.data_loader_A_iter = iter(self.data_loader_A)
            A, A_paths = next(self.data_loader_A_iter)
        try:
            B, B_paths = next(self.data_loader_B_iter)
        except StopIteration:
            self.stop_B = True
            self.data_loader_B_iter = iter(self.data_loader_B)
            B, B_paths = next(self.data_loader_B_iter)

        if (self.stop_A and self.stop_B) or self.iter > self.max_dataset_size:
            self.stop_A = False
            self.stop_B = False
            raise StopIteration()

        self.iter += 1
        if self.flip and random.random() < 0.5:
            # Flip both images horizontally by reversing the width axis.
            reversed_cols = torch.LongTensor([i for i in range(A.size(3) - 1, -1, -1)])
            A = A.index_select(3, reversed_cols)
            B = B.index_select(3, reversed_cols)
        return {'A': A, 'A_paths': A_paths, 'B': B, 'B_paths': B_paths}
class MahalanobisDistance(NumpyArrayMetric):
    """Mahalanobis distance between the centroids of the reference and
    prediction masks, using the size-weighted pooled covariance of the
    masks' voxel coordinates."""

    def __init__(self, metric: str = 'MAHLNBS'):
        super().__init__(metric)

    def calculate(self):
        """Return the Mahalanobis distance, or inf when either mask is empty."""
        ref_count = np.count_nonzero(self.reference)
        pred_count = np.count_nonzero(self.prediction)
        if ref_count == 0:
            warnings.warn('Unable to compute Mahalanobis distance due to empty reference mask, returning inf', NotComputableMetricWarning)
            return float('inf')
        if pred_count == 0:
            warnings.warn('Unable to compute Mahalanobis distance due to empty prediction mask, returning inf', NotComputableMetricWarning)
            return float('inf')
        # Voxel coordinates of each mask, with the axis order reversed.
        ref_coords = np.flip(np.where(self.reference == 1), axis=0)
        pred_coords = np.flip(np.where(self.prediction == 1), axis=0)
        ref_mean = ref_coords.mean(axis=1)
        pred_mean = pred_coords.mean(axis=1)
        # Pool the two coordinate covariances, weighted by mask size.
        common_cov = ((ref_count * np.cov(ref_coords)) + (pred_count * np.cov(pred_coords))) / (ref_count + pred_count)
        diff = ref_mean - pred_mean
        return math.sqrt(diff.dot(np.linalg.inv(common_cov)).dot(diff.T))
def _gen_qubit_mapping(circuit: QuantumCircuit) -> dict:
dic = {}
try:
from qiskit.transpiler.layout import TranspileLayout
if isinstance(circuit._layout, TranspileLayout):
layout = circuit._layout.initial_layout
else:
layout = circuit._layout
bit_locations = {bit: {'register': register, 'index': index} for register in layout.get_registers() for (index, bit) in enumerate(register)}
for (index, qubit) in enumerate(layout.get_virtual_bits()):
if (qubit not in bit_locations):
bit_locations[qubit] = {'register': None, 'index': index}
for (key, val) in layout.get_virtual_bits().items():
bit_register = bit_locations[key]['register']
if ((bit_register is None) or (bit_register.name != 'ancilla')):
dic[bit_locations[key]['index']] = val
except:
for i in range(circuit.num_qubits):
dic[i] = i
return dic |
def _Constant(t, symbols, inferred_symbols):
    """Infer the type of a constant AST node: strings/bytes become int8
    pointers; numeric constants get the smallest compatible scalar type
    combined with their Python type's typeclass."""
    value = t.value
    if isinstance(value, (str, bytes)):
        return dtypes.pointer(dtypes.int8)
    minimal = dtypes.typeclass(np.min_scalar_type(value).name)
    return dtypes.result_type_of(dtypes.typeclass(type(value)), minimal)
def _morphological_process(image, kernel_size=5):
    """Apply a morphological closing with an elliptical structuring element.

    :param image: single-channel binary segmentation image
    :param kernel_size: side length of the elliptical kernel
    :return: the closed image
    :raises ValueError: if `image` has a channel dimension (3-D shape)
    """
    if len(image.shape) == 3:
        raise ValueError('Binary segmentation result image should be a single channel image')
    # BUG FIX: `image.dtype is not np.uint8` compared a dtype object against
    # the np.uint8 scalar type by identity, which is always True; use `!=`
    # so the conversion happens only when the dtype actually differs.
    if image.dtype != np.uint8:
        image = np.array(image, np.uint8)
    kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size, kernel_size))
    return cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel, iterations=1)
class Examples(SegmentationBase):
    """COCO example images with segmentations, preconfigured for this repo's
    data layout (183 labels, shifted segmentation indices)."""

    def __init__(self, size=256, random_crop=False, interpolation='bicubic'):
        super().__init__(
            data_csv='data/coco_examples.txt',
            data_root='data/coco_images',
            segmentation_root='data/coco_segmentations',
            size=size,
            random_crop=random_crop,
            interpolation=interpolation,
            n_labels=183,
            shift_segmentation=True,
        )
def apply_learned_embed_in_clip(learned_embeds, text_encoder, tokenizer, token: Optional[Union[str, List[str]]] = None, idempotent=False):
    """Insert learned token embeddings into a CLIP text encoder's vocabulary.

    If a token already exists in the tokenizer and `idempotent` is False, a
    numbered variant (`<tok>` -> `<tok-1>` -> ...) is added instead; when
    `idempotent` is True the existing embedding is replaced in place.
    Returns the last token processed (possibly renamed).
    """
    if isinstance(token, str):
        trained_tokens = [token]
    elif isinstance(token, list):
        assert len(learned_embeds.keys()) == len(token), 'The number of tokens and the number of embeds should be the same'
        trained_tokens = token
    else:
        trained_tokens = list(learned_embeds.keys())

    for token in trained_tokens:
        print(token)
        embeds = learned_embeds[token]
        dtype = text_encoder.get_input_embeddings().weight.dtype
        num_added_tokens = tokenizer.add_tokens(token)

        suffix = 1
        if not idempotent:
            # Keep renaming until the tokenizer accepts the token as new.
            while num_added_tokens == 0:
                print(f'The tokenizer already contains the token {token}.')
                token = f'{token[:-1]}-{suffix}>'
                print(f'Attempting to add the token {token}.')
                num_added_tokens = tokenizer.add_tokens(token)
                suffix += 1
        elif num_added_tokens == 0:
            print(f'The tokenizer already contains the token {token}.')
            print(f'Replacing {token} embedding.')

        text_encoder.resize_token_embeddings(len(tokenizer))
        token_id = tokenizer.convert_tokens_to_ids(token)
        text_encoder.get_input_embeddings().weight.data[token_id] = embeds
    return token
def _read_signal(sim, signal_name):
    """Resolve `signal_name` within the simulation and return its IO value."""
    resolved = _find_signal(sim, signal_name)
    return sim.io[resolved]
def start_server():
    """Register DeviceState with the Pyro4 name server and serve requests
    forever (blocking call)."""
    daemon = Pyro4.Daemon(config.SPACEMOUSE_HOSTNAME)
    name_server = Pyro4.locateNS()
    uri = daemon.register(DeviceState)
    name_server.register('example.greeting', uri)
    print('uri:', uri)
    print('Server ready.')
    daemon.requestLoop()
def get_test_runner(project_module):
    """Import `project_module` and return its test entry point and metadata.

    :param project_module: importable module name
    :return: tuple (test callable, version string, absolute module directory)
    """
    __import__(project_module)
    module = sys.modules[project_module]
    # The original wrapped dirname in a single-argument os.path.join, which
    # is a no-op; abspath(dirname(...)) is all that is needed.
    mod_path = os.path.abspath(os.path.dirname(module.__file__))
    return (module.test, module.__version__, mod_path)
def _color_from_level(level):
    """Return the ANSI color code (as a string) for a PrettyPrintLevel."""
    if level in (PrettyPrintLevel.INFO, PrettyPrintLevel.SUCCESS):
        # Both INFO and SUCCESS render green.
        return '92'
    if level == PrettyPrintLevel.WARNING:
        return '93'
    if level == PrettyPrintLevel.ERROR:
        return '91'
    raise ValueError('Unknown PrettyPrintLevel: %s' % level)
def validate_string(property_name, var, string_list=None, case_sensitive=False):
    """Validate that `var` is a string and, when `string_list` is given,
    that it matches one of its entries.

    Entries of `string_list` may themselves be lists/tuples of aliases; in
    that case the first alias is returned as the canonical value. Matching is
    case-insensitive unless `case_sensitive` is True.

    :raises TypeError: if `var` is not a str
    :raises ValueError: if `var` does not match any allowed entry
    """
    if not isinstance(var, str):
        raise TypeError(f'{property_name!r} must be a str. Got {type(var)}')
    if string_list is None:
        return var

    if case_sensitive:
        probe = var
        comparables = string_list
    else:
        probe = var.casefold()

        def fold_input(input_variable):
            # Recursively casefold strings inside nested lists/tuples.
            if isinstance(input_variable, (list, tuple)):
                return [fold_input(x) for x in input_variable]
            return input_variable.casefold()

        comparables = fold_input(string_list)

    for candidate, original in zip(comparables, string_list):
        if isinstance(candidate, (list, tuple)):
            # Alias group: return the canonical (first) spelling.
            if probe in candidate:
                return original[0]
        elif probe == candidate:
            return original
    raise ValueError(f'{property_name!r} must be in {string_list!r}. Got {var!r}')
def _seg_33():
    """Return IDNA mapping-table entries for code points 13070-13169.

    Each tuple is (codepoint, status, mapping): status 'M' means the code
    point is mapped to the given replacement string.

    NOTE(review): the mappings for 13070-13143 appear here as empty u''
    literals; in the upstream uts46data table these code points map to
    non-ASCII (CJK squared-unit) strings, so the empty strings look like
    encoding loss in this copy — verify against the original table.
    """
    return [(13070, 'M', u''), (13071, 'M', u''), (13072, 'M', u''), (13073, 'M', u''), (13074, 'M', u''), (13075, 'M', u''), (13076, 'M', u''), (13077, 'M', u''), (13078, 'M', u''), (13079, 'M', u''), (13080, 'M', u''), (13081, 'M', u''), (13082, 'M', u''), (13083, 'M', u''), (13084, 'M', u''), (13085, 'M', u''), (13086, 'M', u''), (13087, 'M', u''), (13088, 'M', u''), (13089, 'M', u''), (13090, 'M', u''), (13091, 'M', u''), (13092, 'M', u''), (13093, 'M', u''), (13094, 'M', u''), (13095, 'M', u''), (13096, 'M', u''), (13097, 'M', u''), (13098, 'M', u''), (13099, 'M', u''), (13100, 'M', u''), (13101, 'M', u''), (13102, 'M', u''), (13103, 'M', u''), (13104, 'M', u''), (13105, 'M', u''), (13106, 'M', u''), (13107, 'M', u''), (13108, 'M', u''), (13109, 'M', u''), (13110, 'M', u''), (13111, 'M', u''), (13112, 'M', u''), (13113, 'M', u''), (13114, 'M', u''), (13115, 'M', u''), (13116, 'M', u''), (13117, 'M', u''), (13118, 'M', u''), (13119, 'M', u''), (13120, 'M', u''), (13121, 'M', u''), (13122, 'M', u''), (13123, 'M', u''), (13124, 'M', u''), (13125, 'M', u''), (13126, 'M', u''), (13127, 'M', u''), (13128, 'M', u''), (13129, 'M', u''), (13130, 'M', u''), (13131, 'M', u''), (13132, 'M', u''), (13133, 'M', u''), (13134, 'M', u''), (13135, 'M', u''), (13136, 'M', u''), (13137, 'M', u''), (13138, 'M', u''), (13139, 'M', u''), (13140, 'M', u''), (13141, 'M', u''), (13142, 'M', u''), (13143, 'M', u''), (13144, 'M', u'0'), (13145, 'M', u'1'), (13146, 'M', u'2'), (13147, 'M', u'3'), (13148, 'M', u'4'), (13149, 'M', u'5'), (13150, 'M', u'6'), (13151, 'M', u'7'), (13152, 'M', u'8'), (13153, 'M', u'9'), (13154, 'M', u'10'), (13155, 'M', u'11'), (13156, 'M', u'12'), (13157, 'M', u'13'), (13158, 'M', u'14'), (13159, 'M', u'15'), (13160, 'M', u'16'), (13161, 'M', u'17'), (13162, 'M', u'18'), (13163, 'M', u'19'), (13164, 'M', u'20'), (13165, 'M', u'21'), (13166, 'M', u'22'), (13167, 'M', u'23'), (13168, 'M', u'24'), (13169, 'M', u'hpa')]
def get_dict(name, clear=False, **kwargs):
    """Fetch the shared dict registered under key 'd_<name>'.

    NOTE(review): extra **kwargs are accepted but silently ignored — only
    `clear` is forwarded to get_mpdict_value; confirm this is intentional.
    """
    key = 'd_' + name
    return get_mpdict_value('dict', key, clear=clear)
def main():
    """Rebuild the top-sources collection: aggregate the per-gender top
    sources and replace the contents of the write collection."""
    females = read_collection.aggregate(top_sources_by_gender(args, field='sourcesFemale'))
    males = read_collection.aggregate(top_sources_by_gender(args, field='sourcesMale'))
    delete_existing_docs(write_collection)
    update_db(write_collection, females)
    update_db(write_collection, males)
# NOTE(review): this bare call looks like a mangled decorator (awkward-style
# `@high_level_function()` dispatch) — the `@` and part of the name appear to
# have been lost in extraction; confirm against the upstream source. The
# `yield (left, right)` below is part of that dispatch protocol.
_level_function()
def almost_equal(left, right, *, rtol: float=1e-05, atol: float=1e-08, dtype_exact: bool=True, check_parameters: bool=True, check_regular: bool=True):
    """Return True if `left` and `right` are almost-equal Awkward Arrays.

    Layouts are compared structurally (regular/list/option/union/record
    nodes); numeric buffers are compared with isclose(rtol, atol) (or exact
    dtype match when `dtype_exact`), while datetime/timedelta buffers must
    match exactly. `check_parameters` also compares layout parameters and
    behavior classes; `check_regular` requires regular/irregular list types
    to match.
    """
    (yield (left, right))
    left_behavior = behavior_of(left)
    right_behavior = behavior_of(right)
    left_backend = backend_of_obj(left, default=cpu)
    right_backend = backend_of_obj(right, default=cpu)
    # Arrays on different backends are never considered equal.
    if (left_backend is not right_backend):
        return False
    backend = left_backend
    left_layout = to_layout(left, allow_record=False).to_packed()
    right_layout = to_layout(right, allow_record=False).to_packed()
    if (not backend.nplike.known_data):
        raise NotImplementedError('Awkward Arrays with typetracer backends cannot yet be compared with `ak.almost_equal`.')
    def is_approx_dtype(left, right) -> bool:
        # With dtype_exact=False, any two integer (or two floating) dtypes match.
        if (not dtype_exact):
            for family in (np.integer, np.floating):
                if np.issubdtype(left, family):
                    return np.issubdtype(right, family)
        return (left == right)
    def packed_list_content(layout):
        # Content of a list layout restricted to the region its offsets cover.
        layout = layout.to_ListOffsetArray64(False)
        return layout.content[layout.offsets[0]:layout.offsets[(- 1)]]
    def visitor(left, right) -> bool:
        # Normalize both layouts: drop plain indexed nodes, canonicalize
        # option nodes, and turn multidimensional numpy nodes into regular arrays.
        if (left.is_indexed and (not left.is_option)):
            left = left.project()
        if (right.is_indexed and (not right.is_option)):
            right = right.project()
        if left.is_option:
            left = left.to_IndexedOptionArray64()
        if right.is_option:
            right = right.to_IndexedOptionArray64()
        if (left.is_numpy and (left.purelist_depth > 1)):
            left = left.to_RegularArray()
        if (right.is_numpy and (right.purelist_depth > 1)):
            right = right.to_RegularArray()
        if (left.length != right.length):
            return False
        if (check_parameters and (not parameters_are_equal(left.parameters, right.parameters))):
            return False
        # Behavior classes must agree unless parameter checking is disabled.
        if (not ((get_array_class(left, left_behavior) is get_array_class(right, right_behavior)) or (not check_parameters))):
            return False
        if (left.is_regular and right.is_regular):
            return ((left.size == right.size) and visitor(left.content, right.content))
        elif (left.is_list and right.is_list):
            # Mixed regular/irregular lists only compare equal when
            # check_regular is disabled.
            if (left.is_regular and (not right.is_regular)):
                return ((not check_regular) and visitor(left.content, packed_list_content(right)))
            elif (right.is_regular and (not left.is_regular)):
                return ((not check_regular) and visitor(packed_list_content(left), right.content))
            else:
                return visitor(packed_list_content(left), packed_list_content(right))
        elif (left.is_numpy and right.is_numpy):
            # Datetime/timedelta buffers require exact equality; other numeric
            # buffers are compared with isclose(rtol, atol).
            if (np.issubdtype(left.dtype, np.datetime64) or np.issubdtype(right.dtype, np.datetime64) or np.issubdtype(left.dtype, np.timedelta64) or np.issubdtype(right.dtype, np.timedelta64)):
                return ((left.dtype == right.dtype) and backend.nplike.all((left.data == right.data)) and (left.shape == right.shape))
            else:
                return (is_approx_dtype(left.dtype, right.dtype) and backend.nplike.all(backend.nplike.isclose(left.data, right.data, rtol=rtol, atol=atol, equal_nan=False)) and (left.shape == right.shape))
        elif (left.is_option and right.is_option):
            # Masks must agree element-wise; then compare the unmasked values.
            return (backend.index_nplike.array_equal(left.mask_as_bool(True), right.mask_as_bool(True)) and visitor(left.project(), right.project()))
        elif (left.is_union and right.is_union):
            # Build the mapping between left and right tag numbering (by order
            # of first appearance) and compare each projected content pair.
            def ordered_unique_values(values):
                (unique, unique_index, *_) = backend.index_nplike.unique_all(values)
                return values[backend.index_nplike.sort(unique_index)]
            left_tag_order = ordered_unique_values(left.tags.data)
            right_tag_order = ordered_unique_values(right.tags.data)
            left_tag_to_right_tag = backend.index_nplike.empty(left_tag_order.size, dtype=np.int64)
            left_tag_to_right_tag[left_tag_order] = right_tag_order
            new_left_tag = left_tag_to_right_tag[left.tags.data]
            if (not backend.index_nplike.all((new_left_tag == right.tags.data))):
                return False
            for (i, j) in zip(left_tag_order, right_tag_order):
                if (not visitor(left.project(i), right.project(j))):
                    return False
            return True
        elif (left.is_record and right.is_record):
            return (((get_record_class(left, left_behavior) is get_record_class(right, right_behavior)) or (not check_parameters)) and (left.is_tuple == right.is_tuple) and (left.is_tuple or (len(left.fields) == len(right.fields))) and all((visitor(left.content(f), right.content(f)) for f in left.fields)))
        elif (left.is_unknown and right.is_unknown):
            return True
        else:
            return False
    return visitor(left_layout, right_layout)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.