code stringlengths 101 5.91M |
|---|
def replace_tail(tail):
    """Anonymize a relation tail: map Person placeholders and ``___`` blanks.

    Rewrites the first matched Person mention to "someone", a second distinct
    mention to "someone else" (possessive: "someone else's" / "his"), and any
    determiner + ``___`` blank to "something".

    Args:
        tail: the raw tail string (e.g. "PersonX gives PersonY's dog a bath").

    Returns:
        The anonymized tail string.
    """
    xxs_tail = re.compile(".*(Person.) .* (Person.)\\'s .*", re.I)
    xy_tail = re.compile('.*(Person.) .* (Person.).*', re.I)
    px_tail = re.compile('.*(Person.).*', re.I)
    a_underline = re.compile('.* a ___.*')
    the_underline = re.compile('.* the ___.*')
    some_underline = re.compile('.* some ___.*')
    underline = re.compile('.* ___.*')
    has_xxs = xxs_tail.match(tail)
    if has_xxs:
        # Same person twice ("PersonX ... PersonX's") -> reflexive possessive.
        if has_xxs.group(1).lower() == has_xxs.group(2).lower():
            tail = re.sub(has_xxs.group(1), 'someone', tail, 1)
            tail = re.sub(has_xxs.group(2) + "'s", 'his', tail, 1)
        else:
            tail = re.sub(has_xxs.group(1), 'someone', tail, 1)
            tail = re.sub(has_xxs.group(2) + "'s", "someone else's", tail, 1)
    has_xy = xy_tail.match(tail)
    if has_xy:
        if has_xy.group(1).lower() == has_xy.group(2).lower():
            tail = re.sub(has_xy.group(1), 'someone', tail, 1)
            tail = re.sub(has_xy.group(2), 'himself', tail, 1)
        else:
            tail = re.sub(has_xy.group(1), 'someone', tail, 1)
            tail = re.sub(has_xy.group(2), 'someone else', tail, 1)
    has_px = px_tail.match(tail)
    if has_px:
        tail = tail.replace(has_px.group(1), 'someone')
    (has_a_underline, has_the_underline, has_some_underline) = (a_underline.match(tail), the_underline.match(tail), some_underline.match(tail))
    if has_a_underline or has_the_underline or has_some_underline:
        # BUG FIX: the condition tests for "some ___" but the original only
        # replaced "a ___"/"the ___"/"every ___", so "some ___" fell through
        # to the bare-blank branch and produced "some something".  Replace
        # "some ___" too (keeping "every ___" for backward compatibility).
        tail = tail.replace('a ___', 'something').replace('the ___', 'something').replace('some ___', 'something').replace('every ___', 'something')
    has_underline = underline.match(tail)
    if has_underline:
        tail = tail.replace('___', 'something')
    return tail
def conv3x3(in_planes, out_planes, groups=1, stride=1):
    """Build a bias-free 3x3 convolution with padding 1 (shape-preserving at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        groups=groups,
        bias=False,
    )
class BigDLMetric(object):
    """Value holder pairing a BigDL validation method with its outputs and labels.

    Evaluation itself happens elsewhere; this class only stores the pieces.
    """

    def __init__(self, val_method, outputs, labels):
        # Store everything verbatim, no copies or validation.
        (self.val_method, self.outputs, self.labels) = (val_method, outputs, labels)
def list_detectors():
    """Print the detectors available under ``<data_path>/Detectors``."""
    print("\nAVAILABLE DETECTORS (for fc.detector(s,'DETECTOR')):\n")
    detectors_dir = os.path.join(data_path, 'Detectors')
    print_dirs(detectors_dir)
def enable_multi_fs_save(save_func: Callable) -> Callable:
    """Decorate ``save_func(obj, path, ...)`` so it can also target remote paths.

    Local paths are saved directly.  For remote paths the object is first
    saved into a temporary local file (with a unique name that preserves the
    remote suffix) and then uploaded via ``put_local_file_to_remote``.

    Args:
        save_func: the original save function ``(obj, path, *args, **kwargs)``.

    Returns:
        A wrapper with the same call signature as ``save_func``.
    """
    import functools

    # BUG FIX: the original contained a bare ``(save_func)`` expression here,
    # the residue of a stripped ``@functools.wraps(save_func)`` decorator.
    # Restore it so the wrapper keeps the wrapped function's metadata.
    @functools.wraps(save_func)
    def fs_save(obj, path, *args, **kwargs):
        from bigdl.dllib.utils.file_utils import is_local_path
        if is_local_path(path):
            return save_func(obj, path, *args, **kwargs)
        else:
            import uuid
            import tempfile
            from bigdl.dllib.utils.file_utils import append_suffix
            # Unique staging file name carrying the remote path's suffix.
            file_name = append_suffix(str(uuid.uuid1()), path)
            with tempfile.TemporaryDirectory() as tmpdir:
                temp_path = os.path.join(tmpdir, file_name)
                result = save_func(obj, temp_path, *args, **kwargs)
                put_local_file_to_remote(temp_path, path)
                return result
    return fs_save
def readme():
    """Read and return the project's README.rst as UTF-8 text."""
    with open('README.rst', encoding='utf-8') as fh:
        return fh.read()
def training_params(is_gcloud=False, output_dir=None):
    """Assemble TrainingParams for a progressive-growing GAN run on CelebA-HQ.

    Args:
        is_gcloud: forwarded to the dataset config -- presumably toggles
            gcloud-specific data paths; confirm in ``celeba_hq_dataset``.
        output_dir: experiment output directory; derived from this file's
            location when not given.

    Returns:
        A fully-populated ``train.TrainingParams``.
    """
    if (not output_dir):
        output_dir = util.construct_experiment_output_dir(__file__)
    num_gpus = 1
    stop_after = 7
    # Per-growth-phase batch sizes: image resolution grows with the phase
    # index, so the batch shrinks to keep memory bounded.
    dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3}
    imgs_per_phase = 384000
    # Each phase sees roughly imgs_per_phase images, with a floor of 6000
    # steps per phase.
    dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
    return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=((2 * 60) * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, infogan_cont_weight=10.0, infogan_cont_depth_to_num_vars={2: 16, 3: 16, 4: 16, 5: 16, 6: 16, 7: 0, 8: 0}, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='custom03'), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
class FlaxRobertaPreLayerNormForCausalLM(metaclass=DummyObject):
    """Import placeholder: instantiation fails unless the flax backend is installed."""

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # Fail fast with an informative error when flax is missing.
        requires_backends(self, ['flax'])
def set_accelerator(accel_obj):
    """Validate ``accel_obj`` and install it as the module-wide accelerator."""
    global accelerator
    # Raises if the object does not satisfy the accelerator contract.
    _validate_accelerator(accel_obj)
    if accel_logger is not None:
        accel_logger.info(f'Setting accelerator to {accel_obj._name} (model specified)')
    accelerator = accel_obj
class QUVA(data.Dataset):
    """QUVA repetition-counting video dataset.

    Each item is a clip tensor zero-padded along time to ``max_n_frames``,
    returned together with the target length and the unpadded clip length.
    """

    def __init__(self, dataset_path, subset, sample_duration, n_samples_for_each_video=10, spatial_transform=None, target_transform=None, get_loader=get_default_video_loader):
        (self.data, self.max_n_frames) = make_dataset(dataset_path, subset, sample_duration, n_samples_for_each_video)
        self.spatial_transform = spatial_transform
        self.target_transform = target_transform
        self.loader = get_loader()
        # Running-statistics placeholders (never updated in this class).
        self.mean = [0.0, 0.0, 0.0]
        self.var = [0.0, 0.0, 0.0]
        self.readed_num = 0.0

    def __getitem__(self, index):
        entry = self.data[index]
        clip = self.loader(entry['video'], entry['frame_indices'])
        if self.spatial_transform is not None:
            self.spatial_transform.randomize_parameters()
            clip = [self.spatial_transform(img) for img in clip]
        # Stack frames then move channels first: (C, T, H, W).
        clip = torch.stack(clip, 0).permute(1, 0, 2, 3)
        target = entry['label']
        sample_len = clip.size(1)
        if sample_len != self.max_n_frames:
            # Zero-pad the time axis up to the dataset-wide maximum.
            pad = torch.zeros([clip.size(0), self.max_n_frames - sample_len, clip.size(2), clip.size(3)], dtype=torch.float)
            clip = torch.cat([clip, pad], dim=1)
        return (clip, -1, -1, len(target), sample_len)

    def __len__(self):
        return len(self.data)
# NOTE(review): a bare module-level ``_model`` expression preceded this
# function -- the residue of a stripped registration decorator (e.g.
# ``@register_model``).  Evaluating it at import time raises NameError, so it
# has been removed; re-register the model upstream if needed.
def hardcorenas_c(pretrained=False, **kwargs):
    """Build the HardCoreNAS-C variant.

    Args:
        pretrained: load pretrained weights if available.
        **kwargs: forwarded to ``_gen_hardcorenas``.

    Returns:
        The constructed model.
    """
    # Stage-wise block definitions in the timm string DSL.
    arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'], ['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], ['cn_r1_k1_s1_c960']]
    model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs)
    return model
def vgg_johnson(vgg, img, rec):
    """Johnson-style perceptual loss between ``img`` and ``rec``.

    Compares the deepest of the first 4 VGG relu feature maps via a
    channel-averaged squared difference.

    Returns:
        (scalar loss, list containing the per-pixel difference map).
    """
    feat_img = vgg.fw_relu(img, 4)[-1]
    feat_rec = vgg.fw_relu(rec, 4)[-1]
    per_pixel = (feat_img - feat_rec).pow(2).mean(dim=1, keepdim=True)
    vgg_imgs = [per_pixel]
    loss = per_pixel.mean()
    return (loss, vgg_imgs)
class NMTFlow(Flow):
    """Multi-scale normalizing flow for neural machine translation.

    Stacks ``levels`` NMTFlowBlocks.  After every non-final level the tensor
    is split along the feature dimension (one half is set aside as an output)
    and the remainder is squeezed along the time axis, Glow-style.  The
    ``backward`` pass mirrors this exactly in reverse.
    """

    def __init__(self, levels, num_steps, features, src_features, factors, hidden_features=None, inverse=False, transform='affine', coupling_type='conv', kernel_size=3, rnn_mode='LSTM', heads=1, pos_enc='add', max_length=100, dropout=0.0):
        super(NMTFlow, self).__init__(inverse)
        # One step count per level; one split factor per non-final level.
        assert (levels == len(num_steps))
        assert (levels == (len(factors) + 1))
        blocks = []
        self.levels = levels
        self.features = features
        # Positional attention only when self-attention coupling uses 'attn' encoding.
        pos_attn = ((coupling_type == 'self_attn') and (pos_enc == 'attn'))
        for level in range(levels):
            if (level == (levels - 1)):
                # Final level: no prior, no split.
                block = NMTFlowBlock(num_steps[level], features, src_features, hidden_features=hidden_features, inverse=inverse, prior=False, coupling_type=coupling_type, transform=transform, kernel_size=kernel_size, rnn_mode=rnn_mode, heads=heads, max_length=max_length, dropout=dropout, pos_attn=pos_attn)
            else:
                factor = factors[level]
                block = NMTFlowBlock(num_steps[level], features, src_features, hidden_features=hidden_features, inverse=inverse, prior=True, factor=factor, coupling_type=coupling_type, transform=transform, kernel_size=kernel_size, rnn_mode=rnn_mode, heads=heads, max_length=max_length, dropout=dropout, pos_attn=pos_attn)
                # After the split+squeeze the feature dim doubles for the next level.
                features = (block.z_features * 2)
            blocks.append(block)
        self.blocks = nn.ModuleList(blocks)

    def sync(self):
        # Propagate synchronization to every block (e.g. for data-dependent init).
        for block in self.blocks:
            block.sync()

    def forward(self, input: torch.Tensor, tgt_mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Run the flow forward; returns (output, accumulated log-determinant)."""
        logdet_accum = input.new_zeros(input.size(0))
        out = input
        outputs = []
        for (i, block) in enumerate(self.blocks):
            (out, logdet) = block.forward(out, tgt_mask, src, src_mask)
            logdet_accum = (logdet_accum + logdet)
            if (i < (self.levels - 1)):
                # Split off half the features, squeeze the rest along time.
                (out1, out2) = split(out, block.z_features)
                outputs.append(out2)
                (out, tgt_mask) = squeeze(out1, tgt_mask)
        # Undo the squeezes/splits (in reverse order) to restore the shape.
        for _ in range((self.levels - 1)):
            out2 = outputs.pop()
            out = unsqueeze(out)
            out = unsplit([out, out2])
        assert (len(outputs) == 0)
        return (out, logdet_accum)

    def backward(self, input: torch.Tensor, tgt_mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Invert the flow; mirrors ``forward`` exactly in reverse."""
        outputs = []
        masks = []
        out = input
        # Re-create the per-level splits/squeezes so blocks see the right shapes.
        for i in range((self.levels - 1)):
            (out1, out2) = split(out, self.blocks[i].z_features)
            outputs.append(out2)
            masks.append(tgt_mask)
            (out, tgt_mask) = squeeze(out1, tgt_mask)
        logdet_accum = input.new_zeros(input.size(0))
        for (i, block) in enumerate(reversed(self.blocks)):
            if (i > 0):
                out2 = outputs.pop()
                tgt_mask = masks.pop()
                out1 = unsqueeze(out)
                out = unsplit([out1, out2])
            (out, logdet) = block.backward(out, tgt_mask, src, src_mask)
            logdet_accum = (logdet_accum + logdet)
        assert (len(outputs) == 0)
        assert (len(masks) == 0)
        return (out, logdet_accum)

    def init(self, data: torch.Tensor, tgt_mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Data-dependent initialization pass; same structure as ``forward``."""
        logdet_accum = data.new_zeros(data.size(0))
        out = data
        outputs = []
        for (i, block) in enumerate(self.blocks):
            (out, logdet) = block.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
            logdet_accum = (logdet_accum + logdet)
            if (i < (self.levels - 1)):
                (out1, out2) = split(out, block.z_features)
                outputs.append(out2)
                (out, tgt_mask) = squeeze(out1, tgt_mask)
        for _ in range((self.levels - 1)):
            out2 = outputs.pop()
            out = unsqueeze(out)
            out = unsplit([out, out2])
        assert (len(outputs) == 0)
        return (out, logdet_accum)

    # NOTE(review): takes ``cls`` but carries no @classmethod decorator --
    # presumably stripped; confirm against the original source.
    def from_params(cls, params: Dict) -> 'NMTFlow':
        return NMTFlow(**params)
def LayerNorm(normalized_shape, eps=1e-05, elementwise_affine=True, export=False):
    """Return apex's FusedLayerNorm when usable, else torch.nn.LayerNorm.

    The fused kernel is only attempted when not exporting and CUDA is
    available; a missing apex install falls back silently.
    """
    use_fused = (not export) and torch.cuda.is_available()
    if use_fused:
        try:
            from apex.normalization import FusedLayerNorm
        except ImportError:
            pass
        else:
            return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
    return nn.LayerNorm(normalized_shape, eps, elementwise_affine)
class RecurrentTransformerEncoder(Module):
    """Chain of recurrent transformer layers with an optional final norm.

    Each call consumes one input step plus the per-layer recurrent state and
    returns the transformed step together with the updated state list.
    """

    def __init__(self, layers, norm_layer=None, event_dispatcher=''):
        super(RecurrentTransformerEncoder, self).__init__()
        self.layers = ModuleList(layers)
        self.norm = norm_layer
        self.event_dispatcher = EventDispatcher.get(event_dispatcher)

    def forward(self, x, state=None, memory=None):
        # ``memory`` is the deprecated alias; check_state reconciles the two.
        state = check_state(state, memory)
        if state is None:
            state = [None for _ in self.layers]
        for idx, layer in enumerate(self.layers):
            x, new_state = layer(x, state[idx])
            state[idx] = new_state
            self.event_dispatcher.dispatch(IntermediateOutput(self, x))
        if self.norm is not None:
            x = self.norm(x)
        return (x, state)
class GradientDescent():
    """Plain gradient descent (with optional momentum) over a TensorList.

    Minimizes ``problem(x)`` by stepping along the momentum-smoothed
    gradient.  With ``debug`` enabled, per-iteration losses and gradient
    magnitudes are accumulated (and optionally plotted).
    """

    def __init__(self, problem: MinimizationProblem, variable: TensorList, step_length: float, momentum: float=0.0, debug=False, plotting=False, fig_num=(10, 11)):
        self.problem = problem
        self.x = variable
        # NOTE(review): keeps the historical attribute typo ('legnth') since
        # external code may read it.
        self.step_legnth = step_length
        self.momentum = momentum
        self.debug = (debug or plotting)  # plotting implies debug bookkeeping
        self.plotting = plotting
        self.fig_num = fig_num
        self.losses = torch.zeros(0)
        self.gradient_mags = torch.zeros(0)
        self.residuals = None
        self.clear_temp()

    def clear_temp(self):
        # Reset the momentum direction between runs.
        self.dir = None

    def run(self, num_iter, dummy=None):
        """Run ``num_iter`` descent steps (``dummy`` kept for API parity)."""
        if num_iter == 0:
            return
        lossvec = None
        if self.debug:
            lossvec = torch.zeros(num_iter + 1)
            grad_mags = torch.zeros(num_iter + 1)
        for i in range(num_iter):
            self.x.requires_grad_(True)
            loss = self.problem(self.x)
            grad = TensorList(torch.autograd.grad(loss, self.x))
            if self.dir is None:
                self.dir = grad
            else:
                self.dir = grad + (self.momentum * self.dir)
            self.x.detach_()
            self.x -= self.step_legnth * self.dir
            if self.debug:
                lossvec[i] = loss.item()
                # BUG FIX: the original read ``grad.view(-1) grad.view(-1)``
                # (a SyntaxError -- missing operator).  The intended quantity
                # is the gradient magnitude sqrt(<g, g>), so restore the
                # TensorList inner product ``@``.
                grad_mags[i] = sum(grad.view(-1) @ grad.view(-1)).sqrt().item()
        if self.debug:
            self.x.requires_grad_(True)
            loss = self.problem(self.x)
            grad = TensorList(torch.autograd.grad(loss, self.x))
            lossvec[-1] = loss.item()
            # Same fix as above (with .cpu() as in the original line).
            grad_mags[-1] = sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().item()
            self.losses = torch.cat((self.losses, lossvec))
            self.gradient_mags = torch.cat((self.gradient_mags, grad_mags))
            if self.plotting:
                plot_graph(self.losses, self.fig_num[0], title='Loss')
                plot_graph(self.gradient_mags, self.fig_num[1], title='Gradient magnitude')
        self.x.detach_()
        self.clear_temp()
def extract_audio(dataset_json_file, mdl, tar_path, total_split=16):
    """Extract pooled audio features for one worker's slice of a dataset.

    Reads a JSON list of wav paths, takes this worker's contiguous slice,
    transcribes each file with ``mdl`` and writes one compressed ``.npz``
    per file into ``tar_path``.

    NOTE(review): ``argument`` (used to pick the split index below) is not
    defined in this function -- presumably a module-level CLI argument;
    confirm before running.
    """
    if (os.path.exists(tar_path) == False):
        os.makedirs(tar_path)
    with open(dataset_json_file, 'r') as fp:
        data = json.load(fp)
    num_sample = len(data)
    # Size of each worker's contiguous slice.
    num_each_split = math.ceil((num_sample / total_split))
    cur_start = (int(argument) * num_each_split)
    data = data[cur_start:(cur_start + num_each_split)]
    print(cur_start, len(data))
    for (idx, entry) in enumerate(data):
        wav = entry
        # Skip files whose feature archive already exists (resumable runs).
        if (os.path.exists((((tar_path + '/') + gen_filename(wav)) + '.npz')) == False):
            try:
                (_, audio_rep) = mdl.transcribe_audio(wav)
                audio_rep = audio_rep[0]
                # Reorder axes before pooling -- exact axis semantics depend
                # on the model's output layout; TODO confirm.
                audio_rep = torch.permute(audio_rep, (2, 0, 1)).detach().cpu().numpy()
                # Mean-pool the middle axis in blocks of 20.
                audio_rep = skimage.measure.block_reduce(audio_rep, (1, 20, 1), np.mean)
                audio_rep = audio_rep[1:]
            except Exception as e:
                print(f'Error loading file {e}')
                # NOTE(review): fallback is a torch tensor while the success
                # path yields a numpy array; np.savez_compressed accepts both
                # but downstream dtype handling may differ -- confirm.
                audio_rep = torch.zeros((32, 25, 1280))
            np.savez_compressed((((tar_path + '/') + gen_filename(wav)) + '.npz'), audio_rep)
        if ((idx % 50) == 0):
            print(idx)
class DBSNLoss_Pretrain(nn.Module):
    """DBSN pretraining loss over predicted mean/variance maps.

    Combines a variance-weighted squared error, a log-variance term and a
    variance ratio, then (reproducing the original behavior) zeroes the loss
    whenever either nonnegative term has a positive entry.
    """

    def __init__(self):
        super(DBSNLoss_Pretrain, self).__init__()

    def forward(self, target, mu, sigma_mu, sigma_n, sigma_y):
        eps = 1e-06
        # Treat target and mu as constants: no gradients flow through them.
        target = target.detach()
        mu = mu.detach()
        sq_err = ((target - mu) ** 2) / sigma_y
        log_sn = sigma_n.clamp(eps).log()
        ratio = sigma_mu / sigma_n.clamp(eps)
        loss = (sq_err + log_sn + ratio).mean()
        # NOTE(review): zeroes the loss when sq_err or ratio is anywhere
        # positive -- reproduced verbatim from the original; confirm intent.
        if (sq_err.max() > 0.0) or (ratio.max() > 0.0):
            loss.data.zero_()
        return loss
def cook_test(test, xxx_todo_changeme, eff=None, n=4):
    """Prepare a candidate sentence's n-gram statistics for BLEU scoring.

    Args:
        test: the candidate sentence.
        xxx_todo_changeme: (reference lengths, max reference n-gram counts).
        eff: effective reference length policy ('closest' picks the length
            nearest to the candidate's).
        n: maximum n-gram order.

    Returns:
        Dict with reflen, testlen, per-order guesses and clipped counts.
    """
    (reflen, refmaxcounts) = xxx_todo_changeme
    (testlen, counts) = precook(test, n, True)
    result = {}
    if eff == 'closest':
        # Reference length closest to the candidate length (ties by value).
        result['reflen'] = min((abs(l - testlen), l) for l in reflen)[1]
    else:
        result['reflen'] = reflen
    result['testlen'] = testlen
    result['guess'] = [max(0, testlen - k + 1) for k in range(1, n + 1)]
    clipped = [0] * n
    for ngram, count in counts.items():
        clipped[len(ngram) - 1] += min(refmaxcounts.get(ngram, 0), count)
    result['correct'] = clipped
    return result
def main(args):
    """Write per-chain residue bias tables for ProteinMPNN inputs.

    Reads a JSONL file of parsed structures from ``args.input_path`` and, for
    every record, builds a (chain_length x 21) bias matrix per chain: chain A
    is biased toward G/L at fixed positions, chain C is biased away from all
    non-gap amino acids at the same positions.  The result is written as one
    JSON object to ``args.output_path``.
    """
    # FIX: removed unused ``import glob`` and ``import random`` locals.
    import numpy as np
    import json
    # Reference only: index -> amino-acid mapping used by the bias columns
    # (e.g. column 5 = G, column 9 = L); not referenced by the code below.
    mpnn_alphabet = 'ACDEFGHIKLMNPQRSTVWYX'
    mpnn_alphabet_dict = {'A': 0, 'C': 1, 'D': 2, 'E': 3, 'F': 4, 'G': 5, 'H': 6, 'I': 7, 'K': 8, 'L': 9, 'M': 10, 'N': 11, 'P': 12, 'Q': 13, 'R': 14, 'S': 15, 'T': 16, 'V': 17, 'W': 18, 'Y': 19, 'X': 20}
    with open(args.input_path, 'r') as json_file:
        json_list = list(json_file)
    my_dict = {}
    for json_str in json_list:
        result = json.loads(json_str)
        # Chain ids are the last character of 'seq_chain_<id>' keys.
        all_chain_list = [item[-1:] for item in list(result) if (item[:10] == 'seq_chain_')]
        bias_by_res_dict = {}
        for chain in all_chain_list:
            chain_length = len(result[f'seq_chain_{chain}'])
            bias_per_residue = np.zeros([chain_length, 21])
            if chain == 'A':
                # Strongly favor G (5) and L (9) at these positions.
                residues = [0, 1, 2, 3, 4, 5, 11, 12, 13, 14, 15]
                amino_acids = [5, 9]
                for res in residues:
                    for aa in amino_acids:
                        bias_per_residue[(res, aa)] = 100.5
            if chain == 'C':
                # Strongly disfavor every amino acid except index 0 (A).
                residues = [0, 1, 2, 3, 4, 5, 11, 12, 13, 14, 15]
                amino_acids = range(21)[1:]
                for res in residues:
                    for aa in amino_acids:
                        bias_per_residue[(res, aa)] = -100.5
            bias_by_res_dict[chain] = bias_per_residue.tolist()
        my_dict[result['name']] = bias_by_res_dict
    with open(args.output_path, 'w') as f:
        f.write(json.dumps(my_dict) + '\n')
def callback(lcl, glb):
    """Early-stopping check: solved once the mean of the last 100 episode
    rewards reaches -50 after more than 2000 timesteps."""
    recent_mean = sum(lcl['episode_rewards'][-101:-1]) / 100
    steps = lcl['t']
    return steps > 2000 and recent_mean >= -50
def make_mujoco_env(env_id, seed, reward_scale=1.0):
    """Create a monitored (and optionally reward-scaled) MuJoCo gym env.

    Each MPI rank derives its own seed so parallel workers differ.
    """
    rank = MPI.COMM_WORLD.Get_rank()
    if seed is not None:
        myseed = seed + 1000 * rank
    else:
        myseed = None
    set_global_seeds(myseed)
    env = gym.make(env_id)
    if logger.get_dir() is None:
        logger_path = None
    else:
        logger_path = os.path.join(logger.get_dir(), str(rank))
    env = Monitor(env, logger_path, allow_early_resets=True)
    env.seed(seed)
    if reward_scale != 1.0:
        from baselines.common.retro_wrappers import RewardScaler
        env = RewardScaler(env, reward_scale)
    return env
def apply_momentum(updates, params=None, momentum=0.9):
    """Return a copy of ``updates`` with classical momentum applied.

    For each parameter a shared velocity variable is created; the returned
    OrderedDict maps velocities to their new values and parameters to the
    momentum-adjusted updates.
    """
    if params is None:
        params = updates.keys()
    updates = OrderedDict(updates)
    for param in params:
        value = param.get_value(borrow=True)
        # Velocity starts at zero, matching the parameter's shape and dtype.
        velocity = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
        new_value = (momentum * velocity) + updates[param]
        updates[velocity] = new_value - param
        updates[param] = new_value
    return updates
def shufflenet_v2_x1_0(pretrained=False, progress=True, quantize=False, **kwargs):
    """Build ShuffleNetV2 at the x1.0 width multiplier."""
    stage_repeats = [4, 8, 4]
    stage_out_channels = [24, 116, 232, 464, 1024]
    return _shufflenetv2('shufflenetv2_x1.0', pretrained, progress, quantize, stage_repeats, stage_out_channels, **kwargs)
def filter_greater_than(boxlist, thresh, scope=None):
    """Return a new BoxList keeping only boxes whose score exceeds ``thresh``.

    Raises:
        ValueError: if ``boxlist`` is not a BoxList, lacks a 'scores' field,
            or its scores are not rank 1 / shape [N, 1].
    """
    with tf.name_scope(scope, 'FilterGreaterThan'):
        if not isinstance(boxlist, box_list.BoxList):
            raise ValueError('boxlist must be a BoxList')
        if not boxlist.has_field('scores'):
            raise ValueError("input boxlist must have 'scores' field")
        scores = boxlist.get_field('scores')
        score_shape = scores.shape.as_list()
        if len(score_shape) > 2:
            raise ValueError('Scores should have rank 1 or 2')
        if len(score_shape) == 2 and score_shape[1] != 1:
            raise ValueError('Scores should have rank 1 or have shape consistent with [None, 1]')
        keep = tf.where(tf.greater(scores, thresh))
        high_score_indices = tf.cast(tf.reshape(keep, [-1]), tf.int32)
        return gather(boxlist, high_score_indices)
class TestPostCSEOptimizer(unittest.TestCase):
    """Integration tests for the post-CSE optimizer: builds small TF1 graphs,
    quantizes them with neural_compressor, and counts QuantizeV2 nodes.

    NOTE(review): ``setUpClass``/``tearDownClass`` take ``self`` and carry no
    @classmethod decorator -- presumably stripped; confirm upstream.
    """

    def setUpClass(self):
        # Generate the fake_yaml.yaml config consumed by Quantization below.
        build_fake_yaml()
        import tensorflow as tf
        # int8 path is available on the 1.15 "up" fork or TF >= 2.1.
        self.enable_s8 = bool(((tf.version.VERSION.find('1.15.0-up') != (- 1)) or (tf.version.VERSION >= '2.1.0')))

    def tearDownClass(self):
        os.remove('fake_yaml.yaml')

    # NOTE(review): bare call left by a stripped decorator (e.g.
    # ``@disable_random()``); confirm against the original source.
    _random()
    def test_post_cse(self):
        """Two identical conv branches should share one QuantizeV2 (two with s8)."""
        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name='input')
        x = tf.nn.relu(x)
        xw = tf.constant(np.random.random((2, 2, 16, 16)), dtype=tf.float32, name='y')
        x = tf.nn.conv2d(input=x, filters=xw, strides=[1, 1, 1, 1], padding='VALID')
        y = tf.constant(np.random.random((1, 55, 55, 16)), dtype=tf.float32, name='y')
        z = tf.math.add(x, y, name='add')
        conv_weights = tf.compat.v1.get_variable('weight', [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv = tf.nn.conv2d(z, conv_weights, strides=[1, 2, 2, 1], padding='VALID')
        normed = tf.compat.v1.layers.batch_normalization(conv)
        relu = tf.nn.relu(normed)
        conv_weights2 = tf.compat.v1.get_variable('weight2', [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv2 = tf.nn.conv2d(z, conv_weights2, strides=[1, 2, 2, 1], padding='VALID')
        normed2 = tf.compat.v1.layers.batch_normalization(conv2)
        relu2 = tf.nn.relu(normed2)
        add = tf.math.add(relu, relu2, name='op_to_store')
        out_name = add.name.split(':')[0]
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            # Freeze variables so the quantizer receives a constant graph.
            output_graph_def = graph_util.convert_variables_to_constants(sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name])
            from neural_compressor.experimental import Quantization, common
            quantizer = Quantization('fake_yaml.yaml')
            dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
            quantizer.calib_dataloader = common.DataLoader(dataset)
            quantizer.eval_dataloader = common.DataLoader(dataset)
            quantizer.model = output_graph_def
            output_graph = quantizer.fit()
            quantize_v2_count = 0
            for i in output_graph.graph_def.node:
                if (i.op == 'QuantizeV2'):
                    quantize_v2_count += 1
            if self.enable_s8:
                self.assertEqual(quantize_v2_count, 2)
            else:
                self.assertEqual(quantize_v2_count, 1)

    # NOTE(review): same stripped-decorator residue as above.
    _random()
    def test_post_cse2(self):
        """Same graph plus a chain of multiplies by constant 1; QuantizeV2 count unchanged."""
        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name='input')
        x = tf.nn.relu(x)
        xw = tf.constant(np.random.random((2, 2, 16, 16)), dtype=tf.float32, name='y')
        x = tf.nn.conv2d(input=x, filters=xw, strides=[1, 1, 1, 1], padding='VALID')
        y = tf.constant(np.random.random((1, 55, 55, 16)), dtype=tf.float32, name='y')
        z = tf.math.add(x, y, name='add')
        conv_weights = tf.compat.v1.get_variable('weight', [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv = tf.nn.conv2d(z, conv_weights, strides=[1, 2, 2, 1], padding='VALID')
        normed = tf.compat.v1.layers.batch_normalization(conv)
        relu = tf.nn.relu(normed)
        conv_weights2 = tf.compat.v1.get_variable('weight2', [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv2 = tf.nn.conv2d(z, conv_weights2, strides=[1, 2, 2, 1], padding='VALID')
        normed2 = tf.compat.v1.layers.batch_normalization(conv2)
        relu2 = tf.nn.relu(normed2)
        add = tf.math.add(relu, relu2)
        ones_const = tf.constant(1, dtype=tf.float32)
        ones_const2 = tf.constant(1, dtype=tf.float32)
        mul1 = tf.math.multiply(add, ones_const)
        mul2 = tf.math.multiply(mul1, ones_const)
        mul3 = tf.math.multiply(mul2, ones_const2, name='op_to_store')
        out_name = mul3.name.split(':')[0]
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            output_graph_def = graph_util.convert_variables_to_constants(sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name])
            from neural_compressor.experimental import Quantization, common
            quantizer = Quantization('fake_yaml.yaml')
            dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
            quantizer.calib_dataloader = common.DataLoader(dataset)
            quantizer.eval_dataloader = common.DataLoader(dataset)
            quantizer.model = output_graph_def
            output_graph = quantizer.fit()
            quantize_v2_count = 0
            for i in output_graph.graph_def.node:
                if (i.op == 'QuantizeV2'):
                    quantize_v2_count += 1
            if self.enable_s8:
                self.assertEqual(quantize_v2_count, 2)
            else:
                self.assertEqual(quantize_v2_count, 1)
def clear_double_syspool(vrblvl=0):
    """Deallocate the pool of systems in double precision (PHCpack job 697).

    Args:
        vrblvl: verbosity level; > 0 prints progress messages.

    Returns:
        The return value of the phc library call.
    """
    verbose = vrblvl > 0
    if verbose:
        print('in clear_double_syspool ...')
    phc = get_phcfun()
    # Dummy in/out parameters required by the generic phc entry point.
    apar = pointer(c_int32(0))
    bbb = pointer(c_int32(0))
    ccc = pointer(c_double(0.0))
    if verbose:
        print('-> clear_double_syspool calls phc', end='')
    retval = phc(697, apar, bbb, ccc, c_int32(vrblvl))
    if verbose:
        print(', return value :', retval)
    return retval
class BartForQuestionAnswering():
    """Import placeholder raising an informative error when PyTorch is missing."""

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # NOTE(review): in the usual dummy-object pattern this is a @classmethod;
    # the decorator may have been stripped -- confirm upstream.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def test_bbox2result_kitti2d():
    """Check KittiDataset.bbox2result_kitti2d on a fixed two-class batch.

    Feeds 2 classes x 2 boxes (each row: 4 bbox values + score) and verifies
    the flattened names, bboxes and scores in the returned annotations.
    """
    (data_root, ann_file, classes, pts_prefix, pipeline, modality, split) = _generate_kitti_dataset_config()
    kitti_dataset = KittiDataset(data_root, ann_file, split, pts_prefix, pipeline, classes, modality)
    # Shape (num_classes=2, num_boxes=2, 5): [x1, y1, x2, y2, score].
    bboxes = np.array([[[46.1218, (- 4.6496), (- 0.9275), 0.5316, 0.5], [33.3189, 0.1981, 0.3136, 0.5656, 0.5]], [[46.1366, (- 4.6404), (- 0.951), 0.5162, 0.5], [33.2646, 0.2297, 0.3446, 0.5746, 0.5]]])
    det_annos = kitti_dataset.bbox2result_kitti2d([bboxes], classes)
    # Two boxes per class, classes flattened in order.
    expected_name = np.array(['Pedestrian', 'Pedestrian', 'Cyclist', 'Cyclist'])
    expected_bbox = np.array([[46.1218, (- 4.6496), (- 0.9275), 0.5316], [33.3189, 0.1981, 0.3136, 0.5656], [46.1366, (- 4.6404), (- 0.951), 0.5162], [33.2646, 0.2297, 0.3446, 0.5746]])
    expected_score = np.array([0.5, 0.5, 0.5, 0.5])
    assert np.all((det_annos[0]['name'] == expected_name))
    assert np.allclose(det_annos[0]['bbox'], expected_bbox)
    assert np.allclose(det_annos[0]['score'], expected_score)
# NOTE(review): this registration originally appeared as a bare call
# ``_inducing_shape.register(InducingVariables)`` -- the residue of a
# stripped decorator.  Without decorator syntax the returned registrar was
# discarded and ``_getter`` was never registered; restored as a decorator.
@_inducing_shape.register(InducingVariables)
def _getter(x):
    """Return the shape of the inducing variable's Z as a plain list."""
    # BUG FIX: the original asserted ``isinstance(InducingVariables, ...)``,
    # testing the class object instead of the argument ``x``.
    assert not isinstance(x, MultioutputInducingVariables)
    return list(x.Z.shape)
class Block(nn.Module):
    """Inception-style block concatenating several parallel branches.

    Two conv branches are always present.  With pool_method 'Avg' (stride
    must be 1) a 1x1-conv branch and an avg-pool branch are appended;
    otherwise a max-pool branch is used.
    """

    def __init__(self, in_planes, out_planes, pool_method, stride):
        super(Block, self).__init__()
        two_convs = nn.Sequential(
            _make_conv(in_planes, out_planes, kernel_size=1, padding=0),
            _make_conv(out_planes, out_planes, stride=stride),
        )
        three_convs = nn.Sequential(
            _make_conv(in_planes, out_planes, kernel_size=1, padding=0),
            _make_conv(out_planes, out_planes),
            _make_conv(out_planes, out_planes, stride=stride),
        )
        self.branches = nn.ModuleList([two_convs, three_convs])
        if pool_method == 'Avg':
            assert stride == 1
            self.branches.append(_make_conv(in_planes, out_planes, kernel_size=1, padding=0))
            self.branches.append(nn.Sequential(
                nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
                _make_conv(in_planes, out_planes, kernel_size=1, padding=0),
            ))
        else:
            self.branches.append(nn.MaxPool2d(kernel_size=3, stride=stride, padding=1))

    def forward(self, x):
        # Channel-concatenate every branch's output.
        outs = [branch(x) for branch in self.branches]
        return torch.cat(outs, 1)
class MinPooling(_AbstractPoolingBase):
    """Augmenter that pools images over kernel regions taking the minimum.

    Pads with cval=255 -- presumably so uint8 border padding never wins the
    minimum; confirm against ``ia.pool`` semantics.
    """

    def __init__(self, kernel_size, keep_size=True, name=None, deterministic=False, random_state=None):
        super(MinPooling, self).__init__(
            kernel_size=kernel_size,
            keep_size=keep_size,
            name=name,
            deterministic=deterministic,
            random_state=random_state,
        )

    def _pool_image(self, image, kernel_size_h, kernel_size_w):
        pooled = ia.pool(image, (kernel_size_h, kernel_size_w), np.min, cval=255, preserve_dtype=True)
        return pooled
class ACubeNet(nn.Module):
    """Super-resolution network: residual dense attention groups + AFF fusion.

    Pipeline: mean-shift -> head conv -> n_resgroups RDAG bodies whose
    intermediate outputs are fused by AFF -> VFF conv with a long skip ->
    upsampling tail -> inverse mean-shift.
    """

    def __init__(self, args, conv=common.default_conv):
        super(ACubeNet, self).__init__()
        n_resgroups = args.n_resgroups
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        reduction = args.reduction
        scale = args.scale[0]
        act = nn.ReLU(True)
        # Attention feature fusion over the stacked group outputs.
        self.aff = common.AFF(n_feats, conv)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Subtract the dataset mean on input; added back at the end.
        self.sub_mean = common.MeanShift(args.rgb_range)
        modules_head = [conv(args.n_colors, n_feats, kernel_size)]
        self.body = nn.ModuleList()
        for _ in range(n_resgroups):
            self.body.append(RDAG(conv, n_feats, kernel_size, reduction, act=act, res_scale=args.res_scale, n_resblocks=n_resblocks))
        # One 1x1 "squeeze" conv per group for the pooled descriptors.
        self.sq = nn.ModuleList()
        for _ in range(n_resgroups):
            self.sq.append(conv(n_feats, 1, kernel_size=1))
        m_VFF = []
        m_VFF.append(conv(n_feats, n_feats, kernel_size))
        modules_tail = [common.Upsampler(conv, scale, n_feats, act=False), conv(n_feats, args.n_colors, kernel_size)]
        self.add_mean = common.MeanShift(args.rgb_range, sign=1)
        self.head = nn.Sequential(*modules_head)
        self.VFF = nn.Sequential(*m_VFF)
        self.tail = nn.Sequential(*modules_tail)

    def forward(self, x):
        x = self.sub_mean(x)
        x = self.head(x)
        y = x
        RBs_sq = []
        RBs_up = []
        # NOTE(review): iterates exactly 4 groups while __init__ builds
        # n_resgroups of them -- breaks when args.n_resgroups != 4; confirm.
        for i in range(4):
            y = self.body[i](y)
            # Pooled, squeezed descriptor of this group's output.
            z = self.avg_pool(y)
            z = self.sq[i](z)
            z = z.unsqueeze((- 2))
            t = y.unsqueeze((- 1))
            RBs_up.append(t)
            RBs_sq.append(z)
        # Stack per-group feature maps / descriptors and fuse them.
        output1 = torch.cat(RBs_up, dim=(- 1))
        output2 = torch.cat(RBs_sq, dim=(- 2))
        out = self.aff(output1, output2)
        y = (y + out)
        res = self.VFF(y)
        # Long skip connection from the head output.
        res += x
        x = self.tail(res)
        x = self.add_mean(x)
        return x

    def load_state_dict(self, state_dict, strict=False):
        """Copy matching parameters; tolerate mismatched 'tail' (upsampler) weights."""
        own_state = self.state_dict()
        for (name, param) in state_dict.items():
            if (name in own_state):
                if isinstance(param, nn.Parameter):
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    # Size mismatch is only acceptable for the upsampler tail.
                    if (name.find('tail') >= 0):
                        print('Replace pre-trained upsampler to new one...')
                    else:
                        raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_state[name].size(), param.size()))
            elif strict:
                if (name.find('tail') == (- 1)):
                    raise KeyError('unexpected key "{}" in state_dict'.format(name))
        if strict:
            missing = (set(own_state.keys()) - set(state_dict.keys()))
            if (len(missing) > 0):
                raise KeyError('missing keys in state_dict: "{}"'.format(missing))
class Discriminator(nn.Module):
    """DCGAN-style discriminator: four strided 4x4 convs, a 1-channel head
    conv and a sigmoid, flattened to one probability per image.

    When ``i_channels_in_2`` is True the channel widths simply double from
    ``c1_channels`` and the explicit c2/c3/c4 arguments are ignored.
    ``inhw`` is accepted for interface compatibility (unused here).
    """

    def __init__(self, inhw, c1_channels=64, c2_channels=128, c3_channels=256, c4_channels=512, i_channels_in_2=True):
        super().__init__()
        self.c1_channels = c1_channels
        if i_channels_in_2:
            # Doubling ladder: c1, 2*c1, 4*c1, 8*c1.
            self.c2_channels = c1_channels * 2
            self.c3_channels = c1_channels * 4
            self.c4_channels = c1_channels * 8
        else:
            self.c2_channels = c2_channels
            self.c3_channels = c3_channels
            self.c4_channels = c4_channels
        down = dict(kernel_size=4, stride=2, padding=1, bias=False)
        self.conv1 = nn.Conv2d(3, self.c1_channels, **down)
        self.conv2 = nn.Conv2d(self.c1_channels, self.c2_channels, **down)
        self.bnorm2 = nn.BatchNorm2d(num_features=self.c2_channels)
        self.conv3 = nn.Conv2d(self.c2_channels, self.c3_channels, **down)
        self.bnorm3 = nn.BatchNorm2d(num_features=self.c3_channels)
        self.conv4 = nn.Conv2d(self.c3_channels, self.c4_channels, **down)
        self.bnorm4 = nn.BatchNorm2d(num_features=self.c4_channels)
        self.conv5 = nn.Conv2d(self.c4_channels, 1, kernel_size=4, stride=1, padding=0, bias=False)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2)
        self.sigmoid = nn.Sigmoid()

    def forward(self, img):
        h = self.lrelu(self.conv1(img))
        h = self.lrelu(self.bnorm2(self.conv2(h)))
        h = self.lrelu(self.bnorm3(self.conv3(h)))
        h = self.lrelu(self.bnorm4(self.conv4(h)))
        h = self.sigmoid(self.conv5(h))
        # Flatten (N, 1, 1, 1) -> (N,).
        return h.view(-1, 1).squeeze()

    def out_shape(self, inp_dim, kernel_size=4, padding=1, stride=2):
        """Spatial size after one convolution with the given geometry."""
        return (inp_dim - kernel_size + 2 * padding) // stride + 1
def get_all_content_words(sentences, N, tokenize):
    """Tokenize/normalize ``sentences`` and return content words or n-grams.

    Args:
        sentences: list of sentence strings (or a single pre-split string
            when ``tokenize`` is falsy).
        N: n-gram order; stopwords are removed only for unigrams (N == 1).
        tokenize: when truthy, tokenize and stem each sentence.

    Returns:
        For N == 1 a list of normalized content words; for N > 1 a list of
        content n-gram tuples.
    """
    all_words = []
    if tokenize:
        for s in sentences:
            all_words.extend([stemmer.stem(r) for r in tokenizer.tokenize(s)])
    elif isinstance(sentences, list):
        all_words = sentences[0].split()
    else:
        all_words = sentences.split()
    if N == 1:
        # Stopword filtering only matters at the unigram level.
        content_words = [w for w in all_words if w not in stopset]
    else:
        content_words = all_words
    # BUG FIX: the original returned the lazy ``map`` object for N == 1 -- a
    # single-use iterator, inconsistent with the list returned for N > 1.
    # Materialize once so both paths return a reusable list.
    normalized_content_words = [normalize_word(w) for w in content_words]
    if N > 1:
        return [gram for gram in ngrams(normalized_content_words, N) if is_ngram_content(gram)]
    return normalized_content_words
class CuIRFFTOp(Op):
__props__ = ()
def output_type(self, inp):
return GpuArrayType(inp.dtype, broadcastable=([False] * (inp.type.ndim - 1)), context_name=inp.type.context_name)
def make_node(self, inp, s=None):
if (not scikits_cuda_available):
raise RuntimeError('skcuda is needed for CuIFFTOp')
if (not pygpu_available):
raise RuntimeError('pygpu is needed for CuIFFTOp')
if (not pycuda_available):
raise RuntimeError('pycuda is needed for CuIFFTOp')
inp = basic_ops.gpu_contiguous(basic_ops.as_gpuarray_variable(inp, basic_ops.infer_context_name(inp)))
if (s is None):
s = inp.shape[1:(- 1)]
s = T.set_subtensor(s[(- 1)], ((s[(- 1)] - 1) * 2))
s = T.as_tensor_variable(s)
assert (inp.dtype == 'float32')
assert (s.ndim == 1)
return theano.Apply(self, [inp, s], [self.output_type(inp)()])
def make_thunk(self, node, storage_map, _, _2, impl=None):
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
with node.inputs[0].type.context:
skcuda.misc.init()
plan_input_shape = [None]
plan = [None]
def thunk():
input_shape = inputs[0][0].shape
s = inputs[1][0]
assert (input_shape[1:(- 2)] == s[:(- 1)]).all()
assert ((((input_shape[(- 2)] - 1) * 2) + (s[(- 1)] % 2)) == s[(- 1)]).all()
output_shape = ([input_shape[0]] + list(s))
output_shape = tuple(output_shape)
z = outputs[0]
if ((z[0] is None) or (z[0].shape != output_shape)):
z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context, dtype='float32')
input_pycuda = inputs[0][0]
output_pycuda = z[0]
with input_pycuda.context:
if ((plan[0] is None) or (plan_input_shape[0] != input_shape)):
plan_input_shape[0] = input_shape
plan[0] = fft.Plan(s, np.complex64, np.float32, batch=output_shape[0])
input_pycuda.sync()
output_pycuda.sync()
fft.ifft(input_pycuda, output_pycuda, plan[0])
pycuda.driver.Context.synchronize()
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
def grad(self, inputs, output_grads):
(gout,) = output_grads
s = inputs[1]
gf = curfft_op(gout, s)
idx = ((([slice(None)] * (gf.ndim - 2)) + [slice(1, ((s[(- 1)] // 2) + (s[(- 1)] % 2)))]) + [slice(None)])
gf = T.set_subtensor(gf[idx], (gf[idx] * 2))
return [gf, DisconnectedType()()]
def connection_pattern(self, node):
    """Declare which inputs are differentiable w.r.t. the output."""
    return [
        [True],   # spectrum input: gradient flows
        [False],  # target-shape input: disconnected
    ]
class MultiViewPreprocessing(TransformerMixin):
    """Apply a (possibly different) preprocessing transformer to each view.

    Parameters
    ----------
    preprocessing_list : list
        One sklearn-style transformer (or None for passthrough) per view,
        or a single-element list that is broadcast to every view on fit.
    """

    def __init__(self, preprocessing_list):
        self.preprocessing_list = preprocessing_list

    def fit(self, views, y=None):
        """Fit each preprocessing on its corresponding view.

        Raises ValueError when the number of preprocessings is neither 1
        nor the number of views.  Returns self.
        """
        if (len(self.preprocessing_list) == 1):
            # Broadcast the single preprocessing across every view.
            self.preprocessing_list = (self.preprocessing_list * len(views))
        elif (len(self.preprocessing_list) != len(views)):
            raise ValueError('Length of preprocessing_list must be 1 (apply the same preprocessing to each view) or equal to the number of representations')
        check_Xs(views, enforce_views=range(len(self.preprocessing_list)))
        for (view, preprocessing) in zip(views, self.preprocessing_list):
            if (preprocessing is not None):
                preprocessing.fit(view, y)
        return self

    def transform(self, X, y=None):
        """Transform each view with its fitted preprocessing (None = passthrough)."""
        # Plain loop instead of a throwaway list comprehension: the call is
        # executed purely for its validation side effect.
        for preprocessing in self.preprocessing_list:
            if (preprocessing is not None):
                check_is_fitted(preprocessing)
        check_Xs(X, enforce_views=range(len(self.preprocessing_list)))
        return [(view if (preprocessing is None) else preprocessing.transform(view))
                for (view, preprocessing) in zip(X, self.preprocessing_list)]
class TLU(nn.LayerBase):
    """Thresholded Linear Unit: elementwise max(x, tau) with one learnable
    threshold per channel, initialized to zero.
    """

    def __init__(self, in_ch, dtype=None, **kwargs):
        self.in_ch = in_ch
        # Fall back to the framework-wide default float type.
        self.dtype = nn.floatx if dtype is None else dtype
        super().__init__(**kwargs)

    def build_weights(self):
        # One threshold value per input channel.
        self.tau = tf.get_variable('tau', (self.in_ch,), dtype=self.dtype, initializer=tf.initializers.zeros())

    def get_weights(self):
        return [self.tau]

    def forward(self, x):
        # Reshape tau so it broadcasts over batch/spatial axes in either layout.
        if (nn.data_format == 'NHWC'):
            broadcast_shape = (1, 1, 1, self.in_ch)
        else:
            broadcast_shape = (1, self.in_ch, 1, 1)
        return tf.math.maximum(x, tf.reshape(self.tau, broadcast_shape))
# NOTE(review): the line below looks like a decorator stripped of its
# '@name(...)' prefix (keyword-style signature declarations, e.g. a
# Cython/numba annotation). As written it is a bare expression and not
# valid Python — confirm against the original source.
(x='double[::1]', acc_container='AcceleratorContainer', digest=str, returns='AcceleratorContainer')
def fetch_acc(x):
    """Fetch (or build) the AcceleratorContainer for interpolation domain x.

    Looks up a cached container keyed by the domain hash. NOTE(review): on
    a cache miss the new container is NOT inserted into ``acc_store`` here,
    and ``acc_counter`` is only incremented on hits — confirm the caching
    happens elsewhere (e.g. inside AcceleratorContainer).
    """
    digest = AcceleratorContainer.hash_interpolation_domain(x)
    acc_container = acc_store.get(digest)
    if (acc_container is None):
        acc_container = AcceleratorContainer(x)
    else:
        acc_counter[digest] += 1
    return acc_container
# NOTE(review): '.slow' looks like a mangled '@pytest.mark.slow' decorator —
# as written this line is not valid syntax; confirm against the original.
.slow
def test_harmonic_oscillator_vmc_random_particle(caplog):
    """End-to-end VMC smoke test on the quantum harmonic oscillator.

    Runs a short SGD/VMC loop with the random-particle local energy and
    checks the learned model parameter approaches sqrt(spring_constant)
    within a loose tolerance.
    """
    model_omega = 5
    spring_constant = 1.5
    # Scale the walker count with the number of available devices.
    nchains = (100 * jax.local_device_count())
    nburn = 100
    nepochs = 100
    nsteps_per_param_update = 5
    std_move = 0.25
    learning_rate = 0.001
    (log_psi_model, params, random_particle_positions, amplitudes, key) = _make_initial_params_and_data(model_omega, nchains)
    data = make_simple_position_amplitude_data(random_particle_positions, amplitudes)
    local_energy_fn = qho.make_harmonic_oscillator_local_energy(spring_constant, log_psi_model.apply, local_energy_type='random_particle')
    (_, params, _, _) = sgd_vmc_loop_with_logging(caplog, data, params, key, nchains, nburn, nepochs, nsteps_per_param_update, std_move, learning_rate, log_psi_model, local_energy_fn, local_energy_type='random_particle')
    # The exact ground state has frequency sqrt(k); tolerance is loose
    # because the optimization run is short.
    np.testing.assert_allclose(jax.tree_util.tree_leaves(params)[0], jnp.sqrt(spring_constant), atol=1.0)
def histogram(iterable, k=10, interval=None, *args, **kwargs):
    """Bin the values of *iterable* into *k* equal-width buckets.

    Returns a dict mapping (bin_start, bin_end) -> list of values falling
    in that bin.  The range defaults to (min, max) of the data and can be
    forced via *interval* or the keyword alias ``range``.  A tiny epsilon
    widens the bin width so the maximum value lands inside the last bin.
    """
    if ('range' in kwargs):
        interval = kwargs['range']
    values = iterable if isinstance(iterable, list) else list(iterable)
    bounds = interval or (min(values), max(values))
    k = max(int(k), 1)
    width = float((bounds[1] - bounds[0]) + 1e-06) / k
    buckets = [[] for _ in range(k)]
    for value in values:
        slot = int(floor((value - bounds[0]) / width))
        # Values outside the forced interval are silently dropped.
        if (0 <= slot < len(buckets)):
            buckets[slot].append(value)
    return {((bounds[0] + (width * i)), ((bounds[0] + width) + (width * i))): bucket
            for (i, bucket) in enumerate(buckets)}
def compute_exact(a_gold, a_pred):
    """Return 1 when the normalized gold and predicted answers match exactly, else 0."""
    return 1 if normalize_answer(a_gold) == normalize_answer(a_pred) else 0
def set_default_adaptor_args(args):
    """Fill in default values for adaptor options missing on *args*.

    Existing attribute values are preserved; only absent ones get defaults.
    """
    defaults = {
        'adaptor_n_layers': 3,
        'adaptor_kernel_size': 3,
        'adaptor_stride': 2,
        'adaptor_layerdrop': 0.0,
        'adaptor_layernorm': False,
        'adaptor_proj': False,
    }
    for option, fallback in defaults.items():
        setattr(args, option, getattr(args, option, fallback))
def test_reference_wrapper():
    """Exercise std::reference_wrapper conversions exposed by the pybind11 test module."""
    # Wrapped builtins and user types pass through by reference semantics.
    assert (m.refwrap_builtin(42) == 420)
    assert (m.refwrap_usertype(UserType(42)) == 42)
    assert (m.refwrap_usertype_const(UserType(42)) == 42)
    # None cannot bind to a reference_wrapper argument.
    with pytest.raises(TypeError) as excinfo:
        m.refwrap_builtin(None)
    assert ('incompatible function arguments' in str(excinfo.value))
    with pytest.raises(TypeError) as excinfo:
        m.refwrap_usertype(None)
    assert ('incompatible function arguments' in str(excinfo.value))
    assert (m.refwrap_lvalue().value == 1)
    assert (m.refwrap_lvalue_const().value == 1)
    # copy=True returns independent copies: equal values, distinct objects.
    a1 = m.refwrap_list(copy=True)
    a2 = m.refwrap_list(copy=True)
    assert ([x.value for x in a1] == [2, 3])
    assert ([x.value for x in a2] == [2, 3])
    assert ((not (a1[0] is a2[0])) and (not (a1[1] is a2[1])))
    # copy=False returns references to the same underlying objects.
    b1 = m.refwrap_list(copy=False)
    b2 = m.refwrap_list(copy=False)
    assert ([x.value for x in b1] == [1, 2])
    assert ([x.value for x in b2] == [1, 2])
    assert ((b1[0] is b2[0]) and (b1[1] is b2[1]))
    assert (m.refwrap_iiw(IncType(5)) == 5)
    assert (m.refwrap_call_iiw(IncType(10), m.refwrap_iiw) == [10, 10, 10, 10])
def RRSE_np(pred, true, mask_value=None):
    """Root Relative Squared Error between *pred* and *true* (numpy arrays).

    When *mask_value* is given, only positions where ``true > mask_value``
    contribute.  Returns sqrt(sum((pred-true)^2)) / sqrt(sum((true-mean)^2)).
    """
    # PEP 8: compare against None with `is not`, not `!=`.
    if mask_value is not None:
        mask = true > mask_value
        true = true[mask]
        pred = pred[mask]
    mean = true.mean()
    return np.divide(np.sqrt(np.sum(((pred - true) ** 2))), np.sqrt(np.sum(((true - mean) ** 2))))
class Custom_Dataset(Dataset):
    """Torch dataset wrapping several LM-evaluation corpora.

    Loads rows from a local CSV/JSON file or from the HuggingFace hub
    (depending on ``dataset_name``/``type_path``) and converts each row
    into tokenized (source, target) pairs plus task metadata.
    """

    def __init__(self, tokenizer, dataset_name, valid_subset_path, type_path, input_length, output_length, args):
        self.args = args
        self.tokenizer = tokenizer
        self.input_length = input_length    # max source tokens
        self.output_length = output_length  # max target tokens
        self.dataset_name = dataset_name
        self.type_path = type_path          # 'train' or an eval split name
        self.valid_subset_path = valid_subset_path
        if (self.type_path == 'train'):
            self.dataset = pd.read_csv(dataset_name, lineterminator='\n')
            # Training expects the file to hold exactly one effective batch.
            batch_size = ((self.args.train_batch_size * self.args.gradient_accumulation_steps) * self.args.ngpu)
            if (len(self.dataset) != batch_size):
                raise Exception('Effective batch size should be the same as length of train set')
        elif ('.csv' in self.dataset_name):
            self.dataset = pd.read_csv(dataset_name, lineterminator='\n')
        elif ('.json' in self.dataset_name):
            self.dataset = pd.read_json(dataset_name)
        else:
            # Not a local file: fetch from the HuggingFace hub.
            if valid_subset_path:
                dataset = load_dataset(self.dataset_name, valid_subset_path, split=type_path, ignore_verifications=True, cache_dir=args.cache_dir)
            else:
                dataset = load_dataset(self.dataset_name, split=type_path, ignore_verifications=True, cache_dir=args.cache_dir)
            self.dataset = dataset.to_pandas()
        if (self.dataset_name == 'ai2_arc'):
            # Keep only 4-choice questions so answer indexing is uniform.
            self.dataset['length'] = self.dataset['choices'].apply((lambda x: len(x['text'])))
            self.dataset = self.dataset[(self.dataset['length'] == 4)]
        self.dataset = self.dataset.dropna()

    def __len__(self):
        return len(self.dataset)

    def input_to_target(self, input):
        """Split text into (all-but-last-word, ' ' + last word) for completion tasks."""
        input_s = input.split(' ')
        input_ = ' '.join(input_s[:(len(input_s) - 1)])
        target = (' ' + input_s[(len(input_s) - 1)])
        return (input_, target)

    def create_dialogue_prompt(self, turns):
        """Render alternating turns as a 'User 1:/User 2:' prompt ending with the next speaker's tag.

        NOTE: assumes ``turns`` is non-empty — ``i`` from the loop is reused
        after it to pick the closing speaker tag.
        """
        prompt = ''
        for (i, turn) in enumerate(turns):
            turn = normalize_reply(turn)
            if ((i % 2) == 0):
                prompt += f'''User 1: {turn}
'''
            else:
                prompt += f'''User 2: {turn}
'''
        if (i % 2):
            prompt += f'User 1:'
        else:
            prompt += f'User 2:'
        return prompt

    def convert_to_features(self, example_batch):
        """Convert one row into (source, targets, doc_id, task, task_type, choices, answer_index)."""
        try:
            doc_id = torch.tensor(example_batch['doc_id'], dtype=torch.int)
        except KeyError:
            # Rows without a doc_id get an empty placeholder.
            doc_id = ''
        choices = []
        answer_index = 0
        (task, task_type) = ('', '')
        if (self.type_path == 'train'):
            # Plain LM training: source and target are the same text.
            input_ = example_batch['text']
            target_ = example_batch['text']
        elif ('lambada' in self.dataset_name):
            (input_, target_) = self.input_to_target(example_batch['text'])
            task_type = 'completion'
            task = 'lambada'
        elif (self.dataset_name == 'piqa'):
            input_ = example_batch['goal']
            choices = [(' ' + example_batch['sol1']), (' ' + example_batch['sol2'])]
            target_ = choices[int(example_batch['label'])]
            answer_index = int(example_batch['label'])
            task_type = 'classification'
        elif (self.dataset_name == 'hellaswag'):
            input_ = example_batch['ctx']
            choices = []
            choices = [(' ' + c) for c in example_batch['endings']]
            target_ = choices[int(example_batch['label'])]
            answer_index = int(example_batch['label'])
            task_type = 'classification'
        elif (self.dataset_name == 'ai2_arc'):
            input_ = example_batch['question']
            choices = [(' ' + c) for c in example_batch['choices']['text']]
            answer_index = example_batch['choices']['label'].tolist().index(example_batch['answerKey'])
            target_ = choices[answer_index]
            task_type = 'classification'
        elif (self.dataset_name == 'winogrande'):
            # Each option replaces the ' _' placeholder in the sentence.
            (input_, rest) = example_batch['sentence'].split(' _')
            choices = [((' ' + example_batch['option1']) + rest), ((' ' + example_batch['option2']) + rest)]
            answer_index = (int(example_batch['answer']) - 1)
            target_ = choices[answer_index]
            task_type = 'classification'
        elif (self.dataset_name == 'math_qa'):
            input_ = example_batch['Problem']
            # Options come as 'a ) ... , b ) ... , ... e ) ...'; extract the texts.
            choices = [c[4:].rstrip(' ,') for c in re.findall('[abcd] \\) .*?, |e \\) .*?$', example_batch['options'])]
            answer_index = ['a', 'b', 'c', 'd', 'e'].index(example_batch['correct'])
            target_ = choices[answer_index]
            task_type = 'classification'
        elif ('pubmed_qa' in self.dataset_name):
            input_ = f'''Context: {example_batch['abstract']}
Question: {example_batch['question']}
Answer:'''
            choices = [' yes', ' maybe', ' no']
            answer_index = ['yes', 'maybe', 'no'].index(example_batch['final_decision'])
            target_ = choices[answer_index]
            task = 'pubmed_qa'
            task_type = 'classification'
        elif ((self.dataset_name == 'super_glue') and (self.valid_subset_path == 'copa')):
            input_ = example_batch['premise']
            choices = [(' ' + example_batch['choice1']), (' ' + example_batch['choice2'])]
            answer_index = int(example_batch['label'])
            target_ = choices[answer_index]
            task_type = 'classification'
        elif any(((d in self.dataset_name) for d in DIALOG_DATASETS)):
            # Predict the last turn from the preceding dialogue context.
            input_ = self.create_dialogue_prompt(example_batch['text'][:(- 1)])
            target_ = normalize_reply(example_batch['text'][(- 1)])
            task = self.dataset_name.split('.')[0].split('/')[1]
            task_type = 'dialog'
        elif ('pile' in self.dataset_name):
            (input_, target_) = (example_batch['text'], example_batch['text'])
            task = 'pile'
            task_type = 'ppl'
        elif ('wikitext' in self.dataset_name):
            (input_, target_) = (example_batch['text'], example_batch['text'])
            task = 'wikitext'
            task_type = 'ppl'
        else:
            (input_, target_) = (example_batch['text'], example_batch['text'])
            task = 'target'
            task_type = 'target'
        if (not task):
            # Fall back to a task name derived from the dataset identifiers.
            if self.valid_subset_path:
                task = f'{self.dataset_name}_{self.valid_subset_path}'
            else:
                task = f'{self.dataset_name}'
        source = self.tokenizer(input_, max_length=self.input_length, padding='max_length', truncation=True, return_tensors='pt')
        targets = self.tokenizer(target_, max_length=self.output_length, add_special_tokens=False, padding='max_length', truncation=True, return_tensors='pt')
        return (source, targets, doc_id, task, task_type, choices, answer_index)

    def __getitem__(self, index):
        data = self.dataset.iloc[index]
        try:
            (source, targets, doc_id, task, task_type, choices, answer_index) = self.convert_to_features(data)
        except:
            # NOTE(review): this bare except only prints the offending row;
            # the variables below are then undefined and a NameError follows.
            print(data)
        source_ids = source['input_ids'].squeeze()
        target_ids = targets['input_ids'].squeeze()
        src_mask = source['attention_mask'].squeeze()
        target_mask = targets['attention_mask'].squeeze()
        return {'source_ids': source_ids, 'source_mask': src_mask, 'target_ids': target_ids, 'target_mask': target_mask, 'doc_id': doc_id, 'task': task, 'task_type': task_type, 'choices': choices, 'answer_index': answer_index}
class StopOnTokens(StoppingCriteria):
    """Stop generation once a configured stop token appears past min_length.

    start_length is the prompt length; min_length is the minimum number of
    newly generated tokens before stopping is considered.
    """

    def __init__(self, min_length: int, start_length: int, stop_token_id: List[int]):
        self.min_length = min_length
        self.start_length = start_length
        self.stop_token_id = stop_token_id

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        if (scores is not None):
            # Scores available: len(scores) counts generated steps.
            if (len(scores) > self.min_length):
                position = (self.start_length - 1) + len(scores)
                for stop_id in self.stop_token_id:
                    if (input_ids[0][position] == stop_id):
                        return True
        elif ((input_ids.shape[(- 1)] - self.start_length) > self.min_length):
            # No scores: inspect the most recently generated token instead.
            last_token = input_ids[0][input_ids.shape[(- 1)] - 1]
            for stop_id in self.stop_token_id:
                if (last_token == stop_id):
                    return True
        return False
def get_tsdataset():
    """Build a TSDataset fixture from the synthetic time-series frame."""
    frame = get_ts_df()
    return TSDataset.from_pandas(
        frame,
        dt_col='datetime',
        target_col=['value 1', 'value 2'],
        extra_feature_col=['extra feature 1', 'extra feature 2'],
        id_col='id',
    )
def main(image_root: Path, output_root: Path, transform: str, plot_subdir: Path, moment: str, log: bool, vmin: float, vmax: float, img_dirs: List[str], overwrite: bool, num_workers: int, experiment: str, fraction: float, zoom: bool, diff: bool, fixed_height: int):
    """Run a frequency analysis over image directories and save one plot.

    Applies TRANSFORMS[transform] to every directory in parallel (with
    on-disk caching), aggregates per-directory mean/std spectra, then
    renders either a spectral-density curve plot or a spectra image grid
    under <output_root>/frequency_analysis/plots.
    """
    output_dir = (output_root / 'frequency_analysis')
    plot_dir = (output_dir / 'plots')
    if (plot_subdir is not None):
        plot_dir = (plot_dir / plot_subdir)
    plot_dir.mkdir(exist_ok=True, parents=True)
    # One (mean, std) spectrum pair per image directory, computed in workers.
    mean_std = parallel_map(apply_to_imgdir, [(image_root / dirname) for dirname in img_dirs], num_workers=num_workers, mode='multiprocessing', func_kwargs=dict(func=TRANSFORMS[transform], grayscale=True, cache_dir=(output_dir / 'cache'), overwrite=overwrite))
    (means, stds) = zip(*mean_std)
    df = pd.DataFrame({'mean': means, 'std': stds}, index=img_dirs)
    labels = img_dirs
    # Optional pretty-name mapping for the plot legend.
    if (image_root / 'labels.json').exists():
        with open((image_root / 'labels.json')) as f:
            label_updates = json.load(f)
        labels = [(label_updates[label] if (label in label_updates) else label) for label in labels]
    # Select the requested moment ('mean' or 'std') of each spectrum.
    data = np.stack(df[moment])
    if (transform == 'density'):
        plt.figure(figsize=get_figsize(fraction=fraction))
        if diff:
            # Relative error of each spectrum vs. the first (reference) one.
            data = ((data[1:] / data[0]) - 1)
            plot_power_spectrum(data=data, labels=labels[1:], log=log, zoom=zoom, first_black=False)
            plt.ylabel('Spectral Density Error')
            plt.ylim((- 1), 1)
        else:
            plot_power_spectrum(data=data, labels=labels, log=log, zoom=zoom)
            plt.ylim((10 ** (- 5)), (10 ** 3))
    else:
        plot_spectra(data=np.abs(data), labels=labels, width=get_figsize(fraction=fraction)[0], log=log, vmin=vmin, vmax=vmax, fixed_height=fixed_height)
    filename = get_filename(file_format='pdf', kind=transform, variant=moment, data='_'.join(img_dirs), experiment=experiment, identifiers=('diff' if diff else None))
    plt.savefig((plot_dir / filename))
    plt.close()
class PreOptimization():
    """Graph-level pre-optimization pipeline run before quantization.

    Applies grappler plus a long sequence of TensorFlow graph rewrites
    (fusion, folding, stripping, device placement) to ``model.graph_def``
    and returns a new model wrapping the optimized graph.
    """

    def __init__(self, model, new_api, device):
        self.model = model
        # Newer TF versions get the extended grappler config; constfold and
        # arithmetic passes are explicitly disabled there.
        if (version1_gte_version2(tf.version.VERSION, '2.1.0') or version1_eq_version2(tf.version.VERSION, '1.15.0-up3')):
            self.optimization = {'pruning': True, 'shape': True, 'constfold': False, 'arithmetic': False, 'dependency': True, 'debug_stripper': True, 'loop': True}
        else:
            self.optimization = {'pruning': True, 'shape': True, 'dependency': True, 'debug_stripper': True, 'loop': True}
        node_names = [node.name for node in model.graph_def.node]
        # Graphs with table initialization must keep their control deps.
        if ('init_all_tables' in node_names):
            self.optimization['dependency'] = False
            self.optimization['pruning'] = False
        self.new_api = new_api
        self.device = device
        self.analyzer = GraphAnalyzer()
        self.analyzer.graph = model.graph_def
        self.analyzer.parse_graph()
        self._tmp_graph_def = None
        self._excluded_node_names = []

    def get_excluded_node_names(self):
        """Return the node names excluded from optimization."""
        return self._excluded_node_names

    # NOTE(review): this bare call looks like a mangled decorator, e.g.
    # ``@dump_elapsed_time('Pass Pre Optimization')`` — confirm against the
    # original source; as written it is an expression statement.
    _elapsed_time('Pass Pre Optimization')
    def get_optimized_model(self, itex_mode=False):
        """Run the full rewrite pipeline and return the optimized model."""
        from neural_compressor.model import Model
        # Clone the model wrapper so the original is left untouched.
        origin_model = Model(self.model._model, **self.model.kwargs, backend=('itex' if itex_mode else 'default'))
        origin_model.name = self.model.name
        origin_model.model_type = self.model.model_type
        origin_model.output_tensor_names = self.model.output_tensor_names
        origin_model.input_tensor_names = self.model.input_tensor_names
        origin_model.workspace_path = self.model.workspace_path
        output_node_names = self.model.output_node_names
        input_node_names = self.model.input_node_names
        input_output_names = (output_node_names + input_node_names)
        if version1_gte_version2(tf.version.VERSION, '2.10.0'):
            # TF >= 2.10: pin every node to a concrete device before the
            # layout conversion.
            cur_graph = GraphAnalyzer()
            cur_graph.graph = self.model.graph_def
            graph_info = cur_graph.parse_graph()
            if (self.device == 'cpu'):
                cpus = tf.config.list_physical_devices('CPU')
                node_device = cpus[0].name.replace('physical_device:', '')
            else:
                # Prefer GPU, then XPU, falling back to CPU.
                gpus = tf.config.list_physical_devices('GPU')
                if (len(gpus) == 0):
                    xpus = tf.config.list_physical_devices('XPU')
                    if (len(xpus) == 0):
                        cpus = tf.config.list_physical_devices('CPU')
                        node_device = cpus[0].name.replace('physical_device:', '')
                    else:
                        node_device = xpus[0].name.replace('physical_device:', '')
                else:
                    node_device = gpus[0].name.replace('physical_device:', '')
            for node_name in list(graph_info.keys()):
                node = graph_info[node_name].node
                node.device = node_device
            self._tmp_graph_def = cur_graph.dump_graph()
            self._tmp_graph_def = ConvertLayoutOptimizer(self._tmp_graph_def, output_node_names).do_transformation()
        else:
            self._tmp_graph_def = ConvertLayoutOptimizer(self.model.graph_def, output_node_names).do_transformation()
        # Ordered rewrite pipeline; each pass consumes the previous result.
        self._tmp_graph_def = ConvertPlaceholderToConst(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = SwitchOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = GrapplerOptimizer(self._tmp_graph_def, input_output_names, self.optimization).do_transformation()
        self._tmp_graph_def = StripUnusedNodesOptimizer(self._tmp_graph_def, input_node_names, output_node_names).do_transformation()
        self._tmp_graph_def = RemoveTrainingNodesOptimizer(self._tmp_graph_def, protected_nodes=input_output_names).do_transformation()
        self._tmp_graph_def = SplitSharedInputOptimizer(self._tmp_graph_def).do_transformation()
        if self.new_api:
            # New-API-only fusions run before constant folding.
            self._tmp_graph_def = FuseDecomposedBNOptimizer(self._tmp_graph_def).do_transformation()
            self._tmp_graph_def = FuseDecomposedINOptimizer(self._tmp_graph_def).do_transformation()
            self._tmp_graph_def = FuseLayerNormOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = GraphFoldConstantOptimizer(self._tmp_graph_def).do_transformation()
        if (not self.new_api):
            # Old API fuses decomposed BN after folding instead.
            self._tmp_graph_def = FuseDecomposedBNOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = FuseColumnWiseMulOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = StripUnusedNodesOptimizer(self._tmp_graph_def, input_node_names, output_node_names).do_transformation()
        self._tmp_graph_def = FuseGeluOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = GraphCseOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = FoldBatchNormNodesOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = RenameBatchNormOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = ConvertLeakyReluOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = ConvertAddToBiasAddOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = FuseTransposeReshapeOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = FuseConvWithMathOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = ExpandDimsOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = FetchWeightFromReshapeOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = MoveSqueezeAfterReluOptimizer(self._tmp_graph_def).do_transformation()
        if ((not self.new_api) and (not itex_mode)):
            self._tmp_graph_def = InjectDummyBiasAddOptimizer(self._tmp_graph_def, output_node_names).do_transformation()
        self._tmp_graph_def = FuseBiasAddAndAddOptimizer(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = ConvertNanToRandom(self._tmp_graph_def).do_transformation()
        self._tmp_graph_def = StripEquivalentNodesOptimizer(self._tmp_graph_def, output_node_names).do_transformation()
        if (self.new_api or itex_mode):
            self._tmp_graph_def = DilatedContraction(self._tmp_graph_def).do_transformation()
        # Pick the device-listing API compatible with the running TF version.
        if version1_lt_version2(tf.version.VERSION, '2.0.0'):
            from tensorflow._api.v1.config import experimental
            list_physical_devices = experimental.list_physical_devices
        else:
            list_physical_devices = tf.config.list_physical_devices
        # Re-pin every node of the optimized graph to the chosen device.
        cur_graph = GraphAnalyzer()
        cur_graph.graph = self._tmp_graph_def
        graph_info = cur_graph.parse_graph()
        if (self.device == 'cpu'):
            cpus = list_physical_devices('CPU')
            node_device = cpus[0].name.replace('physical_device:', '')
        else:
            gpus = list_physical_devices('GPU')
            if (len(gpus) == 0):
                xpus = list_physical_devices('XPU')
                if (len(xpus) == 0):
                    cpus = list_physical_devices('CPU')
                    node_device = cpus[0].name.replace('physical_device:', '')
                else:
                    node_device = xpus[0].name.replace('physical_device:', '')
            else:
                node_device = gpus[0].name.replace('physical_device:', '')
        for node_name in list(graph_info.keys()):
            node = graph_info[node_name].node
            node.device = node_device
        self._tmp_graph_def = cur_graph.dump_graph()
        # Preserve the original function library (e.g. swish_f32).
        self._tmp_graph_def.library.CopyFrom(self.model.graph_def.library)
        for function_def in self.model.graph_def.library.function:
            if (function_def.signature.name == 'swish_f32'):
                self._tmp_graph_def.library.function.extend([copy.deepcopy(function_def)])
        origin_model.graph_def = self._tmp_graph_def
        return origin_model

    def get_matched_nodes(self, patterns):
        """Return nodes of the optimized graph matching any fusion pattern, deduplicated."""
        self.analyzer.graph = self._tmp_graph_def
        self.analyzer.parse_graph()
        res = []
        for sub_pattern in patterns:
            res.extend([i for i in self.analyzer.query_fusion_pattern_nodes(sub_pattern) if (i not in res)])
        return res

    def has_positive_input(self, node_name):
        """Delegate to the graph analyzer: does the node's input stay positive?"""
        return self.analyzer.has_positive_input(node_name)
# NOTE(review): ``_grad()`` looks like a mangled ``@torch.no_grad()``
# decorator — confirm against the original source.
_grad()
def test(model, x, evaluator, y, train_idx, val_idx, test_idx, out=None):
    """Evaluate split accuracies; reuses precomputed logits ``out`` when given.

    Returns (train_acc, val_acc, test_acc, out) so callers can cache logits.
    """
    model.eval()
    out = (model(x) if (out is None) else out)
    pred = out.argmax(dim=(- 1), keepdim=True)
    train_acc = evaluator.eval({'y_true': y[train_idx], 'y_pred': pred[train_idx]})['acc']
    val_acc = evaluator.eval({'y_true': y[val_idx], 'y_pred': pred[val_idx]})['acc']
    test_acc = evaluator.eval({'y_true': y[test_idx], 'y_pred': pred[test_idx]})['acc']
    return (train_acc, val_acc, test_acc, out)
def test_no_unlinked_images():
    """Every file in images/ must be referenced by metadata, except one known stray."""
    referenced = set(metadata['filename'])
    on_disk = set(os.listdir('images'))
    assert (on_disk.difference(referenced) == set(['FAFA-A1BF-49A8-A1D3-66FAFA41B7345D.jpg']))
def test_dice_lose():
    """Smoke-test DiceLoss with class weights given inline, from a .pkl
    file, and from a .npy file, plus reduction/ignore_index variants."""
    from mmseg.models import build_loss
    # Inline class weights.
    loss_cfg = dict(type='DiceLoss', reduction='none', class_weight=[1.0, 2.0, 3.0], loss_weight=1.0, ignore_index=1, loss_name='loss_dice')
    dice_loss = build_loss(loss_cfg)
    logits = torch.rand(8, 3, 4, 4)
    labels = (torch.rand(8, 4, 4) * 3).long()
    dice_loss(logits, labels)
    import os
    import tempfile
    import mmcv
    import numpy as np
    tmp_file = tempfile.NamedTemporaryFile()
    # Class weights loaded from a pickle file.
    mmcv.dump([1.0, 2.0, 3.0], f'{tmp_file.name}.pkl', 'pkl')
    loss_cfg = dict(type='DiceLoss', reduction='none', class_weight=f'{tmp_file.name}.pkl', loss_weight=1.0, ignore_index=1, loss_name='loss_dice')
    dice_loss = build_loss(loss_cfg)
    dice_loss(logits, labels, ignore_index=None)
    # Class weights loaded from a numpy file.  BUG FIX: this case previously
    # pointed at the .pkl path again, so the saved .npy file was never used.
    np.save(f'{tmp_file.name}.npy', np.array([1.0, 2.0, 3.0]))
    loss_cfg = dict(type='DiceLoss', reduction='none', class_weight=f'{tmp_file.name}.npy', loss_weight=1.0, ignore_index=1, loss_name='loss_dice')
    dice_loss = build_loss(loss_cfg)
    dice_loss(logits, labels, ignore_index=None)
    tmp_file.close()
    os.remove(f'{tmp_file.name}.pkl')
    os.remove(f'{tmp_file.name}.npy')
    # Sum reduction with custom smooth/exponent.
    loss_cfg = dict(type='DiceLoss', smooth=2, exponent=3, reduction='sum', loss_weight=1.0, ignore_index=0, loss_name='loss_dice')
    dice_loss = build_loss(loss_cfg)
    logits = torch.rand(8, 2, 4, 4)
    labels = (torch.rand(8, 4, 4) * 2).long()
    dice_loss(logits, labels)
    loss_cfg = dict(type='DiceLoss', smooth=2, exponent=3, reduction='sum', loss_weight=1.0, ignore_index=0, loss_name='loss_dice')
    dice_loss = build_loss(loss_cfg)
    assert (dice_loss.loss_name == 'loss_dice')
class kitenet(nn.Module):
    """Kite-shaped segmentation net: three upsampling encoder stages
    followed by three downsampling decoder stages; output has 2 channels
    at the input resolution.  (self.soft is defined but unused in forward.)
    """

    def __init__(self):
        super(kitenet, self).__init__()
        self.encoder1 = nn.Conv2d(1, 32, 3, stride=1, padding=1)
        self.encoder2 = nn.Conv2d(32, 64, 3, stride=1, padding=1)
        self.encoder3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
        self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
        self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
        self.decoder5 = nn.Conv2d(32, 2, 3, stride=1, padding=1)
        self.soft = nn.Softmax(dim=1)

    def forward(self, x):
        feats = x
        # Encoder: conv -> 2x bilinear upsample -> relu, three times.
        for encoder in (self.encoder1, self.encoder2, self.encoder3):
            feats = F.relu(F.interpolate(encoder(feats), scale_factor=(2, 2), mode='bilinear'))
        # Decoder: conv -> 2x max-pool -> relu, three times.
        for decoder in (self.decoder3, self.decoder4, self.decoder5):
            feats = F.relu(F.max_pool2d(decoder(feats), 2, 2))
        return feats
def save_pkl(filename, save_object):
    """Pickle *save_object* to *filename*.

    Uses a context manager so the file handle is closed even when pickling
    raises (the original leaked the handle on error).
    """
    with open(filename, 'wb') as writer:
        pickle.dump(save_object, writer)
class GraphEmbedder(nn.Module):
    """Embed a graph: GGNN message passing followed by a gated
    project-up/aggregate readout producing one vector per graph.
    """

    def __init__(self, hidden_layer_size, edge_names, embedding_dim, num_layers):
        super().__init__()
        self.ggnn = ggnn_sparse.GGNNSparse(ggnn_base.GGNNParams(hidden_layer_size, edge_names, num_layers))
        # Readout: project node features up to the embedding size, gate
        # them, and aggregate per graph; no down-projection afterwards.
        project_up = mlp.get_mlp(mlp.MlpParams(hidden_layer_size, embedding_dim, []))
        gate = mlp.get_mlp(mlp.MlpParams(hidden_layer_size, embedding_dim, []))
        identity = (lambda x: x)
        self.embedding_dim = embedding_dim
        self.ggnn_top = ggnn_sparse.GraphFeaturesStackIndexAdd(project_up, gate, identity)

    def forward(self, g_adjlist: graph_as_adj_list.DirectedGraphAsAdjList):
        propagated: graph_as_adj_list.DirectedGraphAsAdjList = self.ggnn(g_adjlist)
        return self.ggnn_top(propagated.node_features, propagated.node_to_graph_id)
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None, class_weight=None, ignore_index=None, **kwargs):
    """Binary cross-entropy on one predicted mask slice per RoI.

    For each RoI i, the channel ``label[i]`` of ``pred`` is selected and
    scored against ``target`` with BCE-with-logits (mean reduction only).
    Returns a 1-element tensor.
    """
    assert (ignore_index is None), 'BCE loss does not support ignore_index'
    assert ((reduction == 'mean') and (avg_factor is None))
    num_rois = pred.size()[0]
    row_inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    # Pick the class-specific mask prediction for every RoI.
    selected = pred[row_inds, label].squeeze(1)
    loss = F.binary_cross_entropy_with_logits(selected, target, weight=class_weight, reduction='mean')
    return loss[None]
def valid_dataloader_creator(config):
    """Build the validation DataLoader: 400 random samples, shuffled,
    batched per config['batch_size'].
    """
    import torch
    from torch.utils.data import DataLoader
    dataset_cls = gen_RandomDataset()
    return DataLoader(dataset_cls(size=400), batch_size=config['batch_size'], shuffle=True)
def _prepare_backbone(backbone, pretrained, convert_bn):
    """Optionally load pretrained weights and convert BatchNorm layers."""
    if pretrained:
        print('Load pretrained model: ', pretrained)
        backbone.load_state_dict(torch.load(pretrained), strict=True)
    if (convert_bn and (not isinstance(convert_bn, torch.nn.BatchNorm2d))):
        print('Converting Batch Norm to: ', convert_bn)
        backbone = convert_BN(backbone, convert_bn)
    return backbone


def _apply_dilation(layer3, layer4, backbone_output_stride, split_conv2):
    """Rewrite strides/dilations of the last stages for the target output stride.

    split_conv2: pyconv blocks split their 3x3 conv into 'conv2_1'/'conv2_2'
    (with different paddings); plain resnet has a single 'conv2'.
    """
    if (backbone_output_stride == 8):
        for (n, m) in layer3.named_modules():
            if (split_conv2 and ('conv2_1' in n)):
                (m.dilation, m.padding, m.stride) = ((2, 2), (2, 2), (1, 1))
            elif (split_conv2 and ('conv2_2' in n)):
                (m.dilation, m.padding, m.stride) = ((2, 2), (4, 4), (1, 1))
            elif ((not split_conv2) and ('conv2' in n)):
                (m.dilation, m.padding, m.stride) = ((2, 2), (2, 2), (1, 1))
            elif ('downsample.0' in n):
                m.stride = (1, 1)
        for (n, m) in layer4.named_modules():
            if ('conv2' in n):
                (m.dilation, m.padding, m.stride) = ((4, 4), (4, 4), (1, 1))
            elif ('downsample.0' in n):
                m.stride = (1, 1)
    if (backbone_output_stride == 16):
        for (n, m) in layer4.named_modules():
            if ('conv2' in n):
                (m.dilation, m.padding, m.stride) = ((2, 2), (2, 2), (1, 1))
            elif ('downsample.0' in n):
                m.stride = (1, 1)


def build_backbone_layers(backbone_net, layers, pretrained, backbone_output_stride=8, convert_bn=None):
    """Instantiate a backbone and slice it into five sequential stages.

    Parameters mirror the original: backbone family name, depth (50/101/152),
    optional pretrained checkpoint path, desired output stride (8 or 16),
    optional BatchNorm replacement.  Returns (layer0, layer1, layer2,
    layer3, layer4), or None for an unrecognized backbone_net (preserving
    the original fall-through behavior).
    """
    # family -> (module, constructor prefix, stem has maxpool, split conv2 names)
    configs = {
        'pyconvhgresnet': (pyconvhgresnet, 'pyconvhgresnet', False, True),
        'pyconvresnet': (pyconvresnet, 'pyconvresnet', False, True),
        'resnet': (resnet, 'resnet', True, False),
    }
    if (backbone_net not in configs):
        return None
    (module, prefix, with_maxpool, split_conv2) = configs[backbone_net]
    if (layers not in (50, 101, 152)):
        raise ValueError(f'Unsupported depth for {backbone_net}: {layers}')
    backbone = getattr(module, f'{prefix}{layers}')()
    backbone = _prepare_backbone(backbone, pretrained, convert_bn)
    stem = [backbone.conv1, backbone.bn1, backbone.relu]
    if with_maxpool:
        # Plain resnet keeps its stem max-pool; pyconv variants have none.
        stem.append(backbone.maxpool)
    layer0 = nn.Sequential(*stem)
    (layer1, layer2, layer3, layer4) = (backbone.layer1, backbone.layer2, backbone.layer3, backbone.layer4)
    _apply_dilation(layer3, layer4, backbone_output_stride, split_conv2)
    return (layer0, layer1, layer2, layer3, layer4)
class LazyFrames(object):
    """Lazily-concatenated stack of observation frames.

    Holds the individual frames until the stacked array is first needed,
    then materializes the concatenation (along the last axis) exactly once
    and drops the per-frame references to save memory.
    """

    def __init__(self, frames):
        self._frames = frames
        self._out = None

    def _force(self):
        """Materialize (once) and return the concatenated array."""
        if (self._out is None):
            self._out = np.concatenate(self._frames, axis=(- 1))
            self._frames = None  # release the originals
        return self._out

    def __array__(self, dtype=None):
        stacked = self._force()
        return stacked if dtype is None else stacked.astype(dtype)

    def __len__(self):
        return len(self._force())

    def __getitem__(self, i):
        return self._force()[i]

    def count(self):
        """Number of stacked frames (size of the last axis)."""
        return self._force().shape[(- 1)]

    def frame(self, i):
        """Return the i-th frame slice of the stacked array."""
        return self._force()[(..., i)]
class DeformRoIPoolFunction(Function):
    """autograd Function for (modulated) deformable RoI pooling backed by
    the compiled ``ext_module`` kernels.

    NOTE(review): in mmcv these methods normally carry ``@staticmethod``
    decorators and ``backward`` is wrapped with ``@once_differentiable``
    (the bare ``_differentiable`` line below looks like that decorator
    stripped of its '@' prefix) — confirm against the original source.
    """

    def symbolic(g, input, rois, offset, output_size, spatial_scale, sampling_ratio, gamma):
        """ONNX export: emit a single MMCVDeformRoIPool node."""
        return g.op('MMCVDeformRoIPool', input, rois, offset, pooled_height=output_size[0], pooled_width=output_size[1], spatial_scale=spatial_scale, sampling_ratio=sampling_ratio, gamma=gamma)

    def forward(ctx, input, rois, offset, output_size, spatial_scale=1.0, sampling_ratio=0, gamma=0.1):
        """Pool each RoI from ``input``; ``offset`` deforms the sampling grid."""
        if (offset is None):
            # No offsets: plain RoI align; the kernel accepts an empty tensor.
            offset = input.new_zeros(0)
        # Stash scalar config on ctx for backward.
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = float(spatial_scale)
        ctx.sampling_ratio = int(sampling_ratio)
        ctx.gamma = float(gamma)
        assert (rois.size(1) == 5), 'RoI must be (idx, x1, y1, x2, y2)!'
        output_shape = (rois.size(0), input.size(1), ctx.output_size[0], ctx.output_size[1])
        output = input.new_zeros(output_shape)
        ext_module.deform_roi_pool_forward(input, rois, offset, output, pooled_height=ctx.output_size[0], pooled_width=ctx.output_size[1], spatial_scale=ctx.spatial_scale, sampling_ratio=ctx.sampling_ratio, gamma=ctx.gamma)
        ctx.save_for_backward(input, rois, offset)
        return output

    _differentiable
    def backward(ctx, grad_output):
        """Compute gradients for ``input`` and (when present) ``offset``."""
        (input, rois, offset) = ctx.saved_tensors
        grad_input = grad_output.new_zeros(input.shape)
        grad_offset = grad_output.new_zeros(offset.shape)
        ext_module.deform_roi_pool_backward(grad_output, input, rois, offset, grad_input, grad_offset, pooled_height=ctx.output_size[0], pooled_width=ctx.output_size[1], spatial_scale=ctx.spatial_scale, sampling_ratio=ctx.sampling_ratio, gamma=ctx.gamma)
        if (grad_offset.numel() == 0):
            # Offsets were the empty placeholder -> no offset gradient.
            grad_offset = None
        # One slot per forward argument; non-tensor args get None.
        return (grad_input, None, grad_offset, None, None, None, None)
class RandomRotate(object):
    """Rotate an (image, mask) pair by one random angle drawn from [-degree, degree]."""

    def __init__(self, degree):
        self.degree = degree

    def __call__(self, img, mask):
        # Same angle for image and mask so they stay aligned; the mask uses
        # nearest-neighbour resampling and a distinct ignore fill value.
        angle = ((random.random() * 2) * self.degree) - self.degree
        rotated_img = tf.affine(img, translate=(0, 0), scale=1.0, angle=angle, resample=Image.BILINEAR, fillcolor=(0, 0, 0), shear=0.0)
        rotated_mask = tf.affine(mask, translate=(0, 0), scale=1.0, angle=angle, resample=Image.NEAREST, fillcolor=250, shear=0.0)
        return rotated_img, rotated_mask
def Add_Window_Horizon(data, window=3, horizon=1, single=False):
    """Slice a time series into (X, Y) training pairs with a sliding window.

    Args:
        data: array-like of shape (T, ...), indexed along the first axis.
        window: number of consecutive steps in each input sample X.
        horizon: number of steps ahead to predict.
        single: if True, Y holds only the single step `horizon` ahead of the
            window; otherwise Y is the full `horizon`-long block.

    Returns:
        (X, Y) numpy arrays with X.shape[0] == Y.shape[0] ==
        len(data) - window - horizon + 1 (empty arrays when data is too short).
    """
    length = len(data)
    end_index = length - horizon - window + 1
    X, Y = [], []
    # The two near-identical while-loops of the original are merged; range()
    # handles a negative end_index (too-short input) by producing no samples.
    for start in range(end_index):
        stop = start + window
        X.append(data[start:stop])
        if single:
            # Only the final step of the horizon.
            Y.append(data[stop + horizon - 1:stop + horizon])
        else:
            Y.append(data[stop:stop + horizon])
    return np.array(X), np.array(Y)
def pyconvresnet34(pretrained=False, **kwargs):
    """Build a PyConvResNet-34 (basic blocks, layer layout [3, 4, 6, 3])."""
    net = PyConvResNet(PyConvBasicBlock2, [3, 4, 6, 3], **kwargs)
    if pretrained:
        # No released weights for this variant yet.
        raise NotImplementedError('Not available the pretrained model yet!')
    return net
class CollisionObjectManager(object):
    """Publishes MoveIt CollisionObjects for URDF-described objects tracked via TF.

    Each registered object is looked up in TF on every `tick()` and republished
    relative to `root` — as an ADD the first time, then as MOVE operations.
    """

    def __init__(self, root='/world', listener=None, max_dt=1.0):
        self.objs = {}    # name -> last CollisionObject published (None until first ADD)
        self.urdfs = {}   # name -> parsed URDF model
        self.frames = {}  # name -> TF frame registered for the object
        if listener is None:
            self.listener = tf.TransformListener()
        else:
            self.listener = listener
        self.root = root
        self.max_dt = max_dt  # max age (seconds) of a TF observation before skipping
        # Block until the planning scene is available so publishes are not lost.
        rospy.wait_for_service('/get_planning_scene')
        self.co_pub = rospy.Publisher('collision_object', CollisionObject, queue_size=1000)

    def addUrdf(self, name, rosparam, tf_frame=None):
        """Register object `name`, loading its URDF from `rosparam`.

        `tf_frame` defaults to `name` when not given.
        """
        urdf = _getUrdf(name, rosparam)
        self.objs[name] = None
        self.urdfs[name] = urdf
        if tf_frame is None:
            tf_frame = name
        self.frames[name] = tf_frame

    def tick(self):
        """Refresh poses from TF and publish collision objects for all URDFs."""
        self.t = rospy.Time.now()
        for name, urdf in self.urdfs.items():
            # First publication must ADD; afterwards we only MOVE the object.
            if self.objs[name] is None:  # fixed: was `== None`
                operation = CollisionObject.ADD
            else:
                operation = CollisionObject.MOVE
            # NOTE(review): the TF lookups below use `name`, not self.frames[name];
            # the tf_frame registered in addUrdf appears unused — confirm.
            if not self.listener.frameExists(name):
                continue
            try:
                t = self.listener.getLatestCommonTime(self.root, name)
            except tf.Exception as e:
                rospy.logerr(str(e))
                continue
            dt = (self.t - t).to_sec()
            if dt > self.max_dt:
                # Stale observation: keep the previous pose instead of publishing.
                rospy.logwarn('object %s has not been observed in the last %f seconds' % (name, dt))
                continue
            pose = self.listener.lookupTransform(self.root, name, t)
            pose = pm.fromTf(pose)
            co = _getCollisionObject(name, urdf, pose, operation)
            co.header.frame_id = self.root
            self.objs[name] = co
            self.co_pub.publish(co)
def get_vehicle_dyn_scenario() -> SimContext:
    """Assemble a 4-player simulation context on the USA_Lanker-1_1_T-1 scenario.

    P1-P3 use dynamic vehicle models (car / bicycle / car), P4 a kinematic car;
    P1 follows its own command sequence while P2-P4 share a second one.
    """
    scenario_name = 'USA_Lanker-1_1_T-1'
    (scenario, planning_problem_set) = load_commonroad_scenario(scenario_name)
    # Initial states: speeds given in km/h, headings in degrees.
    x0_p1 = VehicleStateDyn(x=0, y=0, psi=deg2rad(0), vx=kmh2ms(50), delta=0)
    x0_p2 = VehicleStateDyn(x=25, y=(- 10), psi=deg2rad(90), vx=kmh2ms(0), delta=1)
    x0_p3 = VehicleStateDyn(x=(- 10), y=(- 15), psi=deg2rad(90), vx=kmh2ms(0), vy=kmh2ms((- 10)), delta=(- 1))
    x0_p4 = VehicleState(x=0, y=(- 15), psi=deg2rad(90), vx=kmh2ms(0), delta=(- 1))
    models = {P1: VehicleModelDyn.default_car(x0_p1), P2: VehicleModelDyn.default_bicycle(x0_p2), P3: VehicleModelDyn.default_car(x0_p3), P4: VehicleModel.default_car(x0_p4)}
    # Open-loop command sequences sampled at 1 Hz.
    cmds_p1 = DgSampledSequence[VehicleCommands](timestamps=[0, 1, 2, 3, 4, 5], values=[VehicleCommands(acc=5, ddelta=1), VehicleCommands(acc=5, ddelta=1), VehicleCommands(acc=5, ddelta=(- 5)), VehicleCommands(acc=5, ddelta=4), VehicleCommands(acc=5, ddelta=(- 3)), VehicleCommands(acc=5, ddelta=(- 3))])
    cmds_p2 = DgSampledSequence[VehicleCommands](timestamps=[0, 1, 2, 3, 4, 5], values=[VehicleCommands(acc=0, ddelta=0), VehicleCommands(acc=5, ddelta=0), VehicleCommands(acc=5, ddelta=(- 1)), VehicleCommands(acc=3, ddelta=(- 1)), VehicleCommands(acc=5, ddelta=(- 3)), VehicleCommands(acc=0, ddelta=3)])
    players = {P1: NPAgent(cmds_p1), P2: NPAgent(cmds_p2), P3: NPAgent(cmds_p2), P4: NPAgent(cmds_p2)}
    return SimContext(dg_scenario=DgScenario(scenario=scenario, use_road_boundaries=True), models=models, players=players, param=SimParameters(dt=D('0.01'), dt_commands=D('0.1'), sim_time_after_collision=D(4), max_sim_time=D(10)))
class ActionPublisher():
    """Eval-mode ROS node that republishes the latest cmd_vel at a fixed sim rate.

    The constructor never returns: it enters the publish loop until rospy
    shutdown.
    """
    def __init__(self):
        if rospy.get_param('train_mode'):
            raise Exception('This node should be used solely in eval mode!')
        rospy.init_node('action_publisher', anonymous=True)
        self._step_size = rospy.get_param('step_size')
        self._update_rate = rospy.get_param('update_rate')
        # Wall-clock seconds corresponding to one simulated second.
        self._real_second_in_sim = (self._step_size * self._update_rate)
        self._action_publish_rate = rospy.get_param('/robot_action_rate')
        # Sleep interval between publishes, scaled by simulation speed.
        rate = ((1 / self._action_publish_rate) / self._real_second_in_sim)
        ns_prefix = ('' if ('/single_env' in rospy.get_param_names()) else '/eval_sim/')
        self._pub_cmd_vel = rospy.Publisher(f'{ns_prefix}cmd_vel', Twist, queue_size=1)
        self._pub_cycle_trigger = rospy.Publisher(f'{ns_prefix}next_cycle', Bool, queue_size=1)
        self._sub = rospy.Subscriber(f'{ns_prefix}cmd_vel_pub', Twist, self.callback_receive_cmd_vel, queue_size=1)
        self._action = Twist()
        self._signal = Bool()
        self._clock = Clock().clock.to_sec()
        last_action = self._action
        # Publish loop: wait for an upstream publisher to appear, then forward
        # the most recently received command every `rate` seconds.
        while (not rospy.is_shutdown()):
            if (self._sub.get_num_connections() < 1):
                print(f'ActionPublisher: No publisher to {ns_prefix}cmd_vel_pub yet.. ')
                time.sleep(1)
                continue
            self._pub_cmd_vel.publish(self._action)
            self._pub_cycle_trigger.publish(self._signal)
            print(f'Published same action: {(last_action == self._action)}')
            last_action = self._action
            time.sleep(rate)
    def callback_receive_cmd_vel(self, msg_cmd_vel: Twist):
        # Cache the newest command; the loop above republishes it.
        self._action = msg_cmd_vel
    def callback_clock(self, msg_clock: Clock):
        # NOTE(review): no clock-topic subscriber is created in this class —
        # confirm this callback is registered elsewhere.
        self._clock = msg_clock.clock.to_sec()
def to_cuda(samples, targets, device):
    """Move a batch (tensor `samples` plus a list of tensor dicts) onto `device`."""
    moved_samples = samples.to(device, non_blocking=True)
    moved_targets = []
    for target in targets:
        moved_targets.append({key: value.to(device, non_blocking=True) for key, value in target.items()})
    return moved_samples, moved_targets
def IoU(r1, r2):
    """Jaccard overlap of two rectangles given as (x, y, w, h).

    Corners are treated as inclusive (x2 = x + w - 1) and areas are computed
    as (x2 - x1) * (y2 - y1), matching the original convention.
    """
    x11, y11, w1, h1 = r1
    x21, y21, w2, h2 = r2
    x12, y12 = x11 + w1 - 1, y11 + h1 - 1
    x22, y22 = x21 + w2 - 1, y21 + h2 - 1
    dx = min(x12, x22) - max(x11, x21)
    dy = min(y12, y22) - max(y11, y21)
    inter = 1.0 * max(0, dx) * max(0, dy)
    union = (x12 - x11) * (y12 - y11) + (x22 - x21) * (y22 - y21) - inter
    return inter / union
def random_resize(data_loader, exp, epoch, rank, is_distributed):
    """Choose a random multiple-of-32 input size on rank 0 and broadcast it.

    During the final 10 epochs the size is pinned to exp.input_size. Returns
    the input size actually applied to the data loader.
    """
    tensor = torch.LongTensor(1).cuda()
    if is_distributed:
        synchronize()
    if (rank == 0):
        if (epoch > (exp.max_epoch - 10)):
            size = exp.input_size
        else:
            size = random.randint(*exp.random_size)
            # Scale the sampled factor up to a multiple-of-32 pixel size.
            size = int((32 * size))
        tensor.fill_(size)
    if is_distributed:
        synchronize()
        # All other ranks receive rank 0's choice.
        dist.broadcast(tensor, 0)
    input_size = data_loader.change_input_dim(multiple=tensor.item(), random_range=None)
    return input_size
def get_data_parallel_world_size():
    """Return the number of ranks in the data-parallel process group."""
    return torch.distributed.get_world_size(group=get_data_parallel_group())
class CheckpointModule(nn.Module):
    """Run a wrapped module's forward pass under activation checkpointing.

    With num_segments > 1 the wrapped module must be an nn.Sequential and is
    checkpointed segment by segment; otherwise it is checkpointed as a whole.
    """

    def __init__(self, module, num_segments=1):
        super(CheckpointModule, self).__init__()
        # Segmented checkpointing only makes sense for sequential containers.
        assert num_segments == 1 or isinstance(module, nn.Sequential)
        self.module = module
        self.num_segments = num_segments

    def forward(self, x):
        if self.num_segments == 1:
            return checkpoint(self.module, x)
        return checkpoint_sequential(self.module, self.num_segments, x)
def test_empty_list_assignment():
    # Rebinding `a` must flag any `b` computed from the old `a` as stale,
    # including when `a` is rebound to an empty list.
    run_cell('a = [5]')
    run_cell('b = a + [6]')
    run_cell('logging.info(b)')
    run_cell('a = [6]')
    run_cell('logging.info(b)')
    assert_detected('`b` depends on stale `a`')
    run_cell('b = a + [7]')
    run_cell('a = []')
    run_cell('logging.info(b)')
    assert_detected('`b` depends on stale `a`')
class resnet_block(nn.Module):
    """Residual block: two reflection-padded convolutions with instance norm.

    Output is `input + F(input)` where
    F = IN(conv2(pad(relu(IN(conv1(pad(x))))))); shapes match the input when
    stride == 1 and padding == (kernel - 1) // 2.
    """

    def __init__(self, channel, kernel, stride, padding):
        super(resnet_block, self).__init__()
        self.channel = channel
        self.kernel = kernel
        self.stride = stride
        # Backward-compat alias: earlier revisions exposed this misspelled name.
        self.strdie = stride
        self.padding = padding
        self.conv1 = nn.Conv2d(channel, channel, kernel, stride, 0)
        self.conv1_norm = nn.InstanceNorm2d(channel)
        self.conv2 = nn.Conv2d(channel, channel, kernel, stride, 0)
        self.conv2_norm = nn.InstanceNorm2d(channel)

    def weight_init(self, mean, std):
        """Apply `normal_init(mean, std)` to every registered submodule."""
        for m in self._modules:
            normal_init(self._modules[m], mean, std)

    def forward(self, input):
        # Reflection-pad manually so both convolutions can use padding=0.
        x = F.pad(input, (self.padding, self.padding, self.padding, self.padding), 'reflect')
        x = F.relu(self.conv1_norm(self.conv1(x)))
        x = F.pad(x, (self.padding, self.padding, self.padding, self.padding), 'reflect')
        x = self.conv2_norm(self.conv2(x))
        # Identity shortcut.
        return (input + x)
def tf_top_k_top_p_filtering(*args, **kwargs):
    # Placeholder stub: raises a helpful error when TensorFlow is not installed.
    requires_backends(tf_top_k_top_p_filtering, ['tf'])
_gs_scheduler('base_gs')
# NOTE(review): the bare call above looks like a mangled decorator
# (e.g. @register_gs_scheduler('base_gs')) — confirm against upstream.
class BaseGsSchedule(object):
    """Exponentially decaying Gumbel-softmax temperature schedule.

    tau(t) = max(tau_max * exp(-tau_r * t'), tau_min), where t' is the update
    count floored down to a multiple of `update_freq`.
    """
    def __init__(self, args):
        self.tau_max = args.gumbel_softmax_max
        self.tau_r = args.gumbel_softmax_tau_r
        self.tau_min = args.gumbel_softmax_min
        self.update_freq = args.gumbel_softmax_update_freq
        # Initialize self.tau for step 0.
        self.step_update(0)
    # NOTE(review): no `self` parameter — presumably decorated @staticmethod
    # upstream; confirm.
    def add_args(parser):
        parser.add_argument('--gumbel-softmax-max', type=float, default=10.0)
        parser.add_argument('--gumbel-softmax-min', type=float, default=1)
        parser.add_argument('--gumbel-softmax-tau-r', type=float, default=0.0001)
        parser.add_argument('--gumbel-softmax-update-freq', type=int, default=5000)
    def step_update(self, num_updates):
        """Recompute and return tau for the given update count."""
        # Temperature changes only every `update_freq` updates.
        update_cliped = (math.floor((num_updates / self.update_freq)) * self.update_freq)
        self.tau = max((self.tau_max * math.exp(((- self.tau_r) * update_cliped))), self.tau_min)
        return self.tau
    def get_gs_tau(self):
        # Last temperature computed by step_update().
        return self.tau
def _unitary(norm):
if (norm not in (None, 'ortho', 'no_norm')):
raise ValueError(("Invalid value %s for norm, must be None, 'ortho' or 'no norm'" % norm))
return norm |
def parse_args():
    """Parse CLI arguments for the image-synthesis script (returns argparse.Namespace)."""
    parser = argparse.ArgumentParser(description='Synthesize images with pre-trained models.')
    parser.add_argument('model_name', type=str, help='Name to the pre-trained model.')
    parser.add_argument('--save_dir', type=str, default=None, help='Directory to save the results. If not specified, the results will be saved to `work_dirs/synthesis/` by default. (default: %(default)s)')
    parser.add_argument('--num', type=int, default=100, help='Number of samples to synthesize. (default: %(default)s)')
    parser.add_argument('--batch_size', type=int, default=1, help='Batch size. (default: %(default)s)')
    parser.add_argument('--generate_html', type=bool_parser, default=True, help='Whether to use HTML page to visualize the synthesized results. (default: %(default)s)')
    parser.add_argument('--save_raw_synthesis', type=bool_parser, default=False, help='Whether to save raw synthesis. (default: %(default)s)')
    parser.add_argument('--seed', type=int, default=0, help='Seed for sampling. (default: %(default)s)')
    # The three options below only affect StyleGAN v1/v2 models.
    parser.add_argument('--trunc_psi', type=float, default=0.7, help='Psi factor used for truncation. This is particularly applicable to StyleGAN (v1/v2). (default: %(default)s)')
    parser.add_argument('--trunc_layers', type=int, default=8, help='Number of layers to perform truncation. This is particularly applicable to StyleGAN (v1/v2). (default: %(default)s)')
    parser.add_argument('--randomize_noise', type=bool_parser, default=False, help='Whether to randomize the layer-wise noise. This is particularly applicable to StyleGAN (v1/v2). (default: %(default)s)')
    return parser.parse_args()
_task('masked_lm', dataclass=MaskedLMConfig)
# NOTE(review): the bare call above looks like a mangled decorator
# (e.g. @register_task('masked_lm', dataclass=MaskedLMConfig)) — confirm.
class MaskedLMTask(FairseqTask):
    """Fairseq task for masked language modeling (BERT-style pretraining)."""
    cfg: MaskedLMConfig
    def __init__(self, cfg: MaskedLMConfig, dictionary=None):
        super().__init__(cfg)
        self.dictionary = (dictionary or self.load_dict(cfg))
        # Register the mask symbol used by MaskTokensDataset.
        self.mask_idx = self.dictionary.add_symbol('<mask>')
    # NOTE(review): the next two methods take `cls` but show no @classmethod
    # decorator — presumably stripped from this copy; confirm against upstream.
    def setup_task(cls, cfg: MaskedLMConfig, **kwargs):
        dictionary = cls.load_dict(cfg)
        return cls(cfg, dictionary)
    def load_dict(cls, cfg):
        # The dictionary is read from the first data shard.
        paths = utils.split_paths(cfg.data)
        assert (len(paths) > 0)
        dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))
        logger.info('dictionary: {} types'.format(len(dictionary)))
        return dictionary
    def _load_dataset_split(self, split, epoch, combine):
        """Load a raw split, shorten it, block into fixed-size chunks, prepend BOS."""
        paths = utils.split_paths(self.cfg.data)
        assert (len(paths) > 0)
        # Data shards rotate by epoch.
        data_path = paths[((epoch - 1) % len(paths))]
        split_path = os.path.join(data_path, split)
        dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, combine=combine)
        if (dataset is None):
            raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
        dataset = maybe_shorten_dataset(dataset, split, self.cfg.shorten_data_split_list, self.cfg.shorten_method, self.cfg.tokens_per_sample, self.cfg.seed)
        # tokens_per_sample - 1 reserves one position for the prepended BOS.
        dataset = TokenBlockDataset(dataset, dataset.sizes, (self.cfg.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode=self.cfg.sample_break_mode)
        logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path))
        return PrependTokenDataset(dataset, self.source_dictionary.bos())
    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a split and apply BERT-style token masking; stores it in self.datasets."""
        dataset = self._load_dataset_split(split, epoch, combine)
        # NOTE(review): uses self.args here while everything else uses self.cfg —
        # confirm against upstream fairseq.
        mask_whole_words = (get_whole_word_mask(self.args, self.source_dictionary) if self.cfg.mask_whole_words else None)
        (src_dataset, tgt_dataset) = MaskTokensDataset.apply_mask(dataset, self.source_dictionary, pad_idx=self.source_dictionary.pad(), mask_idx=self.mask_idx, seed=self.cfg.seed, mask_prob=self.cfg.mask_prob, leave_unmasked_prob=self.cfg.leave_unmasked_prob, random_token_prob=self.cfg.random_token_prob, freq_weighted_replacement=self.cfg.freq_weighted_replacement, mask_whole_words=mask_whole_words, mask_multiple_length=self.cfg.mask_multiple_length, mask_stdev=self.cfg.mask_stdev, skip_masking=self.cfg.skip_masking)
        # Deterministic shuffle tied to the configured seed.
        with data_utils.numpy_seed(self.cfg.seed):
            shuffle = np.random.permutation(len(src_dataset))
        target_dataset = RightPadDataset(tgt_dataset, pad_idx=self.source_dictionary.pad())
        if self.cfg.d2v2_multi:
            dataset = self._d2v2_multi_dataset(src_dataset)
        else:
            dataset = self._regular_dataset(src_dataset, target_dataset)
        self.datasets[split] = SortDataset(dataset, sort_order=[shuffle, src_dataset.sizes])
    def _regular_dataset(self, src_dataset, target_dataset):
        """Standard nested layout: net_input (src_tokens/src_lengths) plus target."""
        input_dict = {'src_tokens': RightPadDataset(src_dataset, pad_idx=self.source_dictionary.pad()), 'src_lengths': NumelDataset(src_dataset, reduce=False)}
        if self.cfg.include_target_tokens:
            input_dict['target_tokens'] = target_dataset
        if self.cfg.include_index:
            input_dict['src_id'] = IdDataset()
        dataset = NestedDictionaryDataset({'id': IdDataset(), 'net_input': input_dict, 'target': target_dataset, 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_dataset, reduce=True)}, sizes=[src_dataset.sizes])
        return dataset
    def _d2v2_multi_dataset(self, src_dataset):
        """data2vec 2.0 multi layout: source/padding_mask input, no target field."""
        input_dict = {'source': RightPadDataset(src_dataset, pad_idx=self.source_dictionary.pad()), 'id': IdDataset(), 'padding_mask': RightPaddingMaskDataset(src_dataset)}
        dataset = NestedDictionaryDataset({'id': IdDataset(), 'net_input': input_dict, 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_dataset, reduce=True)}, sizes=[src_dataset.sizes])
        return dataset
    def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
        """Wrap raw token sequences for inference (eos-delimited blocks, BOS prepended)."""
        src_dataset = RightPadDataset(TokenBlockDataset(src_tokens, src_lengths, (self.cfg.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode='eos'), pad_idx=self.source_dictionary.pad())
        src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
        src_dataset = NestedDictionaryDataset({'id': IdDataset(), 'net_input': {'src_tokens': src_dataset, 'src_lengths': NumelDataset(src_dataset, reduce=False)}}, sizes=src_lengths)
        if sort:
            src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
        return src_dataset
    # NOTE(review): the next two look like @property accessors upstream — confirm.
    def source_dictionary(self):
        return self.dictionary
    def target_dictionary(self):
        return self.dictionary
    def begin_epoch(self, epoch, model):
        # Let the model know the epoch (used e.g. for data-dependent schedules).
        model.set_epoch(epoch)
    def max_positions(self):
        # Maximum sample length supported by the task.
        return self.cfg.tokens_per_sample
def efficientnet_b1(pretrained=False, **kwargs):
    """EfficientNet-B1: width x1.0, depth x1.1 variant of the generic builder."""
    model = _gen_efficientnet('efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
    return model
class Space():
    """Lightweight gym-like observation/action space descriptor.

    Args:
        shape: tensor shape of a single sample (() for scalars).
        dtype: numpy dtype; integer dtypes make the space discrete.
        domain: inclusive (lo, hi) bounds of valid values.
        categorical: True when integer values are category ids rather than counts.
        name: optional human-readable label.
    """

    def __init__(self, shape=(), dtype=np.int32, domain=(0, 1), categorical=False, name=None):
        self.name = name
        (self.shape, self.dtype) = (shape, dtype)
        (self.categorical, (self.lo, self.hi)) = (categorical, domain)

    def is_discrete(self) -> bool:
        return np.issubdtype(self.dtype, np.integer)

    def is_continuous(self) -> bool:
        return np.issubdtype(self.dtype, np.floating)

    def is_spatial(self) -> bool:
        # Spatial = multi-dimensional, or per-position bounds given as a sequence.
        # (isinstance replaces the non-idiomatic `type(x) in [list, tuple]`.)
        return (len(self.shape) > 1) or isinstance(self.hi, (list, tuple))

    def size(self) -> int:
        """Number of categories (categorical case) or flat feature length."""
        if self.is_discrete() and self.categorical:
            if self.is_spatial():
                return self.hi
            return self.hi - self.lo
        sz = 1
        if len(self.shape) == 1:
            sz = self.shape[0]
        return sz

    def sample(self, n=1):
        """Draw `n` uniform samples of shape (n, *shape); None for other dtypes."""
        if self.is_discrete():
            # np.random.randint's upper bound is exclusive, hence hi + 1.
            return np.random.randint(self.lo, self.hi + 1, (n,) + self.shape)
        if self.is_continuous():
            return np.random.uniform(self.lo, self.hi + 1e-10, (n,) + self.shape)

    def __repr__(self):
        mid = str(self.shape)
        if self.categorical:
            mid += ', cat: ' + str(self.hi)
        return 'Space(%s, %s, %s)' % (self.name, mid, str(self.dtype).strip("<class>' "))
def create_stem(in_chs, out_chs, stem_type='', preact=True, conv_layer=None, norm_layer=None):
    """Build a ResNet-style stem as an nn.Sequential of named layers.

    `stem_type` selects a 'deep' (three 3x3 convs) vs single 7x7 conv entry,
    and 'fixed'/'same' padding variants for the stride-2 max pool.
    """
    assert stem_type in ('', 'fixed', 'same', 'deep', 'deep_fixed', 'deep_same')
    layers = OrderedDict()
    if 'deep' in stem_type:
        # Three 3x3 convolutions replacing the single 7x7.
        mid_chs = out_chs // 2
        layers['conv1'] = conv_layer(in_chs, mid_chs, kernel_size=3, stride=2)
        layers['conv2'] = conv_layer(mid_chs, mid_chs, kernel_size=3, stride=1)
        layers['conv3'] = conv_layer(mid_chs, out_chs, kernel_size=3, stride=1)
    else:
        layers['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2)
    if not preact:
        # Pre-activation stems defer normalization to the first block.
        layers['norm'] = norm_layer(out_chs)
    if 'fixed' in stem_type:
        layers['pad'] = nn.ConstantPad2d(1, 0.0)
        layers['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
    elif 'same' in stem_type:
        layers['pool'] = create_pool2d('max', kernel_size=3, stride=2, padding='same')
    else:
        layers['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    return nn.Sequential(layers)
def upSampleConv_Res(nin, nout, upscale=2, bias=False, BN=False, ws=False, activ=nn.LeakyReLU(0.2)):
    """Upsample by `upscale` then apply a residual conv block (nin -> nout channels)."""
    # NOTE(review): the default `activ` module instance is created once and
    # shared across calls — confirm that is acceptable for this codebase.
    return nn.Sequential(nn.Upsample(scale_factor=upscale), ResidualConv(nin, nout, bias=bias, BN=BN, ws=ws, activ=activ))
def make_parser():
    """Build the argparse parser for YOLOX evaluation / tracking runs."""
    parser = argparse.ArgumentParser('YOLOX Eval')
    parser.add_argument('-expn', '--experiment-name', type=str, default=None)
    parser.add_argument('-n', '--name', type=str, default=None, help='model name')
    # Distributed-training plumbing.
    parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend')
    parser.add_argument('--dist-url', default=None, type=str, help='url used to set up distributed training')
    parser.add_argument('-b', '--batch-size', type=int, default=64, help='batch size')
    parser.add_argument('-d', '--devices', default=None, type=int, help='device for training')
    parser.add_argument('--local_rank', default=0, type=int, help='local rank for dist training')
    parser.add_argument('--num_machines', default=1, type=int, help='num of node for training')
    parser.add_argument('--machine_rank', default=0, type=int, help='node rank for multi-node training')
    parser.add_argument('-f', '--exp_file', default=None, type=str, help='pls input your expriment description file')
    parser.add_argument('--fp16', dest='fp16', default=False, action='store_true', help='Adopting mix precision evaluating.')
    parser.add_argument('--fuse', dest='fuse', default=False, action='store_true', help='Fuse conv and bn for testing.')
    parser.add_argument('--trt', dest='trt', default=False, action='store_true', help='Using TensorRT model for testing.')
    parser.add_argument('--test', dest='test', default=False, action='store_true', help='Evaluating on test-dev set.')
    parser.add_argument('--speed', dest='speed', default=False, action='store_true', help='speed test only.')
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
    # Detection thresholds.
    parser.add_argument('-c', '--ckpt', default=None, type=str, help='ckpt for eval')
    parser.add_argument('--conf', default=0.01, type=float, help='test conf')
    parser.add_argument('--nms', default=0.7, type=float, help='test nms threshold')
    parser.add_argument('--tsize', default=None, type=int, help='test img size')
    parser.add_argument('--seed', default=None, type=int, help='eval seed')
    # Multi-object-tracking parameters.
    parser.add_argument('--track_thresh', type=float, default=0.6, help='tracking confidence threshold')
    parser.add_argument('--track_buffer', type=int, default=30, help='the frames for keep lost tracks')
    parser.add_argument('--match_thresh', type=float, default=0.9, help='matching threshold for tracking')
    parser.add_argument('--min-box-area', type=float, default=100, help='filter out tiny boxes')
    parser.add_argument('--mot20', dest='mot20', default=False, action='store_true', help='test mot20.')
    return parser
_config
# NOTE(review): the bare name above looks like a mangled decorator
# (e.g. @ex.config for a sacred experiment) — confirm against upstream.
def il_source_rmt():
    # Config-style function: the decorator captures the local variables,
    # so no return statement is needed.
    cfg = {}
    cfg['training'] = {'sources': ['rgb_filled', 'map', 'target'], 'sources_as_dict': True}
def simple_algorithm_plot(experiment_name, data_path=_DEFAULT_DATA_PATH):
    """Plot mean per-period regret per agent for one experiment.

    Returns a dict mapping '<experiment>_simple' to the ggplot object.
    """
    df = load_data(experiment_name, data_path)
    # Mean instantaneous regret per (t, agent).
    # NOTE(review): newer pandas deprecates passing np.mean to agg; the string
    # 'mean' is the forward-compatible spelling — confirm before changing.
    plt_df = df.groupby(['t', 'agent']).agg({'instant_regret': np.mean}).reset_index()
    p = (((((gg.ggplot(plt_df) + gg.aes('t', 'instant_regret', colour='agent')) + gg.geom_line(size=1.25, alpha=0.75)) + gg.xlab('time period (t)')) + gg.ylab('per-period regret')) + gg.scale_colour_brewer(name='agent', type='qual', palette='Set1'))
    plot_dict = {(experiment_name + '_simple'): p}
    return plot_dict
def AutogradSkipConnectRNN(num_layers=1, batch_first=False, bidirectional=False, lstm=False):
    """Build a forward function for a stacked skip-connect RNN.

    Returns forward(input, skip_connect, cells, hidden, mask) -> (output, nexth).
    """
    if bidirectional:
        directions = (SkipConnectRecurrent(), SkipConnectRecurrent(reverse=True))
    else:
        directions = (SkipConnectRecurrent(),)
    func = StackedRNN(directions, num_layers, lstm=lstm)

    def forward(input, skip_connect, cells, hidden, mask):
        # The stacked RNN works time-major internally; convert on the way in/out.
        if batch_first:
            input = input.transpose(0, 1)
            skip_connect = skip_connect.transpose(0, 1)
            if mask is not None:
                mask = mask.transpose(0, 1)
        nexth, output = func(input, skip_connect, hidden, cells, mask)
        if batch_first:
            output = output.transpose(0, 1)
        return output, nexth

    return forward
class DictionaryMatcher(TaggingRule):
    """Tagging rule that labels token spans found in a term dictionary.

    Terms are indexed by their first token and candidates are tried longest
    first, so the longest match wins. Optionally a second pass matches on
    lemmas, but only over positions the surface-form pass left abstaining.
    """

    def __init__(self, name, terms, uncased=False, match_lemmas=False, i_label='I', abs_label='ABS'):
        self.name = name
        self.uncased = uncased
        self.match_lemmas = match_lemmas
        self.i_label = i_label
        self.abs_label = abs_label
        self._load_terms(terms)

    def apply_instance(self, instance):
        """Return one label per token: `i_label` inside matches, `abs_label` elsewhere."""
        tokens = self._normalize_instance_tokens(instance['tokens'])
        labels = [self.abs_label] * len(instance['tokens'])
        # The original duplicated this matching loop verbatim for the lemma
        # pass; it is factored into _tag_matches with a `require_abs` switch.
        self._tag_matches(tokens, labels, require_abs=False)
        if self.match_lemmas:
            lemmas = self._normalize_instance_tokens(instance['tokens'], lemmas=True)
            # Lemma matches may only fill positions still abstaining.
            self._tag_matches(lemmas, labels, require_abs=True)
        return labels

    def _tag_matches(self, tokens, labels, require_abs):
        """Greedy left-to-right longest-match tagging; mutates `labels` in place.

        When `require_abs` is True a candidate only matches if every covered
        position still carries the abstain label.
        """
        i = 0
        while i < len(tokens):
            if tokens[i] in self.term_dict:
                for candidate in self.term_dict[tokens[i]]:
                    end = i + len(candidate)
                    if end > len(tokens):
                        continue
                    matched = all(
                        tokens[i + j] == candidate[j]
                        and (not require_abs or labels[i + j] == self.abs_label)
                        for j in range(len(candidate))
                    )
                    if matched:
                        for j in range(i, end):
                            labels[j] = self.i_label
                        # Jump past the span (the trailing i += 1 adds one more).
                        i = end - 1
                        break
            i += 1

    def _get_tr_name(self):
        return self.name

    def _normalize_instance_tokens(self, tokens, lemmas=False):
        """Project spaCy-like tokens to strings (lemma_ or text), lowercasing if uncased."""
        if lemmas:
            normalized_tokens = [token.lemma_ for token in tokens]
        else:
            normalized_tokens = [token.text for token in tokens]
        if self.uncased:
            normalized_tokens = [token.lower() for token in normalized_tokens]
        return normalized_tokens

    def _normalize_terms(self, tokens):
        if self.uncased:
            return [token.lower() for token in tokens]
        return tokens

    def _load_terms(self, terms):
        """Index multi-token terms by first token, longest candidates first."""
        self.term_dict = {}
        for term in terms:
            normalized_term = self._normalize_terms(term)
            self.term_dict.setdefault(normalized_term[0], []).append(normalized_term)
        for first_token in self.term_dict.keys():
            self.term_dict[first_token] = sorted(self.term_dict[first_token], reverse=True, key=len)
def resource_to_bytes(resource_str):
    """Convert a size string like '50b', '100k', '250m', '30g' to bytes.

    Falsy input is returned unchanged. Fractional sizes and unknown suffixes
    are rejected via invalidInputError. Decimal (1000-based) multipliers are
    used, matching the original implementation.
    """
    if not resource_str:
        return resource_str
    lowered = resource_str.lower()
    matched = re.compile('([0-9]+)([a-z]+)?').match(lowered)
    fraction_matched = re.compile('([0-9]+\\.[0-9]+)([a-z]+)?').match(lowered)
    if fraction_matched:
        invalidInputError(False, 'Fractional values are not supported. Input was: {}'.format(resource_str))
    multipliers = {'b': 1, 'k': 1000, 'm': 1000 * 1000, 'g': 1000 * 1000 * 1000}
    try:
        value = int(matched.group(1))
        postfix = matched.group(2)
        if postfix not in multipliers:
            invalidInputError(False, 'Not supported type: {}'.format(resource_str))
        return value * multipliers.get(postfix, 1)
    except Exception:
        invalidInputError(False, 'Size must be specified as bytes(b),kilobytes(k), megabytes(m), gigabytes(g). E.g. 50b, 100k, 250m, 30g')
def _preprocess_commonsense_qa(path):
    """Load a CommonsenseQA jsonl file and verbalize each sample.

    Returns a list of {'question': ..., 'answer': ...} dicts.
    """
    data = []
    candidates = ['A', 'B', 'C', 'D', 'E']
    with open(path) as f:
        for (sample_index, line) in enumerate(f):
            sample = json.loads(line)
            question = sample['question']['stem'].strip()
            choices = [c['text'] for c in sample['question']['choices']]
            # `assert cond or print(...)` prints the offending sample before failing.
            assert ((sample['answerKey'] in candidates) or print(sample_index, sample))
            # Map the letter key ('A'..'E') to a 0-based choice index.
            answer_index = (ord(sample['answerKey']) - ord('A'))
            (question, answer) = verbalize_multichoice(question, choices, answer_index, sample_index=sample_index, n_choices=5)
            assert ((sample['answerKey'] == answer) or print(sample_index, sample))
            data.append({'question': question, 'answer': answer})
    return data
class MultitaskLossBase(nn.Module):
    """Collection of small loss helpers shared by multitask heads."""
    def __init__(self):
        super().__init__()
        self._sigmoid_xent_loss = SigmoidCrossEntropy()
        self._multilabel_sigmoid_xent_loss = MultilabelSigmoidCrossEntropy()
        self._batched_xent_loss = nn.CrossEntropyLoss()
    def _mse_loss(self, pred, label):
        # NOTE(review): despite the name this computes an absolute (L1) error,
        # not a squared error — confirm whether that is intentional.
        return (pred - label).abs()
    def _bce_loss(self, pred, label):
        # Binary cross-entropy on logits, via numerically stable log-sigmoid.
        return (- ((jactorch.log_sigmoid(pred) * label) + (jactorch.log_sigmoid((- pred)) * (1 - label))).mean())
    def _bce_logprob_loss(self, pred, label):
        # BCE where `pred` is already a log-probability.
        # NOTE(review): unlike its siblings this has no leading negation and no
        # .mean() reduction — confirm against the call sites.
        return ((pred * label) + ((1 - label) * jactorch.log1mexp(pred)))
    def _bce_prob_loss(self, pred, label):
        # BCE where `pred` is a probability in (0, 1).
        return (- ((torch.log(pred) * label) + (torch.log((1 - pred)) * (1 - label))).mean())
    def _xent_loss(self, pred, label):
        # Cross-entropy from raw scores over the last dimension.
        logp = F.log_softmax(pred, dim=(- 1))
        return (- logp[label].mean())
def process_yaml_config(global_config, local_configs, default_config):
    """Merge global/local pruner configs over defaults into DotDict entries.

    Keys missing (None) in the global config are backfilled from
    `default_config`. With no local configs a single merged pruner config is
    returned; otherwise each local pruner config is backfilled from the merged
    global defaults. Returns a list of DotDict pruner configs.
    """
    pruners_info = []
    # NOTE(review): this aliases (and therefore mutates) the caller's
    # global_config dict — confirm that is intended.
    default_all = global_config
    for key in default_config.keys():
        default_all[key] = reset_none_to_default(default_all, key, default_config[key])
    if (len(local_configs) == 0):
        update_params(default_all)
        check_config(default_all)
        pruner_info = DotDict(default_all)
        pruners_info.append(pruner_info)
    else:
        for pruner in local_configs:
            for key in default_config.keys():
                pruner_info = pruner.pruner_config
                pruner_info[key] = reset_none_to_default(pruner_info, key, default_all[key])
            update_params(pruner_info)
            check_config(pruner_info)
            pruner_info = DotDict(pruner_info)
            pruners_info.append(pruner_info)
    return pruners_info
def _latency_errors(data, num_steps, threshold, tau, first_spike_time, normalize):
if ((threshold <= 0) or (threshold >= 1)):
raise Exception('Threshold must be between 0 and 1.')
if (tau <= 0):
raise Exception('``tau`` must be greater than 0.')
if (first_spike_time and num_steps and (first_spike_time > (num_steps - 1))):
raise Exception(f'first_spike_time ({first_spike_time}) must be equal to or less than num_steps-1 ({(num_steps - 1)}).')
if (first_spike_time and (torch.max(data) > 1) and (torch.min(data) < 0)):
raise Exception('`first_spike_time` can only be applied to data between `0` and `1`.')
if (first_spike_time < 0):
raise Exception('``first_spike_time`` [{first_spike_time}] cannot be negative.')
if (num_steps < 0):
raise Exception('``num_steps`` [{num_steps}] cannot be negative.')
if (normalize and (not num_steps)):
raise Exception('`num_steps` should not be empty if normalize is set to True.') |
def var_gauss(t, y, w, freq, dphi):
    """Weighted local-mean variance with a Gaussian window in phase distance.

    For each point, a weighted mean `mbar` is computed over all points,
    weighting each by its own weight times a Gaussian kernel of the phase
    distance (via `dphase`, scaled by `dphi`); the return value accumulates
    w * (y - mbar)^2 over all points.
    """
    gaussian = lambda x: np.exp(-0.5 * (x ** 2))
    var = 0.0
    # The original wrapped both zips in enumerate() but never used the
    # counters; they are dropped here.
    for T, Y, W in zip(t, y, w):
        mbar = 0.0
        wtot = 0.0
        for T2, Y2, W2 in zip(t, y, w):
            dph = dphase(abs(T2 - T), freq)
            wgt = W2 * gaussian(dph / dphi)
            mbar += wgt * Y2
            wtot += wgt
        var += W * ((Y - (mbar / wtot)) ** 2)
    return var
class Block(nn.Module):
    """PointNeXt-style residual block: local aggregation followed by a pointwise MLP.

    forward takes and returns a [points, features, batch-index] triple; the
    residual shortcut is applied only when channel widths match and `use_res`
    is set.
    """

    def __init__(self, in_channels, norm_args=None, act_args=None, aggr_args=None, group_args=None, conv_args=None, expansion=1, use_res=True, num_posconvs=2, **kwargs):
        super().__init__()
        # Fixed: the dict defaults were shared mutable default arguments;
        # None now stands in for the same values, keeping the interface
        # backward-compatible.
        if aggr_args is None:
            aggr_args = {'feature_type': 'dp_fj', 'reduction': 'max'}
        if group_args is None:
            group_args = {'NAME': 'ballquery'}
        self.use_res = use_res
        mid_channels = in_channels * expansion
        self.convs = LocalAggregation([in_channels, in_channels], norm_args=norm_args, act_args=(act_args if (num_posconvs > 0) else None), group_args=group_args, conv_args=conv_args, **aggr_args)
        # Pointwise MLP layout depends on how many post-convolutions are requested.
        if num_posconvs < 1:
            channels = []
        elif num_posconvs == 1:
            channels = [in_channels, in_channels]
        else:
            channels = [in_channels, mid_channels, in_channels]
        pwconv = []
        for i in range(len(channels) - 1):
            # No activation on the last linear layer (applied after the residual add).
            pwconv.append(create_linearblock(channels[i], channels[i + 1], norm_args=norm_args, act_args=(act_args if (i != (len(channels) - 2)) else None), **conv_args))
        self.pwconv = nn.Sequential(*pwconv)
        self.act = create_act(act_args)

    def forward(self, pxb):
        (p, x, b) = pxb
        identity = x
        x = self.convs(pxb)
        x = self.pwconv(x)
        if (x.shape[-1] == identity.shape[-1]) and self.use_res:
            x += identity
        x = self.act(x)
        return [p, x, b]
def pair_id_to_image_ids(pair_id):
    """Invert COLMAP's pair-id encoding: pair_id = image_id1 * 2147483647 + image_id2.

    Returns (image_id1, image_id2); image_id1 is a float because of the true
    division, matching COLMAP's reference implementation.
    """
    # 2147483647 (2**31 - 1) is COLMAP's MAX_IMAGE_ID constant; the literal
    # had been lost from this copy, leaving a syntax error.
    image_id2 = pair_id % 2147483647
    image_id1 = (pair_id - image_id2) / 2147483647
    return (image_id1, image_id2)
def test_new_format_string():
    # f-string capture: rebinding `a` must mark the formatted `expr_str` stale.
    run_cell('a = 5\nb = 7')
    run_cell('expr_str = f"{a} + {b} = {a+b}"')
    run_cell('a = 9')
    run_cell('logging.info(expr_str)')
    assert_detected('`expr_str` depends on stale `a`')
def eval_func_onnx(model, dataloader, metric, postprocess=None):
    """Run an ONNX model over a dataloader and return the accumulated metric.

    Batches are expected to be (input_data, label) pairs.
    """
    metric.reset()
    sess = ort.InferenceSession(model.SerializeToString(), providers=ort.get_available_providers())
    input_names = [i.name for i in sess.get_inputs()]
    for (input_data, label) in dataloader:
        # NOTE(review): zips all input names against a single array, so only
        # single-input models are fed completely — confirm.
        output = sess.run(None, dict(zip(input_names, [input_data])))
        if postprocess:
            (output, label) = postprocess((output, label))
        if isinstance(output, list):
            if (len(output) == 1):
                output = output[0]
            else:
                # Multi-output models: the second output is assumed to be the
                # prediction of interest — TODO confirm.
                output = output[1]
        metric.update(output, label)
    return metric.result()
class MicroPoolingOptOPSResolverRule(MicroOPSResolverRule):
    """Resolver rule selecting the optimized pooling kernel variant by window size."""
    def valid_tag(self, mace_op, mace_net):
        # Ops whose pooling window has >= 4 elements map to the 's4' variant;
        # everything else keeps the default (empty) tag.
        tag = ''
        kernels = NetUtil.get_arg(mace_op, MaceKeyword.mace_kernel_str)
        mace_check((kernels is not None), 'Get kernels failed.')
        size = (kernels.ints[0] * kernels.ints[1])
        if (size >= 4):
            tag = 's4'
        return (tag == self._tag)
class NllbTokenizer(PreTrainedTokenizer):
    """
    NLLB tokenizer built on SentencePiece, following the fairseq vocabulary
    layout: the special tokens <s>/<pad>/</s>/<unk> occupy ids 0-3,
    SentencePiece pieces are shifted up by ``fairseq_offset``, and the
    FLORES-200 language codes plus <mask> are appended after the
    SentencePiece vocabulary.

    Sequences are formatted as ``tokens </s> lang_code`` for both source and
    target (see ``set_src_lang_special_tokens`` /
    ``set_tgt_lang_special_tokens``).

    Fixes vs. the previous revision: ``vocab_size`` and ``src_lang`` were
    missing their ``@property`` decorators (so ``range(self.vocab_size)`` in
    ``get_vocab`` would raise), and the bare ``_lang.setter`` line — a
    NameError at class-creation time — is now the ``@src_lang.setter``
    decorator it was meant to be.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']

    # Ids added around every encoded sequence; maintained by the
    # set_*_lang_special_tokens methods below.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Fairseq pins these four tokens to the first ids; SentencePiece ids
        # are therefore shifted by ``fairseq_offset`` on conversion.
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        # Language codes live directly after the (shifted) SentencePiece vocab.
        self.lang_code_to_id = {code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)}
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        # <mask> is the very last id, after all language codes.
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend([t for t in additional_special_tokens if t not in self._additional_special_tokens])
        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # The SentencePiece processor is not picklable; serialize its proto instead.
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Backward compatibility with pickles created before sp_model_kwargs existed.
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        """Total vocabulary size (+1 accounts for the trailing <mask> token)."""
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1

    @property
    def src_lang(self) -> str:
        """Current source language code (e.g. 'eng_Latn')."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        # Changing the source language also refreshes the special tokens.
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """Return a 0/1 mask where 1 marks a special token (prefix/suffix)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Wrap token ids with the current prefix/suffix special tokens;
        a pair of sequences is simply concatenated between them."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """NLLB does not use token types, so the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Tokenize ``raw_inputs`` for translation and attach the target
        language id as ``forced_bos_token_id``."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        """Split text into SentencePiece subword strings."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a token string to its id, honoring the fairseq overrides and
        offset; unknown pieces (spm id 0) map to the unk id."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return (spm_id + self.fairseq_offset) if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Map an id back to a token string, undoing the fairseq offset."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Join subword pieces into plain text (SPIECE_UNDERLINE -> space)."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model into
        ``save_directory`` and return the written path; returns None and logs
        an error when the directory does not exist."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, ((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Original file is gone (e.g. loaded from serialized proto);
            # write the serialized model instead of copying.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = 'eng_Latn', tgt_texts: Optional[List[str]] = None, tgt_lang: str = 'fra_Latn', **kwargs) -> BatchEncoding:
        """Set the language pair, then defer to the base implementation."""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Use no prefix and ``[eos, src_lang_code]`` as the suffix."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Use no prefix and ``[eos, tgt_lang_code]`` as the suffix."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.