code stringlengths 101 5.91M |
|---|
@pytest.mark.parametrize('observation_shape', [(100,)])
@pytest.mark.parametrize('action_size', [10])
@pytest.mark.parametrize('batch_size', [32])
@pytest.mark.parametrize('eps', [0.3])
def test_standard_reward_scaler_with_transition_picker(
    observation_shape: Sequence[int],
    action_size: int,
    batch_size: int,
    eps: float,
) -> None:
    """StandardRewardScaler fitted via a transition picker should expose the
    mean and std of the episode rewards.

    NOTE(review): the decorator lines had lost their ``@pytest.mark.`` prefix
    (bare ``.parametrize(...)`` is a syntax error); restored here.
    """
    # Build one random batch; the final step terminates the single episode.
    shape = (batch_size, *observation_shape)
    observations = np.random.random(shape)
    actions = np.random.random((batch_size, action_size))
    rewards: Float32NDArray = np.random.random(batch_size).astype(np.float32)
    terminals: Float32NDArray = np.zeros(batch_size, dtype=np.float32)
    terminals[-1] = 1.0
    episodes = EpisodeGenerator(
        observations=observations,
        actions=actions,
        rewards=rewards,
        terminals=terminals,
    )()
    # Reference statistics over episode rewards.
    # NOTE(review): despite its name this list collects *all* rewards,
    # including the first of each episode — confirm against the scaler's
    # fitting semantics before renaming.
    rewards_without_first = []
    for episode in episodes:
        rewards_without_first += episode.rewards.tolist()
    mean = np.mean(rewards_without_first)
    std = np.std(rewards_without_first)
    scaler = StandardRewardScaler(eps=eps)
    assert not scaler.built
    scaler.fit_with_transition_picker(episodes, BasicTransitionPicker())
    assert scaler.built
    assert scaler.mean is not None and scaler.std is not None
    assert np.allclose(scaler.mean, mean)
    assert np.allclose(scaler.std, std)
def _tensorviewer_from_slices(target_slices, names, batch_size):
    """Build a ``_TensorViewer`` over the index ranges described by *target_slices*.

    Returns ``None`` when *target_slices* is empty.
    """
    # Check emptiness first: the original built every range tensor and only
    # then discarded the work when the slice list was empty.
    if not target_slices:
        return None
    default_backend = pyhf.default_backend
    ranges = [
        default_backend.astensor(range(sl.start, sl.stop)) for sl in target_slices
    ]
    return _TensorViewer(ranges, names=names, batch_size=batch_size)
class TwoMaxLayerPoolingAggregator(Layer):
    """GraphSAGE-style pooling aggregator: neighbor vectors are passed through
    a two-layer MLP, max-pooled over the neighbor axis, then combined with the
    transformed self vectors (sum or concatenation).
    """

    def __init__(self, input_dim, output_dim, model_size='small', neigh_input_dim=None,
                 dropout=0.0, bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
        super(TwoMaxLayerPoolingAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        # Record dimensions up front: the bias variable below reads
        # self.output_dim, which the original code assigned only *after* the
        # variable scope — an AttributeError whenever bias=True.
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.neigh_input_dim = neigh_input_dim
        if name is not None:
            name = '/' + name
        else:
            name = ''
        if model_size == 'small':
            hidden_dim_1 = self.hidden_dim_1 = 512
            hidden_dim_2 = self.hidden_dim_2 = 256
        elif model_size == 'big':
            hidden_dim_1 = self.hidden_dim_1 = 1024
            hidden_dim_2 = self.hidden_dim_2 = 512
        else:
            # Previously an unknown size fell through to a NameError on
            # hidden_dim_1; fail with a clear message instead.
            raise ValueError("model_size must be 'small' or 'big', got %r" % (model_size,))
        self.mlp_layers = []
        self.mlp_layers.append(Dense(input_dim=neigh_input_dim, output_dim=hidden_dim_1, act=tf.nn.relu, dropout=dropout, sparse_inputs=False, logging=self.logging))
        self.mlp_layers.append(Dense(input_dim=hidden_dim_1, output_dim=hidden_dim_2, act=tf.nn.relu, dropout=dropout, sparse_inputs=False, logging=self.logging))
        # self.vars / self.logging / self.name are presumably provided by the
        # Layer base class — confirm against its definition.
        with tf.variable_scope((self.name + name) + '_vars'):
            self.vars['neigh_weights'] = glorot([hidden_dim_2, output_dim], name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim], name='self_weights')
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        """Aggregate ``(self_vecs, neigh_vecs)`` into output embeddings.

        neigh_vecs is flattened to 2-D for the MLP, reshaped back, and reduced
        with an elementwise max over the neighbor axis.
        """
        (self_vecs, neigh_vecs) = inputs
        neigh_h = neigh_vecs
        dims = tf.shape(neigh_h)
        batch_size = dims[0]
        num_neighbors = dims[1]
        # Fold neighbors into the batch axis so the Dense layers see 2-D input.
        h_reshaped = tf.reshape(neigh_h, ((batch_size * num_neighbors), self.neigh_input_dim))
        for l in self.mlp_layers:
            h_reshaped = l(h_reshaped)
        neigh_h = tf.reshape(h_reshaped, (batch_size, num_neighbors, self.hidden_dim_2))
        # Elementwise max over neighbors.
        neigh_h = tf.reduce_max(neigh_h, axis=1)
        from_neighs = tf.matmul(neigh_h, self.vars['neigh_weights'])
        from_self = tf.matmul(self_vecs, self.vars['self_weights'])
        if not self.concat:
            output = tf.add_n([from_self, from_neighs])
        else:
            output = tf.concat([from_self, from_neighs], axis=1)
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
class TestPruningInfo(unittest.TestCase):
    """Checks that PruningInfo exposes exactly the masks and importance
    scores it was constructed with."""

    def setUp(self):
        mask_a = np.array([1, 0, 1])
        mask_b = np.array([0, 1])
        self.mock_pruning_masks = {'Layer1': mask_a, 'Layer2': mask_b}
        self.mock_importance_scores = {
            'Layer1': np.array([0.5, 0.3, 0.7]),
            'Layer2': np.array([0.2, 0.8]),
        }
        self.pruning_info = mct.pruning.PruningInfo(
            self.mock_pruning_masks, self.mock_importance_scores
        )

    def test_get_pruning_mask(self):
        """pruning_masks should hand back what was passed in."""
        self.assertEqual(self.pruning_info.pruning_masks, self.mock_pruning_masks)

    def test_get_importance_score(self):
        """importance_scores should hand back what was passed in."""
        self.assertEqual(self.pruning_info.importance_scores, self.mock_importance_scores)
class VisionDataset(Dataset):
    """Dataset of images loaded from disk, returned as normalized tensors.

    ``IMAGE_SIZE``, ``MEAN`` and ``STD`` are module-level constants defined
    elsewhere in this file.
    """

    # Shared transform pipeline applied to every image.
    preprocess = transforms.Compose([
        transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),
        transforms.ToTensor(),
        transforms.Normalize(mean=MEAN, std=STD),
    ])

    def __init__(self, image_paths: list):
        self.image_paths = image_paths

    def __getitem__(self, idx):
        # Use a context manager so the underlying file handle is closed:
        # a bare Image.open() leaks file descriptors under lazy loading.
        with Image.open(self.image_paths[idx]) as img:
            return self.preprocess(img.convert('RGB'))

    def __len__(self):
        return len(self.image_paths)
def kmax_pooling(x, dim, k):
    """Select the k largest values of *x* along *dim*, kept in their original
    order of appearance, then squeeze *dim* (only collapses when k == 1)."""
    topk_indices = x.topk(k, dim=dim)[1]
    ordered_indices = topk_indices.sort(dim=dim)[0]
    pooled = x.gather(dim, ordered_indices)
    return pooled.squeeze(dim)
def get_crfrnn_model_def():
    """Build the CRF-RNN semantic segmentation network: an FCN-8s backbone
    (VGG16-style convolutions with pool3/pool4 skip connections) followed by
    a CRF-as-RNN layer, for 500x500 RGB inputs and 21 classes.

    Returns an uncompiled Keras Model named 'crfrnn_net'.
    """
    # NOTE(review): `channels` is unpacked but never used; the input shape is
    # hard-coded to 3 channels below.
    (channels, height, width) = (3, 500, 500)
    input_shape = (height, width, 3)
    img_input = Input(shape=input_shape)
    # Pad by 100 px (standard FCN trick) so the 7x7 'valid' fc6 conv fits.
    x = ZeroPadding2D(padding=(100, 100))(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='valid', name='conv1_1')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2', padding='same')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3', padding='same')(x)
    pool3 = x  # kept for the FCN-8s skip connection
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4', padding='same')(x)
    pool4 = x  # kept for the FCN-8s skip connection
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool5', padding='same')(x)
    # Fully-connected layers recast as convolutions.
    x = Conv2D(4096, (7, 7), activation='relu', padding='valid', name='fc6')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(4096, (1, 1), activation='relu', padding='valid', name='fc7')(x)
    x = Dropout(0.5)(x)
    # Per-class score maps (21 classes).
    x = Conv2D(21, (1, 1), padding='valid', name='score-fr')(x)
    # Upsample scores 2x and fuse with cropped pool4 scores.
    score2 = Conv2DTranspose(21, (4, 4), strides=2, name='score2')(x)
    score_pool4 = Conv2D(21, (1, 1), name='score-pool4')(pool4)
    score_pool4c = Cropping2D((5, 5))(score_pool4)
    score_fused = Add()([score2, score_pool4c])
    # Upsample 2x again and fuse with cropped pool3 scores.
    score4 = Conv2DTranspose(21, (4, 4), strides=2, name='score4', use_bias=False)(score_fused)
    score_pool3 = Conv2D(21, (1, 1), name='score-pool3')(pool3)
    score_pool3c = Cropping2D((9, 9))(score_pool3)
    score_final = Add()([score4, score_pool3c])
    # Final 8x upsampling back to (roughly) input resolution, then crop to
    # exactly height x width.
    upsample = Conv2DTranspose(21, (16, 16), strides=8, name='upsample', use_bias=False)(score_final)
    upscore = Cropping2D(((31, 37), (31, 37)))(upsample)
    # CRF-as-RNN refinement conditioned on the raw image.
    output = CrfRnnLayer(image_dims=(height, width), num_classes=21, theta_alpha=160.0, theta_beta=3.0, theta_gamma=3.0, num_iterations=10, name='crfrnn')([upscore, img_input])
    model = Model(img_input, output, name='crfrnn_net')
    return model
class ManifoldSubsetClosure(ManifoldSubset):
    """Topological closure of a manifold subset.

    The closure is registered as a superset of the original subset and as a
    subset of every closed superset of it.
    """

    def __init__(self, subset, name=None, latex_name=None):
        self._subset = subset
        ambient = subset.manifold()
        # Derive display names from the wrapped subset when not supplied.
        if latex_name is None:
            latex_name = name if name is not None else (
                '\\mathop{\\mathrm{cl}}(' + subset._latex_name + ')')
        if name is None:
            name = 'cl_' + subset._name
        ManifoldSubset.__init__(self, ambient, name, latex_name=latex_name)
        self.declare_superset(subset)
        closed_supersets = (s for s in subset.supersets() if s.is_closed())
        self.declare_subset(closed_supersets)

    def _repr_(self):
        return 'Topological closure {} of the {}'.format(self._name, self._subset)

    def is_closed(self):
        """A closure is closed by definition."""
        return True
def get_model_name(config):
    """Derive a short, human-readable model identifier from the experiment
    config.

    Supported families: ResNet/ResNeXt ('cls_resnet', 'cls_resnet_d2'),
    HRNet (any name containing 'cls_hrnet') and BiT ('cls_bit_resnet').

    Raises:
        ValueError: when config.MODEL.NAME is not a recognized family.
    """
    name = ''
    spec = config.MODEL.SPEC
    if config.MODEL.NAME in ['cls_resnet', 'cls_resnet_d2']:
        num_groups = spec.NUM_GROUPS
        depth = spec.NUM_LAYERS
        if num_groups == 1:
            model_type = 'r{}'.format(depth)
        else:
            # ResNeXt naming: depth, cardinality and width per group.
            model_type = 'x{}-{}x{}d'.format(depth, num_groups, spec.WIDTH_PER_GROUP)
        if ('DEEP_STEM' in spec) and spec.DEEP_STEM:
            name = '{}-deepstemAvgdown{}'.format(model_type, int(spec.AVG_DOWN))
        else:
            name = '{}-s{}a{}'.format(model_type, spec.KERNEL_SIZE_STEM, int(spec.AVG_DOWN))
        if 'WITH_SE' in spec:
            name = 'se-' + name
    elif 'cls_hrnet' in config.MODEL.NAME:
        # Width of the first stage identifies the HRNet variant.
        name = 'h{}'.format(spec.STAGES_SPEC.NUM_CHANNELS[0][0])
    elif config.MODEL.NAME == 'cls_bit_resnet':
        name = '{}'.format(spec.SPEC)
    else:
        # The original message read 'Known MODEL.NAME', inverting its meaning.
        raise ValueError('Unknown MODEL.NAME: {}'.format(config.MODEL.NAME))
    return name
def testval(config, test_dataset, testloader, model, sv_dir='', sv_pred=False):
    """Evaluate *model* on *testloader* with multi-scale inference and
    accumulate a confusion matrix.

    Args:
        config: experiment config providing DATASET / TEST / TRAIN fields.
        test_dataset: dataset object with multi_scale_inference / save_pred.
        testloader: iterable yielding (image, label, _, name) batches.
        model: network to evaluate (switched to eval mode).
        sv_dir: root directory for saved predictions.
        sv_pred: when True, write per-image predictions under
            ``sv_dir/test_val_results``.

    Returns:
        (mean_IoU, IoU_array, pixel_acc, mean_acc)
    """
    model.eval()
    confusion_matrix = np.zeros((config.DATASET.NUM_CLASSES, config.DATASET.NUM_CLASSES))
    with torch.no_grad():
        for index, batch in enumerate(tqdm(testloader)):
            (image, label, _, name) = batch
            size = label.size()
            pred = test_dataset.multi_scale_inference(model, image, scales=config.TEST.SCALE_LIST, flip=config.TEST.FLIP_TEST)
            if pred.size()[-2] != size[-2] or pred.size()[-1] != size[-1]:
                # F.upsample is deprecated; F.interpolate is the supported
                # equivalent with identical semantics.
                pred = F.interpolate(pred, (size[-2], size[-1]), mode='bilinear')
            confusion_matrix += get_confusion_matrix(label, pred, size, config.DATASET.NUM_CLASSES, config.TRAIN.IGNORE_LABEL)
            if sv_pred:
                sv_path = os.path.join(sv_dir, 'test_val_results')
                if not os.path.exists(sv_path):
                    os.mkdir(sv_path)
                test_dataset.save_pred(pred, sv_path, name)
            if (index % 100) == 0:
                logging.info('processing: %d images' % index)
    # Derive all metrics once (the original recomputed pos/res/tp/IoU twice).
    pos = confusion_matrix.sum(1)          # ground-truth pixels per class
    res = confusion_matrix.sum(0)          # predicted pixels per class
    tp = np.diag(confusion_matrix)         # true positives per class
    IoU_array = tp / np.maximum(1.0, pos + res - tp)
    mean_IoU = IoU_array.mean()
    logging.info('mIoU: %.4f' % mean_IoU)
    pixel_acc = tp.sum() / pos.sum()
    mean_acc = (tp / np.maximum(1.0, pos)).mean()
    return (mean_IoU, IoU_array, pixel_acc, mean_acc)
def get_matrix_variance(opset, graph, func_input, func_name, mean_out, axes, input_shape):
    """Emit ONNX nodes computing the (biased) variance of *func_input* over
    *axes*, given its precomputed mean *mean_out*.

    Returns:
        (nl, var_out): list of created nodes/initializers and the name of the
        tensor holding the variance.
    """
    nl = []
    # diff = x - mean
    sub_out = fork_name(func_input + '_sub')
    n = onnx.helper.make_node('Sub', [func_input, mean_out], [sub_out])
    nl.append(n)
    # sq = diff * diff
    mul_out = fork_name(func_input + '_mul')
    n = onnx.helper.make_node('Mul', [sub_out, sub_out], [mul_out])
    nl.append(n)
    sum_out = fork_name(func_input + '_sum')
    if opset == '13':
        # Opset 13 moved ReduceSum's axes from an attribute to an input tensor.
        axes_shape = (len(axes),)
        axes_param_name = fork_name('ReduceSumAxes')
        # np.ndarray.tostring() was deprecated and removed in NumPy 2.0;
        # tobytes() returns identical bytes.
        add_param(graph, axes_param_name, TensorProto.INT64, axes_shape, np.array(axes).astype(np.int64).tobytes())
        n = onnx.helper.make_node('ReduceSum', [mul_out, axes_param_name], [sum_out], keepdims=1, noop_with_empty_axes=0)
    else:
        n = onnx.helper.make_node('ReduceSum', [mul_out], [sum_out], axes=axes, keepdims=True)
    nl.append(n)
    # Divide by the number of reduced elements.
    count = [input_shape[i] for i in axes]
    constant = fork_name('constant')
    c = generate_constant(constant, func_name + '_constant', TensorProto.FLOAT, [1], [np.prod(count)])
    nl.append(c)
    var_out = fork_name(func_input) + '_div'
    n = onnx.helper.make_node('Div', [sum_out, constant], [var_out])
    nl.append(n)
    return (nl, var_out)
def hist_viz(hist: List[Tuple[np.ndarray, np.ndarray]], nrows: List[int], col: str, yscale: str, plot_width: int, plot_height: int, show_yticks: bool, df_labels: List[str], orig: Optional[List[str]] = None) -> Figure:
    """Render overlaid histograms for column *col*, one quad glyph set per
    source in *hist* (each entry is a (counts, bin_edges) pair).

    Returns the configured bokeh Figure.
    """
    tooltips = [('Bin', ''), ('Frequency', ''), ('Percent', '{0.2f}%'), ('Source', '')]
    fig = Figure(plot_height=plot_height, plot_width=plot_width, title=col, toolbar_location=None, y_axis_type=yscale)
    # Seed df so the axis formatting below is safe even when every histogram
    # is empty (the original raised NameError in that case).
    df = pd.DataFrame()
    for i, hst in enumerate(hist):
        (counts, bins) = hst
        if sum(counts) == 0:
            # Draw an invisible glyph so the color cycle stays aligned.
            fig.rect(x=0, y=0, width=0, height=0)
            continue
        intvls = _format_bin_intervals(bins)
        df = pd.DataFrame({
            'intvl': intvls,
            'left': bins[:-1],
            'right': bins[1:],
            'freq': counts,
            'pct': (counts / nrows[i]) * 100,
            'orig': orig[i] if orig else None,
        })
        # On a log axis a zero bottom is invalid; start at half the smallest count.
        bottom = 0 if (yscale == 'linear' or df.empty) else counts.min() / 2
        fig.quad(source=df, left='left', right='right', bottom=bottom, alpha=0.5, top='freq', fill_color=CATEGORY10[i], line_color=CATEGORY10[i])
    hover = HoverTool(tooltips=tooltips, attachment='vertical', mode='vline')
    fig.add_tools(hover)
    tweak_figure(fig, 'hist', show_yticks)
    fig.yaxis.axis_label = 'Frequency'
    if not df.empty:
        _format_axis(fig, df.iloc[0]['left'], df.iloc[-1]['right'], 'x')
    x_axis_label = ''
    if show_yticks:
        x_axis_label += col
        if yscale == 'linear' and not df.empty:
            _format_axis(fig, 0, df['freq'].max(), 'y')
    if orig:
        if orig != df_labels:
            # Typo fix: the message previously read 'vairable'.
            if x_axis_label:
                x_axis_label += f", this variable is only in {','.join(orig)}"
            else:
                x_axis_label += f"This variable is only in {','.join(orig)}"
    fig.xaxis.axis_label = x_axis_label
    fig.xaxis.axis_label_standoff = 0
    return fig
def get_platform_toolset_str():
    """Return the MSVC platform toolset string (e.g. 'v141') inferred from
    ``msbuild /ver`` output; falls back to 'v110' when it cannot be parsed.
    """
    default = 'v110'
    # check_output returns *bytes* on Python 3; the original called
    # str.split('\n') on it, which raises TypeError.
    vstr = check_output(['msbuild', '/ver']).decode(errors='replace')
    # strip() before splitting: a trailing newline would otherwise make the
    # last "line" empty and force the default.
    lines = vstr.strip().splitlines()
    if not lines:
        return default
    # The version number (e.g. '15.9.21.664') is the last line of the output.
    tokens = lines[-1].split('.')
    if len(tokens) < 2:
        return default
    if tokens[0] == '15':
        # MSBuild 15 corresponds to VS2017's v141 toolset.
        return 'v141'
    return 'v' + tokens[0] + tokens[1]
def check_eol(filename):
    """Detect the end-of-line convention used by *filename*.

    Returns '\\r\\n', '\\n' or '\\r'; defaults to '\\n' when the file
    contains no line terminator at all.
    """
    with open(filename, 'rb') as fh:
        raw = fh.read()
    # Check CRLF before LF/CR so Windows files are not misclassified.
    for marker, eol in ((b'\r\n', u'\r\n'), (b'\n', u'\n'), (b'\r', u'\r')):
        if marker in raw:
            return eol
    return u'\n'
class BaseModel(ABC):
    """Abstract base for models: owns device placement, checkpoint I/O,
    learning-rate scheduling, and bookkeeping of losses / visuals / image
    paths.

    Subclasses populate ``loss_names`` / ``model_names`` / ``visual_names`` /
    ``optimizers`` and implement ``set_input``, ``forward`` and
    ``optimize_parameters``. For each entry ``X`` in ``model_names`` the
    subclass is expected to define an attribute ``netX``; for each entry in
    ``loss_names`` an attribute ``loss_<name>``.
    """

    def __init__(self, opt):
        """Store options and derive device / checkpoint directory.

        Args:
            opt: option namespace; must provide ``gpu_ids``, ``isTrain``,
                ``checkpoints_dir``, ``name`` and ``preprocess``.
        """
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        if opt.preprocess != 'scale_width':
            # cudnn autotune pays off for constant input sizes; 'scale_width'
            # yields variable sizes, so it is skipped there.
            torch.backends.cudnn.benchmark = True
        self.loss_names = []
        self.model_names = []
        self.visual_names = []
        self.optimizers = []
        self.image_paths = []

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add model-specific CLI options to *parser* and return it.

        Declared @staticmethod (the original lacked the decorator, so calling
        it on an *instance* would mis-bind ``parser`` as ``self``); callers
        invoke it on the class before any instance exists.
        """
        return parser

    def set_input(self, input):
        """Unpack a dataloader batch into model attributes (subclass hook)."""
        pass

    def forward(self):
        """Run the forward pass (subclass hook)."""
        pass

    def is_train(self):
        """Whether this model supports training."""
        return True

    def optimize_parameters(self):
        """Compute losses, gradients and update weights (subclass hook)."""
        pass

    def setup(self, opt):
        """Create schedulers (train) or load networks (test/continue), then
        print network summaries."""
        if self.isTrain:
            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
        if (not self.isTrain) or opt.continue_train:
            self.load_networks(opt.epoch)
        self.print_networks(opt.verbose)

    def eval(self):
        """Switch every registered network to eval mode."""
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, 'net' + name)
                net.eval()

    def test(self):
        """Forward pass without gradients, then refresh visuals."""
        with torch.no_grad():
            self.forward()
            self.compute_visuals()

    def compute_visuals(self):
        """Prepare extra visualization outputs (subclass hook)."""
        pass

    def get_image_paths(self):
        """Paths of the images currently loaded via set_input."""
        return self.image_paths

    def update_learning_rate(self):
        """Step every scheduler and print the resulting learning rate."""
        for scheduler in self.schedulers:
            scheduler.step()
        lr = self.optimizers[0].param_groups[0]['lr']
        print('learning rate = %.7f' % lr)

    def get_current_visuals(self):
        """Return an OrderedDict of the attributes named in visual_names."""
        visual_ret = OrderedDict()
        for name in self.visual_names:
            if isinstance(name, str):
                visual_ret[name] = getattr(self, name)
        return visual_ret

    def get_current_losses(self):
        """Return an OrderedDict mapping loss names to float values."""
        errors_ret = OrderedDict()
        for name in self.loss_names:
            if isinstance(name, str):
                errors_ret[name] = float(getattr(self, 'loss_' + name))
        return errors_ret

    def save_networks(self, epoch):
        """Save every registered network as '<epoch>_net_<name>.pth'."""
        for name in self.model_names:
            if isinstance(name, str):
                save_filename = '%s_net_%s.pth' % (epoch, name)
                save_path = os.path.join(self.save_dir, save_filename)
                net = getattr(self, 'net' + name)
                if len(self.gpu_ids) > 0 and torch.cuda.is_available():
                    # Save CPU weights of the wrapped module, then move back.
                    torch.save(net.module.cpu().state_dict(), save_path)
                    net.cuda(self.gpu_ids[0])
                else:
                    torch.save(net.cpu().state_dict(), save_path)

    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
        """Drop InstanceNorm buffers (running stats / num_batches_tracked)
        from *state_dict* when the target module does not carry them —
        compatibility shim for checkpoints from older PyTorch versions."""
        key = keys[i]
        if (i + 1) == len(keys):
            if module.__class__.__name__.startswith('InstanceNorm') and (key == 'running_mean' or key == 'running_var'):
                if getattr(module, key) is None:
                    state_dict.pop('.'.join(keys))
            if module.__class__.__name__.startswith('InstanceNorm') and key == 'num_batches_tracked':
                state_dict.pop('.'.join(keys))
        else:
            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)

    def load_networks(self, epoch):
        """Load every registered network from '<epoch>_net_<name>.pth'."""
        for name in self.model_names:
            if isinstance(name, str):
                load_filename = '%s_net_%s.pth' % (epoch, name)
                load_path = os.path.join(self.save_dir, load_filename)
                net = getattr(self, 'net' + name)
                if isinstance(net, torch.nn.DataParallel):
                    net = net.module
                print('loading the model from %s' % load_path)
                state_dict = torch.load(load_path, map_location=str(self.device))
                if hasattr(state_dict, '_metadata'):
                    del state_dict._metadata
                for key in list(state_dict.keys()):
                    self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
                net.load_state_dict(state_dict)

    def print_networks(self, verbose):
        """Print parameter counts (and full architecture when verbose)."""
        print(' Networks initialized ')
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, 'net' + name)
                num_params = 0
                for param in net.parameters():
                    num_params += param.numel()
                if verbose:
                    print(net)
                print('[Network %s] Total number of parameters : %.3f M' % (name, (num_params / 1000000.0)))
        print('')

    def set_requires_grad(self, nets, requires_grad=False):
        """Enable/disable gradients for one network or a list of networks."""
        if not isinstance(nets, list):
            nets = [nets]
        for net in nets:
            if net is not None:
                for param in net.parameters():
                    param.requires_grad = requires_grad
def ResNet101(input_channels=3, imsize=32, output_dim=10):
    """Construct a ResNet-101: Bottleneck blocks with the standard
    [3, 4, 23, 3] stage configuration."""
    return ResNet(Bottleneck, [3, 4, 23, 3], input_channels, imsize, output_dim)
class TFAutoModelForZeroShotImageClassification(_BaseAutoModelClass):
    # Auto-class dispatching to the TF zero-shot image classification model
    # registered for the given config type.
    _model_mapping = TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
def _palette_is_grayscale(pil_image):
    """Return True when every palette entry actually used by *pil_image*
    has equal R, G and B components (i.e. the image is grayscale).

    Raises ValueError unless the image is palette-mode ('P').
    """
    if pil_image.mode != 'P':
        raise ValueError('pil_image.mode must be equal to "P".')
    colors = np.asarray(pil_image.getpalette()).reshape((-1, 3))
    # getextrema() gives the smallest/largest palette index in use.
    lo, hi = pil_image.getextrema()
    used = colors[lo:hi + 1]
    # Within-triple differences are all zero exactly when R == G == B.
    return np.allclose(np.diff(used), 0)
class FileSequence(AppendableSequence, Closeable):
    """An append-only sequence persisted one serialized item per line.

    Byte offsets of each line are tracked by a companion
    FileSequenceOffsets, so random access by index is a seek + readline;
    FileSequenceMetaData records the sequence length.
    """

    def __init__(self, path, serializer=None):
        self._path = path
        self._ser = serializer if serializer is not None else UnicodeSerializer()
        # Independent read and append handles over the same backing file.
        self._f_read = open_or_create(path, 'r')
        self._f_write = open_or_create(path, 'a')
        self._offsets = FileSequenceOffsets(self)
        self._meta = FileSequenceMetaData(self)

    def close(self):
        """Close metadata, offset index and both file handles."""
        self._meta.close()
        self._offsets.close()
        self._f_write.close()
        self._f_read.close()

    def closed(self):
        """True once every underlying resource is closed."""
        resources = (self._meta, self._offsets, self._f_write, self._f_read)
        return all(r.closed for r in resources)

    def __repr__(self):
        return 'FileSequence at {}'.format(self._path)

    def path(self):
        """Backing file path."""
        return self._path

    def _strip_newline(self, line):
        # Every stored line ends with the '\n' written by extend().
        return line[:-1]

    def __getitem__(self, i):
        # Slices are resolved lazily through a view object.
        if isinstance(i, slice):
            return SequenceSlice(self, i)
        reader = self._f_read
        reader.seek(self._offsets[i])
        raw = self._strip_newline(reader.readline())
        return self._ser.from_line(raw)

    def __len__(self):
        return len(self._offsets)

    def append(self, item):
        self.extend([item])

    def extend(self, items):
        """Serialize and append *items*, then publish length and offsets."""
        writer = self._f_write
        new_offsets = []
        for item in items:
            new_offsets.append(writer.tell())
            writer.write(self._ser.to_line(item))
            writer.write('\n')
        writer.flush()
        # Update the recorded length before exposing the new offsets.
        self._meta.length += len(new_offsets)
        self._offsets.extend(new_offsets)

    def iter_raw_lines(self):
        """Yield raw lines (newline included) straight from the read handle."""
        for raw in self._f_read:
            yield raw

    def __iter__(self):
        for raw in self.iter_raw_lines():
            yield self._ser.from_line(self._strip_newline(raw))
class SpacyTokenizer(Tokenizer):
    # Tokenizer backed by the legacy spaCy pipeline API, where components are
    # attributes (nlp.tagger / nlp.entity) invoked directly on a Doc.

    def __init__(self, **kwargs):
        """
        Args:
            model: spaCy model name or path (default 'en').
            annotators: set of optional extras among {'lemma', 'pos', 'ner'}.
        """
        model = kwargs.get('model', 'en')
        self.annotators = copy.deepcopy(kwargs.get('annotators', set()))
        # Load only the pipeline pieces the requested annotators need; the
        # dependency parser is never used.
        nlp_kwargs = {'parser': False}
        if (not any([(p in self.annotators) for p in ['lemma', 'pos', 'ner']])):
            nlp_kwargs['tagger'] = False
        if ('ner' not in self.annotators):
            nlp_kwargs['entity'] = False
        self.nlp = spacy.load(model, **nlp_kwargs)

    def tokenize(self, text):
        """Tokenize *text* and return a Tokens object of
        (text, text_with_ws, (start, end), tag, lemma, ent_type) tuples."""
        # Newlines confuse the tokenizer; replace them with spaces. Character
        # offsets stay valid because the string length is unchanged.
        clean_text = text.replace('\n', ' ')
        tokens = self.nlp.tokenizer(clean_text)
        # Run tagger before entity recognizer, matching the pipeline order.
        if any([(p in self.annotators) for p in ['lemma', 'pos', 'ner']]):
            self.nlp.tagger(tokens)
        if ('ner' in self.annotators):
            self.nlp.entity(tokens)
        data = []
        for i in range(len(tokens)):
            # Span of token i including trailing whitespace (up to next token).
            start_ws = tokens[i].idx
            if ((i + 1) < len(tokens)):
                end_ws = tokens[(i + 1)].idx
            else:
                end_ws = (tokens[i].idx + len(tokens[i].text))
            data.append((tokens[i].text, text[start_ws:end_ws], (tokens[i].idx, (tokens[i].idx + len(tokens[i].text))), tokens[i].tag_, tokens[i].lemma_, tokens[i].ent_type_))
        return Tokens(data, self.annotators, opts={'non_ent': ''})
def env_1():
    """Fixture: a small Warehouse where agent 0 stands on (and carries)
    shelf 0, which is also the currently requested shelf; agent 1 idles
    elsewhere. Positions are written directly and the grid recomputed."""
    env = Warehouse(3, 8, 3, 2, 0, 1, 5, None, None, RewardType.GLOBAL)
    env.reset()
    # Agent 0 on shelf 0's cell, facing down, carrying that shelf.
    env.agents[0].x = 4
    env.agents[0].y = 27
    env.agents[0].dir = Direction.DOWN
    env.shelfs[0].x = 4
    env.shelfs[0].y = 27
    env.agents[0].carrying_shelf = env.shelfs[0]
    # Agent 1 placed away from the action.
    env.agents[1].x = 3
    env.agents[1].y = 3
    # The carried shelf is the one currently requested.
    env.request_queue[0] = env.shelfs[0]
    # Positions were mutated directly, so rebuild the occupancy grid.
    env._recalc_grid()
    return env
def inception_v2_base(inputs, final_endpoint='Mixed_5c', min_depth=16, depth_multiplier=1.0, scope=None):
    """Build the Inception V2 convolutional trunk up to *final_endpoint*.

    Args:
        inputs: input image tensor.
        final_endpoint: name of the endpoint at which to stop and return.
        min_depth: lower bound applied to every scaled layer width.
        depth_multiplier: multiplier on all layer widths; must be > 0.
        scope: optional variable scope name (defaults to 'InceptionV2').

    Returns:
        (net, end_points): the tensor at *final_endpoint* and a dict of every
        endpoint built so far.

    Raises:
        ValueError: for a non-positive depth_multiplier or an unknown
        final_endpoint.
    """
    end_points = {}
    if (depth_multiplier <= 0):
        raise ValueError('depth_multiplier is not greater than zero.')
    # Scale a nominal layer width, never dropping below min_depth.
    depth = (lambda d: max(int((d * depth_multiplier)), min_depth))
    with tf.variable_scope(scope, 'InceptionV2', [inputs]):
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d, slim.separable_conv2d], stride=1, padding='SAME'):
            # Stem: separable 7x7 conv, then pooling and 1x1/3x3 convs.
            end_point = 'Conv2d_1a_7x7'
            depthwise_multiplier = min(int((depth(64) / 3)), 8)
            net = slim.separable_conv2d(inputs, depth(64), [7, 7], depth_multiplier=depthwise_multiplier, stride=2, weights_initializer=trunc_normal(1.0), scope=end_point)
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'MaxPool_2a_3x3'
            net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Conv2d_2b_1x1'
            net = slim.conv2d(net, depth(64), [1, 1], scope=end_point, weights_initializer=trunc_normal(0.1))
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Conv2d_2c_3x3'
            net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'MaxPool_3a_3x3'
            net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            # Inception blocks: four parallel branches concatenated on the
            # channel axis. NOTE(review): tf.concat(3, values) is the
            # pre-TF-1.0 argument order (axis first) — this file targets an
            # old TensorFlow; do not "fix" without migrating the whole file.
            end_point = 'Mixed_3b'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(64), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(32), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Mixed_3c'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(64), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            # Mixed_4a: stride-2 reduction block (three branches, incl. pool).
            end_point = 'Mixed_4a'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2, scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
                    branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
                net = tf.concat(3, [branch_0, branch_1, branch_2])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Mixed_4b'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Mixed_4c'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(128), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Mixed_4d'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(160), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(160), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(160), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(96), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Mixed_4e'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(160), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(192), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(192), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(96), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            # Mixed_5a: second stride-2 reduction block.
            end_point = 'Mixed_5a'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2, scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], scope='Conv2d_0b_3x3')
                    branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2, scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
                net = tf.concat(3, [branch_0, branch_1, branch_2])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Mixed_5b'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(160), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            # Mixed_5c uses max pooling in Branch_3 (unlike 5b's avg pooling).
            end_point = 'Mixed_5c'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
        raise ValueError(('Unknown final endpoint %s' % final_endpoint))
class ResultVisualizer():
    """Dump annotated images for the best- and worst-scoring detections.

    Ranks per-image results by an evaluation function (mAP by default) and
    saves GT-vs-prediction visualizations for the top-k / bottom-k images.
    """

    def __init__(self, show=False, wait_time=0, score_thr=0):
        self.show = show            # display each image interactively
        self.wait_time = wait_time  # display wait time per image
        self.score_thr = score_thr  # minimum detection score to draw

    def _save_image_gts_results(self, dataset, results, mAPs, out_dir=None):
        """Render GT + detections for every (index, score) pair into out_dir."""
        mmcv.mkdir_or_exist(out_dir)
        for index, mAP in mAPs:
            data_info = dataset.prepare_train_img(index)
            filename = data_info['filename']
            if data_info['img_prefix'] is not None:
                filename = osp.join(data_info['img_prefix'], filename)
            fname, ext = osp.splitext(osp.basename(filename))
            # Embed the rounded score in the saved file name.
            save_filename = '{}_{}{}'.format(fname, round(mAP, 3), ext)
            out_file = osp.join(out_dir, save_filename)
            imshow_gt_det_bboxes(
                data_info['img'],
                data_info,
                results[index],
                dataset.CLASSES,
                gt_bbox_color=dataset.PALETTE,
                gt_text_color=(200, 200, 200),
                gt_mask_color=dataset.PALETTE,
                det_bbox_color=dataset.PALETTE,
                det_text_color=(200, 200, 200),
                det_mask_color=dataset.PALETTE,
                show=self.show,
                score_thr=self.score_thr,
                wait_time=self.wait_time,
                out_file=out_file)

    def evaluate_and_show(self, dataset, results, topk=20, show_dir='work_dir', eval_fn=None):
        """Score every result, then save the `topk` best and worst images."""
        assert topk > 0
        if topk * 2 > len(dataset):
            topk = len(dataset) // 2
        if eval_fn is None:
            eval_fn = bbox_map_eval
        else:
            assert callable(eval_fn)
        prog_bar = mmcv.ProgressBar(len(results))
        scores = {}
        for idx, single_result in enumerate(results):
            info = dataset.prepare_train_img(idx)
            scores[idx] = eval_fn(single_result, info['ann_info'])
            prog_bar.update()
        # Ascending by score: tail is "good", head is "bad".
        ranked = sorted(scores.items(), key=lambda kv: kv[1])
        good_dir = osp.abspath(osp.join(show_dir, 'good'))
        bad_dir = osp.abspath(osp.join(show_dir, 'bad'))
        self._save_image_gts_results(dataset, results, ranked[-topk:], good_dir)
        self._save_image_gts_results(dataset, results, ranked[:topk], bad_dir)
class DBEngine():
    """Execute WikiSQL-style structured queries against a SQLite file."""

    def __init__(self, fdb):
        # `records` wraps SQLAlchemy; fdb is the sqlite file path.
        self.db = records.Database('sqlite:///{}'.format(fdb))

    def execute_query(self, table_id, query, *args, **kwargs):
        """Convenience wrapper accepting a Query object."""
        return self.execute(table_id, query.sel_index, query.agg_index, query.conditions, *args, **kwargs)

    def execute(self, table_id, select_index, aggregation_index, conditions, lower=True):
        """Run SELECT (with optional aggregation and WHERE) and return results."""
        if not table_id.startswith('table'):
            table_id = 'table_{}'.format(table_id.replace('-', '_'))
        # Recover column types from the table's CREATE statement.
        table_info = self.db.query('SELECT sql from sqlite_master WHERE tbl_name = :name', name=table_id).all()[0].sql
        schema_str = schema_re.findall(table_info)[0]
        schema = {col: col_type for col, col_type in (pair.split() for pair in schema_str.split(', '))}
        select = 'col{}'.format(select_index)
        agg = Query.agg_ops[aggregation_index]
        if agg:
            select = '{}({})'.format(agg, select)
        where_clause = []
        where_map = {}
        for col_index, op, val in conditions:
            if lower and isinstance(val, str):
                val = val.lower()
            # Coerce condition values to float for REAL columns.
            if (schema['col{}'.format(col_index)] == 'real') and (not isinstance(val, (int, float))):
                try:
                    val = float(parse_decimal(val))
                except NumberFormatError:
                    val = float(num_re.findall(val)[0])
            where_clause.append('col{} {} :col{}'.format(col_index, Query.cond_ops[op], col_index))
            where_map['col{}'.format(col_index)] = val
        where_str = ('WHERE ' + ' AND '.join(where_clause)) if where_clause else ''
        query = 'SELECT {} AS result FROM {} {}'.format(select, table_id, where_str)
        out = self.db.query(query, **where_map)
        return [row.result for row in out]
class NavierStokesIRK3(IRK3):
    """Coupled velocity-pressure Navier-Stokes solver on a 2D tensor-product
    spectral-Galerkin space, time-stepped with a 3-stage implicit Runge-Kutta
    (IRK3) scheme.

    NOTE(review): relies on module-level names (comm, x, FunctionSpace,
    TensorProductSpace, inner, div, grad, outer, la, BlockMatrix, functools,
    plt, ...) presumably imported from shenfun — confirm against file imports.
    """

    def __init__(self, N=(10, 10), dt=0.01, Re=100.0, modplot=10, family='C'):
        self.Re = Re
        # nu = 2/Re — the factor 2 presumably comes from the reference-domain
        # scaling; confirm against the problem setup.
        self.nu = (2.0 / Re)
        self.N = N
        self.dt = dt
        self.modplot = modplot  # plot every `modplot` time steps
        # Velocity bases: homogeneous Dirichlet in the first direction; the
        # second carries the inhomogeneous boundary profile (1-x)^2(1+x)^2.
        D0 = FunctionSpace(N[0], family, bc=(0, 0))
        D1 = FunctionSpace(N[1], family, bc=(0, (((1 - x) ** 2) * ((1 + x) ** 2))))
        V1 = TensorProductSpace(comm, (D0, D1))
        V0 = V1.get_homogeneous()
        # Pressure space: orthogonal basis truncated by two modes per direction.
        P = V1.get_orthogonal()
        P.bases[0].slice = (lambda : slice(0, (N[0] - 2)))
        P.bases[1].slice = (lambda : slice(0, (N[1] - 2)))
        W1 = VectorSpace([V1, V0])
        # Coupled (velocity, pressure) solution space.
        VQ = CompositeSpace([W1, P])
        self.up_ = Array(VQ)
        self.up_hat = Function(VQ)
        # Dealiased work arrays for the convective term u_i u_j.
        self.uip = Array(W1.get_dealiased())
        S1 = TensorSpace(V1.get_orthogonal().get_dealiased())
        self.uiuj = Array(S1)
        self.uiuj_hat = Function(S1)
        # IRK3 setup needs the composite space built above.
        IRK3.__init__(self, VQ)

    def LinearRHS(self, up):
        """Implicitly treated terms: viscous diffusion minus pressure gradient."""
        (u, p) = up
        return ((self.nu * div(grad(u))) - grad(p))

    def NonlinearRHS(self, up, up_hat, rhs, **params):
        """Explicit convection term -div(u outer u), computed with dealiasing."""
        vq = TestFunction(self.T)
        v = vq[0]
        rhs.fill(0)
        bi_hat = rhs[0]
        ui_hat = up_hat[0]
        Wp = self.uip.function_space()
        # Transform velocity to (dealiased) physical space, form u_i u_j,
        # transform back, then project -div onto the test space.
        uip = Wp.backward(ui_hat, self.uip)
        uiuj = outer(uip, uip, self.uiuj)
        uiuj_hat = uiuj.forward(self.uiuj_hat)
        bi_hat = inner(v, (- div(uiuj_hat)), output_array=bi_hat)
        return rhs

    def setup(self, dt):
        """Assemble the per-stage implicit solvers and explicit RHS matrices."""
        self.params['dt'] = dt
        up = TrialFunction(self.T)
        vq = TestFunction(self.T)
        (u, p) = up
        (v, q) = vq
        (a, b) = (self.a, self.b)
        self.solver = []
        self.rhs_mats = []
        L = self.LinearRHS(up)
        # Divergence-free constraint block.
        A10 = inner(q, div(u))
        for rk in range(3):
            # Implicit stage operator: M - (a_rk + b_rk)*dt/2 * L.
            mats = inner(v, (u - ((((a[rk] + b[rk]) * dt) / 2) * L)))
            mats += A10
            sol = la.BlockMatrixSolver(mats)
            # Constraint (2, 0, 0) fixes one dof of block 2 — presumably the
            # pressure nullspace; confirm against BlockMatrixSolver docs.
            sol = functools.partial(sol, constraints=((2, 0, 0),))
            self.solver.append(sol)
            # Explicit counterpart: M + (a_rk + b_rk)*dt/2 * L.
            rhs_mats = inner(v, (u + ((((a[rk] + b[rk]) * dt) / 2) * L)))
            self.rhs_mats.append(BlockMatrix(rhs_mats))

    def update(self, up, up_hat, t, tstep, **par):
        """Quiver-plot the velocity field every `modplot` steps."""
        if ((tstep % self.modplot) == 0):
            up = up_hat.backward(up)
            u_ = up[0]
            plt.figure(1)
            X = self.T.local_mesh(True)
            plt.quiver(X[0], X[1], u_[0], u_[1])
            plt.pause(0.01)
def trunc_normal_(tensor, mean=0.0, std=1.0, a=(- 2.0), b=2.0):
    """Fill `tensor` in place from a normal(mean, std) truncated to [a, b].

    Thin convenience wrapper around ``_no_grad_trunc_normal_``; returns its
    result (conventionally the filled tensor).
    """
    filled = _no_grad_trunc_normal_(tensor, mean, std, a, b)
    return filled
class BertBaseline(BaseModel):
    """BERT span-extraction baseline for SQuAD-style QA (TF1 graph mode).

    Projects BERT token encodings to per-token start/end logits, trains
    with cross-entropy over the gold span positions, and decodes the best
    answer span per question (with optional SQuAD-2.0 no-answer handling).
    """

    def __init__(self, vocab=None, bert_dir='', version_2_with_negative=True):
        super(BertBaseline, self).__init__(vocab)
        self.bert_dir = bert_dir
        # SQuAD 2.0 mode: questions may be unanswerable.
        self.version_2_with_negative = version_2_with_negative
        self._build_graph()

    def _build_graph(self):
        """Define placeholders, the BERT encoder, span logits, loss, metrics."""
        self.training = tf.placeholder(tf.bool, shape=())
        self.input_ids = tf.placeholder(shape=[None, None], dtype=tf.int32)
        self.input_mask = tf.placeholder(shape=[None, None], dtype=tf.int32)
        self.segment_ids = tf.placeholder(shape=[None, None], dtype=tf.int32)
        self.start_position = tf.placeholder(shape=[None], dtype=tf.int32)
        self.end_position = tf.placeholder(shape=[None], dtype=tf.int32)
        self.bert_embedding = BertEmbedding(self.bert_dir)
        final_hidden = self.bert_embedding(input_ids=self.input_ids, input_mask=self.input_mask, segment_ids=self.segment_ids, is_training=self.training, return_pool_output=False)
        final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
        batch_size = final_hidden_shape[0]
        seq_length = final_hidden_shape[1]
        hidden_size = final_hidden_shape[2]
        # Single dense projection producing (start, end) scores per token.
        output_weights = tf.get_variable('cls/squad/output_weights', [2, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02))
        output_bias = tf.get_variable('cls/squad/output_bias', [2], initializer=tf.zeros_initializer())
        final_hidden_matrix = tf.reshape(final_hidden, [(batch_size * seq_length), hidden_size])
        logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        logits = tf.reshape(logits, [batch_size, seq_length, 2])
        logits = tf.transpose(logits, [2, 0, 1])
        unstacked_logits = tf.unstack(logits, axis=0)
        start_logits = unstacked_logits[0]
        end_logits = unstacked_logits[1]

        def compute_loss(logits, positions):
            # Cross-entropy of the gold position against the token softmax.
            one_hot_positions = tf.one_hot(positions, depth=seq_length, dtype=tf.float32)
            log_probs = tf.nn.log_softmax(logits, axis=(- 1))
            loss = (- tf.reduce_mean(tf.reduce_sum((one_hot_positions * log_probs), axis=(- 1))))
            return loss
        # Ensure a global-step variable exists for the optimizer.
        global_step = tf.train.get_or_create_global_step()
        start_loss = compute_loss(start_logits, self.start_position)
        end_loss = compute_loss(end_logits, self.end_position)
        self.loss = ((start_loss + end_loss) / 2.0)
        self.input_placeholder_dict = OrderedDict({'input_ids': self.input_ids, 'input_mask': self.input_mask, 'segment_ids': self.segment_ids, 'training': self.training, 'start_position': self.start_position, 'end_position': self.end_position})
        self.output_variable_dict = OrderedDict({'start_logits': start_logits, 'end_logits': end_logits})
        with tf.variable_scope('train_metrics'):
            self.train_metrics = {'loss': tf.metrics.mean(self.loss)}
        self.train_update_metrics = tf.group(*[op for (_, op) in self.train_metrics.values()])
        metric_variables = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='train_metrics')
        self.train_metric_init_op = tf.variables_initializer(metric_variables)
        with tf.variable_scope('eval_metrics'):
            self.eval_metrics = {'loss': tf.metrics.mean(self.loss)}
        self.eval_update_metrics = tf.group(*[op for (_, op) in self.eval_metrics.values()])
        metric_variables = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='eval_metrics')
        self.eval_metric_init_op = tf.variables_initializer(metric_variables)
        tf.summary.scalar('loss', self.loss)
        self.summary_op = tf.summary.merge_all()

    def compile(self, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False):
        """Create the BERT Adam training op with warmup."""
        self.train_op = optimization.create_optimizer(self.loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

    def train_and_evaluate(self, train_generator, eval_generator, evaluator, epochs=1, eposides=1, save_dir=None, summary_dir=None, save_summary_steps=10):
        """Initialize variables (first call only) and run the shared Trainer loop."""
        if (not self.initialized):
            self.bert_embedding.init_bert()
            self.session.run(tf.global_variables_initializer())
        Trainer._train_and_evaluate(self, train_generator, eval_generator, evaluator, epochs=epochs, eposides=eposides, save_dir=save_dir, summary_dir=summary_dir, save_summary_steps=save_summary_steps)

    def get_best_answer(self, output, instances, max_answer_len=30, null_score_diff_threshold=0.0):
        """Decode the best answer text per question id from start/end logits.

        Args:
            output: dict with per-instance 'start_logits' and 'end_logits'.
            instances: feature dicts ('tokens', 'token_to_orig_map',
                'token_is_max_context', 'qid', ...).
            max_answer_len: maximum allowed span length (in tokens).
            null_score_diff_threshold: SQuAD-2.0 threshold above which the
                question is predicted unanswerable.

        Returns:
            qid -> answer text dict; additionally qid -> no-answer score
            when version_2_with_negative is set.
        """
        def _get_best_indexes(logits, n_best_size):
            # Indices of the n_best_size largest logits, descending.
            index_and_score = sorted(enumerate(logits), key=(lambda x: x[1]), reverse=True)
            best_indexes = []
            for i in range(len(index_and_score)):
                if (i >= n_best_size):
                    break
                best_indexes.append(index_and_score[i][0])
            return best_indexes
        qid_with_max_logits = {}
        qid_with_final_text = {}
        qid_with_null_logits = {}
        na_prob = {}
        for i in range(len(instances)):
            instance = instances[i]
            start_logits = output['start_logits'][i]
            end_logits = output['end_logits'][i]
            score_null = 1000000
            if self.version_2_with_negative:
                # The [CLS] position (index 0) scores the no-answer hypothesis.
                feature_null_score = (start_logits[0] + end_logits[0])
                if (feature_null_score < score_null):
                    score_null = feature_null_score
            start_indexes = _get_best_indexes(start_logits, n_best_size=20)
            end_indexes = _get_best_indexes(end_logits, n_best_size=20)
            max_start_index = (- 1)
            max_end_index = (- 1)
            # Bug fix: this assignment was garbled ("(- )") in the source;
            # start from -inf so any valid span score replaces it.
            max_logits = float('-inf')
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # Skip spans outside the passage, reversed, too long, or
                    # starting where this window lacks max context for the token.
                    if (start_index >= len(instance['tokens'])):
                        continue
                    if (end_index >= len(instance['tokens'])):
                        continue
                    if (start_index not in instance['token_to_orig_map']):
                        continue
                    if (end_index not in instance['token_to_orig_map']):
                        continue
                    if (end_index < start_index):
                        continue
                    if (not instance['token_is_max_context'].get(start_index, False)):
                        continue
                    length = ((end_index - start_index) - 1)
                    if (length > max_answer_len):
                        continue
                    sum_logits = (start_logits[start_index] + end_logits[end_index])
                    if (sum_logits > max_logits):
                        max_logits = sum_logits
                        max_start_index = start_index
                        max_end_index = end_index
            final_text = ''
            # (Original condition simplified: it reduced to "a span was found".)
            if ((max_start_index != (- 1)) and (max_end_index != (- 1))):
                final_text = self.prediction_to_ori(max_start_index, max_end_index, instance)
            qid = instance['qid']
            if ((qid in qid_with_max_logits) and (max_logits > qid_with_max_logits[qid])):
                qid_with_max_logits[qid] = max_logits
                qid_with_final_text[qid] = final_text
            if (qid not in qid_with_max_logits):
                qid_with_max_logits[qid] = max_logits
                qid_with_final_text[qid] = final_text
            if self.version_2_with_negative:
                if (qid not in qid_with_null_logits):
                    qid_with_null_logits[qid] = score_null
                # NOTE(review): keeps the LARGEST null score across windows;
                # reference BERT keeps the smallest — confirm this is intended.
                if ((qid in qid_with_null_logits) and (score_null > qid_with_null_logits[qid])):
                    qid_with_null_logits[qid] = score_null
                if ((qid_with_null_logits[qid] - qid_with_max_logits[qid]) > null_score_diff_threshold):
                    qid_with_final_text[qid] = ''
                na_prob[qid] = (qid_with_null_logits[qid] - qid_with_max_logits[qid])
        if (not self.version_2_with_negative):
            return qid_with_final_text
        return (qid_with_final_text, na_prob)

    def prediction_to_ori(self, start_index, end_index, instance):
        """Map a token-level span back to a substring of the original context."""
        if (start_index > 0):
            orig_doc_start = instance['token_to_orig_map'][start_index]
            orig_doc_end = instance['token_to_orig_map'][end_index]
            char_start_position = instance['context_token_spans'][orig_doc_start][0]
            char_end_position = instance['context_token_spans'][orig_doc_end][1]
            return instance['context'][char_start_position:char_end_position]
        return ''

    def evaluate(self, batch_generator, evaluator):
        """Load BERT weights and run the shared evaluation loop."""
        self.bert_embedding.init_bert()
        Trainer._evaluate(self, batch_generator, evaluator)
class PDELU_SENet(nn.Module):
    """SENet-style CIFAR backbone whose stem activation is PDELU."""

    def __init__(self, block, num_blocks, num_classes=100):
        super(PDELU_SENet, self).__init__()
        self.in_planes = 64
        # 3x3 stem conv keeps the 32x32 spatial resolution.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)
        self.pdelu = PDELU()

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first may downsample."""
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.pdelu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)
class CDilatedB(nn.Module):
def __init__(self, nIn, nOut, kSize, stride=1, d=1, groups=1):
super().__init__()
padding = (int(((kSize - 1) / 2)) * d)
self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=padding, bias=False, dilation=d, groups=groups)
self.bn = nn.BatchNorm2d(nOut)
def forward(self, input):
return self.bn(self.conv(input)) |
class WFRadiationMeshQxMax(RadiationField):
    """Accessor for params/Mesh/qxMax: the maximum horizontal Q value of the
    wavefront mesh. Only meaningful when the wavefront is in Q-space.

    Bug fix: the getter and setter were both plain methods named `value`, so
    the setter silently shadowed the getter. Restored the property/setter
    pattern this glossary-field class evidently intends.
    """
    glossary_name = 'params/Mesh/qxMax'

    def __init__(self, wf):
        super(WFRadiationMeshQxMax, self).__init__(wf)

    @property
    def value(self):
        """Return mesh.xFin in Q-space; warn and return None otherwise."""
        if (self._wf.params.wSpace == 'Q-space'):
            return self._wf._srwl_wf.mesh.xFin
        else:
            warnings.warn('params/Mesh/qxMax not defined if NOT params/wSpace==Q-space')
            return None

    @value.setter
    def value(self, val):
        # Warn (but still set) when the wavefront is not in Q-space.
        if (not (self._wf.params.wSpace == 'Q-space')):
            warnings.warn('params/Mesh/qxMax not defined if NOT params/wSpace==Q-space')
        self._wf._srwl_wf.mesh.xFin = float(val)
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.SIMPLE, "Simple Executor doesn't support gradients")
class TestAutodiffSubgraphSlicing(JitTestCase):
    """Tests for how the JIT groups differentiable ops into
    prim::DifferentiableGraph nodes (autodiff subgraph slicing).

    NOTE(review): the class decorator and the `@torch.jit.script` decorator
    were garbled in the source (a bare tuple / a bare `.script` line, which
    is a syntax error); restored to their conventional form.
    """

    def _perform_ad_subgraph_slicing(self, fn, *input_sizes):
        # Script fn, run it once under profiling, return the optimized graph.
        with disable_autodiff_subgraph_inlining():
            with enable_profiling_mode_for_profiling_tests():
                ge = torch.jit.script(fn)
                inputs = [torch.randn(size, requires_grad=True) for size in input_sizes]
                ge(*inputs, profile_and_replay=True)
                return ge.graph_for(*inputs)

    def assertGraphSize(self, graph, size):
        # Node count excluding bailout bookkeeping nodes.
        nodes = list(filter((lambda n: ((n.kind() != 'prim::BailOut') and (n.kind() != 'prim::BailoutTemplate'))), graph.nodes()))
        self.assertEqual(len(list(nodes)), size)

    def test_chunk_constant_script_ad(self):
        @torch.jit.script
        def func(x):
            (x1, x2) = torch.chunk(x, 2)
            return (x1, x2)
        input = torch.rand(6, 10).requires_grad_()
        with disable_autodiff_subgraph_inlining():
            with enable_profiling_mode_for_profiling_tests():
                output = func(input, profile_and_replay=True)
                self.assertAutodiffNode(func.graph_for(input), True, ['prim::ConstantChunk'], [])

    def test_simple_merge(self):
        # b depends on a; both are differentiable -> one fused subgraph.
        def fn(x, y, z):
            a = (x * y)
            b = (a * z)
            return b
        graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
        self.assertGraphSize(graph, 1)
        self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)

    def test_simple_no_merge(self):
        # zeros() is non-differentiable, so it must stay outside the subgraph.
        def fn(x, y, z):
            a = (x * y)
            b = torch.zeros([abs(int(y))])
            return (a, b)
        graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
        g_str = str(graph)
        FileCheck().check('aten::Int').check('aten::zeros').check_not('aten::mul').run(g_str[0:g_str.find('return')])
        self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)

    def test_does_not_merge_unrelated(self):
        def fn(w, x, y, z):
            a = (x * y)
            b = (w * z)
            return (a, b)
        graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1, 1)
        self.assertGraphSize(graph, 3)
        self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 2)

    def test_merges_without_cycles(self):
        def fn(w, x, y):
            a = (w * x)
            b = (a * y)
            c = (a * b)
            return c
        graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
        self.assertGraphSize(graph, 1)
        self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)

    def test_merges_dense(self):
        def fn(x, y):
            (a, b) = x.chunk(2)
            (c, d) = y.chunk(2)
            return ((a + c), (b + d))
        graph = self._perform_ad_subgraph_slicing(fn, 2, 2)
        self.assertGraphSize(graph, 2)
        self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)

    def test_does_not_create_cycles(self):
        def fn(w, x, y):
            a = (w * x)
            b = torch.zeros(abs(int(a)))
            c = (a * b)
            return c
        graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
        self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 2)

    def test_merges_up(self):
        def fn(w, x, y, z):
            a = (w * x)
            b = torch.zeros(abs(int(y)))
            c = (a * z)
            return (b, c)
        graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1, 1)
        g_str = str(graph)
        FileCheck().check_not('aten::add').run(g_str[0:g_str.find('return')])
        self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)

    def test_merges_down(self):
        def fn(v, w, x, y):
            a = (v * w)
            b = torch.ones(int(y))
            c = (b * a)
            return (a, c)
        graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1, 1)
        num_nodes = (4 if (GRAPH_EXECUTOR == ProfilingMode.PROFILING) else 3)  # NOTE(review): unused in original
        g_str = str(graph)
        FileCheck().check_not('aten::add').run(g_str[0:g_str.find('return')])
        self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)

    def test_respects_lexical_scoping(self):
        def fn(x, k):
            y = (x * 1.1)
            if bool(k):
                k = (k + y)
            z = (y * k)
            return (z, k)
        graph = self._perform_ad_subgraph_slicing(fn, 1, 1)
        self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 3)

    def test_merge_respects_aliasing(self):
        def fn(x, k, cond):
            y = (x * 1.1)
            y = (y * k)
            y = (y * 2.2)
            if bool(cond):
                z1 = y[0]
                z2 = y[1]
                z1.add_(3)
                out = ((z2 + k) + 3.3)
                out = (out * out)
                return out
        graph = self._perform_ad_subgraph_slicing(fn, [2, 2], [2, 2], 1)
        # The in-place add_ aliases y; merging must not cross the mutation.
        FileCheck().check('prim::If').check('aten::select').check_next('aten::select').check_next('aten::add_').check('Differentiable').run(graph)
        self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 2)
def split_dataset(x, y, ratio=(0.7, 0.15, 0.15)):
    """Split the aligned sequences x and y into train/test/validation parts.

    Bug fixes: removed bogus `flag`/`flags` bookkeeping that referenced an
    undefined name (NameError at runtime), and guarded the zero-length
    validation case, where `x[-0:]` would have returned the whole sequence.

    Args:
        x, y: sliceable sequences of equal length.
        ratio: (train, test, valid) fractions; each split length is
            int(len(x) * fraction).

    Returns:
        ((trainX, trainY), (testX, testY), (validX, validY))
    """
    data_len = len(x)
    lens = [int(data_len * item) for item in ratio]
    trainX, trainY = x[:lens[0]], y[:lens[0]]
    testX, testY = x[lens[0]:lens[0] + lens[1]], y[lens[0]:lens[0] + lens[1]]
    if lens[-1] > 0:
        validX, validY = x[-lens[-1]:], y[-lens[-1]:]
    else:
        # -0 would slice the entire sequence; return empty splits instead.
        validX, validY = x[:0], y[:0]
    return ((trainX, trainY), (testX, testY), (validX, validY))
def compute_rouge_scores(summs, refs, splitchar='.', options=None, parallel=True):
    """Score each summary against its reference set with ROUGE.

    Bug fixes: (1) a caller-supplied `options` list was unconditionally
    overwritten; it is now only defaulted when None. (2) the serial branch
    scored every summary against the loop-leaked `ref_dict` of the LAST
    example instead of its own references.

    Args:
        summs: list of summary strings.
        refs: list (same length) of lists of reference strings.
        splitchar: sentence delimiter passed to split_sentences.
        options: pyrouge option list; defaults to the standard R1/R2/RW setup.
        parallel: score with a process pool when True.

    Returns:
        List of per-summary ROUGE score dicts.
    """
    assert len(summs) == len(refs)
    if options is None:
        options = ['-a', '-c', 95, '-m', '-n', 2, '-w', 1.3]
    rr = Rouge(options=options)
    rouge_args = []
    for summ, ref in zip(summs, refs):
        # References are keyed 'A', 'B', ... as pyrouge expects.
        letter = 'A'
        ref_dict = {}
        for r in ref:
            ref_dict[letter] = [x for x in split_sentences(r, splitchar) if len(x) > 0]
            letter = chr(ord(letter) + 1)
        s = [x for x in split_sentences(summ, splitchar) if len(x) > 0]
        rouge_args.append((s, ref_dict))
    if parallel:
        with closing(Pool(cpu_count() // 2)) as pool:
            rouge_scores = pool.starmap(rr.score_summary, rouge_args)
    else:
        rouge_scores = [rr.score_summary(s, a) for (s, a) in rouge_args]
    return rouge_scores
def register_metrics(types, device, has_detector=True):
    """Build a name -> metric-object dict for the requested metric `types`.

    Instances are cached in the module-level METRIC_DICT so repeated calls
    reuse (potentially expensive) metric objects. Unknown names raise
    ValueError; names must also appear in the module-level TYPES registry.
    """
    global TYPES, METRIC_DICT

    def _build(name):
        # One constructor per supported metric; imports for the face metrics
        # stay lazy, exactly as in the branchy original.
        if name == 'ssim':
            return SSIMMetric()
        if name == 'psnr':
            return PSNRMetric()
        if name == 'lps':
            return PerceptualMetric(device)
        if name == 'is':
            return InceptionScoreMetric(device)
        if name == 'fid':
            return FIDMetric(device)
        if name == 'OS-CS-reid':
            return ReIDScore(device, reid_name=BaseMetric.OSreID, has_detector=has_detector)
        if name == 'OS-freid':
            return FreIDMetric(device, reid_name=BaseMetric.OSreID, has_detector=has_detector)
        if name == 'PCB-CS-reid':
            return ReIDScore(device, reid_name=BaseMetric.PCBreID, has_detector=has_detector)
        if name == 'PCB-freid':
            return FreIDMetric(device, reid_name=BaseMetric.PCBreID, has_detector=has_detector)
        if name == 'SSPE':
            return ScaleShapePoseError(device)
        if name == 'face-CS':
            from .metrics import FaceSimilarityScore
            return FaceSimilarityScore(device=device, has_detector=has_detector)
        if name == 'face-FD':
            from .metrics import FaceFrechetDistance
            return FaceFrechetDistance(device=device, has_detector=has_detector)
        raise ValueError(name)

    selected = dict()
    for metric_name in types:
        assert metric_name in TYPES
        if metric_name not in METRIC_DICT:
            METRIC_DICT[metric_name] = _build(metric_name)
        selected[metric_name] = METRIC_DICT[metric_name]
    return selected
def logmethod(f):
    """Decorator for methods: log '<method> in <Class> called' before delegating.

    Fix: the source contained a stray bare `(f)` line where the
    `@functools.wraps(f)` decorator evidently belonged; restored so the
    wrapper keeps f's __name__/__doc__ metadata.
    """
    import functools

    @functools.wraps(f)
    def wrapper(self, *args, **kwds):
        debug_log(('%s in %s called' % (f.__name__, self.__class__.__name__)))
        return f(self, *args, **kwds)
    return wrapper
@pytest.mark.gpu
def test_dropout_vec():
    """Vectorized (len-8) float16 dropout-style multiply on the GPU.

    Fix: both decorators were garbled in the source — a bare `.gpu` line
    (syntax error) and a missing `@dace.program` on `halftest`, without
    which `halftest.to_sdfg()` cannot work.
    """
    _config()

    @dace.program
    def halftest(A: dace.float16[N], mask: dace.float16[N]):
        out = np.ndarray([N], dace.float16)
        for i in dace.map[0:N]:
            with dace.tasklet:
                a << A[i]
                d << mask[i]
                o >> out[i]
                o = (a * d)
        return out
    A = np.random.rand(24).astype(np.float16)
    mask = np.random.randint(0, 2, size=[24]).astype(np.float16)
    sdfg: dace.SDFG = halftest.to_sdfg()
    sdfg.apply_gpu_transformations()
    # Exactly one map should be vectorizable with vector length 8.
    assert (sdfg.apply_transformations(Vectorization, dict(vector_len=8)) == 1)
    out = sdfg(A=A, mask=mask, N=24)
    assert np.allclose(out, (A * mask))
@numba.extending.overload_method(IndexedOptionType, '_index', inline='always')
def IndexedOption_index(builder):
    """Numba overload: expose `_index` as an always-inlined method.

    Fix: the decorator was garbled to a bare `.overload_method(...)` line
    (syntax error); restored the `@numba.extending` prefix — confirm the
    alias matches this file's numba import.
    """
    def getter(builder):
        return builder._index
    return getter
def test_NS2D(args):
    """Regression test for the 2D doubly-periodic Navier-Stokes solver.

    Runs the same problem under several dealiasing/optimization settings;
    the module-level `regression_test` callback checks each run's result.
    NOTE(review): mutates the global `config.params` between runs, so pass
    order matters.
    """
    config.update({'nu': 0.01, 'dt': 0.05, 'T': 10}, 'doublyperiodic')
    solver = get_solver(regression_test=regression_test, mesh='doublyperiodic', parse_args=args)
    context = solver.get_context()
    # Pass 1: default dealiasing.
    initialize(solver, **context)
    solve(solver, context)
    # Pass 2: 3/2-rule (padded) dealiasing.
    config.params.dealias = '3/2-rule'
    initialize(solver, **context)
    solve(solver, context)
    # Pass 3: 2/3-rule dealiasing with cython-optimized kernels; the solver
    # module is reloaded so the optimization setting takes effect.
    config.params.dealias = '2/3-rule'
    config.params.optimization = 'cython'
    importlib.reload(solver)
    initialize(solver, **context)
    solve(solver, context)
    # Pass 4: short run exercising result writing and checkpointing; the
    # regression check is disabled for this pass.
    config.params.write_result = 1
    config.params.checkpoint = 1
    config.params.dt = 0.01
    config.params.t = 0.0
    config.params.tstep = 0
    config.params.T = 0.04
    solver.regression_test = (lambda c: None)
    initialize(solver, **context)
    solve(solver, context)
def get_scores(tokens, model, device, tokenizer, sequence_length, slide_by):
    """Score every token via overlapping fixed-length windows.

    Converts `tokens` to ids, slides a window of `sequence_length` ids by
    `slide_by`, runs `model` over batches of windows, turns the 2-class
    logits into positive-class probabilities, and keeps the maximum
    probability each token received across all windows covering it.

    Returns a list indexed by token position with one probability per token.
    NOTE(review): assumes `model(input_ids, token_type_ids=None)` returns
    per-token 2-class logits of shape (batch, seq, 2) — confirm with caller.
    """
    chunk_list = list(chunks(tokens, 512))
    toks = list()
    for c in chunk_list:
        toks += tokenizer.convert_tokens_to_ids(c)
    test_sequences = list()
    test_labels_dummy = list()
    test_token_indices = list()
    idx = 0
    end_flag = False
    # Build overlapping windows; the last window is right-aligned so the
    # document tail is always fully covered.
    while (idx < len(toks)):
        if ((not end_flag) and ((idx + sequence_length) >= len(toks))):
            idx = (len(toks) - sequence_length)
            end_flag = True
        s = toks[idx:(idx + sequence_length)]
        test_sequences.append(s)
        test_labels_dummy.append([0 for _ in s])
        # Remember which original token position each window slot maps to.
        test_token_indices.append([elem for elem in range(idx, (idx + sequence_length))])
        idx += slide_by
        if end_flag:
            break
    batch_size = 32
    prediction_inputs = torch.tensor(test_sequences)
    prediction_labels_dummy = torch.tensor(test_labels_dummy)
    prediction_data = TensorDataset(prediction_inputs, prediction_labels_dummy)
    # Sequential sampling keeps predictions aligned with test_token_indices.
    prediction_sampler = SequentialSampler(prediction_data)
    prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size, num_workers=1)
    model.eval()
    predictions = list()
    for batch in prediction_dataloader:
        batch = tuple((t.to(device) for t in batch))
        (b_input_ids, b_labels_dummy) = batch
        with torch.no_grad():
            logits = model(b_input_ids, token_type_ids=None)
        logits = logits.detach().cpu().numpy()
        predictions.append(logits)
    # Flatten the (batch, window, position) nesting to one logit pair per slot.
    flat_preds = list()
    for batch in predictions:
        for sequence in batch:
            for probs in sequence:
                flat_preds.append(probs)
    # Manual 2-class softmax: probability of the positive class (index 1).
    flat_probabilities = list()
    for x in flat_preds:
        tmp0 = np.exp(x[0])
        tmp1 = np.exp(x[1])
        summ = (tmp0 + tmp1)
        flat_probabilities.append((tmp1 / summ))
    flat_token_indices = [item for sublist in test_token_indices for item in sublist]
    # Group probabilities by original token index (tokens occur in several
    # windows), then keep the maximum per token.
    d_probs = dict()
    for iterator in range(len(flat_token_indices)):
        index = flat_token_indices[iterator]
        if (index in d_probs):
            d_probs[index].append(flat_probabilities[iterator])
        else:
            d_probs[index] = [flat_probabilities[iterator]]
    new_probs = ([0] * (max(d_probs.keys()) + 1))
    for idx in d_probs.keys():
        new_probs[idx] = max(d_probs[idx])
    return new_probs
def convert_cityscapes_instance_only(data_dir, out_dir):
    """Convert Cityscapes gtFine polygon annotations to COCO instance JSON.

    Walks the annotation directory, builds COCO-style image/annotation/
    category records for the instance-level classes, and writes one JSON
    file per data set into `out_dir`.

    Bug fix: the output file was opened in binary mode ('wb') while
    json.dumps returns str, which raises TypeError under Python 3; the
    file is now opened in text mode.
    """
    sets = ['gtFine_val']
    ann_dirs = ['gtFine_trainvaltest/gtFine/val']
    json_name = 'instancesonly_filtered_%s.json'
    ends_in = '%s_polygons.json'
    img_id = 0
    ann_id = 0
    cat_id = 1
    category_dict = {}
    # Only these classes get instance annotations.
    category_instancesonly = ['person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle']
    for (data_set, ann_dir) in zip(sets, ann_dirs):
        print(('Starting %s' % data_set))
        ann_dict = {}
        images = []
        annotations = []
        ann_dir = os.path.join(data_dir, ann_dir)
        for (root, _, files) in os.walk(ann_dir):
            for filename in files:
                if filename.endswith((ends_in % data_set.split('_')[0])):
                    if ((len(images) % 50) == 0):
                        print(('Processed %s images, %s annotations' % (len(images), len(annotations))))
                    json_ann = json.load(open(os.path.join(root, filename)))
                    image = {}
                    image['id'] = img_id
                    img_id += 1
                    image['width'] = json_ann['imgWidth']
                    image['height'] = json_ann['imgHeight']
                    # Derive the RGB and instance-id image names from the
                    # polygon file name.
                    image['file_name'] = (filename[:(- len((ends_in % data_set.split('_')[0])))] + 'leftImg8bit.png')
                    image['seg_file_name'] = (filename[:(- len((ends_in % data_set.split('_')[0])))] + ('%s_instanceIds.png' % data_set.split('_')[0]))
                    images.append(image)
                    fullname = os.path.join(root, image['seg_file_name'])
                    objects = cs.instances2dict_with_polygons([fullname], verbose=False)[fullname]
                    for object_cls in objects:
                        if (object_cls not in category_instancesonly):
                            continue
                        for obj in objects[object_cls]:
                            if (obj['contours'] == []):
                                print('Warning: empty contours.')
                                continue
                            len_p = [len(p) for p in obj['contours']]
                            # Polygons need at least 3 points (6 coordinates).
                            if (min(len_p) <= 4):
                                print('Warning: invalid contours.')
                                continue
                            ann = {}
                            ann['id'] = ann_id
                            ann_id += 1
                            ann['image_id'] = image['id']
                            ann['segmentation'] = obj['contours']
                            if (object_cls not in category_dict):
                                category_dict[object_cls] = cat_id
                                cat_id += 1
                            ann['category_id'] = category_dict[object_cls]
                            ann['iscrowd'] = 0
                            ann['area'] = obj['pixelCount']
                            ann['bbox'] = bboxs_util.xyxy_to_xywh(segms_util.polys_to_boxes([ann['segmentation']])).tolist()[0]
                            annotations.append(ann)
        ann_dict['images'] = images
        categories = [{'id': category_dict[name], 'name': name} for name in category_dict]
        ann_dict['categories'] = categories
        ann_dict['annotations'] = annotations
        print(('Num categories: %s' % len(categories)))
        print(('Num images: %s' % len(images)))
        print(('Num annotations: %s' % len(annotations)))
        with open(os.path.join(out_dir, (json_name % data_set)), 'w') as outfile:
            outfile.write(json.dumps(ann_dict))
class WEBVIDDataset(BaseDataset):
    """WebVid video-text dataset (decord-based frame reading).

    Loads a tsv of metadata rows and serves augmented video/text pairs plus
    optional negative ("false") videos/texts for contrastive training.
    NOTE(review): assumes metadata column 0 is the caption and column 1 the
    video id — confirm against the tsv files.
    """

    def __init__(self, *args, split='', **kwargs):
        assert (split in ['train', 'val', 'test'])
        self.split = split
        self.metadata = None
        self.cut = 'jsfusion'
        if (split == 'train'):
            names = ['webvid_train']
        elif (split == 'val'):
            names = ['webvid_val']
        elif (split == 'test'):
            # No dedicated test split; validation data is reused.
            names = ['webvid_val']
        self._load_metadata()
        print(names, ': ', len(self.metadata), 'samples in total.')
        super().__init__(*args, **kwargs, names=names, text_column_name='caption')

    def _load_metadata(self):
        """Read the split's tsv metadata into a DataFrame."""
        metadata_dir = './meta_data/webvid'
        split_files = {'train': 'webvid_training_success_full.tsv', 'val': 'webvid_validation_success_full.tsv', 'test': 'webvid_validation_success_full.tsv'}
        target_split_fp = split_files[self.split]
        metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t')
        self.metadata = metadata

    def _get_video_path(self, sample):
        """Return (absolute, relative) path of the sample's .mp4 file."""
        rel_video_fp = (sample[1] + '.mp4')
        full_video_fp = os.path.join(self.data_dir, self.split, rel_video_fp)
        return (full_video_fp, rel_video_fp)

    def _get_caption(self, sample):
        # Column 0 holds the caption text.
        return sample[0]

    def get_raw_video(self, sample):
        """Decode self.num_frames frames; raise on unreadable videos."""
        (abs_fp, rel_fp) = self._get_video_path(sample)
        (videos, idxs, vlen) = read_frames_decord(abs_fp, self.num_frames, mode=self.split)
        if (videos is None):
            raise Exception('Invalid video!', rel_fp)
        else:
            return videos

    def get_video(self, index, sample):
        """Decode and augment the video for `sample`."""
        videos = self.get_raw_video(sample)
        videos_tensor = self.video_aug(videos, self.video_transform)
        return {'video': videos_tensor, 'vid_index': index, 'cap_index': index, 'raw_index': index}

    def get_false_video(self, rep):
        """Draw a random (negative) video for contrastive pairing."""
        random_index = random.randint(0, (len(self.metadata) - 1))
        sample = self.metadata.iloc[random_index]
        videos = self.get_raw_video(sample)
        videos_tensor = self.video_aug(videos, self.video_transform)
        return {f'false_video_{rep}': videos_tensor}

    def get_text(self, raw_index, sample):
        """Tokenize the caption, padded to max_text_len."""
        text = sample[0]
        encoding = self.tokenizer(text, padding='max_length', truncation=True, max_length=self.max_text_len, return_special_tokens_mask=True)
        return {'text': (text, encoding), 'vid_index': raw_index, 'cap_index': raw_index, 'raw_index': raw_index}

    def get_false_text(self, rep):
        """Draw a random (negative) caption for contrastive pairing."""
        random_index = random.randint(0, (len(self.metadata) - 1))
        sample = self.metadata.iloc[random_index]
        text = sample[0]
        encoding = self.tokenizer(text, truncation=True, max_length=self.max_text_len, return_special_tokens_mask=True)
        return {f'false_text_{rep}': (text, encoding)}

    def get_suite(self, index):
        """Assemble one example, retrying with random indices on decode errors."""
        result = None
        while (result is None):
            sample = self.metadata.iloc[index]
            try:
                ret = dict()
                ret.update(self.get_video(index, sample))
                if (not self.video_only):
                    txt = self.get_text(index, sample)
                    ret.update({'replica': (True if (txt['cap_index'] > 0) else False)})
                    ret.update(txt)
                for i in range(self.draw_false_video):
                    ret.update(self.get_false_video(i))
                for i in range(self.draw_false_text):
                    ret.update(self.get_false_text(i))
                result = True
            except Exception as e:
                # Broken/missing videos are common; log and resample.
                print(f'Error while read file idx {sample.name} in {self.names[0]} -> {e}')
                index = random.randint(0, (len(self.metadata) - 1))
        return ret

    def __len__(self):
        return len(self.metadata)

    def __getitem__(self, index):
        return self.get_suite(index)
def test_array_by_str_key():
    """A callable object can fill an array from a dict looked up by string key."""
    class Filler():
        def __init__(self):
            self.adict = dict(akey=(7.0 * np.ones((10,))))

        def __call__(self, A):
            # In-place assignment into the caller's array.
            A[...] = self.adict['akey']

    target = np.empty((10,))
    Filler()(target)
    assert np.allclose(7.0, target)
def run_all(sizes=None, output='.'):
    """Run the full suite of attention einsum benchmarks via runtest.

    Args:
        sizes: mapping from einsum index letter to its dimension size.
            Defaults to the standard attention benchmark sizes when None.
        output: directory passed to runtest as output_dir.
    """
    # Fix: the original used a mutable dict as the default argument value;
    # build the default per call instead.
    if sizes is None:
        sizes = {'b': 8, 'h': 16, 'i': 1024, 'j': 512, 'k': 512, 'p': 64, 'u': 4096, 'q': 3, 'v': 2}
    # (benchmark name, einsum expression) for every forward/backward kernel.
    cases = [
        ('Q', 'phi,ibj->phbj'),
        ('lin1', 'bji,ui->bju'),
        ('lin2', 'bju,iu->bji'),
        ('out', 'phi,phbj->bij'),
        ('QKT', 'phbk,phbj->hbjk'),
        ('gamma', 'phbk,hbjk->phbj'),
        ('QKV-fused', 'qphi,ibj->qphbj'),
        ('KV-fused', 'vphi,ibj->vphbj'),
        ('dWlin2', 'bji,bju->iu'),
        ('dXlin2', 'bji,iu->bju'),
        ('dWlin1', 'bju,bji->ui'),
        ('dXlin1', 'bju,ui->bji'),
        ('dWout', 'phbj,bij->phi'),
        ('dXout', 'phi,bij->phbj'),
        ('dX2gamma', 'phbj,hbjk->phbk'),
        ('dX1gamma', 'phbk,phbj->hbjk'),
        ('dX2QKT', 'phbj,hbjk->phbk'),
        ('dX1QKT', 'phbk,hbjk->phbj'),
        ('dWQ', 'ibj,phbj->phi'),
        ('dXQ', 'phi,phbj->ibj'),
        ('dWQK-fused', 'ibj,vphbj->vphi'),
        ('dXQK-fused', 'vphi,vphbj->ibj'),
        ('dWQKV-fused', 'ibj,qphbj->qphi'),
        ('dXQKV-fused', 'qphi,qphbj->ibj'),
    ]
    for name, expr in cases:
        runtest(name, expr, sizes=sizes, output_dir=output)
def create_diag_(A, diag):
    """Initialize square matrix A in-place as an antisymmetric matrix whose
    first superdiagonal carries `diag` at every other slot (the rest zero).

    Returns A for chaining.
    """
    dim = A.size(0)
    # Place the given values on even positions of the superdiagonal.
    upper = torch.zeros(dim - 1)
    upper[::2] = diag
    # M - M^T mirrors the superdiagonal as -diag on the subdiagonal.
    skew = torch.diag(upper, diagonal=1)
    skew = skew - skew.T
    with torch.no_grad():
        A.copy_(skew)
    return A
class CompositionSpeciesStructure(GenericSpeciesStructure):
    """Structure of a composition species: an F-structure on the blocks of a
    partition pi together with a G-structure on each block."""

    def __init__(self, parent, labels, pi, f, gs):
        # pi partitions `labels` into the blocks carrying the G-structures.
        self._partition = pi
        GenericSpeciesStructure.__init__(self, parent, labels, [f, gs])

    def __repr__(self):
        (f, gs) = self._list
        return ('F-structure: %s; G-structures: %s' % (repr(f), repr(gs)))

    def transport(self, perm):
        """Return this structure transported along the permutation `perm`."""
        (f, gs) = self._list
        pi = self._partition.transport(perm)
        f = f.change_labels(pi._list)
        # Fix: the relabeled G-structures were previously built into a
        # throwaway `_` and discarded, so transport returned the old gs
        # unchanged (compare change_labels below). Keep and use them.
        gs = [g.change_labels(part) for (g, part) in zip(gs, pi)]
        return self.__class__(self, self._labels, pi, f, gs)

    def change_labels(self, labels):
        """Return a copy of this structure relabeled by `labels`."""
        (f, gs) = self._list
        pi = self._partition.change_labels(labels)
        f = f.change_labels(list(pi))
        g = [g.change_labels(part) for (g, part) in zip(gs, pi)]
        return self.__class__(self, labels, pi, f, g)
def encoder(x, reuse=False):
    """Three-stage conv encoder: each stage is two stride-1 convs followed by
    a stride-2 downsampling conv; channels grow 16 -> 32 -> 64."""
    # (filters, stride, variable scope) for each conv, in order.
    conv_specs = [
        (16, 1, 'conv1_1'),
        (16, 1, 'conv1_2'),
        (16, 2, 'conv1_3'),
        (16, 1, 'conv2_1'),
        (16, 1, 'conv2_2'),
        (32, 2, 'conv2_3'),
        (32, 1, 'conv3_1'),
        (32, 1, 'conv3_2'),
        (64, 2, 'conv3_3'),
    ]
    with tf.name_scope('model_xyz'):
        for n_filters, stride, scope in conv_specs:
            x = tflearn.layers.conv.conv_2d(x, n_filters, (3, 3), strides=stride, activation='relu', weight_decay=1e-05, regularizer='L2', reuse=reuse, scope=scope)
    return x
class ValAndGradFn(Protocol[(M, X)]):
    """Callable protocol: evaluates a model on inputs and returns a
    (scalar value, gradient) pair, the gradient structured like the model."""

    def __call__(self, model: M, *inputs: X, **input_kwargs) -> Tuple[(float, M)]:
        ...
def autolabel(rects, counts):
    """Annotate each bar with its count, centered just above the bar top."""
    for idx, bar in enumerate(rects):
        bar_top = bar.get_height()
        x_center = bar.get_x() + bar.get_width() / 2.0
        # Place the label at 102% of the bar height so it clears the bar.
        plt.text(x_center, 1.02 * bar_top, f'{counts[idx]:.2f}', ha='center', va='bottom')
def evaluate(label_path, result_path, label_split_file, current_class=0, coco=False, score_thresh=(- 1)):
    """Evaluate KITTI-style detection results against ground-truth labels.

    Uses the COCO-style metric when `coco` is True, the official KITTI
    metric otherwise; detections below `score_thresh` are dropped first
    (when the threshold is positive).
    """
    dt_annos = kitti.get_label_annos(result_path)
    if score_thresh > 0:
        dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)
    val_image_ids = _read_imageset_file(label_split_file)
    gt_annos = kitti.get_label_annos(label_path, val_image_ids)
    scorer = get_coco_eval_result if coco else get_official_eval_result
    return scorer(gt_annos, dt_annos, current_class)
# NOTE(review): the bare '.mujoco' below looks like a mangled
# '@pytest.mark.mujoco' decorator lost in extraction — confirm upstream.
.mujoco
class TestRL2PPO(TfGraphTestCase):
    """Smoke tests for RL2PPO on the HalfCheetah-direction meta-RL task."""

    def setup_method(self):
        # Fresh env spec, GRU policy and linear baseline for every test.
        super().setup_method()
        self.max_path_length = 100
        self.meta_batch_size = 10
        self.episode_per_task = 4
        self.tasks = task_sampler.SetTaskSampler((lambda : RL2Env(env=normalize(HalfCheetahDirEnv()))))
        self.env_spec = RL2Env(env=normalize(HalfCheetahDirEnv())).spec
        self.policy = GaussianGRUPolicy(env_spec=self.env_spec, hidden_dim=64, state_include_action=False)
        self.baseline = LinearFeatureBaseline(env_spec=self.env_spec)

    # NOTE(review): '.timeout(120)' appears to be a mangled
    # '@pytest.mark.timeout(120)' decorator — confirm upstream.
    .timeout(120)
    def test_rl2_ppo_pendulum(self):
        # One epoch of training should exceed a loose return threshold.
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            algo = RL2PPO(rl2_max_path_length=self.max_path_length, meta_batch_size=self.meta_batch_size, task_sampler=self.tasks, env_spec=self.env_spec, policy=self.policy, baseline=self.baseline, discount=0.99, gae_lambda=0.95, lr_clip_range=0.2, stop_entropy_gradient=True, entropy_method='max', policy_ent_coeff=0.02, center_adv=False, max_path_length=(self.max_path_length * self.episode_per_task))
            runner.setup(algo, self.tasks.sample(self.meta_batch_size), sampler_cls=LocalSampler, n_workers=self.meta_batch_size, worker_class=RL2Worker, worker_args=dict(n_paths_per_trial=self.episode_per_task))
            last_avg_ret = runner.train(n_epochs=1, batch_size=((self.episode_per_task * self.max_path_length) * self.meta_batch_size))
            assert (last_avg_ret > (- 40))

    def test_rl2_ppo_pendulum_meta_test(self):
        # Same smoke test but with a MetaEvaluator attached.
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            meta_evaluator = MetaEvaluator(test_task_sampler=self.tasks, n_exploration_traj=10, n_test_rollouts=10, max_path_length=self.max_path_length, n_test_tasks=1)
            algo = RL2PPO(rl2_max_path_length=self.max_path_length, meta_batch_size=self.meta_batch_size, task_sampler=self.tasks, env_spec=self.env_spec, policy=self.policy, baseline=self.baseline, discount=0.99, gae_lambda=0.95, lr_clip_range=0.2, optimizer_args=dict(batch_size=32, max_epochs=10), stop_entropy_gradient=True, entropy_method='max', policy_ent_coeff=0.02, center_adv=False, max_path_length=(self.max_path_length * self.episode_per_task), meta_evaluator=meta_evaluator, n_epochs_per_eval=10)
            runner.setup(algo, self.tasks.sample(self.meta_batch_size), sampler_cls=LocalSampler, n_workers=self.meta_batch_size, worker_class=RL2Worker)
            last_avg_ret = runner.train(n_epochs=1, batch_size=((self.episode_per_task * self.max_path_length) * self.meta_batch_size))
            assert (last_avg_ret > (- 40))

    def test_rl2_ppo_pendulum_exploration_policy(self):
        # Exploration policy parameters must be settable and read back.
        with LocalTFRunner(snapshot_config, sess=self.sess):
            algo = RL2PPO(rl2_max_path_length=self.max_path_length, meta_batch_size=self.meta_batch_size, task_sampler=self.tasks, env_spec=self.env_spec, policy=self.policy, baseline=self.baseline, discount=0.99, gae_lambda=0.95, lr_clip_range=0.2, optimizer_args=dict(batch_size=32, max_epochs=10), stop_entropy_gradient=True, entropy_method='max', policy_ent_coeff=0.02, center_adv=False, max_path_length=(self.max_path_length * self.episode_per_task))
            exploration_policy = algo.get_exploration_policy()
            params = exploration_policy.get_param_values()
            new_params = np.zeros_like(params)
            exploration_policy.set_param_values(new_params)
            assert np.array_equal(new_params, exploration_policy.get_param_values())

    def test_rl2_ppo_pendulum_adapted_policy(self):
        # The adapted policy exposes (params, hidden state) round-trippable
        # through set_param_values/get_param_values.
        with LocalTFRunner(snapshot_config, sess=self.sess):
            algo = RL2PPO(rl2_max_path_length=self.max_path_length, meta_batch_size=self.meta_batch_size, task_sampler=self.tasks, env_spec=self.env_spec, policy=self.policy, baseline=self.baseline, discount=0.99, gae_lambda=0.95, lr_clip_range=0.2, optimizer_args=dict(batch_size=32, max_epochs=10), stop_entropy_gradient=True, entropy_method='max', policy_ent_coeff=0.02, center_adv=False, max_path_length=(self.max_path_length * self.episode_per_task))
            exploration_policy = algo.get_exploration_policy()
            adapted_policy = algo.adapt_policy(exploration_policy, [])
            (params, hidden) = adapted_policy.get_param_values()
            expected_new_params = np.zeros_like(params)
            expected_hidden = np.zeros_like(hidden)
            adapted_policy.set_param_values((expected_new_params, expected_hidden))
            (new_params, new_hidden) = adapted_policy.get_param_values()
            assert np.array_equal(expected_new_params, new_params)
            assert np.array_equal(expected_hidden, new_hidden)

    def test_rl2_ppo_pendulum_wrong_worker(self):
        # Constructing with flatten_input=False must raise ValueError.
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            with pytest.raises(ValueError):
                algo = RL2PPO(rl2_max_path_length=self.max_path_length, meta_batch_size=self.meta_batch_size, task_sampler=self.tasks, env_spec=self.env_spec, policy=self.policy, baseline=self.baseline, discount=0.99, gae_lambda=0.95, lr_clip_range=0.2, optimizer_args=dict(batch_size=32, max_epochs=10), stop_entropy_gradient=True, entropy_method='max', policy_ent_coeff=0.02, center_adv=False, max_path_length=(self.max_path_length * self.episode_per_task), flatten_input=False)
                runner.setup(algo, self.tasks.sample(self.meta_batch_size), sampler_cls=LocalSampler, n_workers=self.meta_batch_size)
                runner.train(n_epochs=10, batch_size=((self.episode_per_task * self.max_path_length) * self.meta_batch_size))
def test_complex_with_nan_and_inf():
    """JSON encoding of complex arrays must route nan/±inf parts through the
    configured replacement strings (a nan in either part wins over inf)."""
    values = np.array([(1.1 + 0.1j), 2.2, 3.3, (np.nan + (1j * np.nan)), 5.5, (- np.inf), 7.7, (np.inf + (1j * np.inf)), 9.9])
    content = ak.contents.NumpyArray(values)
    encoded = ak.operations.to_json(content, complex_record_fields=('r', 'i'), nan_string='Not a number', posinf_string='Inf', neginf_string='-Inf')
    expected = '[{"r":1.1,"i":0.1},{"r":2.2,"i":0.0},{"r":3.3,"i":0.0},{"r":"Not a number","i":"Not a number"},{"r":5.5,"i":0.0},{"r":"-Inf","i":0.0},{"r":7.7,"i":0.0},{"r":"Not a number","i":"Inf"},{"r":9.9,"i":0.0}]'
    assert encoded == expected
# Restored: these decorators appeared as bare '.parametrize(...)' tokens,
# apparently mangled '@pytest.mark.parametrize' lines.
@pytest.mark.parametrize('dtype', [np.float64, np.int64, np.uint8, None])
@pytest.mark.parametrize('like_dtype', [np.float64, np.int64, np.uint8, None])
def test_zeros_like(dtype, like_dtype):
    """zeros_like on a typetracer must preserve shape and resolve the dtype
    to like_dtype, falling back to the source dtype when like_dtype is None."""
    array = ak.contents.numpyarray.NumpyArray(np.array([99, 88, 77, 66, 66], dtype=dtype))
    full = ak.zeros_like(array.to_typetracer(), dtype=like_dtype, highlevel=False)
    assert full.to_typetracer().shape == array.shape
    # Fix: the original `(a == like_dtype) or array.dtype` was always truthy
    # (precedence bug), so the dtype was never actually checked.
    assert full.to_typetracer().dtype == (like_dtype or array.dtype)
def passages2text(passages: Union[(str, list, tuple)]) -> str:
    """Render retrieved passages as a single display string.

    A string passes through unchanged; an empty sequence becomes 'N/A';
    a single passage is rendered bare; multiple passages are numbered
    '[1] ...', one per line.
    """
    if isinstance(passages, str):
        return passages
    assert (type(passages) in [list, tuple])
    if not passages:
        return 'N/A'
    if len(passages) == 1:
        return f'{passages[0]}'
    numbered = (f'[{pos + 1}] {entry}' for pos, entry in enumerate(passages))
    return '\n'.join(numbered)
class Generator(nn.Module):
    """Encoder/decoder image generator with style-conditioned decoding and
    optional mask-guided high-pass skip connections (when w_hpf > 0).

    NOTE(review): ResBlk/AdainResBlk/HighPass/Attention are project modules
    not visible here; comments describe only what this code shows.
    """

    def __init__(self, img_size=256, style_dim=64, max_conv_dim=512, w_hpf=1):
        super().__init__()
        # Base channel width shrinks as image size grows: 2^14 / img_size.
        dim_in = ((2 ** 14) // img_size)
        self.img_size = img_size
        self.from_rgb = nn.Conv2d(3, dim_in, 3, 1, 1)
        self.encode = nn.ModuleList()
        self.decode = nn.ModuleList()
        self.to_rgb = nn.Sequential(nn.InstanceNorm2d(dim_in, affine=True), nn.LeakyReLU(0.2), nn.Conv2d(dim_in, 3, 1, 1, 0))
        self.attention = Attention(style_dim)
        # Number of down/up-sampling stages; one extra when high-pass
        # skip connections are enabled.
        repeat_num = (int(np.log2(img_size)) - 4)
        if (w_hpf > 0):
            repeat_num += 1
        for _ in range(repeat_num):
            dim_out = min((dim_in * 2), max_conv_dim)
            self.encode.append(ResBlk(dim_in, dim_out, normalize=True, downsample=True))
            # Decoder blocks are prepended so they mirror the encoder order.
            self.decode.insert(0, AdainResBlk(dim_out, dim_in, style_dim, w_hpf=w_hpf, upsample=True))
            dim_in = dim_out
        for _ in range(2):
            # Two bottleneck blocks at constant width on each side.
            self.encode.append(ResBlk(dim_out, dim_out, normalize=True))
            self.decode.insert(0, AdainResBlk(dim_out, dim_out, style_dim, w_hpf=w_hpf))
        if (w_hpf > 0):
            device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
            self.hpf = HighPass(w_hpf, device)

    def forward(self, x, s, masks=None):
        """Translate image batch x conditioned on style code s; `masks`
        (when given) gates high-pass skip features at 32/64/128 resolution."""
        x = self.from_rgb(x)
        cache = {}
        for block in self.encode:
            # Cache intermediate feature maps at the skip resolutions.
            if ((masks is not None) and (x.size(2) in [32, 64, 128])):
                cache[x.size(2)] = x
            x = block(x)
        for block in self.decode:
            x = block(x, s)
            if ((masks is not None) and (x.size(2) in [32, 64, 128])):
                # masks[0] is used at the 32px scale, masks[1] at the others.
                mask = (masks[0] if (x.size(2) in [32]) else masks[1])
                mask = F.interpolate(mask, size=x.size(2), mode='bilinear')
                # Add the high-pass-filtered, mask-gated encoder feature.
                x = (x + self.hpf((mask * cache[x.size(2)])))
        return self.to_rgb(x)
def get_controller(model_space, session, data_description_len=3, layer_embedding_sharing=None, use_ppo_loss=False, is_training=True):
    """Build a ZeroShotController pinned to CPU.

    PPO mode uses more on-policy iterations per update; evaluation mode
    sharpens sampling (lower temperature, no tanh constant).
    """
    description_config = {
        'length': data_description_len,
        'hidden_layer': {'units': 16, 'activation': 'relu'},
        'regularizer': {'l1': 1e-08},
    }
    pi_iters = 100 if use_ppo_loss is True else 20
    sample_temperature = 1.0 if is_training is True else 0.5
    tanh_const = 1.5 if is_training is True else None
    with tf.device('/cpu:0'):
        controller = ZeroShotController(
            data_description_config=description_config,
            share_embedding=layer_embedding_sharing,
            model_space=model_space,
            session=session,
            with_skip_connection=False,
            skip_weight=None,
            lstm_size=128,
            lstm_num_layers=1,
            kl_threshold=0.1,
            train_pi_iter=pi_iters,
            optim_algo='adam',
            temperature=sample_temperature,
            tanh_constant=tanh_const,
            buffer_type='MultiManager',
            buffer_size=10,
            batch_size=10,
            use_ppo_loss=use_ppo_loss,
            rescale_advantage_by_reward=True,
        )
    return controller
class BasicUpdateBlock(nn.Module):
    """Recurrent flow-update block: motion encoder + separable ConvGRU +
    flow head, plus a mask head whose output feeds upsampling downstream."""

    def __init__(self, args, hidden_dim=128, input_dim=128):
        super(BasicUpdateBlock, self).__init__()
        self.args = args
        self.encoder = BasicMotionEncoder(args)
        # GRU input is the context features concatenated with motion features.
        self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=(128 + hidden_dim))
        self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
        # 64*9 output channels — presumably per-pixel weights for convex
        # upsampling over 3x3 neighborhoods; confirm against the caller.
        self.mask = nn.Sequential(nn.Conv2d(128, 256, 3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(256, (64 * 9), 1, padding=0))

    def forward(self, net, inp, corr, flow, upsample=True):
        """One update step; returns (new hidden state, upsample mask, flow delta).

        Note: `upsample` is accepted but unused in this block.
        """
        motion_features = self.encoder(flow, corr)
        inp = torch.cat([inp, motion_features], dim=1)
        net = self.gru(net, inp)
        delta_flow = self.flow_head(net)
        # The 0.25 factor scales the mask logits (constant from the code).
        mask = (0.25 * self.mask(net))
        return (net, mask, delta_flow)
def _get_info(cls_or_fn):
if isinstance(cls_or_fn, type):
if hasattr(cls_or_fn.__init__, '_autoargs_info'):
return cls_or_fn.__init__._autoargs_info
return {}
else:
if hasattr(cls_or_fn, '_autoargs_info'):
return cls_or_fn._autoargs_info
return {} |
class SawyerPlateSlideV2Policy(Policy):
    """Scripted policy for the plate-slide task: align above the puck,
    descend to it, then push it toward the shelf x-position."""

    # NOTE(review): the original source carried a bare `_fully_parsed` token
    # here — apparently a mangled decorator (e.g. `@assert_fully_parsed`)
    # lost in extraction; confirm upstream.
    @staticmethod
    def _parse_obs(obs):
        """Split the flat observation vector into named components.

        Fix: declared @staticmethod — it is called as `self._parse_obs(obs)`
        with a single positional argument, which would raise a TypeError for
        a bound instance method taking only `obs`.
        """
        return {'hand_pos': obs[:3], 'puck_pos': obs[3:6], 'shelf_x': obs[(- 3)], 'unused_info': obs[[6, 7, 8, 10, 11]]}

    def get_action(self, obs):
        """Proportional move toward the desired position; gripper stays open."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=10.0)
        action['grab_effort'] = (- 1.0)
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        """Staged target: hover above the puck, descend, then push to shelf.

        Fix: declared @staticmethod for the same reason as _parse_obs.
        """
        pos_curr = o_d['hand_pos']
        # Offset to the grasp point slightly behind and above the puck.
        pos_puck = (o_d['puck_pos'] + np.array([0.0, (- 0.055), 0.03]))
        aligned_with_puck = (np.linalg.norm((pos_curr[:2] - pos_puck[:2])) <= 0.03)
        if (not aligned_with_puck):
            # Stage 1: move over the puck at a safe height.
            return (pos_puck + np.array([0.0, 0.0, 0.1]))
        elif (abs((pos_curr[2] - pos_puck[2])) > 0.04):
            # Stage 2: descend onto the puck.
            return pos_puck
        else:
            # Stage 3: slide toward the shelf x-position.
            return np.array([o_d['shelf_x'], 0.9, pos_puck[2]])
def compute_sample_covariance(centered_data, sample_size, name):
    """Unbiased sample covariance of pre-centered data (X^T X / (n-1)),
    with an all-zero result replaced by a tiny constant matrix so that
    downstream ops stay well-conditioned."""
    scale = 1.0 / (sample_size - 1.0)
    covariance = tf.matmul(scale * centered_data, centered_data, transpose_a=True, transpose_b=False)
    # Guard against an exactly-zero covariance matrix.
    epsilon_fill = tf.fill(tf.shape(covariance), 1e-10)
    is_all_zero = tf.equal(tf.reduce_sum(tf.abs(covariance)), 0)
    covariance = tf.where(is_all_zero, epsilon_fill, covariance)
    # Debug print hook (project helper); passes the tensor through.
    covariance = tf_print(covariance, ['compute_sample_covariance', name, covariance])
    return covariance
def process_trace(args):
    """Parse one packet-trace file into fixed-length model inputs.

    `args` is a (dir_name, trace_path) pair; each trace line is
    '<time>\t<direction>'. Returns (dir_seq, time_seq, metadata, label)
    where the sequences are truncated/zero-padded to 5000 packets and the
    label is 0 for names without '-', otherwise leading-number + 1.
    """
    dir_name, trace_path = args[0], args[1]
    with open(os.path.join(dir_name, trace_path), 'r') as trace_file:
        lines = trace_file.readlines()
    max_len = 5000
    dir_seq = np.zeros(max_len, dtype=np.int8)
    time_seq = np.zeros(max_len, dtype=np.float32)
    label = 0 if '-' not in trace_path else int(trace_path.split('-')[0]) + 1
    # Timestamp of the last packet = total trace duration.
    total_time = float(lines[(- 1)].split('\t')[0])
    total_incoming = 0
    total_outgoing = 0
    for packet_num, raw_line in enumerate(lines):
        fields = raw_line.split('\t')
        curr_time = float(fields[0])
        curr_dir = np.sign(int(fields[1]))
        if packet_num < max_len:
            dir_seq[packet_num] = curr_dir
            time_seq[packet_num] = curr_time
        # Counts cover the whole trace, not just the first 5000 packets.
        if curr_dir == 1:
            total_outgoing += 1
        elif curr_dir == (- 1):
            total_incoming += 1
    total_packets = total_incoming + total_outgoing
    if total_packets == 0:
        metadata = np.zeros(7, dtype=np.float32)
    else:
        metadata = np.array([total_packets, total_incoming, total_outgoing, (total_incoming / total_packets), (total_outgoing / total_packets), total_time, (total_time / total_packets)], dtype=np.float32)
    return (dir_seq, time_seq, metadata, label)
class MilSimPush(Dataset):
    """Dataset descriptor for 'mil_sim_push' with fixed sample dimensions
    and configurable train/validation split sizes."""

    def __init__(self, training_size=693, validation_size=76):
        # 125x125 RGB frames, 20-dim state, 7-dim actions, 100-step horizon.
        super().__init__(name='mil_sim_push', img_shape=(125, 125, 3), state_size=20, action_size=7, time_horizon=100, training_size=training_size, validation_size=validation_size)
def get_dist_transform_image(image):
    """Distance transform of the inverted Canny edge map: each pixel gets
    its L2 distance to the nearest detected edge."""
    edge_map = cv2.Canny(image, 100, 200)
    # Invert so edges become zeros (distance-transform seeds).
    inverted = 255 - edge_map
    return cv2.distanceTransform(inverted, cv2.DIST_L2, 0)
def mk_auto_soundness_tempvar(ctx: LeanGenContext, instr: LeanPreprocessedTempVar):
    """Emit the Lean soundness-proof steps for one Cairo `tempvar`.

    Proves the asserts attached to the tempvar, introduces a hypothesis
    `htv_<name>` equating the variable with its (possibly cast) memory
    expression, then records the rewrites for later use.
    """
    ctx.add_main('-- tempvar')
    base_name = instr.identifier.identifier.name
    # Discharge the attached asserts, collecting rewrite hypotheses and the
    # temporary hypothesis names they introduced.
    (temp_rewrites, temp_names, _) = process_assert_block(ctx=ctx, asserts=instr.asserts, temp_name_prefix=('tv_' + base_name))
    (var_rw, var_type, var_cast) = create_var(ctx, instr)
    # Non-felt variables remember their cast so later rewrites can coerce.
    if (not isinstance(var_type, TypeFelt)):
        ctx.rewrite_types[var_rw] = (var_type, var_cast)
    name = name_with_sub(base_name, ctx.name_sub)
    ctx.add_main(f"have htv_{name}: {name} = {((var_cast + ' mem ') if var_cast else '')}_, {{")
    ctx.indent()
    expr_simps = LeanExprSimps()
    prepare_expr_simps(simps=expr_simps, ctx=ctx, expr=instr.expr, div_to_field=False)
    ctx.outdent()
    assert_simp = gen_tempvar_assignment(ctx=ctx, var_size=ctx.func.struct_defs.get_type_size(var_type), var_rw=var_rw, assert_rw=temp_rewrites, expr_simps=expr_simps)
    ctx.indent()
    for line in assert_simp:
        ctx.add_main(line)
    ctx.outdent()
    # Drop the helper hypotheses and normalize the variable rewrite.
    ctx.add_main(f"clear {' '.join(temp_names)},")
    ctx.add_main(f'try {{ dsimp at {var_rw} }}, try {{ arith_simps at {var_rw} }},')
    ctx.rewrites += [var_rw, f'htv_{name}']
    # NOTE(review): this repeats the identical rewrite_types assignment made
    # above — confirm whether one of the two is redundant.
    if (not isinstance(var_type, TypeFelt)):
        ctx.rewrite_types[var_rw] = (var_type, var_cast)
    add_var_range_checked(ctx=ctx, expr=instr.expr, var_rw=var_rw)
def custom_draw_geometry_with_optimization(mesh_list, handles, targets, res_path_imgs):
    """Animate an optimization sequence in Open3D: after an initial camera
    setup frame, each animation tick swaps in the next mesh of `mesh_list`.

    Function attributes are used as mutable state shared with the callback.
    """
    custom_draw_geometry_with_optimization.index = (- 1)
    custom_draw_geometry_with_optimization.mesh_list = mesh_list
    custom_draw_geometry_with_optimization.rotation_step = 20
    os.makedirs(res_path_imgs, exist_ok=True)
    # Start by displaying the first mesh with computed normals.
    out_mesh_o3d = o3d.io.read_triangle_mesh(mesh_list[0])
    out_mesh_o3d.compute_vertex_normals()
    custom_draw_geometry_with_optimization.curr_mesh = out_mesh_o3d

    def rotate_view(vis):
        # Per-frame callback: frame 0 configures the camera; subsequent
        # frames replace the displayed mesh with the next in the list.
        glb = custom_draw_geometry_with_optimization
        ctr = vis.get_view_control()
        if (glb.index < 0):
            # NOTE(review): all-zero front/lookat/up vectors and zoom 0 look
            # like placeholder camera values — confirm the intended view.
            ctr.set_front(np.array([0., 0., (- 0.)]))
            ctr.set_lookat(np.array([(- 0.), (- 0.), 0.]))
            ctr.set_up(np.array([(- 0.), 0., 0.]))
            ctr.set_zoom(0.)
            # Step the field of view toward the 5-degree target.
            curr_fov = ctr.get_field_of_view()
            targ_fov = 5
            if (curr_fov != targ_fov):
                if (curr_fov > targ_fov):
                    delta = (- (curr_fov - targ_fov))
                else:
                    delta = (targ_fov - curr_fov)
                assert ((curr_fov + delta) == targ_fov)
                ctr.change_field_of_view(step=delta)
                print('bf', curr_fov, 'delta', delta, 'af', ctr.get_field_of_view())
        if ((glb.index >= 0) and (glb.index < len(glb.mesh_list))):
            print('\tUpdate geometry')
            # Swap the current mesh for the next one without resetting view.
            new_mesh = o3d.io.read_triangle_mesh(mesh_list[glb.index])
            new_mesh.compute_vertex_normals()
            vis.remove_geometry(glb.curr_mesh, reset_bounding_box=False)
            vis.add_geometry(new_mesh, reset_bounding_box=False)
            glb.curr_mesh = new_mesh
        glb.index = (glb.index + 1)
        # Returning False keeps the animation running without forced redraw.
        return False

    o3d.visualization.draw_geometries_with_animation_callback([out_mesh_o3d, handles, targets], rotate_view, height=1024, width=1024)
def register_Ns3Ipv6OptionHeader_methods(root_module, cls):
    """Auto-generated binding registration for ns3::Ipv6OptionHeader:
    declares its constructors and methods for the code generator."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::Ipv6OptionHeader const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetAlignment', 'ns3::Ipv6OptionHeader::Alignment', [], is_const=True, is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetType', 'uint8_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetLength', 'void', [param('uint8_t', 'length')])
    cls.add_method('SetType', 'void', [param('uint8_t', 'type')])
    return
class BeitConfig(PretrainedConfig):
    """Configuration for BEiT models: transformer dimensions, image/patch
    geometry, position-bias options, and segmentation-head settings."""

    model_type = 'beit'

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, is_encoder_decoder=False, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        # NOTE(review): `is_encoder_decoder` is accepted but never stored
        # here — presumably consumed by PretrainedConfig via **kwargs
        # elsewhere or simply vestigial; confirm. Also note the mutable
        # list defaults (out_indices, pool_scales) are stored as-is.
        super().__init__(**kwargs)
        # Core transformer hyperparameters.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Image/patch geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Masked-image-modeling and position-bias options.
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # Semantic-segmentation head settings.
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
def test_threshold_synthetic_policy_continuous():
    """Invalid context inputs must raise ValueError; a valid 2-D context
    yields one continuous action per round (a 1-D array)."""
    # A 1-D ndarray is rejected.
    with pytest.raises(ValueError):
        threshold_synthetic_policy_continuous(context=np.array([1.0, 1.0]))
    # A plain list is rejected.
    with pytest.raises(ValueError):
        threshold_synthetic_policy_continuous(context=[1.0, 1.0])
    n_rounds, dim_context = 10, 3
    actions = threshold_synthetic_policy_continuous(context=np.ones([n_rounds, dim_context]))
    assert actions.shape[0] == n_rounds
    assert actions.ndim == 1
class Exit(RuntimeError):
    """Control-flow exception requesting termination with a given exit status."""
    # Restrict instances to the single exit_code attribute.
    __slots__ = ('exit_code',)

    def __init__(self, code=0):
        # Process exit status to report; 0 means success.
        self.exit_code = code
def conv1x1(in_planes, out_planes):
    """Bias-free 1x1 convolution (pointwise channel projection)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=1,
        padding=0,
        bias=False,
    )
def register_Ns3Ipv4L3ClickProtocol_methods(root_module, cls):
    """Auto-generated binding registration for ns3::Ipv4L3ClickProtocol:
    only the default and copy constructors are declared."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4L3ClickProtocol const &', 'arg0')])
    return
def train_example(loggers, loaders, model, optimizer, scheduler, datasets, **kwargs):
    """Training loop: train on split 0 every epoch, evaluate the remaining
    splits on eval epochs, checkpoint on checkpoint epochs.

    NOTE(review): `loaders` and `**kwargs` are accepted but unused here —
    presumably required by a train-loop registry signature; confirm.
    """
    start_epoch = 0
    if cfg.train.auto_resume:
        start_epoch = load_ckpt(model, optimizer, scheduler)
    if (start_epoch == cfg.optim.max_epoch):
        # Resumed at the final epoch: the range below is empty, so only the
        # cleanup code at the bottom runs.
        logging.info('Checkpoint found, Task already done')
    else:
        logging.info('Start from epoch {}'.format(start_epoch))
    num_splits = len(loggers)
    for cur_epoch in range(start_epoch, cfg.optim.max_epoch):
        train_epoch(loggers[0], model, optimizer, scheduler, datasets[0], train=True)
        loggers[0].write_epoch(cur_epoch)
        if is_eval_epoch(cur_epoch):
            # Splits 1..n-1 are evaluation-only passes (train=False).
            for i in range(1, num_splits):
                train_epoch(loggers[i], model, optimizer, scheduler, datasets[i], train=False)
        if is_ckpt_epoch(cur_epoch):
            save_ckpt(model, optimizer, scheduler, cur_epoch)
    for logger in loggers:
        logger.close()
    if cfg.train.ckpt_clean:
        # Optionally remove intermediate checkpoints, keeping the final one.
        clean_ckpt()
    logging.info('Task done, results saved in {}'.format(cfg.out_dir))
class TestJaccard(unittest.TestCase):
    """Unit tests for the jaccard similarity helper."""

    def setUp(self):
        # No shared fixtures are required.
        pass

    def test_similarity(self):
        """Jaccard of disjoint/empty sets is 0, identical sets 1, half-overlap 0.5."""
        full = [1, 2, 3, 4]
        empty = []
        half = [1, 2]
        disjoint = [5, 6]
        self.assertAlmostEqual(jaccard(full, empty), 0.0)
        self.assertAlmostEqual(jaccard(full, full), 1.0)
        self.assertAlmostEqual(jaccard(full, half), 0.5)
        self.assertAlmostEqual(jaccard(half, disjoint), 0.0)
def top_n_error_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, axis=None, n=1):
    """Backward pass for the top-n error function.

    Top-n error is an evaluation metric, not a differentiable function, so
    no gradient is defined.

    Raises:
        NotImplementedError: always.
    """
    # Fix: the original bound grad_inputs[0] / inputs[0] to locals that were
    # never used before the unconditional raise; the dead code is removed.
    raise NotImplementedError('top_n_error_backward is not implemented.')
class Partition2(nn.Module):
    """Auto-generated pipeline-parallel partition: owns the `bn1` layer
    (followed by a LeakyReLU) and pins itself to a fixed device."""

    # Scopes of the original-model layers assigned to this partition.
    LAYER_SCOPES = ['Net/BatchNorm1d[bn1]']
    # Standalone tensors (parameters/buffers) owned by this partition.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:2'):
        super().__init__()
        # Register each assigned layer under a positional alias l_<idx>.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Register owned tensors as parameters (p_<i>) or buffers (b_<i>).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Nesting spec consumed by `unflatten` to rebuild structured inputs.
        self.input_structure = [[1, 1, 1, 1]]
        # Alias -> original layer name, for state-dict translation.
        self.lookup = {'l_0': 'bn1'}
        self.to(self.device)

    def forward(self, *args):
        # Rebuild the structured input (a list of four tensors) from the
        # flat argument tuple.
        x0 = unflatten(args, self.input_structure)[0]
        # Concatenate along the last dim, then batch-norm + LeakyReLU.
        t_0 = torch.cat(x0, dim=(- 1))
        t_0 = self.l_0(t_0)
        t_0 = torch.nn.functional.leaky_relu(t_0, negative_slope=0.01, inplace=False)
        return (t_0,)

    # The following delegate to module-level helpers that understand the
    # alias/lookup scheme used by the partitioning framework.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, state):
        return load_state_dict(self, state)

    def named_parameters(self, recurse=True):
        return named_parameters(self, recurse=recurse)

    def named_buffers(self, recurse=True):
        return named_buffers(self, recurse=recurse)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class SVGPModel(gpytorch.models.ApproximateGP):
    """Sparse variational GP: zero mean, scaled ARD-RBF kernel, and a
    natural variational distribution over learnable inducing points."""

    def __init__(self, initial_inducing, initial_lengthscale):
        num_inducing = initial_inducing.size(0)
        var_dist = gpytorch.variational.NaturalVariationalDistribution(num_inducing)
        var_strategy = gpytorch.variational.VariationalStrategy(self, initial_inducing, var_dist, learn_inducing_locations=True)
        super().__init__(var_strategy)
        self.mean_module = gpytorch.means.ZeroMean()
        # One lengthscale per input dimension (ARD), seeded from the data.
        base_kernel = gpytorch.kernels.RBFKernel(ard_num_dims=initial_inducing.size(1))
        self.covar_module = gpytorch.kernels.ScaleKernel(base_kernel)
        self.covar_module.base_kernel.lengthscale = initial_lengthscale

    def forward(self, x):
        """Return the latent GP prior at x as a MultivariateNormal."""
        return gpytorch.distributions.MultivariateNormal(self.mean_module(x), self.covar_module(x))
# NOTE(review): the bare '.parametrize(...)' lines below look like mangled
# '@pytest.mark.parametrize' decorators lost in extraction — confirm.
.parametrize('seed', [412])
.parametrize('batch_size', [2, 4])
.parametrize('grid_size', [2, 8])
.parametrize('feature_size', [4])
.parametrize('m, M', [((- 1.0), 1.0)])
def test_query_on_voxel_double_backward(seed, batch_size, grid_size, feature_size, m, M):
    """Check that the composite voxel query agrees with the fused
    F.cosine_query_on_voxel on first- and second-order gradients."""
    nn.clear_parameters()
    ctx = get_extension_context('cudnn', device_id='0')
    nn.set_default_context(ctx)
    B = batch_size
    G = grid_size
    D = feature_size
    rng = np.random.RandomState(seed)
    # Random query points in [m, M)^3 and a small random feature grid.
    query_data = (m + (rng.rand(batch_size, 3) * (M - m)))
    initializer_data = (rng.randn(G, G, G, D) * 0.01)
    # Reference path: composite implementation.
    query_data0 = query_data.astype(np.float32)
    initializer_data0 = initializer_data.astype(np.float32)
    query0 = nn.Variable.from_numpy_array(query_data0).apply(need_grad=True)
    feature0 = nn.parameter.get_parameter_or_create('F0', (G, G, G, D), initializer_data0)
    output0 = query_on_voxel_composite(query0, feature0, m, M)
    # Path under test: fused implementation.
    query_data1 = query_data.astype(np.float32)
    initializer_data1 = initializer_data.astype(np.float32)
    query1 = nn.Variable.from_numpy_array(query_data1).apply(need_grad=True)
    feature1 = nn.parameter.get_parameter_or_create('F1', (G, G, G, D), initializer_data1)
    output1 = F.cosine_query_on_voxel(query1, feature1, ([m] * 3), ([M] * 3))
    # First backward: gradients w.r.t. the query must agree.
    ograd = rng.randn(*output0.shape).astype(np.float32)
    ograd0 = nn.Variable.from_numpy_array(ograd).apply(need_grad=True, persistent=True)
    ograd1 = nn.Variable.from_numpy_array(ograd).apply(need_grad=True, persistent=True)
    grad_query0 = nn.grad([output0], [query0], grad_outputs=[ograd0])[0]
    grad_query1 = nn.grad([output1], [query1], grad_outputs=[ograd1])[0]
    F.sink(*[grad_query0]).forward(clear_no_need_grad=True)
    F.sink(*[grad_query1]).forward(clear_no_need_grad=True)
    np.testing.assert_allclose(grad_query0.d, grad_query1.d, atol=1e-06)
    # Reset all gradients before the double-backward comparison.
    query0.grad.fill(0)
    query1.grad.fill(0)
    feature0.grad.fill(0)
    feature1.grad.fill(0)
    ograd0.grad.fill(0)
    ograd1.grad.fill(0)
    # Second backward: differentiate the squared norm of the first gradient;
    # the incoming gradients (ograd*.g) and feature gradients must agree.
    o0 = F.sum((grad_query0 ** 2))
    o1 = F.sum((grad_query1 ** 2))
    o0.forward(clear_no_need_grad=True)
    o1.forward(clear_no_need_grad=True)
    ograd = rng.randn()
    o0.backward(ograd, clear_buffer=True)
    o1.backward(ograd, clear_buffer=True)
    np.testing.assert_allclose(ograd0.g, ograd1.g, atol=0.001)
    np.testing.assert_allclose(feature0.g, feature1.g, atol=0.001)
class Basic2DBlock(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU with stride 1 and 'same' padding."""

    def __init__(self, in_planes, out_planes, kernel_size):
        super(Basic2DBlock, self).__init__()
        # Padding that preserves spatial size for odd kernel sizes.
        same_pad = (kernel_size - 1) // 2
        layers = [
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=1, padding=same_pad),
            nn.BatchNorm2d(out_planes),
            nn.ReLU(True),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)
class MAMLPPO(MAML):
    """MAML meta-learner using PPO as the inner adaptation algorithm: the
    inner loop adapts with PPO/Adam at `inner_lr`, the outer loop
    meta-optimizes with Adam at `outer_lr`."""

    def __init__(self, env, policy, baseline, inner_lr=_Default(0.1), outer_lr=0.001, lr_clip_range=0.5, max_path_length=100, discount=0.99, gae_lambda=1.0, center_adv=True, positive_adv=False, policy_ent_coeff=0.0, use_softplus_entropy=False, stop_entropy_gradient=False, entropy_method='no_entropy', meta_batch_size=20, num_grad_updates=1):
        # The inner PPO performs a single training pass per adaptation epoch.
        inner_algo = PPO(env.spec, policy, baseline, optimizer=torch.optim.Adam, policy_lr=inner_lr, lr_clip_range=lr_clip_range, max_path_length=max_path_length, num_train_per_epoch=1, discount=discount, gae_lambda=gae_lambda, center_adv=center_adv, positive_adv=positive_adv, policy_ent_coeff=policy_ent_coeff, use_softplus_entropy=use_softplus_entropy, stop_entropy_gradient=stop_entropy_gradient, entropy_method=entropy_method)
        super().__init__(inner_algo=inner_algo, env=env, policy=policy, baseline=baseline, meta_optimizer=torch.optim.Adam, meta_batch_size=meta_batch_size, inner_lr=inner_lr, outer_lr=outer_lr, num_grad_updates=num_grad_updates)
class WeightedIntegerVectors(Parent, UniqueRepresentation):
    """Integer vectors with a fixed weighted sum.

    An element is a vector ``x`` with ``sum(x[i] * weight[i]) == n``.
    """

    def __classcall_private__(cls, n=None, weight=None):
        """Normalize the constructor arguments.

        A single argument may be either the target ``n`` (an integer, with
        weights required separately) or the weight iterable itself, in
        which case the parent of all weighted vectors is returned.

        NOTE(review): this hook is conventionally a @staticmethod under the
        classcall metaclass — the decorator may have been lost in
        extraction; confirm upstream.
        """
        if (weight is None):
            if (n is None):
                raise ValueError('the weights must be specified')
            if (n in ZZ):
                weight = (n,)
            else:
                weight = tuple(n)
            n = None
        weight = tuple(weight)
        if (n is None):
            return WeightedIntegerVectors_all(weight)
        return super().__classcall__(cls, n, weight)

    def __init__(self, n, weight):
        # Target weighted sum and the (fixed) weight tuple.
        self._n = n
        self._weights = weight
        Parent.__init__(self, category=FiniteEnumeratedSets())

    # Elements are plain integer vectors.
    Element = IntegerVector

    def _element_constructor_(self, lst):
        """Coerce ``lst`` into this parent, validating membership."""
        if isinstance(lst, IntegerVector):
            if (lst.parent() is self):
                return lst
        if (lst not in self):
            raise ValueError(('cannot convert %s into %s' % (lst, self)))
        return self.element_class(self, lst)

    def _repr_(self):
        return ('Integer vectors of %s weighted by %s' % (self._n, list(self._weights)))

    def __contains__(self, x):
        """Membership: a list-like of integers, of the right length, whose
        weighted sum equals ``self._n``."""
        if (not isinstance(x, (list, IntegerVector, Permutation))):
            return False
        if (len(self._weights) != len(x)):
            return False
        s = 0
        for (i, val) in enumerate(x):
            if ((not isinstance(val, (int, Integer))) and (val not in ZZ)):
                return False
            s += (x[i] * self._weights[i])
        return (s == self._n)

    def _recfun(self, n, l):
        """Recursively yield vectors (indexed in the reversed order of the
        weight list ``l``) whose weighted sum is ``n``."""
        # Peel off the last weight and enumerate its multiplicity greedily.
        w = l[(- 1)]
        l = l[:(- 1)]
        if (l == []):
            # Base case: one weight left; only an exact multiple works.
            d = (int(n) // int(w))
            if ((n % w) == 0):
                (yield [d])
            return
        for d in range((int(n) // int(w)), (- 1), (- 1)):
            for x in self._recfun((n - (d * w)), l):
                (yield (x + [d]))

    def __iter__(self):
        """Iterate over all vectors, enumerating against the weights sorted
        in decreasing order and then undoing the sort via the
        standardization permutation."""
        if (not self._weights):
            # No weights: only the empty vector, and only when n == 0.
            if (self._n == 0):
                (yield self.element_class(self, []))
            return
        perm = Word(self._weights).standard_permutation()
        perm = [(len(self._weights) - i) for i in perm]
        l = [x for x in sorted(self._weights, reverse=True)]
        for x in iterator_fast(self._n, l):
            (yield self.element_class(self, [x[i] for i in perm]))
class TestDetectionConfig(unittest.TestCase):
    """Round-trip serialization tests for DetectionConfig."""

    def test_serialization(self):
        """Deserializing then re-serializing a config must be lossless."""
        configs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'configs')
        config_path = os.path.join(configs_dir, 'detection_cvpr_2019' + '.json')
        with open(config_path) as f:
            cfg = json.load(f)
        detect_cfg = DetectionConfig.deserialize(cfg)
        # Dict -> object -> dict must reproduce the original dict.
        self.assertEqual(cfg, detect_cfg.serialize())
        # Object -> JSON text -> object must reproduce the original object.
        round_tripped = json.loads(json.dumps(detect_cfg.serialize()))
        self.assertEqual(detect_cfg, DetectionConfig.deserialize(round_tripped))
class BaseOverSampler(BaseSampler):
    """Base class for over-sampling algorithms.

    NOTE(review): not intended to be used directly; concrete samplers
    derive from it and reuse the shared attributes below.
    """

    # Tag consumed by the BaseSampler machinery to identify the family.
    _sampling_type = 'over-sampling'

    # Shared docstring fragment substituted into subclasses' docstrings;
    # documents the accepted forms of the ``sampling_strategy`` parameter.
    _sampling_strategy_docstring = "sampling_strategy : float, str, dict or callable, default='auto'\n Sampling information to resample the data set.\n\n - When ``float``, it corresponds to the desired ratio of the number of\n samples in the minority class over the number of samples in the\n majority class after resampling. Therefore, the ratio is expressed as\n :math:`\\alpha_{os} = N_{rm} / N_{M}` where :math:`N_{rm}` is the\n number of samples in the minority class after resampling and\n :math:`N_{M}` is the number of samples in the majority class.\n\n .. warning::\n ``float`` is only available for **binary** classification. An\n error is raised for multi-class classification.\n\n - When ``str``, specify the class targeted by the resampling. The\n number of samples in the different classes will be equalized.\n Possible choices are:\n\n ``'minority'``: resample only the minority class;\n\n ``'not minority'``: resample all classes but the minority class;\n\n ``'not majority'``: resample all classes but the majority class;\n\n ``'all'``: resample all classes;\n\n ``'auto'``: equivalent to ``'not majority'``.\n\n - When ``dict``, the keys correspond to the targeted classes. The\n values correspond to the desired number of samples for each targeted\n class.\n\n - When callable, function taking ``y`` and returns a ``dict``. The keys\n correspond to the targeted classes. The values correspond to the\n desired number of samples for each class.\n ".strip()

    # scikit-learn style parameter constraints used by parameter validation.
    _parameter_constraints: dict = {'sampling_strategy': [Interval(numbers.Real, 0, 1, closed='right'), StrOptions({'auto', 'minority', 'not minority', 'not majority', 'all'}), Mapping, callable], 'random_state': ['random_state']}
def create_pipeline_configuration(DEBUG=False, batch_size=64):
    """Build the static 16-stage T5 pipeline-parallel partition description.

    Parameters
    ----------
    DEBUG : bool
        When True every stage is placed on 'cpu' instead of its cuda device.
    batch_size : int
        Desired batch size; every batched tensor shape recorded in the
        (trace-time, batch=64) config below is rewritten to use it.

    Returns
    -------
    dict
        The configuration with all batched shapes rebatched to ``batch_size``.
    """
    config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (StatelessEmbedding, Linear, T5Block, Dropout, CrossEntropyLoss, T5LayerNorm), 'model_inputs': {'attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [0, 1, 2, 3, 4, 5, 6, 7]}, 'decoder_attention_mask': {'shape': torch.Size([64, 1, 4, 4]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [8, 9, 10, 11, 12, 13, 14, 15]}, 'decoder_input_ids': {'shape': torch.Size([64, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'input_ids': {'shape': torch.Size([64, 64]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'inverted_encoder_attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [8, 9, 10, 11, 12, 13, 14, 15]}, 'lm_labels': {'shape': torch.Size([64, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [15]}}, 'model_outputs': {'T5ForConditionalGeneration/CrossEntropyLoss[lm_loss]': {'shape': torch.Size([1]), 'dtype': torch.float32, 'is_batched': False, 'created_by': 15}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_input_ids': {'shape': torch.Size([64, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([64, 64]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_1': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[2]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 
    'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 15}, 1: {'stage_cls': Partition1, 'inputs': {'attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_1': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[2]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_2': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[5]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 14}, 2: {'stage_cls': Partition2, 'inputs': {'attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_2': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[5]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_3': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': 
    [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[8]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 13}, 3: {'stage_cls': Partition3, 'inputs': {'attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_3': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[8]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_4': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[11]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 12}, 4: {'stage_cls': Partition4, 'inputs': {'attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_4': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[11]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_5': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 
    'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[14]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 11}, 5: {'stage_cls': Partition5, 'inputs': {'attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_5': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[14]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_6': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[17]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 10}, 6: {'stage_cls': Partition6, 'inputs': {'attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_6': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[17]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_7': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 
    'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[20]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 9}, 7: {'stage_cls': Partition7, 'inputs': {'attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_7': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[20]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[23]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 8}, 8: {'stage_cls': Partition8, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([64, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[23]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 7}, 'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 
    'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_9': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_9': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[2]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}}, 'devices': [('cpu' if DEBUG else 'cuda:8')], 'stage_depth': 7}, 9: {'stage_cls': Partition9, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([64, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_9': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_9': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[2]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_10': {'shape': 
    torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_10': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}}, 'devices': [('cpu' if DEBUG else 'cuda:9')], 'stage_depth': 6}, 10: {'stage_cls': Partition10, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([64, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_10': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_10': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_11': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': 
    True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_11': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}}, 'devices': [('cpu' if DEBUG else 'cuda:10')], 'stage_depth': 5}, 11: {'stage_cls': Partition11, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([64, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_11': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_11': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_12': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 
    'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_12': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[11]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}}, 'devices': [('cpu' if DEBUG else 'cuda:11')], 'stage_depth': 4}, 12: {'stage_cls': Partition12, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([64, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_12': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_12': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[11]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_13': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_13': 
    {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[14]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}}, 'devices': [('cpu' if DEBUG else 'cuda:12')], 'stage_depth': 3}, 13: {'stage_cls': Partition13, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([64, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_13': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_13': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[14]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_14': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_14': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 
    'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}}, 'devices': [('cpu' if DEBUG else 'cuda:13')], 'stage_depth': 2}, 14: {'stage_cls': Partition14, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([64, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_14': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_14': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_15': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_15': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 
    'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[20]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}}, 'devices': [('cpu' if DEBUG else 'cuda:14')], 'stage_depth': 1}, 15: {'stage_cls': Partition15, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([64, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'lm_labels': {'shape': torch.Size([64, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_15': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_15': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[20]': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}}, 'outputs': {'T5ForConditionalGeneration/CrossEntropyLoss[lm_loss]': {'shape': torch.Size([1]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:15')], 'stage_depth': 0}}}
    batch_dim = config['batch_dim']

    def _rebatch(d):
        # Replace the recorded (trace-time) batch dimension with the
        # requested batch_size; non-batched tensors are left untouched.
        if d['is_batched']:
            shape = d['shape']
            d['shape'] = torch.Size((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])

    # Rewrite shapes for the model-level inputs/outputs ...
    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        _rebatch(d)
    # ... and for every stage's inputs/outputs.
    for s in config['stages'].values():
        for d in chain(s['inputs'].values(), s['outputs'].values()):
            _rebatch(d)
    return config
class MarkedYAMLError(YAMLError):
    """YAML error that carries optional context/problem descriptions and
    their source marks, rendered one per line by ``__str__``."""

    def __init__(self, context=None, context_mark=None, problem=None, problem_mark=None, note=None):
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note

    def __str__(self):
        """Render all non-None pieces, one per line."""
        parts = []
        if self.context is not None:
            parts.append(self.context)
        # The context mark is redundant when it points at the same spot as
        # the problem mark; only show it when it adds information.
        show_context_mark = self.context_mark is not None and (
            self.problem is None
            or self.problem_mark is None
            or self.context_mark.name != self.problem_mark.name
            or self.context_mark.line != self.problem_mark.line
            or self.context_mark.column != self.problem_mark.column
        )
        if show_context_mark:
            parts.append(str(self.context_mark))
        if self.problem is not None:
            parts.append(self.problem)
        if self.problem_mark is not None:
            parts.append(str(self.problem_mark))
        if self.note is not None:
            parts.append(self.note)
        return '\n'.join(parts)
def get_all_in_parens(sequence):
    """Return every parenthesized sub-sequence of ``sequence``, outer first.

    ``sequence`` is a sequence of tokens (a trailing ';' token is ignored).
    Each returned entry is the content of one pair of parentheses; nested
    pairs are reported after their enclosing pair.

    Fixes over the previous version:
    - an empty sequence no longer raises IndexError;
    - a sequence like ``( a ) ( b )`` is no longer treated as one group
      (the old code assumed the first '(' matched the last ')').
    """
    if not sequence:
        return []
    if sequence[-1] == ';':
        sequence = sequence[:-1]
    if '(' not in sequence:
        return []
    if sequence and sequence[0] == '(' and sequence[-1] == ')':
        # Strip the outer pair only when the opening '(' really closes at
        # the final token.
        depth = 0
        wraps_whole = False
        for idx, token in enumerate(sequence):
            if token == '(':
                depth += 1
            elif token == ')':
                depth -= 1
                if depth == 0:
                    wraps_whole = (idx == len(sequence) - 1)
                    break
        if wraps_whole:
            in_parens = sequence[1:-1]
            return [in_parens] + get_all_in_parens(in_parens)
    # Otherwise collect each top-level '(...)' group and recurse into it.
    paren_subseqs = []
    current_seq = []
    num_parens = 0
    for token in sequence:
        if token == '(':
            num_parens += 1
            current_seq.append(token)
        elif num_parens > 0:
            current_seq.append(token)
            if token == ')':
                num_parens -= 1
                if num_parens == 0:
                    paren_subseqs.append(current_seq)
                    current_seq = []
    all_subseqs = []
    for subseq in paren_subseqs:
        all_subseqs.extend(get_all_in_parens(subseq))
    return all_subseqs
def test():
    """Smoke test: run one random 32x32 RGB input through preactresnet34
    and print the output size."""
    model = preactresnet34()
    inputs = Variable(torch.randn(1, 3, 32, 32))
    outputs = model(inputs)
    print(outputs.size())
class FindPeaks(Benchmark):
    """Benchmark ``find_peaks`` on an electrocardiogram signal for a range
    of ``distance`` constraints (asv-style benchmark class)."""

    # Single benchmark axis: the minimal peak distance passed to find_peaks.
    param_names = ['distance']
    params = [[None, 8, 64, 512, 4096]]

    def setup(self, distance):
        # The signal does not depend on `distance`; load it fresh per run
        # so loading cost is excluded from the timed section.
        self.x = electrocardiogram()

    def time_find_peaks(self, distance):
        # Timed body: only the peak search itself.
        find_peaks(self.x, distance=distance)
def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
    """Dispatch a reduction: prefer ``obj``'s own ``method`` when ``obj`` is
    not a plain ndarray, otherwise fall back to ``ufunc.reduce``."""
    # Drop keyword arguments still set to the np._NoValue sentinel.
    passkwargs = {}
    for key, value in kwargs.items():
        if value is not np._NoValue:
            passkwargs[key] = value
    if type(obj) is not mu.ndarray:
        try:
            reduction = getattr(obj, method)
        except AttributeError:
            # No matching method: fall through to the generic reduce below.
            pass
        else:
            # NOTE(review): dtype is forwarded only when explicitly given —
            # presumably because some duck-typed reduction methods do not
            # accept a dtype argument; confirm against numpy's original.
            if dtype is not None:
                return reduction(axis=axis, dtype=dtype, out=out, **passkwargs)
            return reduction(axis=axis, out=out, **passkwargs)
    return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
def _take_balanced_sample(reordered, pointer, own_counts, other_counts, candidates, max_gap):
    """Advance ``pointer`` through ``reordered`` until one sample is accepted.

    A sample is rejected when any of its words already leads the other
    side's count for that word by ``max_gap`` or more.  An accepted sample
    is appended to ``candidates`` and its words tallied into ``own_counts``.
    Returns the updated pointer (== len(reordered) if nothing was accepted).
    """
    while pointer < len(reordered):
        sample = reordered[pointer]
        pointer += 1
        word_set = get_word_set_of_sample(sample)
        if any(own_counts[word] - other_counts[word] >= max_gap for word in word_set):
            continue  # would over-represent some word on this side; skip
        candidates.append(sample)
        for word in word_set:
            own_counts[word] += 1
        break
    return pointer


def lexical_diversity(sorted_A: List[str], sorted_B: List[str], top_p: float = 0.2, num_samples: int = 4, max_gap=None):
    """Select up to ``num_samples`` lexically balanced samples per side.

    Samples are drawn alternately from the reordered top-``top_p`` of each
    list; a sample is skipped when it would push any word's count more than
    ``max_gap`` ahead of the other side's count for that word.

    Fix: the B-side loop previously lacked the early ``break`` the A-side
    had once the gap check failed (same result, wasted word checks); both
    sides now share one helper, removing the asymmetry and duplication.
    """
    # Work on copies so the helpers cannot mutate the caller's lists.
    sorted_A, sorted_B = deepcopy(sorted_A), deepcopy(sorted_B)
    a_candidates = []
    b_candidates = []
    if max_gap is None:
        max_gap = (num_samples // 4) + 1
    reordered_A = re_order(sorted_A, top_p)
    reordered_B = re_order(sorted_B, top_p)
    a_words_count, b_words_count = defaultdict(int), defaultdict(int)
    cur_A_pointer, cur_B_pointer = 0, 0
    for _ in range(num_samples):
        cur_A_pointer = _take_balanced_sample(reordered_A, cur_A_pointer, a_words_count, b_words_count, a_candidates, max_gap)
        cur_B_pointer = _take_balanced_sample(reordered_B, cur_B_pointer, b_words_count, a_words_count, b_candidates, max_gap)
    return (a_candidates, b_candidates)
def agent(config: Config, workspace: Workspace) -> Agent:
    """Build a minimal Agent with an empty goal list and a fresh command
    registry, backed by the JSON-file memory backend."""
    ai_config = AIConfig(ai_name='Base', ai_role='A base AI', ai_goals=[])
    registry = CommandRegistry()
    ai_config.command_registry = registry
    # Select the file-backed memory before initializing it.
    config.set_memory_backend('json_file')
    memory = get_memory(config, init=True)
    prompt = ai_config.construct_full_prompt()
    return Agent(
        ai_name=ai_config.ai_name,
        memory=memory,
        command_registry=registry,
        ai_config=ai_config,
        config=config,
        next_action_count=0,
        system_prompt=prompt,
        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
        workspace_directory=workspace.root,
    )
def create_model_from_pretrained(model_name: str, pretrained: str, precision: str='fp32', device: Union[(str, torch.device)]='cpu', jit: bool=False, force_quick_gelu: bool=False, force_custom_clip: bool=False, force_patch_dropout: Optional[float]=None, return_transform: bool=True, image_mean: Optional[Tuple[(float, ...)]]=None, image_std: Optional[Tuple[(float, ...)]]=None, cache_dir: Optional[str]=None, is_frozen: bool=False):
    """Instantiate a model from a pretrained tag or checkpoint path.

    Optionally freezes all parameters and builds the evaluation image
    transform. Returns the model alone when ``return_transform`` is False,
    otherwise a ``(model, preprocess)`` tuple.

    Raises:
        RuntimeError: if ``pretrained`` is neither a known pretrained config
            for ``model_name`` nor an existing checkpoint file.
    """
    known_cfg = is_pretrained_cfg(model_name, pretrained)
    if not (known_cfg or os.path.exists(pretrained)):
        raise RuntimeError(f'{pretrained} is not a valid pretrained cfg or checkpoint for {model_name}. Use open_clip.list_pretrained() to find one.')
    model = create_model(
        model_name,
        pretrained,
        precision=precision,
        device=device,
        jit=jit,
        force_quick_gelu=force_quick_gelu,
        force_custom_clip=force_custom_clip,
        force_patch_dropout=force_patch_dropout,
        cache_dir=cache_dir,
    )
    if is_frozen:
        # Freeze every parameter so the model acts as a fixed feature extractor.
        for weight in model.parameters():
            weight.requires_grad = False
    if not return_transform:
        return model
    # Fall back to the visual tower's own normalisation stats when not supplied.
    mean = image_mean or getattr(model.visual, 'image_mean', None)
    std = image_std or getattr(model.visual, 'image_std', None)
    preprocess = image_transform(model.visual.image_size, is_train=False, mean=mean, std=std)
    return (model, preprocess)
class SketchEncoder(nn.Module):
    """Transformer encoder over quantised sketch tokens.

    Each position's embedding is the sum of a flattened pixel-index
    embedding and separate x/y coordinate embeddings; the sequence then
    passes through a stack of improved Transformer encoder layers with
    sinusoidal positional encoding.
    """

    def __init__(self):
        super(SketchEncoder, self).__init__()
        embed_dim = ENCODER_CONFIG['embed_dim']
        self.embed_dim = embed_dim
        # Separate embedding tables for x and y coordinate tokens
        # (2**CAD_BIT quantisation levels plus SKETCH_PAD special tokens).
        coord_vocab = ((2 ** CAD_BIT) + SKETCH_PAD)
        self.coord_embed_x = Embedder(coord_vocab, embed_dim)
        self.coord_embed_y = Embedder(coord_vocab, embed_dim)
        # Flattened pixel-index embedding over the full 2D grid.
        self.pixel_embeds = Embedder((((2 ** CAD_BIT) * (2 ** CAD_BIT)) + SKETCH_PAD), embed_dim)
        self.pos_embed = PositionalEncoding(max_len=MAX_CAD, d_model=embed_dim)
        layer = TransformerEncoderLayerImproved(
            d_model=embed_dim,
            nhead=ENCODER_CONFIG['num_heads'],
            dim_feedforward=ENCODER_CONFIG['hidden_dim'],
            dropout=ENCODER_CONFIG['dropout_rate'],
        )
        self.encoder = TransformerEncoder(layer, ENCODER_CONFIG['num_layers'], LayerNorm(embed_dim))

    def forward(self, pixel, coord, mask):
        """Encode a batch of sketches.

        Args:
            pixel: integer pixel-index tokens; assumed (batch, seq) — TODO confirm.
            coord: integer coordinate tokens with x/y stacked in the last dim.
            mask: key-padding mask forwarded to the Transformer encoder.

        Returns:
            Encoded sequence, batch dimension first.
        """
        xy_embed = (self.coord_embed_x(coord[(..., 0)]) + self.coord_embed_y(coord[(..., 1)]))
        token_embed = (self.pixel_embeds(pixel) + xy_embed)
        # The encoder works sequence-first; transpose in and back out.
        seq_first = self.pos_embed(token_embed.transpose(0, 1))
        encoded = self.encoder(src=seq_first, src_key_padding_mask=mask)
        return encoded.transpose(0, 1)
class WarmupConfig:
    """Hyper-parameters for the learning-rate warmup phase.

    All fields are class-level defaults that instances may override.
    NOTE(review): field semantics inferred from names — confirm against the
    scheduler that consumes this config.
    """

    epoch: int = 1
    multiplier: int = 1
    buffer_epoch: int = 0
    min_lr: float = 0.0
    mode: str = 'fix'
    peak_lr: float = 0.0001
    start_from_zero: bool = True
def plot_things(lines, scatters, filename):
    """Render stacked subplots of line and scatter series and save to a file.

    Args:
        lines: list of rows; row i is a list of (x, y, label, color) tuples
            drawn as lines on subplot i.
        scatters: list of rows; row i is a list of (x, y, label, marker)
            tuples drawn as scatter points on subplot i.
        filename: path the figure is written to.
    """
    # Fix: size the grid for the longer of the two inputs — the original used
    # nrows=len(lines) only, so extra scatter rows raised IndexError — and use
    # squeeze=False so a single row still yields an indexable axes array
    # (plt.subplots(nrows=1) otherwise returns a bare Axes object).
    nrows = max(len(lines), len(scatters), 1)
    (fig, axes) = plt.subplots(nrows=nrows, squeeze=False, figsize=(12, 12))
    ax = axes.ravel()
    for (i, line_row) in enumerate(lines):
        for (x, y, label, color) in line_row:
            ax[i].plot(x, y, label=label, c=color)
    for (i, scatter_row) in enumerate(scatters):
        for (x, y, label, marker) in scatter_row:
            ax[i].scatter(x, y, label=label, marker=marker)
    # Only decorate the rows that actually received data.
    for i in range(max(len(scatters), len(lines))):
        ax[i].legend(loc='upper right')
        ax[i].grid()
    plt.tight_layout()
    plt.savefig(filename, bbox_inches='tight')
    plt.close(fig)
def _test_compiled_functions():
    """Check that calling the same taichi function with differently-sized
    ndarray arguments reuses a single compiled function."""
    # NOTE(review): `func` carries a ti.types.ndarray annotation but no visible
    # @ti.kernel decorator in this chunk — confirm the decorator isn't applied
    # elsewhere (get_num_compiled_functions implies kernel compilation).
    def func(a: ti.types.ndarray(ti.types.vector(n=10, dtype=ti.i32))):
        for i in range(5):
            for j in range(4):
                a[i][(j * j)] = (j * j)
    # First call with a taichi Vector ndarray of 5 rows.
    v = ti.Vector.ndarray(10, ti.i32, 5)
    func(v)
    assert (impl.get_runtime().get_num_compiled_functions() == 1)
    # Second call with a numpy array of a different leading dimension: the
    # compiled-function count must stay at 1 (no recompilation).
    v = np.zeros((6, 10), dtype=np.int32)
    func(v)
    assert (impl.get_runtime().get_num_compiled_functions() == 1)
def run_validate(args):
    """Run the VAL plan validator over the discovered plan files.

    ``args.filenames`` must contain either one PDDL file (the task; the
    domain file is located automatically) or two (domain then task).
    Returns ``(0, True)`` on success or when there are no plans to check;
    exits the driver on input or critical errors.
    """
    logging.info('Running validate.')
    filenames = args.filenames
    if len(filenames) == 1:
        task = filenames[0]
        domain = util.find_domain_filename(task)
    elif len(filenames) == 2:
        (domain, task) = filenames
    else:
        returncodes.exit_with_driver_input_error('validate needs one or two PDDL input files.')
    plan_files = list(PlanManager(args.plan_file).get_existing_plans())
    if not plan_files:
        print('Not running validate since no plans found.')
        return (0, True)
    try:
        call.check_call(
            'validate',
            [VALIDATE, domain, task] + plan_files,
            time_limit=args.validate_time_limit,
            memory_limit=args.validate_memory_limit,
        )
    except OSError as err:
        # ENOENT means the validate executable itself is missing from PATH;
        # any other OS error is treated as a critical driver failure.
        if err.errno == errno.ENOENT:
            returncodes.exit_with_driver_input_error('Error: {} not found. Is it on the PATH?'.format(VALIDATE))
        else:
            returncodes.exit_with_driver_critical_error(err)
    else:
        return (0, True)
def main():
    """Train a GraphSAINT-sampled GraphSAGE model on OGBN-Products.

    Parses hyper-parameters from the command line, builds split masks,
    optionally restricts sampling to the training subgraph (inductive),
    trains for the configured runs/epochs, and prints per-run and aggregate
    accuracy statistics.
    """
    parser = argparse.ArgumentParser(description='OGBN-Products (GraphSAINT)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--inductive', action='store_true')
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--batch_size', type=int, default=20000)
    parser.add_argument('--walk_length', type=int, default=3)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--num_steps', type=int, default=30)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--eval_steps', type=int, default=2)
    parser.add_argument('--runs', type=int, default=10)
    args = parser.parse_args()
    print(args)
    device = (f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu')
    device = torch.device(device)
    dataset = PygNodePropPredDataset(name='ogbn-products')
    split_idx = dataset.get_idx_split()
    data = dataset[0]
    # Materialise boolean train/valid/test masks from the split indices.
    for (key, idx) in split_idx.items():
        mask = torch.zeros(data.num_nodes, dtype=torch.bool)
        mask[idx] = True
        data[f'{key}_mask'] = mask
    sampler_data = data
    if args.inductive:
        # Inductive setting: sample random walks from the training subgraph only.
        sampler_data = to_inductive(data)
    loader = GraphSAINTRandomWalkSampler(sampler_data, batch_size=args.batch_size, walk_length=args.walk_length, num_steps=args.num_steps, sample_coverage=0, save_dir=dataset.processed_dir)
    model = SAGE(data.x.size((- 1)), args.hidden_channels, dataset.num_classes, args.num_layers, args.dropout).to(device)
    # Full-graph neighbour sampler used only for evaluation.
    subgraph_loader = NeighborSampler(data.edge_index, sizes=[(- 1)], batch_size=4096, shuffle=False, num_workers=12)
    evaluator = Evaluator(name='ogbn-products')
    logger = Logger(args.runs, args)
    for run in range(args.runs):
        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        for epoch in range(1, (1 + args.epochs)):
            loss = train(model, loader, optimizer, device)
            if ((epoch % args.log_steps) == 0):
                print(f'Run: {(run + 1):02d}, Epoch: {epoch:02d}, Loss: {loss:.4f}')
            if ((epoch > 9) and ((epoch % args.eval_steps) == 0)):
                result = test(model, data, evaluator, subgraph_loader, device)
                # Fix: record each evaluation exactly once. The original also
                # called logger.add_result a second time after the print,
                # double-counting every result and skewing the statistics.
                logger.add_result(run, result)
                (train_acc, valid_acc, test_acc) = result
                print(f'Run: {(run + 1):02d}, Epoch: {epoch:02d}, Train: {(100 * train_acc):.2f}%, Valid: {(100 * valid_acc):.2f}% Test: {(100 * test_acc):.2f}%')
        logger.print_statistics(run)
    logger.print_statistics()
def parse_args(*arg_descriptors):
    """Decorator factory for symbolic functions: each positional argument is
    converted via ``_parse_arg`` according to its descriptor.

    The descriptors are also stashed on the decorated function as
    ``_arg_descriptors``. At most one keyword argument is accepted, and it
    must be ``_outputs``.
    """
    def decorator(fn):
        fn._arg_descriptors = arg_descriptors

        def wrapper(g, *args, **kwargs):
            # Every positional arg beyond `g` needs a matching descriptor.
            assert len(arg_descriptors) >= len(args)
            parsed = [_parse_arg(value, desc) for (value, desc) in zip(args, arg_descriptors)]
            # Only the special '_outputs' keyword is allowed through.
            assert len(kwargs) <= 1
            if kwargs:
                assert '_outputs' in kwargs
            return fn(g, *parsed, **kwargs)

        try:
            wrapper = wraps(fn)(wrapper)
        except Exception:
            # Some callables cannot be introspected; keep the bare wrapper.
            pass
        return wrapper
    return decorator
def compute_s_test(n_gpu: int, device: torch.device, model: torch.nn.Module, test_inputs: Dict[(str, torch.Tensor)], train_data_loaders: List[torch.utils.data.DataLoader], params_filter: Optional[List[str]], weight_decay: Optional[float], weight_decay_ignores: Optional[List[str]], damp: float, scale: float, num_samples: Optional[int]=None, verbose: bool=True) -> List[torch.FloatTensor]:
    """Estimate s_test = H^{-1} v with the LiSSA stochastic recursion.

    ``v`` is the gradient of the loss on ``test_inputs``; each step refines
    the estimate using a Hessian-vector product on one training batch:
    ``h_t = v + (1 - damp) * h_{t-1} - HVP(h_{t-1}) / scale``.

    Args:
        num_samples: cap on recursion steps per data loader; ``None``
            consumes every batch of every loader.
        verbose: show running estimate norms in the progress bar.

    Returns:
        The final estimate divided by ``scale``, one tensor per parameter.

    Raises:
        ValueError: when ``num_samples`` is set but the number of consumed
            batches is not one of the expected values.
    """
    v = compute_gradients(model=model, n_gpu=n_gpu, device=device, inputs=test_inputs, params_filter=params_filter, weight_decay=weight_decay, weight_decay_ignores=weight_decay_ignores)
    last_estimate = list(v).copy()
    cumulative_num_samples = 0
    with tqdm(total=num_samples) as pbar:
        for data_loader in train_data_loaders:
            for (i, inputs) in enumerate(data_loader):
                this_estimate = compute_hessian_vector_products(model=model, n_gpu=n_gpu, device=device, vectors=last_estimate, inputs=inputs, params_filter=params_filter, weight_decay=weight_decay, weight_decay_ignores=weight_decay_ignores)
                with torch.no_grad():
                    new_estimate = [((a + ((1 - damp) * b)) - (c / scale)) for (a, b, c) in zip(v, last_estimate, this_estimate)]
                pbar.update(1)
                if (verbose is True):
                    new_estimate_norm = new_estimate[0].norm().item()
                    last_estimate_norm = last_estimate[0].norm().item()
                    estimate_norm_diff = (new_estimate_norm - last_estimate_norm)
                    pbar.set_description(f'{new_estimate_norm:.2f} | {estimate_norm_diff:.2f}')
                cumulative_num_samples += 1
                last_estimate = new_estimate
                if ((num_samples is not None) and (i > num_samples)):
                    break
    inverse_hvp = [(X / scale) for X in last_estimate]
    # Sanity check on the number of consumed batches. Fix: only meaningful
    # when a cap was requested — the original evaluated `num_samples + 2`
    # unconditionally, raising TypeError whenever num_samples was None
    # (the default). Also fixed the 'fbut' typo left by a collapsed
    # f-string concatenation in the error message.
    if (num_samples is not None) and (cumulative_num_samples not in [num_samples, (num_samples + 2)]):
        raise ValueError(f'cumulative_num_samples={cumulative_num_samples} but num_samples={num_samples}: Untested Territory')
    return inverse_hvp
class FlaxBigBirdPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder for the real Flax BigBird model.

    Auto-generated dummy object: instantiation routes through
    ``requires_backends``, which raises when the 'flax' backend is
    unavailable (presumably with an installation hint — see
    ``requires_backends`` for the exact behaviour).
    """
    # Backends that must be installed for the real class to be importable.
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
class CompilerDirectivesNode(Node):
    """Wraps a body of statements that must be processed under an overridden
    set of compiler directives.

    Each phase temporarily installs ``self.directives``, delegates to the
    body, then restores the previous directives. Restoration is not
    exception-safe (no try/finally), matching the surrounding code's style.
    """
    child_attrs = ['body']

    def analyse_declarations(self, env):
        saved = env.directives
        env.directives = self.directives
        self.body.analyse_declarations(env)
        env.directives = saved

    def analyse_expressions(self, env):
        saved = env.directives
        env.directives = self.directives
        self.body = self.body.analyse_expressions(env)
        env.directives = saved
        return self

    def generate_function_definitions(self, env, code):
        # NOTE(review): env.directives is saved and restored but never
        # overridden in this phase — only the global code state is; confirm
        # this asymmetry is intentional.
        saved_env_directives = env.directives
        saved_code_directives = code.globalstate.directives
        code.globalstate.directives = self.directives
        self.body.generate_function_definitions(env, code)
        env.directives = saved_env_directives
        code.globalstate.directives = saved_code_directives

    def generate_execution_code(self, code):
        saved = code.globalstate.directives
        code.globalstate.directives = self.directives
        self.body.generate_execution_code(code)
        code.globalstate.directives = saved

    def annotate(self, code):
        saved = code.globalstate.directives
        code.globalstate.directives = self.directives
        self.body.annotate(code)
        code.globalstate.directives = saved
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.