class RLArh_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, rla_channel=32, SE=False, ECA_size=None, groups=1, base_width=64, dilation=1, norm_layer=None, reduction=16):
super(RLArh_Bottleneck, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
width = (int((planes * (base_width / 64.0))) * groups)
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, (planes * self.expansion))
self.bn3 = norm_layer((planes * self.expansion))
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.averagePooling = None
if ((downsample is not None) and (stride != 1)):
self.averagePooling = nn.AvgPool2d((2, 2), stride=(2, 2))
self.se = None
if SE:
self.se = SELayer((planes * self.expansion), reduction)
self.eca = None
        if ECA_size is not None:
self.eca = eca_layer((planes * self.expansion), int(ECA_size))
def forward(self, x, h):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
        if self.se is not None:
            out = self.se(out)
        if self.eca is not None:
            out = self.eca(out)
y = out
if (self.downsample is not None):
identity = self.downsample(identity)
if (self.averagePooling is not None):
h = self.averagePooling(h)
out += identity
out = self.relu(out)
        return (out, y, h, identity)
def test_digits_lazy():
model = FeatureBasedSelection(100, 'sqrt', optimizer='lazy')
model.fit(X_digits, sample_cost=X_digits_costs)
assert_array_equal(model.ranking, digits_ranking)
assert_array_almost_equal(model.gains, digits_gains, 4)
    assert_less_equal(sum(X_digits_costs[model.ranking]), 100)
def average(a, b):
    'Expr average(Expr a, Expr b)'
    if type(a) is not Expr:
        a = Expr(a)
    if type(b) is not Expr:
        b = Expr(b)
    assert a.type() == b.type()
if a.type().is_float():
return ((a + b) / 2)
narrow = a.type()
wider = narrow.with_bits((narrow.bits() * 2))
a = cast(wider, a)
b = cast(wider, b)
    return cast(narrow, ((a + b) / 2))
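# Why `average` widens before dividing: averaging two values near the narrow
# type's maximum overflows. A minimal pure-Python analogue of the same trick,
# emulating 8-bit arithmetic (not the Halide Expr version above):
a, b = 250, 252
naive = ((a + b) & 0xFF) // 2  # 123 -- the 8-bit sum wrapped modulo 256
widened = (a + b) // 2         # 251 -- computed at double width, result still fits in 8 bits
assert widened <= 0xFF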
class BehaviorAgent(BasicAgent):
def __init__(self, vehicle, behavior='normal'):
super(BehaviorAgent, self).__init__(vehicle)
self._look_ahead_steps = 0
self._speed = 0
self._speed_limit = 0
self._direction = None
self._incoming_direction = None
self._incoming_waypoint = None
self._min_speed = 5
self._behavior = None
self._sampling_resolution = 4.5
if (behavior == 'cautious'):
self._behavior = Cautious()
elif (behavior == 'normal'):
self._behavior = Normal()
elif (behavior == 'aggressive'):
self._behavior = Aggressive()
def _update_information(self):
self._speed = get_speed(self._vehicle)
self._speed_limit = self._vehicle.get_speed_limit()
self._local_planner.set_speed(self._speed_limit)
self._direction = self._local_planner.target_road_option
if (self._direction is None):
self._direction = RoadOption.LANEFOLLOW
self._look_ahead_steps = int((self._speed_limit / 10))
(self._incoming_waypoint, self._incoming_direction) = self._local_planner.get_incoming_waypoint_and_direction(steps=self._look_ahead_steps)
if (self._incoming_direction is None):
self._incoming_direction = RoadOption.LANEFOLLOW
def traffic_light_manager(self):
actor_list = self._world.get_actors()
lights_list = actor_list.filter('*traffic_light*')
(affected, _) = self._affected_by_traffic_light(lights_list)
return affected
def _tailgating(self, waypoint, vehicle_list):
left_turn = waypoint.left_lane_marking.lane_change
right_turn = waypoint.right_lane_marking.lane_change
left_wpt = waypoint.get_left_lane()
right_wpt = waypoint.get_right_lane()
(behind_vehicle_state, behind_vehicle, _) = self._vehicle_obstacle_detected(vehicle_list, max(self._behavior.min_proximity_threshold, (self._speed_limit / 2)), up_angle_th=180, low_angle_th=160)
if (behind_vehicle_state and (self._speed < get_speed(behind_vehicle))):
if (((right_turn == carla.LaneChange.Right) or (right_turn == carla.LaneChange.Both)) and ((waypoint.lane_id * right_wpt.lane_id) > 0) and (right_wpt.lane_type == carla.LaneType.Driving)):
(new_vehicle_state, _, _) = self._vehicle_obstacle_detected(vehicle_list, max(self._behavior.min_proximity_threshold, (self._speed_limit / 2)), up_angle_th=180, lane_offset=1)
if (not new_vehicle_state):
print('Tailgating, moving to the right!')
end_waypoint = self._local_planner.target_waypoint
self._behavior.tailgate_counter = 200
self.set_destination(end_waypoint.transform.location, right_wpt.transform.location)
elif ((left_turn == carla.LaneChange.Left) and ((waypoint.lane_id * left_wpt.lane_id) > 0) and (left_wpt.lane_type == carla.LaneType.Driving)):
(new_vehicle_state, _, _) = self._vehicle_obstacle_detected(vehicle_list, max(self._behavior.min_proximity_threshold, (self._speed_limit / 2)), up_angle_th=180, lane_offset=(- 1))
if (not new_vehicle_state):
print('Tailgating, moving to the left!')
end_waypoint = self._local_planner.target_waypoint
self._behavior.tailgate_counter = 200
self.set_destination(end_waypoint.transform.location, left_wpt.transform.location)
def collision_and_car_avoid_manager(self, waypoint):
vehicle_list = self._world.get_actors().filter('*vehicle*')
def dist(v):
return v.get_location().distance(waypoint.transform.location)
vehicle_list = [v for v in vehicle_list if ((dist(v) < 45) and (v.id != self._vehicle.id))]
if (self._direction == RoadOption.CHANGELANELEFT):
(vehicle_state, vehicle, distance) = self._vehicle_obstacle_detected(vehicle_list, max(self._behavior.min_proximity_threshold, (self._speed_limit / 2)), up_angle_th=180, lane_offset=(- 1))
elif (self._direction == RoadOption.CHANGELANERIGHT):
(vehicle_state, vehicle, distance) = self._vehicle_obstacle_detected(vehicle_list, max(self._behavior.min_proximity_threshold, (self._speed_limit / 2)), up_angle_th=180, lane_offset=1)
else:
(vehicle_state, vehicle, distance) = self._vehicle_obstacle_detected(vehicle_list, max(self._behavior.min_proximity_threshold, (self._speed_limit / 3)), up_angle_th=30)
if ((not vehicle_state) and (self._direction == RoadOption.LANEFOLLOW) and (not waypoint.is_junction) and (self._speed > 10) and (self._behavior.tailgate_counter == 0)):
self._tailgating(waypoint, vehicle_list)
return (vehicle_state, vehicle, distance)
def pedestrian_avoid_manager(self, waypoint):
walker_list = self._world.get_actors().filter('*walker.pedestrian*')
def dist(w):
return w.get_location().distance(waypoint.transform.location)
walker_list = [w for w in walker_list if (dist(w) < 10)]
if (self._direction == RoadOption.CHANGELANELEFT):
(walker_state, walker, distance) = self._vehicle_obstacle_detected(walker_list, max(self._behavior.min_proximity_threshold, (self._speed_limit / 2)), up_angle_th=90, lane_offset=(- 1))
elif (self._direction == RoadOption.CHANGELANERIGHT):
(walker_state, walker, distance) = self._vehicle_obstacle_detected(walker_list, max(self._behavior.min_proximity_threshold, (self._speed_limit / 2)), up_angle_th=90, lane_offset=1)
else:
(walker_state, walker, distance) = self._vehicle_obstacle_detected(walker_list, max(self._behavior.min_proximity_threshold, (self._speed_limit / 3)), up_angle_th=60)
return (walker_state, walker, distance)
def car_following_manager(self, vehicle, distance, debug=False):
vehicle_speed = get_speed(vehicle)
delta_v = max(1, ((self._speed - vehicle_speed) / 3.6))
ttc = ((distance / delta_v) if (delta_v != 0) else (distance / np.nextafter(0.0, 1.0)))
if (self._behavior.safety_time > ttc > 0.0):
target_speed = min([positive((vehicle_speed - self._behavior.speed_decrease)), self._behavior.max_speed, (self._speed_limit - self._behavior.speed_lim_dist)])
self._local_planner.set_speed(target_speed)
control = self._local_planner.run_step(debug=debug)
elif ((2 * self._behavior.safety_time) > ttc >= self._behavior.safety_time):
target_speed = min([max(self._min_speed, vehicle_speed), self._behavior.max_speed, (self._speed_limit - self._behavior.speed_lim_dist)])
self._local_planner.set_speed(target_speed)
control = self._local_planner.run_step(debug=debug)
else:
target_speed = min([self._behavior.max_speed, (self._speed_limit - self._behavior.speed_lim_dist)])
self._local_planner.set_speed(target_speed)
control = self._local_planner.run_step(debug=debug)
return control
def run_step(self, debug=False):
self._update_information()
control = None
if (self._behavior.tailgate_counter > 0):
self._behavior.tailgate_counter -= 1
ego_vehicle_loc = self._vehicle.get_location()
ego_vehicle_wp = self._map.get_waypoint(ego_vehicle_loc)
if self.traffic_light_manager():
return self.emergency_stop()
(walker_state, walker, w_distance) = self.pedestrian_avoid_manager(ego_vehicle_wp)
if walker_state:
distance = ((w_distance - max(walker.bounding_box.extent.y, walker.bounding_box.extent.x)) - max(self._vehicle.bounding_box.extent.y, self._vehicle.bounding_box.extent.x))
if (distance < self._behavior.braking_distance):
return self.emergency_stop()
(vehicle_state, vehicle, distance) = self.collision_and_car_avoid_manager(ego_vehicle_wp)
if vehicle_state:
distance = ((distance - max(vehicle.bounding_box.extent.y, vehicle.bounding_box.extent.x)) - max(self._vehicle.bounding_box.extent.y, self._vehicle.bounding_box.extent.x))
if (distance < self._behavior.braking_distance):
return self.emergency_stop()
else:
control = self.car_following_manager(vehicle, distance)
elif (self._incoming_waypoint.is_junction and (self._incoming_direction in [RoadOption.LEFT, RoadOption.RIGHT])):
target_speed = min([self._behavior.max_speed, (self._speed_limit - 5)])
self._local_planner.set_speed(target_speed)
control = self._local_planner.run_step(debug=debug)
else:
target_speed = min([self._behavior.max_speed, (self._speed_limit - self._behavior.speed_lim_dist)])
self._local_planner.set_speed(target_speed)
control = self._local_planner.run_step(debug=debug)
return control
def emergency_stop(self):
control = carla.VehicleControl()
control.throttle = 0.0
control.brake = self._max_brake
control.hand_brake = False
        return control
def plot_loss(history, title=None):
if (not isinstance(history, dict)):
history = history.history
plt.plot(history['loss'])
plt.plot(history['val_loss'])
if (title is not None):
plt.title(title)
plt.ylabel('Loss')
plt.xlabel('Epoch')
    plt.legend(['Training data', 'Validation data'], loc=0)
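# Usage sketch for plot_loss: it accepts a Keras-style History object or a
# plain dict; the loss values below are made up for illustration.
import matplotlib.pyplot as plt
toy_history = {'loss': [0.9, 0.6, 0.45, 0.38], 'val_loss': [0.95, 0.7, 0.55, 0.5]}
plot_loss(toy_history, title='Toy run')
plt.show()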
def trainsample(sample, model, optimizer, criterion=nn.BCELoss()):
model.train()
model.zero_grad()
(output, label_tensor) = model(sample)
loss = criterion(output, label_tensor)
loss.backward()
optimizer.step()
    return (output, loss.item())
def ExtractCommandName(command_string):
command = ' '.join(command_string.split()[2:])
end_position = FindOpenParanthesisPosition(command)
if (end_position is not None):
command = command[:end_position]
command = ':'.join(command.split(':')[1:]).strip()
    return command
@register_model
def vit_large_r50_s32_224(pretrained=False, **kwargs):
backbone = _resnetv2((3, 4, 6, 3), **kwargs)
model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer_hybrid('vit_large_r50_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs)
    return model
def get_negative_images(all_images, image_names, num_neg_images):
random_numbers = np.arange(len(all_images))
np.random.shuffle(random_numbers)
if (int(num_neg_images) > (len(all_images) - 1)):
num_neg_images = (len(all_images) - 1)
neg_count = 0
negative_images = []
for random_number in list(random_numbers):
if (all_images[random_number] not in image_names):
negative_images.append(all_images[random_number])
neg_count += 1
if (neg_count > (int(num_neg_images) - 1)):
break
    return negative_images
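# Usage sketch for get_negative_images: sample up to num_neg_images names that
# fall outside the query set (toy data, illustrative only).
import numpy as np
np.random.seed(0)
pool = ['img_%d.jpg' % i for i in range(10)]
query = {'img_2.jpg', 'img_5.jpg'}
negatives = get_negative_images(pool, query, num_neg_images=3)
assert len(negatives) == 3 and not query.intersection(negatives)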
def loss_batch(model: nn.Module, xb: Tensor, yb: Tensor, loss_func: OptLossFunc = None, opt: OptOptimizer = None, cb_handler: Optional[CallbackHandler] = None, count: List[int] = [1], batch_multiplier: int = 1) -> Tuple[Union[Tensor, int, float, str]]:
    # `count` deliberately uses a mutable default: it persists across calls and
    # triggers one optimizer step per `batch_multiplier` batches.
cb_handler = ifnone(cb_handler, CallbackHandler())
if (not is_listy(xb)):
xb = [xb]
if (not is_listy(yb)):
yb = [yb]
out = model(*xb)
if (not loss_func):
return (to_detach(out), yb[0].detach())
out = cb_handler.on_loss_begin(out)
loss = (loss_func(out, *yb) / batch_multiplier)
count[0] -= 1
if (opt is not None):
(loss, skip_bwd) = cb_handler.on_backward_begin(loss)
if (not skip_bwd):
loss.backward()
if (count[0] == 0):
if (not cb_handler.on_backward_end()):
opt.step()
if (not cb_handler.on_step_end()):
opt.zero_grad()
count[0] = batch_multiplier
    return loss.detach().cpu()
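# The count/batch_multiplier pair above implements gradient accumulation: the
# loss is scaled down and the optimizer steps once every batch_multiplier
# batches. A minimal standalone PyTorch sketch of the same idea, without the
# fastai callback machinery:
import torch
import torch.nn as nn
model = nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = nn.MSELoss()
batch_multiplier = 4
for step in range(16):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = loss_fn(model(x), y) / batch_multiplier  # scale so accumulated grads average out
    loss.backward()                                 # gradients accumulate across iterations
    if (step + 1) % batch_multiplier == 0:
        opt.step()       # one effective large-batch update
        opt.zero_grad()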
def multiprocess(keys):
pool = Pool(4)
r = pool.map_async(run, keys)
r.wait()
pool.close()
pool.join()
data = r.get()
return data |
def mol2graph_with_substructures(mols: List[str], args) -> BatchMolGraphWithSubstructures:
    return BatchMolGraphWithSubstructures([MolGraphWithSubstructures(mol, args) for mol in mols], args=args)
class AccuracyValidator(object):
def __init__(self, **kwargs):
validation_set_image_dir = '/path/to/your/validation/set/directory'
validation_set_label_file_path = '/path/to/imagenet_groundtruth_labels.txt'
black_list_file_path = '/path/to/imagenet_blacklist.txt'
imagenet_classes_file = '/path/to/imagenet_classes.txt'
self._imagenet_classes = [line.rstrip('\n') for line in open(imagenet_classes_file)]
imagenet_classes_map = {}
for idx in range(len(self._imagenet_classes)):
imagenet_classes_map[self._imagenet_classes[idx]] = idx
black_list = [int(line.rstrip('\n')) for line in open(black_list_file_path)]
        self._samples = []
        self._labels = [0]  # dummy entry so 1-based validation image ids index directly into the list
self._correct_count = 0
for img_file in os.listdir(validation_set_image_dir):
if img_file.endswith('.JPEG'):
img_id = int(os.path.splitext(img_file)[0].split('_')[(- 1)])
if (img_id not in black_list):
self._samples.append(os.path.join(validation_set_image_dir, img_file))
for label in open(validation_set_label_file_path):
label = label.rstrip('\n')
self._labels.append(imagenet_classes_map[label])
def sample_size(self):
return len(self._samples)
def batch_size(self):
return 1
def preprocess(self, sample_idx_start, sample_idx_end, **kwargs):
inputs = {}
batch_sample_data = []
sample_idx_end = min(sample_idx_end, self.sample_size())
for sample_idx in range(sample_idx_start, sample_idx_end):
sample_file_path = self._samples[sample_idx]
sample_img = Image.open(sample_file_path).resize((224, 224))
sample_data = np.asarray(sample_img, dtype=np.float32)
sample_data = (((2.0 / 255.0) * sample_data) - 1.0)
batch_sample_data.append(sample_data.tolist())
inputs['input'] = batch_sample_data
return inputs
def postprocess(self, sample_idx_start, sample_idx_end, output_map, **kwargs):
output = output_map['MobilenetV2/Predictions/Reshape_1']
sample_idx_end = min(sample_idx_end, self.sample_size())
batch_size = (sample_idx_end - sample_idx_start)
output = np.array(output).reshape((batch_size, (- 1)))
output = np.argmax(output, axis=(- 1))
output_idx = 0
for sample_idx in range(sample_idx_start, sample_idx_end):
sample_file_path = self._samples[sample_idx]
img_id = int(os.path.splitext(sample_file_path)[0].split('_')[(- 1)])
if (output[output_idx] == self._labels[img_id]):
self._correct_count += 1
else:
print(img_id, ('predict %s vs gt %s' % (self._imagenet_classes[output[output_idx]], self._imagenet_classes[self._labels[img_id]])))
output_idx += 1
def result(self):
print('')
print(('Top 1 accuracy: %f' % ((self._correct_count * 1.0) / self.sample_size())))
        print('')
class DualData25(Dataset):
def __init__(self, list_file, root='', num_patches=20, for_train=False, transforms='', return_target=True, crop=True, sample_size=25, sub_sample_size=19, target_size=19):
(paths, names) = ([], [])
with open(list_file) as f:
for line in f:
line = line.strip()
name = line.split('/')[(- 1)]
names.append(name)
path = os.path.join(root, line, (name + '_'))
paths.append(path)
self.root = root
self.names = names
self.paths = paths
self.crop = crop
self.num_patches = num_patches
self.for_train = for_train
self.return_target = return_target
self.sample_size = sample_size
self.sub_sample_size = sub_sample_size
self.target_size = target_size
self.suffix = '{}x{}x{}_'.format(sample_size, sample_size, sample_size)
self.all_coords = get_all_coords(target_size)
self.shape = np.ceil((np.array(_shape, dtype='float32') / target_size)).astype('int')
self.transforms = eval((transforms or 'Identity()'))
def __getitem__(self, index):
path = self.paths[index]
(images, label) = pkload((path + 'data_f32.pkl'))
(images, label) = (torch.from_numpy(images), torch.from_numpy(label))
mask = np.load((path + 'HarvardOxford-sub.npy'))
mask = torch.from_numpy(mask)
if (not self.crop):
(images, label, mask) = (images.unsqueeze(0), label.unsqueeze(0), mask.unsqueeze(0))
(images, label, mask) = self.transforms([images, label, mask])
(images, label, mask) = (images.squeeze(0), label.squeeze(0), mask.squeeze(0))
images = images.permute(3, 0, 1, 2).contiguous()
return ((images, self.all_coords, mask), label)
if self.for_train:
(fg, bg) = pkload(((path + self.suffix) + 'coords.pkl'))
coords = torch.cat([sample(x, (self.num_patches // 2)) for x in (fg, bg)])
else:
coords = self.all_coords
samples = multicrop.crop3d_cpu(images, coords, self.sample_size, self.sample_size, self.sample_size, 1, False)
sub_samples = multicrop.crop3d_cpu(images, coords, self.sub_sample_size, self.sub_sample_size, self.sub_sample_size, 3, False)
mask_id = multicrop.crop3d_cpu(mask, coords, self.sample_size, self.sample_size, self.sample_size, 1, False)
sub_mask_id = multicrop.crop3d_cpu(mask, coords, self.sub_sample_size, self.sub_sample_size, self.sub_sample_size, 3, False)
if self.return_target:
target = multicrop.crop3d_cpu(label, coords, self.target_size, self.target_size, self.target_size, 1, False)
(samples, sub_samples, mask_id, sub_mask_id, target) = self.transforms([samples, sub_samples, mask_id, sub_mask_id, target])
else:
(samples, sub_samples, mask_id, sub_mask_id) = self.transforms([samples, sub_samples, mask_id, sub_mask_id])
target = coords
if self.for_train:
label = _zero
samples = samples.permute(0, 4, 1, 2, 3).contiguous()
sub_samples = sub_samples.permute(0, 4, 1, 2, 3).contiguous()
return ((samples, sub_samples, target, mask_id, sub_mask_id), label)
def __len__(self):
return len(self.names)
def collate(self, batch):
(data, label) = list(zip(*batch))
data = [torch.cat(v) for v in zip(*data)]
label = torch.cat(label)
if self.for_train:
perm = torch.randperm(data[0].shape[0])
data = [t[perm] for t in data]
        return (data, label)
class BaseCascadeDecodeHead(BaseDecodeHead, metaclass=ABCMeta):
def __init__(self, *args, **kwargs):
super(BaseCascadeDecodeHead, self).__init__(*args, **kwargs)
def forward(self, inputs, prev_output):
pass
def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg, train_cfg):
seg_logits = self.forward(inputs, prev_output)
losses = self.losses(seg_logits, gt_semantic_seg)
return losses
def forward_test(self, inputs, prev_output, img_metas, test_cfg):
        return self.forward(inputs, prev_output)
class nnUNetTrainerV2_Loss_DicewithBG(nnUNetTrainerV2):
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False):
super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16)
        self.loss = SoftDiceLoss(**{'apply_nonlin': softmax_helper, 'batch_dice': self.batch_dice, 'smooth': 1e-05, 'do_bg': True})
def parse_hhr(hhr_string: str) -> Sequence[TemplateHit]:
lines = hhr_string.splitlines()
block_starts = [i for (i, line) in enumerate(lines) if line.startswith('No ')]
hits = []
if block_starts:
block_starts.append(len(lines))
for i in range((len(block_starts) - 1)):
hits.append(_parse_hhr_hit(lines[block_starts[i]:block_starts[(i + 1)]]))
    return hits
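# parse_hhr locates each hit block by the lines starting with 'No ' and slices
# between consecutive starts. A toy illustration of just that slicing logic
# (no real HHR parsing):
toy_lines = ['header', 'No 1', 'hit one body', 'No 2', 'hit two body']
starts = [i for i, line in enumerate(toy_lines) if line.startswith('No ')]
starts.append(len(toy_lines))
blocks = [toy_lines[starts[i]:starts[i + 1]] for i in range(len(starts) - 1)]
assert blocks == [['No 1', 'hit one body'], ['No 2', 'hit two body']]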
class RandomFourierApproximation(torch.nn.Module):
def __init__(self, kernel: RandomSpectralKernel):
super().__init__()
self.kernel = kernel
self.weights_real = self.sample_weights()
self.weights_imag = self.sample_weights()
def sample_weights(self):
return torch.randn(self.kernel.manifold.order, dtype=dtype, device=device)
def resample(self):
self.weights_real = self.sample_weights()
self.weights_imag = self.sample_weights()
self.kernel.manifold.generate_lb_eigenspaces(self.kernel.measure)
def forward(self, x):
x_ = self.kernel.manifold.to_group(x)
embedding = (self.kernel.manifold.lb_eigenspaces(x_) * torch.sqrt(torch.abs(self.kernel.measure.variance[0])))
sample_real = torch.einsum('nm,m->n', embedding.real, self.weights_real)
sample_imag = torch.einsum('nm,m->n', embedding.imag, self.weights_imag)
sample = ((sample_real - sample_imag) / sqrt(self.kernel.normalizer))
return sample
def _cov(self, x, y):
(x_, y_) = (self.kernel.manifold.to_group(x), self.kernel.manifold.to_group(y))
(x_embed, y_embed) = (self.kernel.manifold.lb_eigenspaces(x_), self.kernel.manifold.lb_eigenspaces(y_))
        return ((torch.abs(self.kernel.measure.variance[0]) * (x_embed @ torch.conj(y_embed).T).real) / self.kernel.normalizer)
def load_newcrfs_net(scene: str, max_depth: Optional[float]=None) -> nn.Module:
if (scene not in SCENES):
raise ValueError(f'Invalid NeWCRFs model. ({scene} vs. {SCENES})')
max_depth = (max_depth or (10 if (scene == 'indoor') else 80))
if (max_depth <= 0):
raise ValueError(f'Max depth must be a positive number. Got {max_depth}.')
net = nn.DataParallel(NewCRFDepth(version='large07', inv_depth=False, max_depth=max_depth, pretrained=None))
ckpt_file = (PATHS['newcrfs_indoor'] if (scene == 'indoor') else PATHS['newcrfs_outdoor'])
ckpt = torch.load(ckpt_file, map_location='cpu')
net.load_state_dict(ckpt['model'])
net = net.eval()
for p in net.parameters():
p.requires_grad = False
    return net
def count(counts, inp):
    # tally word frequencies from an iterable of tokenized sentences
    for s in inp:
        for w in s:
            if w in counts:
                counts[w] += 1
            else:
                counts[w] = 1
    return counts
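# Usage sketch for count: it updates a word-frequency dict in place from an
# iterable of tokenized sentences (collections.Counter does the same in one line).
freqs = count({}, [['the', 'cat'], ['the', 'dog', 'and', 'the', 'cat']])
assert freqs == {'the': 3, 'cat': 2, 'dog': 1, 'and': 1}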
class TFBertTokenizer(metaclass=DummyObject):
_backends = ['tensorflow_text']
def __init__(self, *args, **kwargs):
        requires_backends(self, ['tensorflow_text'])
def eval_model(test_images, test_images_label, model, data_transforms):
pred_list = []
pred_fname = []
with torch.no_grad():
for (im, im_list) in tqdm(test_images.items()):
td = WSIDataloader(im_list, transform=data_transforms)
tdl = torch.utils.data.DataLoader(td, batch_size=128, shuffle=False, num_workers=0)
(t_pred, _) = compute_attn_df(tdl, model)
pred_list.append(t_pred)
pred_fname.append(im)
pred_df = pd.DataFrame({'wsi': pred_fname, 'prediction': pred_list})
pred_df['actual'] = pred_df['wsi'].apply((lambda x: test_images_label[x]))
print('Test Accuracy: ', (sum((pred_df['actual'] == pred_df['prediction'])) / pred_df.shape[0]))
label_map = {0: 'Normal', 1: 'Disease'}
actual_label = pd.Series([label_map[x] for x in pred_df['actual'].tolist()], name='Actual')
predicted_label = pd.Series([label_map[x] for x in pred_list], name='Predicted')
print(pd.crosstab(actual_label, predicted_label))
    return (sum((pred_df['actual'] == pred_df['prediction'])) / pred_df.shape[0])
def _edge_dict_to_H(edge_dict):
n_nodes = len(edge_dict)
H = np.zeros(shape=(n_nodes, n_nodes))
for (center_id, adj_list) in enumerate(edge_dict):
H[(center_id, center_id)] = 1.0
for adj_id in adj_list:
H[(adj_id, center_id)] = 1.0
    return H
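# Usage sketch for _edge_dict_to_H: given per-node adjacency lists, column i of
# H marks the hyperedge centered at node i (the node itself plus its neighbors).
# Toy 3-node example:
import numpy as np
H = _edge_dict_to_H([[1, 2], [0], [0]])
assert (H == np.array([[1., 1., 1.], [1., 1., 0.], [1., 0., 1.]])).all()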
def triply_nested_spec(doubly_nested_spec: specs.Spec) -> specs.Spec[TriplyNested]:
    return specs.Spec(TriplyNested, 'TriplyNestedSpec', doubly_nested=doubly_nested_spec, bounded_array=specs.BoundedArray((7, 9), jnp.int32, 0, 6), discrete_array=specs.DiscreteArray(5, jnp.int32))
class Prototypes(nn.Module):
def __init__(self, fdim, num_classes, temp=0.05):
super().__init__()
self.prototypes = nn.Linear(fdim, num_classes, bias=False)
self.temp = temp
def forward(self, x):
x = F.normalize(x, p=2, dim=1)
out = self.prototypes(x)
out = (out / self.temp)
        return out
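# Usage sketch for Prototypes: inputs are L2-normalized, the bias-free linear
# layer computes similarities to the class prototypes, and the temperature
# sharpens the resulting logits.
import torch
proto = Prototypes(fdim=128, num_classes=10)
logits = proto(torch.randn(4, 128))
assert logits.shape == (4, 10)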
@torch.no_grad()
def add_noise_on_isr(img_self_res, transform_type='noise+blur'):
if ('blur' in transform_type):
if (torch.rand(1) < 0.5):
raw_size = img_self_res.shape[1:]
blur_kernel_size = 2
img_self_res = F.avg_pool2d(img_self_res[None], kernel_size=(blur_kernel_size, blur_kernel_size))
img_self_res = F.interpolate(img_self_res, size=raw_size, mode='bilinear', align_corners=False)[0]
if ('noise' in transform_type):
disappear_mask_threshold = (1.0, 1.5)
random_mask_threshold = (0.4, 0.6)
noise_intensity = (0.1, 0.3)
disappear_mask_threshold = torch.empty(size=(1,)).uniform_(*disappear_mask_threshold).item()
disappear_mask = (torch.abs(torch.randn_like(img_self_res)) < disappear_mask_threshold)
img_self_res = (img_self_res * disappear_mask)
random_mask_threshold = torch.empty(size=(1,)).uniform_(*random_mask_threshold).item()
noise_intensity = torch.empty(size=(1,)).uniform_(*noise_intensity).item()
random_mask = (torch.abs(torch.randn_like(img_self_res)) < random_mask_threshold)
img_self_res = (img_self_res + ((torch.randn_like(img_self_res) * noise_intensity) * random_mask))
img_self_res = torch.clamp(img_self_res, min=(- 1), max=1)
    return img_self_res
class VGGLoss(nn.Module):
def __init__(self, gpu_ids):
super(VGGLoss, self).__init__()
self.vgg = VGG19().cuda()
self.criterion = nn.L1Loss()
self.weights = [(1.0 / 32), (1.0 / 16), (1.0 / 8), (1.0 / 4), 1.0]
def forward(self, x, y):
(x_vgg, y_vgg) = (self.vgg(x), self.vgg(y))
loss = 0
for i in range(len(x_vgg)):
loss += (self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach()))
        return loss
@require_tf
class TestTFActivations(unittest.TestCase):
def test_get_activation(self):
get_tf_activation('swish')
get_tf_activation('silu')
get_tf_activation('gelu')
get_tf_activation('relu')
get_tf_activation('tanh')
get_tf_activation('gelu_new')
get_tf_activation('gelu_fast')
get_tf_activation('mish')
get_tf_activation('quick_gelu')
get_tf_activation('glu')
with self.assertRaises(KeyError):
get_tf_activation('bogus')
with self.assertRaises(KeyError):
            get_tf_activation(None)
@register_optimizer('adam')
class FairseqAdam(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
fused_adam_cls = get_fused_adam_class()
use_fused_adam = ((not getattr(args, 'use_old_adam', False)) and (fused_adam_cls is not None) and torch.cuda.is_available())
if use_fused_adam:
logger.info('using FusedAdam')
self._optimizer = fused_adam_cls(params, **self.optimizer_config)
else:
self._optimizer = Adam(params, **self.optimizer_config)
    @staticmethod
    def add_args(parser):
parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adam optimizer')
parser.add_argument('--adam-eps', type=float, default=1e-08, metavar='D', help='epsilon for Adam optimizer')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
parser.add_argument('--use-old-adam', action='store_true', default=False, help='Use fairseq.optim.adam.Adam')
    @property
    def optimizer_config(self):
return {'lr': self.args.lr[0], 'betas': eval(self.args.adam_betas), 'eps': self.args.adam_eps, 'weight_decay': self.args.weight_decay}
def average_params(self):
state_dict = self.optimizer.state_dict()
total_gpus = float(dist.get_world_size())
for (_, value) in state_dict['state'].items():
value['exp_avg'] /= total_gpus
value['exp_avg_sq'] /= total_gpus
dist.all_reduce(value['exp_avg'], op=dist.ReduceOp.SUM)
            dist.all_reduce(value['exp_avg_sq'], op=dist.ReduceOp.SUM)
@skipif_known_failing
def test_for_loop_literal_binding():
run_cell('a = 0')
run_cell('b = 1')
run_cell('c = 2')
run_cell('\n for i in [a, b, c]:\n pass\n ')
run_cell('a = 3')
run_cell('logging.info(i)')
    assert_not_detected('`i` should not depend on `a` at end of for loop')
class MaskedActionsModel(Model):
def _build_layers_v2(self, input_dict, num_outputs, options):
action_mask = input_dict['obs']['action_mask']
if (num_outputs != action_mask.shape[1].value):
raise ValueError('This model assumes num outputs is equal to max avail actions', num_outputs, action_mask)
last_layer = input_dict['obs']['obs']
hiddens = options.get('fcnet_hiddens')
for (i, size) in enumerate(hiddens):
label = 'fc{}'.format(i)
last_layer = slim.fully_connected(last_layer, size, weights_initializer=normc_initializer(1.0), activation_fn=tf.nn.tanh, scope=label)
action_logits = slim.fully_connected(last_layer, num_outputs, weights_initializer=normc_initializer(0.01), activation_fn=None, scope='fc_out')
inf_mask = tf.maximum(tf.log(action_mask), tf.float32.min)
masked_logits = (inf_mask + action_logits)
        return (masked_logits, last_layer)
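# The masking trick above in isolation: log(0) = -inf is clamped to the most
# negative float, so adding the mask drives invalid actions' logits low enough
# that softmax assigns them ~zero probability. Standalone NumPy illustration,
# not tied to the RLlib model above:
import numpy as np
toy_logits = np.array([2.0, 1.0, 0.5, -1.0])
toy_mask = np.array([1.0, 0.0, 1.0, 0.0])  # actions 1 and 3 unavailable
with np.errstate(divide='ignore'):
    inf_mask = np.maximum(np.log(toy_mask), np.finfo(np.float32).min)
masked = toy_logits + inf_mask
probs = np.exp(masked - masked.max())
probs /= probs.sum()
assert probs[1] == 0.0 and probs[3] == 0.0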
def tree_prod(tree1: T, tree2: T) -> T:
    return jax.tree_map(lambda a, b: a * b, tree1, tree2)
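# Usage sketch for tree_prod: two pytrees with matching structure are
# multiplied leaf by leaf. (Newer JAX releases rename jax.tree_map to
# jax.tree.map.)
import jax.numpy as jnp
t1 = {'w': jnp.full((2, 2), 2.0), 'b': jnp.ones((2,))}
t2 = {'w': jnp.full((2, 2), 3.0), 'b': jnp.full((2,), 4.0)}
out = tree_prod(t1, t2)
assert float(out['w'][0, 0]) == 6.0 and float(out['b'][0]) == 4.0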
def get_hash():
if os.path.exists('.git'):
sha = get_git_hash()[:7]
elif os.path.exists(version_file):
try:
from mmdet.version import __version__
sha = __version__.split('+')[(- 1)]
except ImportError:
raise ImportError('Unable to get git version')
else:
sha = 'unknown'
    return sha
class Plot():
def __init__(self, title: str='Pareto front approximation', reference_front: List[S]=None, reference_point: list=None, axis_labels: list=None):
self.plot_title = title
self.axis_labels = axis_labels
if (reference_point and (not isinstance(reference_point[0], list))):
reference_point = [reference_point]
self.reference_point = reference_point
self.reference_front = reference_front
self.dimension = None
    @staticmethod
    def get_points(solutions: List[S]) -> Tuple[pd.DataFrame, int]:
if (solutions is None):
raise Exception('Front is none!')
points = pd.DataFrame(list((solution.objectives for solution in solutions)))
return (points, points.shape[1])
def plot(self, front, label='', normalize: bool=False, filename: str=None, format: str='eps'):
if (not isinstance(front[0], list)):
front = [front]
if (not isinstance(label, list)):
label = [label]
if (len(front) != len(label)):
raise Exception('Number of fronts and labels must be the same')
dimension = front[0][0].number_of_objectives
if (dimension == 2):
self.two_dim(front, label, filename, format)
elif (dimension == 3):
self.three_dim(front, label, filename, format)
else:
self.pcoords(front, normalize, filename, format)
def two_dim(self, fronts: List[list], labels: List[str]=None, filename: str=None, format: str='eps'):
n = int(np.ceil(np.sqrt(len(fronts))))
fig = plt.figure()
fig.suptitle(self.plot_title, fontsize=16)
reference = None
if self.reference_front:
(reference, _) = self.get_points(self.reference_front)
for (i, _) in enumerate(fronts):
(points, _) = self.get_points(fronts[i])
ax = fig.add_subplot(n, n, (i + 1))
points.plot(kind='scatter', x=0, y=1, ax=ax, s=10, color='#236FA4', alpha=1.0)
if labels:
ax.set_title(labels[i])
if self.reference_front:
reference.plot(x=0, y=1, ax=ax, color='k', legend=False)
if self.reference_point:
for point in self.reference_point:
plt.plot([point[0]], [point[1]], marker='o', markersize=5, color='r')
plt.axvline(x=point[0], color='r', linestyle=':')
plt.axhline(y=point[1], color='r', linestyle=':')
if self.axis_labels:
plt.xlabel(self.axis_labels[0])
plt.ylabel(self.axis_labels[1])
if filename:
_filename = ((filename + '.') + format)
plt.savefig(_filename, format=format, dpi=1000)
            logger.info(f'Figure {_filename} saved to file')
else:
plt.show()
plt.close(fig=fig)
def three_dim(self, fronts: List[list], labels: List[str]=None, filename: str=None, format: str='eps'):
n = int(np.ceil(np.sqrt(len(fronts))))
fig = plt.figure()
fig.suptitle(self.plot_title, fontsize=16)
for (i, _) in enumerate(fronts):
ax = fig.add_subplot(n, n, (i + 1), projection='3d')
ax.scatter([s.objectives[0] for s in fronts[i]], [s.objectives[1] for s in fronts[i]], [s.objectives[2] for s in fronts[i]])
if labels:
ax.set_title(labels[i])
if self.reference_front:
ax.scatter([s.objectives[0] for s in self.reference_front], [s.objectives[1] for s in self.reference_front], [s.objectives[2] for s in self.reference_front])
if self.reference_point:
pass
ax.relim()
ax.autoscale_view(True, True, True)
ax.view_init(elev=30.0, azim=15.0)
ax.locator_params(nbins=4)
if filename:
_filename = ((filename + '.') + format)
plt.savefig(_filename, format=format, dpi=1000)
            logger.info(f'Figure {_filename} saved to file')
else:
plt.show()
plt.close(fig=fig)
def pcoords(self, fronts: List[list], normalize: bool=False, filename: str=None, format: str='eps'):
n = int(np.ceil(np.sqrt(len(fronts))))
fig = plt.figure()
fig.suptitle(self.plot_title, fontsize=16)
for (i, _) in enumerate(fronts):
(points, _) = self.get_points(fronts[i])
if normalize:
points = ((points - points.min()) / (points.max() - points.min()))
ax = fig.add_subplot(n, n, (i + 1))
(min_, max_) = (points.values.min(), points.values.max())
points['scale'] = ((np.linspace(0, 1, len(points)) * (max_ - min_)) + min_)
pd.plotting.parallel_coordinates(points, 'scale', ax=ax)
ax.get_legend().remove()
if self.axis_labels:
ax.set_xticklabels(self.axis_labels)
if filename:
plt.savefig(((filename + '.') + format), format=format, dpi=1000)
else:
plt.show()
        plt.close(fig=fig)
def test_digits_log_two_stage_init():
model = FeatureBasedSelection(100, 'log', optimizer='two-stage', initial_subset=digits_log_ranking[:5])
model.fit(X_digits)
assert_array_equal(model.ranking[:(- 5)], digits_log_ranking[5:])
assert_array_almost_equal(model.gains[:(- 5)], digits_log_gains[5:], 4)
    assert_array_almost_equal(model.subset, X_digits[model.ranking])
class Pooling(nn.Module):
def __init__(self, word_embedding_dimension: int, pooling_mode_cls_token: bool=False, pooling_mode_max_tokens: bool=False, pooling_mode_mean_tokens: bool=True, pooling_mode_mean_sqrt_len_tokens: bool=False):
super(Pooling, self).__init__()
self.config_keys = ['word_embedding_dimension', 'pooling_mode_cls_token', 'pooling_mode_mean_tokens', 'pooling_mode_max_tokens', 'pooling_mode_mean_sqrt_len_tokens']
self.word_embedding_dimension = word_embedding_dimension
self.pooling_mode_cls_token = pooling_mode_cls_token
self.pooling_mode_mean_tokens = pooling_mode_mean_tokens
self.pooling_mode_max_tokens = pooling_mode_max_tokens
self.pooling_mode_mean_sqrt_len_tokens = pooling_mode_mean_sqrt_len_tokens
pooling_mode_multiplier = sum([pooling_mode_cls_token, pooling_mode_max_tokens, pooling_mode_mean_tokens, pooling_mode_mean_sqrt_len_tokens])
self.pooling_output_dimension = (pooling_mode_multiplier * word_embedding_dimension)
def forward(self, features: Dict[(str, Tensor)]):
token_embeddings = features['token_embeddings']
cls_token = features['cls_token_embeddings']
input_mask = features['input_mask']
output_vectors = []
if self.pooling_mode_cls_token:
output_vectors.append(cls_token)
if self.pooling_mode_max_tokens:
input_mask_expanded = input_mask.unsqueeze((- 1)).expand(token_embeddings.size()).float()
            token_embeddings[input_mask_expanded == 0] = -1e9  # push padding to a large negative value so it never wins the max
max_over_time = torch.max(token_embeddings, 1)[0]
output_vectors.append(max_over_time)
if (self.pooling_mode_mean_tokens or self.pooling_mode_mean_sqrt_len_tokens):
input_mask_expanded = input_mask.unsqueeze((- 1)).expand(token_embeddings.size()).float()
sum_embeddings = torch.sum((token_embeddings * input_mask_expanded), 1)
if ('token_weights_sum' in features):
sum_mask = features['token_weights_sum'].unsqueeze((- 1)).expand(sum_embeddings.size())
else:
sum_mask = input_mask_expanded.sum(1)
sum_mask = torch.clamp(sum_mask, min=1e-09)
if self.pooling_mode_mean_tokens:
output_vectors.append((sum_embeddings / sum_mask))
if self.pooling_mode_mean_sqrt_len_tokens:
output_vectors.append((sum_embeddings / torch.sqrt(sum_mask)))
output_vector = torch.cat(output_vectors, 1)
features.update({'sentence_embedding': output_vector})
return features
def get_sentence_embedding_dimension(self):
return self.pooling_output_dimension
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
    @staticmethod
    def load(input_path):
with open(os.path.join(input_path, 'config.json')) as fIn:
config = json.load(fIn)
        return Pooling(**config)
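# Usage sketch for Pooling: forward expects token embeddings, a CLS embedding,
# and an attention mask; with the default mean pooling the sentence embedding
# keeps the word-embedding dimensionality.
import torch
pooling = Pooling(word_embedding_dimension=8)
features = {
    'token_embeddings': torch.randn(2, 5, 8),  # (batch, seq, dim)
    'cls_token_embeddings': torch.randn(2, 8),
    'input_mask': torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]),
}
out = pooling(features)
assert out['sentence_embedding'].shape == (2, 8)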
class ConvBNReLU(nn.Module):
def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs):
super(ConvBNReLU, self).__init__()
self.conv = nn.Conv2d(in_chan, out_chan, kernel_size=ks, stride=stride, padding=padding, bias=False)
self.bn = nn.BatchNorm2d(out_chan)
self.init_weight()
def forward(self, x):
x = self.conv(x)
x = F.relu(self.bn(x))
return x
def init_weight(self):
for ly in self.children():
if isinstance(ly, nn.Conv2d):
nn.init.kaiming_normal_(ly.weight, a=1)
                if ly.bias is not None:
                    nn.init.constant_(ly.bias, 0)
def build_model(model_opt, opt, fields, checkpoint):
logger.info('Building model...')
model = build_base_model(model_opt, opt, fields, use_gpu(opt), checkpoint)
drqa_model = None
if (opt.enable_rl_after >= 0):
vocab = torch.load(opt.drqa_vocab_path)
json_config = json.load(open(opt.drqa_config_path, 'r'))
args = SimpleNamespace(**json_config)
drqa_model = DrQA(vocab, args)
drqa_model.load(opt.drqa_param_path)
drqa_model.gpu = False
if use_gpu(opt):
drqa_model = drqa_model.cuda()
drqa_model.gpu = True
model.set_drqa_model(drqa_model)
logger.info(model)
    return model
class PodMeta(BaseScaleSpec):
def __init__(self, name, id, type, rank_index, service, resource: ContainerResourceSpec):
self.name = name
self.id = id
self.type = type
self.rank_index = rank_index
self.service = service
self.resource = resource
def to_dict(self):
spec = {}
spec['name'] = self.name
spec['type'] = self.type
spec['id'] = self.id
spec['rankIndex'] = self.rank_index
spec['service'] = self.service
spec['resource'] = self.resource.to_dict()
        return spec
class FlaxRobertaForCausalLM(metaclass=DummyObject):
_backends = ['flax']
def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
class Bond():
def __init__(self, t0=(- 1), t1=(- 1)):
self.t0 = t0
self.t1 = t1
def __str__(self):
return '({0},{1})'.format(self.t0, self.t1)
def isFree(self):
return ((self.t0 < 0) or (self.t1 < 0))
def connect(self, tensor_index):
assert self.isFree(), 'edge already connected to two tensors'
if (self.t0 < 0):
self.t0 = tensor_index
else:
assert (not (self.t0 == tensor_index)), 'edge connects to the same tensor'
self.t1 = tensor_index
def has(self, tensor_index):
        return ((self.t0 == tensor_index) or (self.t1 == tensor_index))
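# Usage sketch for Bond: a bond starts free (both endpoints negative); connect
# fills endpoints one at a time and refuses self-loops and over-connection.
b = Bond()
assert b.isFree()
b.connect(0)
b.connect(3)
assert str(b) == '(0,3)' and b.has(3) and not b.isFree()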
class TestEventWriter(unittest.TestCase):
def testScalar(self):
with tempfile.TemporaryDirectory(prefix='detectron2_tests') as dir, EventStorage() as storage:
json_file = os.path.join(dir, 'test.json')
writer = JSONWriter(json_file)
for k in range(60):
storage.put_scalar('key', k, smoothing_hint=False)
if (((k + 1) % 20) == 0):
writer.write()
storage.step()
writer.close()
with open(json_file) as f:
data = [json.loads(l) for l in f]
self.assertTrue(([int(k['key']) for k in data] == [19, 39, 59]))
def testScalarMismatchedPeriod(self):
with tempfile.TemporaryDirectory(prefix='detectron2_tests') as dir, EventStorage() as storage:
json_file = os.path.join(dir, 'test.json')
writer = JSONWriter(json_file)
for k in range(60):
if ((k % 17) == 0):
storage.put_scalar('key2', k, smoothing_hint=False)
storage.put_scalar('key', k, smoothing_hint=False)
if (((k + 1) % 20) == 0):
writer.write()
storage.step()
writer.close()
with open(json_file) as f:
data = [json.loads(l) for l in f]
self.assertTrue(([int(k.get('key2', 0)) for k in data] == [17, 0, 34, 0, 51, 0]))
self.assertTrue(([int(k.get('key', 0)) for k in data] == [0, 19, 0, 39, 0, 59]))
self.assertTrue(([int(k['iteration']) for k in data] == [17, 19, 34, 39, 51, 59]))
def testPrintETA(self):
with EventStorage() as s:
p1 = CommonMetricPrinter(10)
p2 = CommonMetricPrinter()
s.put_scalar('time', 1.0)
s.step()
s.put_scalar('time', 1.0)
s.step()
with self.assertLogs('detectron2.utils.events') as logs:
p1.write()
self.assertIn('eta', logs.output[0])
with self.assertLogs('detectron2.utils.events') as logs:
p2.write()
self.assertNotIn('eta', logs.output[0])
def testPrintNonLosses(self):
with EventStorage() as s:
p1 = CommonMetricPrinter(10)
p2 = CommonMetricPrinter()
s.put_scalar('time', 1.0)
s.put_scalar('[metric]bn_stat', 1.0)
s.step()
s.put_scalar('time', 1.0)
s.put_scalar('[metric]bn_stat', 1.0)
s.step()
with self.assertLogs('detectron2.utils.events') as logs:
p1.write()
self.assertIn('[metric]bn_stat', logs.output[0])
with self.assertLogs('detectron2.utils.events') as logs:
p2.write()
self.assertIn('[metric]bn_stat', logs.output[0])
def testSmoothingWithWindowSize(self):
with tempfile.TemporaryDirectory(prefix='detectron2_tests') as dir, EventStorage() as storage:
json_file = os.path.join(dir, 'test.json')
writer = JSONWriter(json_file, window_size=10)
for k in range(20):
storage.put_scalar('key1', k, smoothing_hint=True)
if (((k + 1) % 2) == 0):
storage.put_scalar('key2', k, smoothing_hint=True)
if (((k + 1) % 5) == 0):
storage.put_scalar('key3', k, smoothing_hint=True)
if (((k + 1) % 10) == 0):
writer.write()
storage.step()
num_samples = {k: storage.count_samples(k, 10) for k in ['key1', 'key2', 'key3']}
self.assertEqual(num_samples, {'key1': 10, 'key2': 5, 'key3': 2})
writer.close()
with open(json_file) as f:
data = [json.loads(l) for l in f]
self.assertEqual([k['key1'] for k in data], [4.5, 14.5])
self.assertEqual([k['key2'] for k in data], [5, 15])
self.assertEqual([k['key3'] for k in data], [6.5, 16.5])
def testEventStorage(self):
self.assertFalse(has_event_storage())
with EventStorage() as storage:
self.assertTrue(has_event_storage())
self.assertEqual(storage, get_event_storage())
        self.assertFalse(has_event_storage())
class SparseRewardShaper():
def __init__(self):
pass
def reset(self, env):
pass
def __call__(self, env, observations, action_dict, rewards, dones):
for handle in rewards.keys():
if (rewards[handle] == (- 1)):
rewards[handle] = 0
        return rewards
def dict_to_nt(value, name):
if isinstance(value, dict):
values = {k: dict_to_nt(v, '_'.join([name, k])) for (k, v) in value.items()}
return globals()[name](**values)
if (isinstance(value, np.ndarray) and (value.dtype == np.float64)):
return np.asarray(value, dtype=np.float32)
    return value
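# Usage sketch for dict_to_nt: it looks up namedtuple classes by the
# underscore-joined nesting path in the defining module's globals(), so this
# toy setup only works when the classes live alongside dict_to_nt.
from collections import namedtuple
cfg = namedtuple('cfg', ['lr', 'model'])
cfg_model = namedtuple('cfg_model', ['depth'])
nt = dict_to_nt({'lr': 0.1, 'model': {'depth': 3}}, 'cfg')
assert nt.model.depth == 3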
def validate_lines_in_file_gt(fileName, file_contents, CRLF=True, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0):
utf8File = decode_utf8(file_contents)
if (utf8File is None):
raise Exception(('The file %s is not UTF-8' % fileName))
lines = utf8File.split(('\r\n' if CRLF else '\n'))
for line in lines:
line = line.replace('\r', '').replace('\n', '')
if (line != ''):
try:
validate_tl_line_gt(line, LTRB, withTranscription, withConfidence, imWidth, imHeight)
except Exception as e:
                raise Exception(('Line in sample not valid. Sample: %s Line: %s Error: %s' % (fileName, line, str(e))).encode('utf-8', 'replace'))
def load_subtensors(blocks, features):
h_list = []
for block in blocks:
input_nodes = block.srcdata[dgl.NID]
h_list.append(features[input_nodes])
    return h_list
class Register():
instances_counter = itertools.count()
prefix = 'reg'
bit_type = None
def __init__(self, size, name=None):
try:
size = int(size)
except Exception:
raise QiskitError(("Register size must be castable to an int (%s '%s' was provided)" % (type(size).__name__, size)))
if (size <= 0):
raise QiskitError(("Register size must be positive (%s '%s' was provided)" % (type(size).__name__, size)))
if (name is None):
name = ('%s%i' % (self.prefix, next(self.instances_counter)))
else:
try:
name = str(name)
except Exception:
raise QiskitError('The circuit name should be castable to a string (or None for autogenerate a name).')
name_format = re.compile('[a-z][a-zA-Z0-9_]*')
if (name_format.match(name) is None):
raise QiskitError(('%s is an invalid OPENQASM register name.' % name))
self.name = name
self.size = size
def __repr__(self):
return ("%s(%d, '%s')" % (self.__class__.__qualname__, self.size, self.name))
def __len__(self):
return self.size
def __getitem__(self, key):
if (not isinstance(key, (int, slice, list))):
raise QiskitError('expected integer or slice index into register')
if isinstance(key, slice):
return [self.bit_type(self, ind) for ind in range(*key.indices(len(self)))]
elif isinstance(key, list):
if (max(key) < len(self)):
return [self.bit_type(self, ind) for ind in key]
else:
raise QiskitError('register index out of range')
else:
return self.bit_type(self, key)
def __iter__(self):
for bit in range(self.size):
(yield self[bit])
def __eq__(self, other):
res = False
if ((type(self) is type(other)) and (self.name == other.name) and (self.size == other.size)):
res = True
return res
def __hash__(self):
        return hash((type(self), self.name, self.size))
class TestScore(unittest.TestCase):
def test_scoreV1(self):
metric = MoverScoreMetric(version=1, stop_wordsf=None, n_gram=1, remove_subwords=True)
scores = metric.evaluate_batch(CANDS, REFS, aggregate=False)
(score0, score1, score2) = [0., 0., 0.]
self.assertTrue(((scores[0]['mover_score'] - score0) < EPS))
self.assertTrue(((scores[1]['mover_score'] - score1) < EPS))
self.assertTrue(((scores[2]['mover_score'] - score2) < EPS))
def test_score_singleV1(self):
metric = MoverScoreMetric(version=1, stop_wordsf=None, n_gram=1, remove_subwords=True)
score = metric.evaluate_example(CANDS[0], REFS[0])
ref_score = 0.
self.assertTrue(((score['mover_score'] - ref_score) < EPS))
def test_scoreV2(self):
metric = MoverScoreMetric(version=2, stop_wordsf=None, n_gram=1, remove_subwords=True)
scores = metric.evaluate_batch(CANDS, REFS, aggregate=False)
(score0, score1, score2) = [0., 0., 0.]
self.assertTrue(((scores[0]['mover_score'] - score0) < EPS))
self.assertTrue(((scores[1]['mover_score'] - score1) < EPS))
self.assertTrue(((scores[2]['mover_score'] - score2) < EPS))
def test_score_singleV2(self):
metric = MoverScoreMetric(version=2, stop_wordsf=None, n_gram=1, remove_subwords=True)
score = metric.evaluate_example(CANDS[0], REFS[0])
ref_score = 0.
        self.assertTrue(((score['mover_score'] - ref_score) < EPS))
def interpolation(model, dataloader, n_samples, epoch=0, writer=None):
if args.log_interval:
print('{:<2} {:<4}'.format('', ('Image interpolation...' + (24 * ' '))), end='\r')
n_samples += 2
(imgs, _) = next(iter(dataloader))
    (idx1, idx2) = (random.randint(0, imgs.shape[0] - 1), random.randint(0, imgs.shape[0] - 1))
(img_1, img_2) = (imgs[idx1].to(args.device).unsqueeze(0), imgs[idx2].to(args.device).unsqueeze(0))
models_name = (model.module.__class__.__name__ if isinstance(model, nn.DataParallel) else model.__class__.__name__)
if (models_name in ['VAE']):
encoder = (model.module.q_z if isinstance(model, nn.DataParallel) else model.q_z)
decoder = (model.module.p_x if isinstance(model, nn.DataParallel) else model.p_x)
reparameterize = (model.module.reparameterize if isinstance(model, nn.DataParallel) else model.reparameterize)
sample_distribution = (model.module.sample_distribution if isinstance(model, nn.DataParallel) else model.sample_distribution)
(z1_mu, z1_logvar) = encoder(img_1)
(z2_mu, z2_logvar) = encoder(img_2)
z1 = reparameterize(z1_mu, z1_logvar)
z2 = reparameterize(z2_mu, z2_logvar)
interpolation_space = np.linspace(z1.cpu().detach().numpy(), z2.cpu().detach().numpy(), n_samples)
code_list = []
for code in interpolation_space:
z = (torch.from_numpy(code).float().to(args.device) * torch.ones(*z1.shape).to(args.device))
code_list.append(z)
z = torch.stack(code_list, dim=0).squeeze(1)
x_logits = decoder(z)
sample_distribution = (model.module.sample_distribution if isinstance(model, nn.DataParallel) else model.sample_distribution)
x_hat = sample_distribution(x_logits)
if writer:
writer.add_image('image_completion/x_rec', make_grid(x_hat, nrow=n_rows, normalize=TF_NORMALIZE), epoch)
writer.add_image('image_completion/x', make_grid(x_img, nrow=n_rows, normalize=TF_NORMALIZE), epoch)
else:
save_image(make_grid(x_hat, nrow=n_samples, normalize=JPG_NORMALIZE), 'images/image_interpolation.jpg')
elif (models_name in ['TwoStagedVAE']):
q_u = (model.module.q_u if isinstance(model, nn.DataParallel) else model.q_u)
p_y = (model.module.p_y if isinstance(model, nn.DataParallel) else model.p_y)
super_resolution = (model.module.super_resolution if isinstance(model, nn.DataParallel) else model.super_resolution)
reparameterize = (model.module.reparameterize if isinstance(model, nn.DataParallel) else model.reparameterize)
sample_distribution = (model.module.sample_distribution if isinstance(model, nn.DataParallel) else model.sample_distribution)
(y_img_1, y_img_2) = (model.module.compressed_transoformation(img_1), model.module.compressed_transoformation(img_2))
(z1_mu, z1_logvar) = q_u(y_img_1)
(z2_mu, z2_logvar) = q_u(y_img_2)
z1 = reparameterize(z1_mu, z1_logvar)
z2 = reparameterize(z2_mu, z2_logvar)
interpolation_space = np.linspace(z1.cpu().detach().numpy(), z2.cpu().detach().numpy(), n_samples)
code_list = []
for code in interpolation_space:
z = (torch.from_numpy(code).float().to(args.device) * torch.ones(*z1.shape).to(args.device))
code_list.append(z)
z = torch.stack(code_list, dim=0).squeeze(1)
y_logits = p_y(z)
y_hat = sample_distribution(y_logits)
x_hat = super_resolution(y_hat)
if writer:
            writer.add_image('image_completion/x_rec', make_grid(x_hat, nrow=n_samples, normalize=TF_NORMALIZE), epoch)
else:
save_image(make_grid(x_hat, nrow=n_samples, normalize=JPG_NORMALIZE), 'images/image_interpolation.jpg')
else:
pass
    return
class Unobservable(WorldObject):
char = '*'
def __init__(self, name='fog'):
        super().__init__(name)
@requires(EvaluateGradientVariance)
class AnalyzeGradientVariance(AutoNamingTask):
DataGeneration_params = luigi.DictParameter(default=DataGeneration_params)
EvaluateGradientVariance_params = luigi.DictParameter(default=EvaluateGradientVariance_params)
data_seed = luigi.IntParameter(default=43)
train_seed = luigi.IntParameter(default=44)
def run_task(self, input_list):
grad_list_dict = input_list[0]
cov_dict = {}
std_list = []
for each_param in grad_list_dict:
grad = torch.stack(grad_list_dict[each_param]).detach().numpy()
cov_dict[each_param] = np.cov(grad.T)
diag_std = (cov_dict[each_param].diagonal() ** 0.5)
logger.info(f' ** diag std of {each_param} **')
logger.info(str(diag_std))
std_list = (std_list + list(diag_std[np.where((diag_std > 0))]))
logger.info(' * E[std] = {}'.format(np.mean(std_list)))
        return cov_dict
class Conv3DPrelu(Layer):
def __init__(self, incoming, num_filters, filter_size=3, stride=(1, 1, 1), pad='same', W=lasagne.init.HeNormal(), b=None, **kwargs):
ensure_set_name('conv3d_prelu', kwargs)
super(Conv3DPrelu, self).__init__(incoming, **kwargs)
self.conv = Conv3D(incoming, num_filters, filter_size, stride, pad=pad, W=W, b=b, nonlinearity=None, **kwargs)
self.prelu = prelu(self.conv, **kwargs)
self.params = self.conv.params.copy()
self.params.update(self.prelu.params)
def get_output_for(self, input, **kwargs):
out_conv = self.conv.get_output_for(input)
out_prelu = self.prelu.get_output_for(out_conv)
return out_prelu
def get_output_shape_for(self, input, **kwargs):
        return self.conv.get_output_shape_for(input)
def main():
cl_args = parse_command_line()
config['args'].update(vars(cl_args))
if (config['args']['bases'] is not None):
assert (config['args']['bases'] in ['dct', 'svd']), 'Invalid bases: {}'.format(config['args']['bases'])
config['bases_to_use'] = config['args']['bases']
if (config['args']['input'] is not None):
assert (config['args']['input'] in ['det', 'gt']), 'Invalid input: {}'.format(config['args']['input'])
config['input_data'] = config['args']['input']
if (config['args']['nframes'] is not None):
config['n_frames'] = config['args']['nframes']
if (config['args']['nbases'] is not None):
config['n_bases'] = config['args']['nbases']
if (config['args']['gpu'] is not None):
config['gpus'] = [config['args']['gpu']]
exp_tag = '{}_F{}_k{}_{}_{}'.format(datetime.datetime.now().strftime('%m%d_%H%M%S'), config['n_frames'], config['n_bases'], config['bases_to_use'], config['input_data'])
exp_path = os.path.join(config['exp_root'], exp_tag)
config['exp_tag'] = exp_tag
config['exp_path'] = exp_path
mkdir(exp_path)
logger = logging.getLogger()
coloredlogs.install(level='DEBUG', logger=logger)
fileHandler = logging.FileHandler(os.path.join(exp_path, 'log.txt'))
logFormatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s - %(message)s')
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
config['logger'] = logger
logger.info(sys.argv)
gpus = ','.join([str(x) for x in config['gpus']])
os.environ['CUDA_VISIBLE_DEVICES'] = gpus
logger.info('Set CUDA_VISIBLE_DEVICES to {}'.format(gpus))
model = PoseNet(config)
model = nn.DataParallel(model)
model = model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=config['train']['init_lr'])
if (config['bases_to_use'] == 'svd'):
fixed_bases = np.load(config['bases_path'])
assert ((config['n_bases'] <= fixed_bases.shape[0]) and (config['n_frames'] == fixed_bases.shape[1])), fixed_bases.shape
fixed_bases = fixed_bases[:config['n_bases']]
fixed_bases *= np.sqrt(25)
elif (config['bases_to_use'] == 'dct'):
x = np.arange(config['n_frames'])
fixed_bases = [(np.ones([config['n_frames']]) * np.sqrt(0.5))]
for i in range(1, config['n_bases']):
fixed_bases.append(np.cos(((i * np.pi) * ((x + 0.5) / config['n_frames']))))
fixed_bases = np.array(fixed_bases)
else:
assert False, config['bases_to_use']
config['bases'] = fixed_bases
fixed_bases = torch.from_numpy(fixed_bases).float()
fixed_bases = fixed_bases.view(1, config['n_bases'], config['n_frames'])
train_dataset = H36M_Dataset(config, 'train')
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=(config['batch_size_per_gpu'] * len(config['gpus'])), shuffle=True, num_workers=config['num_workers'], pin_memory=True)
test_dataset = H36M_Dataset(config, 'test')
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=(config['batch_size_per_gpu'] * len(config['gpus'])), shuffle=False, num_workers=config['num_workers'], pin_memory=True)
tot_step = 0
for epoch in range(config['train']['num_epochs']):
if (epoch in [60, 85]):
optimizer.param_groups[0]['lr'] = (optimizer.param_groups[0]['lr'] * config['train']['lr_decay'])
logger.info('Learning rate set to {}'.format(optimizer.param_groups[0]['lr']))
model.train()
for (step, batch) in enumerate(train_loader):
data_2d_gt = batch['data_2d_gt']
data_2d_cpn = batch['data_2d_cpn']
if (config['input_data'] == 'det'):
data_2d = data_2d_cpn
elif (config['input_data'] == 'gt'):
data_2d = data_2d_gt
else:
assert False, config['input_data']
data_3d = batch['data_3d']
mean_3d = batch['mean_3d']
std_3d = batch['std_3d']
B = data_3d.shape[0]
batch_bases = fixed_bases.repeat(B, 1, 1)
data_2d = data_2d.cuda()
data_3d = data_3d.cuda()
batch_bases = batch_bases.cuda()
mean_3d = mean_3d.cuda()
std_3d = std_3d.cuda()
coeff = model(data_2d, batch_bases)
loss = model.module.build_loss_training(coeff, batch_bases, data_3d, mean_3d, std_3d)
optimizer.zero_grad()
loss.backward()
optimizer.step()
tot_step += 1
if (('log_per_n_iterations' in config['train']) and (((step + 1) % config['train']['log_per_n_iterations']) == 0)):
logger.info('TRAIN Epoch {}, step {}/{} ({}): loss = {:.6f}'.format((epoch + 1), (step + 1), len(train_loader), tot_step, loss.item()))
model.eval()
logger.info('Testing on test set...')
total_loss = AverageMeter()
gts_3d = []
preds_3d = []
indices = []
with torch.no_grad():
for (step, batch) in enumerate(test_loader):
data_2d_gt = batch['data_2d_gt']
data_2d_cpn = batch['data_2d_cpn']
if (config['input_data'] == 'det'):
data_2d = data_2d_cpn
elif (config['input_data'] == 'gt'):
data_2d = data_2d_gt
else:
assert False, config['input_data']
data_2d_gt_flip = batch['data_2d_gt_flip']
data_2d_cpn_flip = batch['data_2d_cpn_flip']
if (config['input_data'] == 'det'):
data_2d_flip = data_2d_cpn_flip
elif (config['input_data'] == 'gt'):
data_2d_flip = data_2d_gt_flip
else:
assert False, config['input_data']
data_3d = batch['data_3d']
data_3d_flip = batch['data_3d_flip']
mean_3d = batch['mean_3d']
std_3d = batch['std_3d']
idx = batch['idx']
B = data_3d.shape[0]
batch_bases = fixed_bases.repeat(B, 1, 1)
data_2d = data_2d.cuda()
data_2d_flip = data_2d_flip.cuda()
data_3d = data_3d.cuda()
data_3d_flip = data_3d_flip.cuda()
batch_bases = batch_bases.cuda()
mean_3d = mean_3d.cuda()
std_3d = std_3d.cuda()
coeff = model(data_2d, batch_bases)
coeff_flip = model(data_2d_flip, batch_bases)
(loss, res) = model.module.build_loss_test((coeff, coeff_flip), batch_bases, (data_3d, data_3d_flip), mean_3d, std_3d)
(pred_3d, gt_3d) = res
total_loss.add(loss.item())
preds_3d.append(pred_3d)
gts_3d.append(gt_3d)
indices.append(idx.data.numpy())
avg_loss = total_loss.value()
logger.info('Test loss: {}'.format(avg_loss))
if (epoch == (config['train']['num_epochs'] - 1)):
preds_3d = np.concatenate(preds_3d, 0)
gts_3d = np.concatenate(gts_3d, 0)
indices = np.concatenate(indices, 0)
h36m_evaluate(preds_3d, gts_3d, indices, test_dataset, config)
    save_ckpt(model, exp_path)
@pytest.mark.parametrize('dtype', [torch.float32, torch.complex64])
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('log_n', [4, 10, 12])
def test_block_diag_butterfly_multiply_reference(log_n, device, dtype):
torch.random.manual_seed(0)
n = (1 << log_n)
sqrtn = (1 << (log_n // 2))
batch_size = 3
x = torch.randn(batch_size, n, device=device, dtype=dtype, requires_grad=True)
w1_bfly = torch.randn(sqrtn, sqrtn, sqrtn, device=x.device, dtype=x.dtype, requires_grad=True)
w2_bfly = torch.randn(sqrtn, sqrtn, sqrtn, device=x.device, dtype=x.dtype, requires_grad=True)
out1 = blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly, version=1)
out2 = blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly, version=2)
out3 = blockdiag_butterfly_multiply_reference(x, w1_bfly, w2_bfly, version=3)
assert torch.allclose(out1, out2, rtol=0.0001, atol=0.0001)
assert torch.allclose(out2, out3, rtol=0.0001, atol=0.0001) |
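# Hedged sketch of the product the three reference versions above should agree on: a
# Monarch-style factorization where x is reshaped into k blocks, multiplied by the
# block-diagonal w1_bfly, re-grouped across blocks, then multiplied by the
# block-diagonal w2_bfly. This einsum version is an illustration, not the repo's code.
import torch
def blockdiag_butterfly_multiply_sketch(x, w1_bfly, w2_bfly):
    (batch, n) = x.shape
    (k, q, p) = w1_bfly.shape  # k diagonal blocks of shape (q, p), with k * p == n
    (l, s, r) = w2_bfly.shape  # l diagonal blocks of shape (s, r), with l * r == k * q
    out1 = torch.einsum('kqp,bkp->bkq', w1_bfly, x.reshape(batch, k, p))
    out1 = out1.reshape(batch, r, l).transpose(1, 2)  # regroup 'b (r l) -> b l r'
    out2 = torch.einsum('lsr,blr->bls', w2_bfly, out1)
    return out2.reshape(batch, (l * s)) |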
def convert_pytorch_to_onnx(model, dimension, n_channels, gpu_id=0):
if torch.cuda.is_available():
device = ('cuda:' + str(gpu_id))
else:
device = 'cpu'
model_net = torch.load(model, map_location=device)
dummy_input = (torch.randn(1, n_channels, 96, 96, device=device) if (dimension == 2) else torch.randn(1, n_channels, 96, 96, 96, device=device))
    imed_utils.save_onnx_model(model_net, dummy_input, model.replace('.pt', '.onnx')) |
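# Hypothetical invocation of the converter above; the checkpoint path, dimension and
# channel count are illustrative assumptions, not values from this repo. A 2D model
# gets a (1, C, 96, 96) dummy input, a 3D model a (1, C, 96, 96, 96) one.
convert_pytorch_to_onnx('path/to/model.pt', dimension=3, n_channels=1, gpu_id=0) |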
class MJDATA(Structure):
_fields_ = [('nstack', c_int), ('nbuffer', c_int), ('pstack', c_int), ('maxstackuse', c_int), ('ne', c_int), ('nf', c_int), ('nefc', c_int), ('ncon', c_int), ('nwarning', (c_int * 8)), ('warning_info', (c_int * 8)), ('timer_duration', (c_double * 14)), ('timer_ncall', (c_double * 14)), ('mocaptime', (c_double * 3)), ('time', c_double), ('energy', (c_double * 2)), ('solverstat', (c_double * 4)), ('solvertrace', (c_double * 200)), ('buffer', POINTER(c_ubyte)), ('stack', POINTER(c_double)), ('qpos', POINTER(c_double)), ('qvel', POINTER(c_double)), ('act', POINTER(c_double)), ('ctrl', POINTER(c_double)), ('qfrc_applied', POINTER(c_double)), ('xfrc_applied', POINTER(c_double)), ('qacc', POINTER(c_double)), ('act_dot', POINTER(c_double)), ('mocap_pos', POINTER(c_double)), ('mocap_quat', POINTER(c_double)), ('userdata', POINTER(c_double)), ('sensordata', POINTER(c_double)), ('xpos', POINTER(c_double)), ('xquat', POINTER(c_double)), ('xmat', POINTER(c_double)), ('xipos', POINTER(c_double)), ('ximat', POINTER(c_double)), ('xanchor', POINTER(c_double)), ('xaxis', POINTER(c_double)), ('geom_xpos', POINTER(c_double)), ('geom_xmat', POINTER(c_double)), ('site_xpos', POINTER(c_double)), ('site_xmat', POINTER(c_double)), ('cam_xpos', POINTER(c_double)), ('cam_xmat', POINTER(c_double)), ('light_xpos', POINTER(c_double)), ('light_xdir', POINTER(c_double)), ('com_subtree', POINTER(c_double)), ('cdof', POINTER(c_double)), ('cinert', POINTER(c_double)), ('ten_wrapadr', POINTER(c_int)), ('ten_wrapnum', POINTER(c_int)), ('ten_length', POINTER(c_double)), ('ten_moment', POINTER(c_double)), ('wrap_obj', POINTER(c_int)), ('wrap_xpos', POINTER(c_double)), ('actuator_length', POINTER(c_double)), ('actuator_moment', POINTER(c_double)), ('crb', POINTER(c_double)), ('qM', POINTER(c_double)), ('qLD', POINTER(c_double)), ('qLDiagInv', POINTER(c_double)), ('qLDiagSqrtInv', POINTER(c_double)), ('contact', POINTER(MJCONTACT)), ('efc_type', POINTER(c_int)), ('efc_id', POINTER(c_int)), ('efc_rownnz', POINTER(c_int)), ('efc_rowadr', POINTER(c_int)), ('efc_colind', POINTER(c_int)), ('efc_rownnz_T', POINTER(c_int)), ('efc_rowadr_T', POINTER(c_int)), ('efc_colind_T', POINTER(c_int)), ('efc_solref', POINTER(c_double)), ('efc_solimp', POINTER(c_double)), ('efc_margin', POINTER(c_double)), ('efc_frictionloss', POINTER(c_double)), ('efc_pos', POINTER(c_double)), ('efc_J', POINTER(c_double)), ('efc_J_T', POINTER(c_double)), ('efc_diagApprox', POINTER(c_double)), ('efc_D', POINTER(c_double)), ('efc_R', POINTER(c_double)), ('efc_AR', POINTER(c_double)), ('e_ARchol', POINTER(c_double)), ('fc_e_rect', POINTER(c_double)), ('fc_AR', POINTER(c_double)), ('ten_velocity', POINTER(c_double)), ('actuator_velocity', POINTER(c_double)), ('cvel', POINTER(c_double)), ('cdof_dot', POINTER(c_double)), ('qfrc_bias', POINTER(c_double)), ('qfrc_passive', POINTER(c_double)), ('efc_vel', POINTER(c_double)), ('efc_aref', POINTER(c_double)), ('actuator_force', POINTER(c_double)), ('qfrc_actuator', POINTER(c_double)), ('qfrc_unc', POINTER(c_double)), ('qacc_unc', POINTER(c_double)), ('efc_b', POINTER(c_double)), ('fc_b', POINTER(c_double)), ('efc_force', POINTER(c_double)), ('qfrc_constraint', POINTER(c_double)), ('qfrc_inverse', POINTER(c_double)), ('cacc', POINTER(c_double)), ('cfrc_int', POINTER(c_double)), ('cfrc_ext', POINTER(c_double))] |
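# Sketch (under stated assumptions) of how the POINTER(c_double) fields above are
# typically exposed to Python: np.ctypeslib.as_array wraps the C buffer as a NumPy
# view without copying. `nq`, the number of position coordinates, would come from the
# companion MJMODEL struct, which is not shown here.
import numpy as np
def qpos_view(mjdata_ptr, nq):
    # mjdata_ptr: ctypes POINTER(MJDATA); returns a zero-copy (nq,) float64 view
    return np.ctypeslib.as_array(mjdata_ptr.contents.qpos, shape=(nq,)) |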
def graph_to_text(graph: EBMGraph, include_description=True, feature_format=None, x_axis_precision=None, y_axis_precision='auto', confidence_bounds=True, confidence_level=0.95, max_tokens=3000):
try:
if ((len(graph.x_vals) == 2) and (graph.x_vals[0].upper() == 'FALSE') and (graph.x_vals[1].upper() == 'TRUE')):
feature_format = 'boolean'
    except Exception:
pass
if (feature_format is None):
feature_format = graph.feature_type
if (feature_format == 'nominal'):
feature_format = 'categorical'
if (feature_format == 'continuous'):
description_text = 'This graph represents a continuous-valued feature. The keys are intervals that represent ranges where the function predicts the same value.\n\n'
elif (feature_format == 'categorical'):
        description_text = 'This graph represents a categorical feature. Each key represents a possible value that the feature can take.\n\n'
elif (feature_format == 'boolean'):
description_text = "This graph represents a boolean feature. The keys are 'True' and 'False', the two possible values of the feature.\n\n"
else:
raise Exception(f'Unknown feature format {feature_format}')
total_tokens = None
min_variation_per_cent = 0.0
while True:
simplified_graph = graph
if (feature_format == 'continuous'):
simplified_graph = simplify_graph(graph, min_variation_per_cent=min_variation_per_cent)
scores = simplified_graph.scores
if confidence_bounds:
factor = scipy.stats.norm.interval(confidence_level, loc=0, scale=1)[1]
lower_bounds = [(scores[idx] - (factor * simplified_graph.stds[idx])) for idx in range(len(scores))]
upper_bounds = [(scores[idx] + (factor * simplified_graph.stds[idx])) for idx in range(len(scores))]
x_vals = simplified_graph.x_vals
if (x_axis_precision is not None):
x_vals = [(np.round(x[0], x_axis_precision).astype(str), np.round(x[1], x_axis_precision).astype(str)) for x in x_vals]
if (y_axis_precision == 'auto'):
total_variation = (np.max(scores) - np.min(scores))
y_fraction = (total_variation / 100.0)
y_axis_precision = 1
while (y_fraction < 1):
y_fraction *= 10
y_axis_precision += 1
scores = np.round(scores, y_axis_precision).astype(str)
if confidence_bounds:
lower_bounds = np.round(lower_bounds, y_axis_precision).astype(str)
upper_bounds = np.round(upper_bounds, y_axis_precision).astype(str)
if (feature_format == 'boolean'):
            assert (len(x_vals) == 2), f'Requested a boolean format, but the feature has more than two x-axis values: {x_vals}'
x_vals = ['False', 'True']
prompt = ''
if include_description:
prompt += description_text
prompt += f'''Feature Name: {graph.feature_name}
'''
prompt += f'''Feature Type: {feature_format}
'''
prompt += f'''Means: {xy_to_json_(x_vals, scores)}
'''
if confidence_bounds:
prompt += f'''Lower Bounds ({(confidence_level * 100):.0f}%-Confidence Interval): {xy_to_json_(x_vals, lower_bounds)}
'''
prompt += f'''Upper Bounds ({(confidence_level * 100):.0f}%-Confidence Interval): {xy_to_json_(x_vals, upper_bounds)}
'''
total_tokens = num_tokens_from_string_(prompt, 'gpt-4')
if (feature_format == 'continuous'):
if (total_tokens > max_tokens):
if (min_variation_per_cent > 0.1):
                    raise Exception(f'The graph for feature {graph.feature_name} of type {graph.feature_type} requires {total_tokens} tokens even at a simplification level of 10%. This graph is too complex to be passed to the LLM within the token limit of {max_tokens} tokens.')
min_variation_per_cent += 0.001
else:
if (min_variation_per_cent > 0):
print(f'INFO: The graph of feature {graph.feature_name} was simplified by {(min_variation_per_cent * 100):.1f}%.')
return prompt
elif (total_tokens > max_tokens):
raise Exception(f'The graph for feature {graph.feature_name} of type {graph.feature_type} requires {total_tokens} tokens and exceeds the token limit of {max_tokens}.')
else:
return prompt |
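# Worked check of the confidence-bound factor used above: for the default 95% level,
# scipy.stats.norm.interval returns z ~= 1.96, so each bound written into the prompt
# is the per-bin mean score +/- 1.96 * std.
import scipy.stats
factor = scipy.stats.norm.interval(0.95, loc=0, scale=1)[1]
assert abs(factor - 1.959964) < 1e-05 |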
def train(total_loss, global_step):
num_batches_per_epoch = (NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size)
decay_steps = int((num_batches_per_epoch * NUM_EPOCHS_PER_DECAY))
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True)
tf.summary.scalar('learning_rate', lr)
loss_averages_op = _add_loss_summaries(total_loss)
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
for (grad, var) in grads:
if (grad is not None):
tf.summary.histogram((var.op.name + '/gradients'), grad)
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op |
class FlaxRobertaPreLayerNormForMultipleChoice(metaclass=DummyObject):
_backends = ['flax']
def __init__(self, *args, **kwargs):
requires_backends(self, ['flax']) |
class BoundFlatten(torch.nn.Module):
def __init__(self, bound_opts=None):
super(BoundFlatten, self).__init__()
self.bound_opts = bound_opts
def forward(self, x):
self.shape = x.size()[1:]
return x.view(x.size(0), (- 1))
def interval_propagate(self, norm, h_U, h_L, eps):
return (norm, h_U.view(h_U.size(0), (- 1)), h_L.view(h_L.size(0), (- 1)), 0, 0, 0, 0)
def bound_backward(self, last_uA, last_lA):
def _bound_oneside(A):
if (A is None):
return None
return A.view(A.size(0), A.size(1), *self.shape)
if (self.bound_opts.get('same-slope', False) and (last_uA is not None) and (last_lA is not None)):
new_bound = _bound_oneside(last_uA)
return (new_bound, 0, new_bound, 0)
else:
return (_bound_oneside(last_uA), 0, _bound_oneside(last_lA), 0) |
class DetLoss(nn.Module):
def __init__(self, loss_type='bce', ignore_index=(- 1)):
super(DetLoss, self).__init__()
self.loss_type = loss_type
self.ignore_index = ignore_index
if (loss_type == 'ghm'):
print('Use Gradient Harmonized Loss')
from modules.ghm_loss import GHMC_Loss
self.GHMC_Loss = GHMC_Loss(bins=30, momentum=0.75)
def forward(self, det_score, gt_score):
gt_score = gt_score.unsqueeze(0).repeat(det_score.size(0), 1)
if ('bce' in self.loss_type):
loss = F.binary_cross_entropy_with_logits(det_score, gt_score)
if ('l2' in self.loss_type):
            mask = gt_score.ne(self.ignore_index)
loss = F.mse_loss(det_score.mul(mask.float()), gt_score)
if ('l1' in self.loss_type):
            mask = gt_score.ne(self.ignore_index)
loss = F.smooth_l1_loss(det_score.mul(mask.float()), gt_score)
if ('ghm' in self.loss_type):
            mask = gt_score.ne(self.ignore_index)
loss = self.GHMC_Loss(det_score, gt_score, mask)
return loss |
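# Minimal hedged usage of DetLoss: det_score holds logits for 2 score maps over 5
# locations and is compared against one shared ground-truth vector, which forward()
# tiles across the first dimension. Values are illustrative.
import torch
loss_fn = DetLoss(loss_type='bce')
det_score = torch.randn(2, 5)
gt_score = torch.rand(5)
print(loss_fn(det_score, gt_score)) |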
@pytest.mark.parametrize('shape', [[5, 3], [2, 4, 5], [2, 7, 5]])
def test_ifftshift(shape):
    input = np.arange(np.prod(shape)).reshape(shape)
out_torch = transforms.ifftshift(torch.from_numpy(input)).numpy()
out_numpy = np.fft.ifftshift(input)
assert np.allclose(out_torch, out_numpy) |
class VecEnv(ABC):
closed = False
viewer = None
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
pass
def step_async(self, actions):
pass
def step_wait(self):
pass
def close_extras(self):
pass
def close(self):
if self.closed:
return
if (self.viewer is not None):
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if (mode == 'human'):
self.get_viewer().imshow(bigimg)
elif (mode == 'rgb_array'):
return bigimg
else:
raise NotImplementedError
def get_images(self):
raise NotImplementedError
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if (self.viewer is None):
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer |
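# Hedged sketch of the step_async/step_wait contract above: a trivial serial subclass
# that steps a list of gym-style envs one by one. The per-env API assumed here
# (reset/step returning obs, reward, done, info) is illustrative.
class SerialVecEnv(VecEnv):
    def __init__(self, envs):
        self.envs = envs
        super().__init__(len(envs), envs[0].observation_space, envs[0].action_space)
    def reset(self):
        return [env.reset() for env in self.envs]
    def step_async(self, actions):
        self._actions = actions
    def step_wait(self):
        results = [env.step(a) for (env, a) in zip(self.envs, self._actions)]
        (obs, rews, dones, infos) = map(list, zip(*results))
        return (obs, rews, dones, infos)
    def get_images(self):
        return [env.render(mode='rgb_array') for env in self.envs] |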
class dataset_it_dtc(Dataset):
def __init__(self, data_dir, input1, num_classes, transform_1, queue_length=20, samples_per_volume=5, patch_size=128, num_workers=8, shuffle_subjects=True, shuffle_patches=True, sup=True, num_images=None):
super(dataset_it_dtc, self).__init__()
self.subjects_1 = []
image_dir_1 = ((data_dir + '/') + input1)
if sup:
mask_dir_1 = (data_dir + '/mask')
mask_dir_2 = (data_dir + '/mask_sdf1')
if (num_classes == 3):
mask_dir_3 = (data_dir + '/mask_sdf2')
for i in os.listdir(image_dir_1):
image_path_1 = os.path.join(image_dir_1, i)
if sup:
mask_path_1 = os.path.join(mask_dir_1, i)
mask_path_2 = os.path.join(mask_dir_2, i)
if (num_classes == 3):
mask_path_3 = os.path.join(mask_dir_3, i)
subject_1 = tio.Subject(image=tio.ScalarImage(image_path_1), mask=tio.LabelMap(mask_path_1), mask2=tio.LabelMap(mask_path_2), mask3=tio.LabelMap(mask_path_3), ID=i)
else:
subject_1 = tio.Subject(image=tio.ScalarImage(image_path_1), mask=tio.LabelMap(mask_path_1), mask2=tio.LabelMap(mask_path_2), ID=i)
else:
subject_1 = tio.Subject(image=tio.ScalarImage(image_path_1), ID=i)
self.subjects_1.append(subject_1)
if (num_images is not None):
len_img_paths = len(self.subjects_1)
quotient = (num_images // len_img_paths)
remainder = (num_images % len_img_paths)
if (num_images <= len_img_paths):
self.subjects_1 = self.subjects_1[:num_images]
else:
rand_indices = torch.randperm(len_img_paths).tolist()
new_indices = rand_indices[:remainder]
self.subjects_1 = (self.subjects_1 * quotient)
self.subjects_1 += [self.subjects_1[i] for i in new_indices]
self.dataset_1 = tio.SubjectsDataset(self.subjects_1, transform=transform_1)
self.queue_train_set_1 = tio.Queue(subjects_dataset=self.dataset_1, max_length=queue_length, samples_per_volume=samples_per_volume, sampler=UniformSampler(patch_size), num_workers=num_workers, shuffle_subjects=shuffle_subjects, shuffle_patches=shuffle_patches) |
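# Hedged usage note: tio.Queue is itself a map-style dataset, so the queue built above
# is normally wrapped in a DataLoader with num_workers=0 (the queue already
# parallelizes patch loading internally). The path and sizes here are illustrative.
import torch
ds = dataset_it_dtc('data', 'image', num_classes=2, transform_1=None, sup=False, patch_size=64)
loader = torch.utils.data.DataLoader(ds.queue_train_set_1, batch_size=2, num_workers=0) |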
def add_memory_planning_flag(prefix_list, memory_planning):
if (memory_planning == 'cycle_buffer'):
print('cycle buffer')
elif (memory_planning == 'unified_buffer'):
prefix_list[:] = [(prefix + ' UNIFIED_BUFFER=1 ') for prefix in prefix_list]
print('unified_buffer')
elif (memory_planning == 'auto'):
prefix_list += [(prefix + ' UNIFIED_BUFFER=1 ') for prefix in prefix_list]
print('auto memory planning')
else:
        print('incorrect memory planning option')
return prefix_list |
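# Worked example of the helper above: 'unified_buffer' rewrites every prefix in place,
# while 'auto' appends UNIFIED_BUFFER variants so both versions get built.
prefixes = ['make run']
print(add_memory_planning_flag(list(prefixes), 'unified_buffer'))  # ['make run UNIFIED_BUFFER=1 ']
print(add_memory_planning_flag(list(prefixes), 'auto'))  # ['make run', 'make run UNIFIED_BUFFER=1 '] |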
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
output_dir = Path(training_args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
checkpoint = None
if ((len(os.listdir(training_args.output_dir)) > 0) and (not training_args.overwrite_output_dir)):
if ((output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file()):
checkpoint = output_dir
logger.info(f'Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
else:
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to continue regardless.')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
logger.setLevel((logging.INFO if training_args.should_log else logging.WARN))
if training_args.should_log:
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f'Training/evaluation parameters {training_args}')
set_seed(training_args.seed)
if (data_args.dataset_name is not None):
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
else:
data_files = {}
if (data_args.train_file is not None):
data_files['train'] = data_args.train_file
extension = data_args.train_file.split('.')[(- 1)]
if (data_args.validation_file is not None):
data_files['validation'] = data_args.validation_file
extension = data_args.validation_file.split('.')[(- 1)]
if (data_args.test_file is not None):
data_files['test'] = data_args.test_file
extension = data_args.test_file.split('.')[(- 1)]
datasets = load_dataset(extension, data_files=data_files, field='data', cache_dir=model_args.cache_dir)
config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
if (not isinstance(tokenizer, PreTrainedTokenizerFast)):
raise ValueError('This example script only works for models that have a fast tokenizer. Checkout the big table of models at to find the model types that meet this requirement')
if training_args.do_train:
column_names = datasets['train'].column_names
elif training_args.do_eval:
column_names = datasets['validation'].column_names
else:
column_names = datasets['test'].column_names
question_column_name = ('question' if ('question' in column_names) else column_names[0])
context_column_name = ('context' if ('context' in column_names) else column_names[1])
answer_column_name = ('answers' if ('answers' in column_names) else column_names[2])
pad_on_right = (tokenizer.padding_side == 'right')
if (data_args.max_seq_length > tokenizer.model_max_length):
logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for themodel ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def prepare_train_features(examples):
examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
tokenized_examples = tokenizer(examples[(question_column_name if pad_on_right else context_column_name)], examples[(context_column_name if pad_on_right else question_column_name)], truncation=('only_second' if pad_on_right else 'only_first'), max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding=('max_length' if data_args.pad_to_max_length else False))
sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
offset_mapping = tokenized_examples.pop('offset_mapping')
tokenized_examples['start_positions'] = []
tokenized_examples['end_positions'] = []
for (i, offsets) in enumerate(offset_mapping):
input_ids = tokenized_examples['input_ids'][i]
cls_index = input_ids.index(tokenizer.cls_token_id)
sequence_ids = tokenized_examples.sequence_ids(i)
sample_index = sample_mapping[i]
answers = examples[answer_column_name][sample_index]
if (len(answers['answer_start']) == 0):
tokenized_examples['start_positions'].append(cls_index)
tokenized_examples['end_positions'].append(cls_index)
else:
start_char = answers['answer_start'][0]
end_char = (start_char + len(answers['text'][0]))
token_start_index = 0
while (sequence_ids[token_start_index] != (1 if pad_on_right else 0)):
token_start_index += 1
token_end_index = (len(input_ids) - 1)
while (sequence_ids[token_end_index] != (1 if pad_on_right else 0)):
token_end_index -= 1
if (not ((offsets[token_start_index][0] <= start_char) and (offsets[token_end_index][1] >= end_char))):
tokenized_examples['start_positions'].append(cls_index)
tokenized_examples['end_positions'].append(cls_index)
else:
while ((token_start_index < len(offsets)) and (offsets[token_start_index][0] <= start_char)):
token_start_index += 1
tokenized_examples['start_positions'].append((token_start_index - 1))
while (offsets[token_end_index][1] >= end_char):
token_end_index -= 1
tokenized_examples['end_positions'].append((token_end_index + 1))
return tokenized_examples
processed_datasets = dict()
if training_args.do_train:
if ('train' not in datasets):
raise ValueError('--do_train requires a train dataset')
train_dataset = datasets['train']
if (data_args.max_train_samples is not None):
train_dataset = train_dataset.select(range(data_args.max_train_samples))
train_dataset = train_dataset.map(prepare_train_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
if (data_args.max_train_samples is not None):
train_dataset = train_dataset.select(range(data_args.max_train_samples))
processed_datasets['train'] = train_dataset
def prepare_validation_features(examples):
examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
tokenized_examples = tokenizer(examples[(question_column_name if pad_on_right else context_column_name)], examples[(context_column_name if pad_on_right else question_column_name)], truncation=('only_second' if pad_on_right else 'only_first'), max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding=('max_length' if data_args.pad_to_max_length else False))
sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
tokenized_examples['example_id'] = []
for i in range(len(tokenized_examples['input_ids'])):
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = (1 if pad_on_right else 0)
sample_index = sample_mapping[i]
tokenized_examples['example_id'].append(examples['id'][sample_index])
tokenized_examples['offset_mapping'][i] = [(o if (sequence_ids[k] == context_index) else None) for (k, o) in enumerate(tokenized_examples['offset_mapping'][i])]
return tokenized_examples
if training_args.do_eval:
if ('validation' not in datasets):
raise ValueError('--do_eval requires a validation dataset')
eval_examples = datasets['validation']
if (data_args.max_eval_samples is not None):
eval_examples = eval_examples.select(range(data_args.max_eval_samples))
eval_dataset = eval_examples.map(prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
if (data_args.max_eval_samples is not None):
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
processed_datasets['validation'] = eval_dataset
if training_args.do_predict:
if ('test' not in datasets):
raise ValueError('--do_predict requires a test dataset')
predict_examples = datasets['test']
if (data_args.max_predict_samples is not None):
predict_examples = predict_examples.select(range(data_args.max_predict_samples))
predict_dataset = predict_examples.map(prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
if (data_args.max_predict_samples is not None):
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
processed_datasets['test'] = predict_dataset
def post_processing_function(examples, features, predictions, stage='eval'):
predictions = postprocess_qa_predictions(examples=examples, features=features, predictions=predictions, version_2_with_negative=data_args.version_2_with_negative, n_best_size=data_args.n_best_size, max_answer_length=data_args.max_answer_length, null_score_diff_threshold=data_args.null_score_diff_threshold, output_dir=training_args.output_dir, prefix=stage)
if data_args.version_2_with_negative:
formatted_predictions = [{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for (k, v) in predictions.items()]
else:
formatted_predictions = [{'id': k, 'prediction_text': v} for (k, v) in predictions.items()]
references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric(('squad_v2' if data_args.version_2_with_negative else 'squad'))
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
with training_args.strategy.scope():
if (checkpoint is None):
model_path = model_args.model_name_or_path
else:
model_path = checkpoint
model = TFAutoModelForQuestionAnswering.from_pretrained(model_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
optimizer = tf.keras.optimizers.Adam(learning_rate=training_args.learning_rate, beta_1=training_args.adam_beta1, beta_2=training_args.adam_beta2, epsilon=training_args.adam_epsilon, clipnorm=training_args.max_grad_norm)
def dummy_loss(y_true, y_pred):
return tf.reduce_mean(y_pred)
losses = {'loss': dummy_loss}
model.compile(optimizer=optimizer, loss=losses)
if training_args.do_train:
if (isinstance(training_args.strategy, tf.distribute.TPUStrategy) or data_args.pad_to_max_length):
logger.info("Padding all batches to max length because argument was set or we're on TPU.")
dataset_mode = 'constant_batch'
else:
dataset_mode = 'variable_batch'
training_dataset = convert_dataset_for_tensorflow(processed_datasets['train'], batch_size=training_args.per_device_train_batch_size, dataset_mode=dataset_mode, drop_remainder=True, shuffle=True)
model.fit(training_dataset, epochs=int(training_args.num_train_epochs))
if training_args.do_eval:
logger.info('*** Evaluation ***')
eval_inputs = {'input_ids': tf.ragged.constant(processed_datasets['validation']['input_ids']).to_tensor(), 'attention_mask': tf.ragged.constant(processed_datasets['validation']['attention_mask']).to_tensor()}
eval_predictions = model.predict(eval_inputs)
post_processed_eval = post_processing_function(datasets['validation'], processed_datasets['validation'], (eval_predictions.start_logits, eval_predictions.end_logits))
metrics = compute_metrics(post_processed_eval)
logging.info('Evaluation metrics:')
for (metric, value) in metrics.items():
logging.info(f'{metric}: {value:.3f}')
if training_args.do_predict:
logger.info('*** Predict ***')
predict_inputs = {'input_ids': tf.ragged.constant(processed_datasets['test']['input_ids']).to_tensor(), 'attention_mask': tf.ragged.constant(processed_datasets['test']['attention_mask']).to_tensor()}
test_predictions = model.predict(predict_inputs)
post_processed_test = post_processing_function(datasets['test'], processed_datasets['test'], (test_predictions.start_logits, test_predictions.end_logits))
metrics = compute_metrics(post_processed_test)
logging.info('Test metrics:')
for (metric, value) in metrics.items():
logging.info(f'{metric}: {value:.3f}')
if training_args.push_to_hub:
model.push_to_hub() |
class Discriminator(nn.Module):
def __init__(self, input_size, hidden_size, num_layers=2):
super().__init__()
self.cLSTM = cLSTM(input_size, hidden_size, num_layers)
self.out = nn.Sequential(nn.Linear(hidden_size, 1), nn.Sigmoid())
def forward(self, features):
h = self.cLSTM(features)
prob = self.out(h).squeeze()
return (h, prob) |
def validate(val_loader, model, criterion, num_classes, epoch):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.eval()
end = time.time()
for (i, (input, target)) in enumerate(val_loader):
input = input.cuda()
target = target.cuda()
output = model(input)
loss = criterion(output, target)
prec1 = accuracy(output.data, target, topk=(1,))[0]
losses.update(loss.data, input.size(0))
top1.update(prec1, input.size(0))
batch_time.update((time.time() - end))
end = time.time()
if ((i % args.print_freq) == 0):
            print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tPrec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(i, len(val_loader), batch_time=batch_time, loss=losses, top1=top1))
    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
if args.tensorboard:
log_value('val_loss', losses.avg, epoch)
log_value('val_acc', top1.avg, epoch)
return top1.avg |
class LabelSmoothing(CrossEntropy):
def __init__(self, params: dict=None) -> None:
super().__init__()
self.epsilon = params['epsilon']
def forward(self, pred_probs: Tensor, target_probs: Tensor, epsilon: float=None) -> Tensor:
if (epsilon is None):
epsilon = self.epsilon
class_num = pred_probs.shape[1]
new_target_probs = (((1 - epsilon) * target_probs) + ((epsilon * 1.0) / class_num))
return super().forward(pred_probs, new_target_probs) |
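# Worked example of the smoothing rule above with epsilon=0.1 and 4 classes: a
# one-hot target [1, 0, 0, 0] becomes (1 - 0.1) * target + 0.1 / 4
# = [0.925, 0.025, 0.025, 0.025] before the cross-entropy is applied.
import torch
target = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
smoothed = (((1 - 0.1) * target) + (0.1 / 4))
assert torch.allclose(smoothed, torch.tensor([[0.925, 0.025, 0.025, 0.025]])) |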
class QuantizableInception3(inception_module.Inception3):
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
super(QuantizableInception3, self).__init__(num_classes=num_classes, aux_logits=aux_logits, transform_input=transform_input, inception_blocks=[QuantizableBasicConv2d, QuantizableInceptionA, QuantizableInceptionB, QuantizableInceptionC, QuantizableInceptionD, QuantizableInceptionE, QuantizableInceptionAux])
self.quant = torch.quantization.QuantStub()
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self._transform_input(x)
x = self.quant(x)
(x, aux) = self._forward(x)
x = self.dequant(x)
aux_defined = (self.training and self.aux_logits)
if torch.jit.is_scripting():
if (not aux_defined):
warnings.warn('Scripted QuantizableInception3 always returns QuantizableInception3 Tuple')
return InceptionOutputs(x, aux)
else:
return self.eager_outputs(x, aux)
def fuse_model(self):
for m in self.modules():
if (type(m) == QuantizableBasicConv2d):
m.fuse_model() |
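# Hedged post-training quantization sketch for the model above; the qconfig choice
# and calibration loop are standard eager-mode steps, not code from this repo.
model = QuantizableInception3()
model.eval()
model.fuse_model()
model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
torch.quantization.prepare(model, inplace=True)
# ... feed representative calibration batches through `model` here ...
torch.quantization.convert(model, inplace=True) |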
def display_actions(actions):
action_names = list(actions)
for (i, name) in enumerate(action_names):
a_def = actions[name]
output = [f'{i} {name}:']
output.extend([f'{k}={v}' for (k, v) in a_def.items()])
print(' '.join(output)) |
def pdm2_cpu(t, y, w, freqs, nbins=30, linterp=True):
    t = (t - np.mean(t))
    y = (y - np.mean(y))
ybar = np.dot(w, y)
var = np.dot(w, np.power((y - ybar), 2))
return [(1 - (var_binned(t, y, w, freq, nbins=nbins, linterp=linterp) / var)) for freq in freqs] |
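# Hedged usage of the phase-dispersion statistic above (assumes var_binned is defined
# alongside): with weights that sum to 1, the returned values peak near the true
# frequency of a periodic signal. Signal parameters are illustrative.
import numpy as np
t = np.linspace(0, 10, 500)
y = np.sin(((2 * np.pi) * (1.5 * t)))
w = np.full_like(t, (1.0 / len(t)))
freqs = np.linspace(0.5, 3.0, 200)
power = pdm2_cpu(t, y, w, freqs)
print(freqs[np.argmax(power)])  # expected to be close to 1.5 |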
class TimerStat():
def __init__(self, window_size=10):
self._window_size = window_size
self._samples = []
self._units_processed = []
self._start_time = None
self._total_time = 0.0
self.count = 0
def __enter__(self):
invalidInputError((self._start_time is None), 'concurrent updates not supported')
self._start_time = time.time()
def __exit__(self, type, value, tb):
invalidInputError((self._start_time is not None), 'expect start time is not none')
time_delta = (time.time() - self._start_time)
self.push(time_delta)
self._start_time = None
def push(self, time_delta):
self._samples.append(time_delta)
if (len(self._samples) > self._window_size):
self._samples.pop(0)
self.count += 1
self._total_time += time_delta
def push_units_processed(self, n):
self._units_processed.append(n)
if (len(self._units_processed) > self._window_size):
self._units_processed.pop(0)
def mean(self):
return np.mean(self._samples)
def median(self):
return np.median(self._samples)
def sum(self):
return np.sum(self._samples)
def max(self):
return np.max(self._samples)
def first(self):
return (self._samples[0] if self._samples else None)
def last(self):
return (self._samples[(- 1)] if self._samples else None)
def size(self):
return len(self._samples)
def mean_units_processed(self):
return float(np.mean(self._units_processed))
def mean_throughput(self):
time_total = sum(self._samples)
if (not time_total):
return 0.0
return (sum(self._units_processed) / time_total)
def reset(self):
self._samples = []
self._units_processed = []
self._start_time = None
self._total_time = 0.0
self.count = 0 |
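# Hedged usage of TimerStat as a context manager; the timed body is illustrative.
# Only the last `window_size` samples feed mean/median/max, while count and
# _total_time accumulate over the full lifetime of the timer.
import time
timer = TimerStat(window_size=5)
for _ in range(3):
    with timer:
        time.sleep(0.01)
print(timer.mean(), timer.count) |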
def json_serialize_value(v):
try:
return json.dumps(v)
    except Exception:
return str(v) |
def torch_index_select(input, dim, index, *, out=None):
shape = list(input.shape)
shape[dim] = len(index)
return torch.empty(*shape, device='meta') |
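# Shape-only check of the meta implementation above: selecting 3 indices along dim 1
# of a (2, 5) tensor yields a meta tensor of shape (2, 3) without touching any data.
import torch
out = torch_index_select(torch.empty(2, 5, device='meta'), 1, [0, 2, 4])
assert out.shape == (2, 3) |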
@HEADS.register_module()
class TextSnakeHead(HeadMixin, BaseModule):
def __init__(self, in_channels, out_channels=5, downsample_ratio=1.0, loss=dict(type='TextSnakeLoss'), postprocessor=dict(type='TextSnakePostprocessor', text_repr_type='poly'), train_cfg=None, test_cfg=None, init_cfg=dict(type='Normal', override=dict(name='out_conv'), mean=0, std=0.01), **kwargs):
old_keys = ['text_repr_type', 'decoding_type']
for key in old_keys:
if kwargs.get(key, None):
postprocessor[key] = kwargs.get(key)
warnings.warn(f'{key} is deprecated, please specify it in postprocessor config dict. See for details.', UserWarning)
BaseModule.__init__(self, init_cfg=init_cfg)
HeadMixin.__init__(self, loss, postprocessor)
assert isinstance(in_channels, int)
self.in_channels = in_channels
self.out_channels = out_channels
self.downsample_ratio = downsample_ratio
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.out_conv = nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, inputs):
outputs = self.out_conv(inputs)
return outputs |
def fix_captions_in_float_annotations(annotation_list, float_type='figure', float_child_type='figure_graphic', caption_type='figure_caption'):
ann_by_id = dict()
anns_by_cat = defaultdict(list)
ann_children_ids = defaultdict(list)
for ann in annotation_list:
ann_by_id[ann['id']] = ann
anns_by_cat[ann['category']].append(ann)
ann_children_ids[ann['parent']].append(ann['id'])
for float_ann in anns_by_cat[float_type]:
all_float_children = [ann_by_id[c] for c in ann_children_ids[float_ann['id']]]
(float_child_anns, caption_anns) = ([], [])
for float_child in all_float_children:
if (float_child['category'] == float_child_type):
float_child_anns.append(float_child)
elif (float_child['category'] == caption_type):
caption_anns.append(float_child)
merged_float_child_bboxes = []
for float_child_ann in float_child_anns:
float_child_id = float_child_ann['id']
float_child_children_ids = get_all_children_ids_with_child_dictionary(float_child_id, ann_children_ids)
float_child_bbox_anns = [ann_by_id[ann_id] for ann_id in float_child_children_ids if (ann_by_id[ann_id]['category'] == 'box')]
current_merged_float_child_bbox = get_merged_bbox_from_list_of_bbox_anns(float_child_bbox_anns)
merged_float_child_bboxes.append(current_merged_float_child_bbox)
for caption_ann in caption_anns:
caption_id = caption_ann['id']
all_caption_child_ids = get_all_children_ids_with_child_dictionary(caption_id, ann_children_ids)
caption_bbox_anns = [ann_by_id[ann_id] for ann_id in all_caption_child_ids if ((ann_by_id[ann_id]['category'] == 'box') and ('bbox' in ann_by_id[ann_id]))]
for bbox_ann in caption_bbox_anns:
if ((bbox_ann['bbox'][2] <= 2) or (bbox_ann['bbox'][3] <= 2)):
bbox_ann['delete'] = True
elif any((second_bbox_contained_in_first_bbox(bbox_ann['bbox'], merged_float_child_bbox, tolerance=2.5) for merged_float_child_bbox in merged_float_child_bboxes)):
bbox_ann['delete'] = True
annotation_list = [ann for ann in annotation_list if (ann.get('delete', False) != True)]
return annotation_list |
def _build_initializer(initializer):
initializer_oneof = initializer.WhichOneof('initializer_oneof')
if (initializer_oneof == 'truncated_normal_initializer'):
return tf.truncated_normal_initializer(mean=initializer.truncated_normal_initializer.mean, stddev=initializer.truncated_normal_initializer.stddev)
if (initializer_oneof == 'variance_scaling_initializer'):
enum_descriptor = hyperparams_pb2.VarianceScalingInitializer.DESCRIPTOR.enum_types_by_name['Mode']
mode = enum_descriptor.values_by_number[initializer.variance_scaling_initializer.mode].name
return slim.variance_scaling_initializer(factor=initializer.variance_scaling_initializer.factor, mode=mode, uniform=initializer.variance_scaling_initializer.uniform)
raise ValueError('Unknown initializer function: {}'.format(initializer_oneof)) |
@dataclass
class Seq2SeqSpectrogramOutput(ModelOutput):
loss: Optional[torch.FloatTensor] = None
spectrogram: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None |
def compute_loss(model, device, data_loader, rnn):
model.eval()
loss = 0
scores = {}
with torch.no_grad():
for (id_list, data, target) in data_loader:
(data, target) = (data.to(device), target.to(device))
target = target.view((- 1), 1).float()
if (rnn == True):
model.hidden = model.init_hidden(data.size()[0])
output = model(data)
            loss += F.binary_cross_entropy(output, target, reduction='sum')
for (i, id) in enumerate(id_list):
scores[id] = output[i].data.cpu().numpy()
loss /= len(data_loader.dataset)
return (loss, scores) |
class QBatchNorm2d(nn.Module):
def __init__(self, in_channels, affine=True, training=True, eps=1e-05, momentum=0.9, track_running_stats=True):
super(QBatchNorm2d, self).__init__()
self.in_channels = in_channels
self.affine = affine
self.training = training
self.track_running_stats = track_running_stats
self.register_buffer('eye', torch.diag(torch.cat(([torch.Tensor([eps])] * 4))).unsqueeze(0))
if self.affine:
self.weight = torch.nn.Parameter(torch.zeros(4, 4, in_channels))
self.bias = torch.nn.Parameter(torch.zeros(4, in_channels))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
if self.track_running_stats:
self.register_buffer('running_mean', torch.zeros(4, in_channels))
self.register_buffer('running_cov', torch.zeros(in_channels, 4, 4))
else:
self.register_parameter('running_mean', None)
self.register_parameter('running_cov', None)
self.momentum = momentum
self.reset_parameters()
def reset_running_stats(self):
if self.track_running_stats:
self.running_mean.zero_()
self.running_cov.zero_()
def reset_parameters(self):
self.reset_running_stats()
if self.affine:
init.constant_(self.weight[(0, 0)], 0.5)
init.constant_(self.weight[(1, 1)], 0.5)
init.constant_(self.weight[(2, 2)], 0.5)
init.constant_(self.weight[(3, 3)], 0.5)
def forward(self, x):
x = torch.stack(torch.chunk(x, 4, 1), 1).permute(1, 0, 2, 3, 4)
(axes, d) = ((1, *range(3, x.dim())), x.shape[0])
shape = (1, x.shape[2], *([1] * (x.dim() - 3)))
if self.training:
mean = x.mean(dim=axes)
if (self.running_mean is not None):
with torch.no_grad():
self.running_mean = ((self.momentum * self.running_mean) + ((1.0 - self.momentum) * mean))
else:
mean = self.running_mean
x = (x - mean.reshape(d, *shape))
if self.training:
perm = x.permute(2, 0, *axes).flatten(2, (- 1))
cov = (torch.matmul(perm, perm.transpose((- 1), (- 2))) / perm.shape[(- 1)])
if (self.running_cov is not None):
with torch.no_grad():
self.running_cov = ((self.momentum * self.running_cov) + ((1.0 - self.momentum) * cov))
else:
cov = self.running_cov
ell = torch.cholesky((cov + self.eye), upper=True)
soln = torch.triangular_solve(x.unsqueeze((- 1)).permute(*range(1, x.dim()), 0, (- 1)), ell.reshape(*shape, d, d))
wht = soln.solution.squeeze((- 1))
z = torch.stack(torch.unbind(wht, dim=(- 1)), dim=0)
if self.affine:
weight = self.weight.view(4, 4, *shape)
scaled = torch.stack([((((z[0] * weight[(0, 0)]) + (z[1] * weight[(0, 1)])) + (z[2] * weight[(0, 2)])) + (z[3] * weight[(0, 3)])), ((((z[0] * weight[(1, 0)]) + (z[1] * weight[(1, 1)])) + (z[2] * weight[(1, 2)])) + (z[3] * weight[(1, 3)])), ((((z[0] * weight[(2, 0)]) + (z[1] * weight[(2, 1)])) + (z[2] * weight[(2, 2)])) + (z[3] * weight[(2, 3)])), ((((z[0] * weight[(3, 0)]) + (z[1] * weight[(3, 1)])) + (z[2] * weight[(3, 2)])) + (z[3] * weight[(3, 3)]))], dim=0)
z = (scaled + self.bias.reshape(4, *shape))
z = torch.cat(torch.chunk(z, 4, 0), 2).squeeze()
return Q(z) |
class LearningAlgorithm():
def __init__(self, params):
self.params = params
self.config_file = self.params['cfg']
if (not os.path.isfile(self.config_file)):
raise ValueError('Invalid config file path')
self.cfg = myconf()
self.cfg.read(self.config_file)
self.model_name = self.cfg.get('Network', 'name')
self.dataset_name = self.cfg.get('DataFrame', 'dataset_name')
self.hostname = socket.gethostname()
self.date = datetime.datetime.now().strftime('%Y-%m-%d-%Hh%M')
self.use_cuda = self.cfg.getboolean('Training', 'use_cuda')
self.device = ('cuda' if (torch.cuda.is_available() and self.use_cuda) else 'cpu')
def build_model(self):
if (self.model_name == 'VAE'):
self.model = build_VAE(cfg=self.cfg, device=self.device)
elif (self.model_name == 'DKF'):
self.model = build_DKF(cfg=self.cfg, device=self.device)
elif (self.model_name == 'STORN'):
self.model = build_STORN(cfg=self.cfg, device=self.device)
elif (self.model_name == 'VRNN'):
self.model = build_VRNN(cfg=self.cfg, device=self.device)
elif (self.model_name == 'SRNN'):
self.model = build_SRNN(cfg=self.cfg, device=self.device)
elif (self.model_name == 'RVAE'):
self.model = build_RVAE(cfg=self.cfg, device=self.device)
elif (self.model_name == 'DSAE'):
self.model = build_DSAE(cfg=self.cfg, device=self.device)
def init_optimizer(self):
optimization = self.cfg.get('Training', 'optimization')
lr = self.cfg.getfloat('Training', 'lr')
if (optimization == 'adam'):
optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
else:
optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
return optimizer
def get_basic_info(self):
basic_info = []
basic_info.append(('HOSTNAME: ' + self.hostname))
basic_info.append(('Time: ' + self.date))
basic_info.append(('Device for training: ' + self.device))
if (self.device == 'cuda'):
            basic_info.append('CUDA version: {}'.format(torch.version.cuda))
basic_info.append('Model name: {}'.format(self.model_name))
basic_info.append(('Total params: %.2fM' % (sum((p.numel() for p in self.model.parameters())) / 1000000.0)))
return basic_info
def train(self):
self.build_model()
self.model.train()
torch.autograd.set_detect_anomaly(True)
if (not self.params['reload']):
saved_root = self.cfg.get('User', 'saved_root')
z_dim = self.cfg.getint('Network', 'z_dim')
tag = self.cfg.get('Network', 'tag')
filename = '{}_{}_{}_z_dim={}'.format(self.dataset_name, self.date, tag, z_dim)
save_dir = os.path.join(saved_root, filename)
if (not os.path.isdir(save_dir)):
os.makedirs(save_dir)
else:
tag = self.cfg.get('Network', 'tag')
save_dir = self.params['model_dir']
save_cfg = os.path.join(save_dir, 'config.ini')
shutil.copy(self.config_file, save_cfg)
log_file = os.path.join(save_dir, 'log.txt')
logger_type = self.cfg.getint('User', 'logger_type')
logger = get_logger(log_file, logger_type)
for log in self.get_basic_info():
logger.info(log)
logger.info(('In this experiment, result will be saved in: ' + save_dir))
if self.cfg.getboolean('User', 'print_model'):
for log in self.model.get_info():
logger.info(log)
optimizer = self.init_optimizer()
if (self.dataset_name == 'WSJ0'):
(train_dataloader, val_dataloader, train_num, val_num) = speech_dataset.build_dataloader(self.cfg)
elif (self.dataset_name == 'H36M'):
(train_dataloader, val_dataloader, train_num, val_num) = h36m_dataset.build_dataloader(self.cfg)
else:
            logger.error('Unknown dataset')
logger.info('Training samples: {}'.format(train_num))
logger.info('Validation samples: {}'.format(val_num))
epochs = self.cfg.getint('Training', 'epochs')
early_stop_patience = self.cfg.getint('Training', 'early_stop_patience')
save_frequency = self.cfg.getint('Training', 'save_frequency')
beta = self.cfg.getfloat('Training', 'beta')
kl_warm = 0
if (not self.params['reload']):
train_loss = np.zeros((epochs,))
val_loss = np.zeros((epochs,))
train_recon = np.zeros((epochs,))
train_kl = np.zeros((epochs,))
val_recon = np.zeros((epochs,))
val_kl = np.zeros((epochs,))
best_val_loss = np.inf
cpt_patience = 0
cur_best_epoch = epochs
best_state_dict = self.model.state_dict()
best_optim_dict = optimizer.state_dict()
start_epoch = (- 1)
else:
cp_file = os.path.join(save_dir, '{}_checkpoint.pt'.format(self.model_name))
checkpoint = torch.load(cp_file)
self.model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optim_state_dict'])
start_epoch = checkpoint['epoch']
loss_log = checkpoint['loss_log']
            logger.info('Resuming training from epoch: {}'.format(start_epoch))
train_loss = np.pad(loss_log['train_loss'], (0, (epochs - start_epoch)), mode='constant', constant_values=0)
val_loss = np.pad(loss_log['val_loss'], (0, (epochs - start_epoch)), mode='constant', constant_values=0)
train_recon = np.pad(loss_log['train_recon'], (0, (epochs - start_epoch)), mode='constant', constant_values=0)
train_kl = np.pad(loss_log['train_kl'], (0, (epochs - start_epoch)), mode='constant', constant_values=0)
val_recon = np.pad(loss_log['val_recon'], (0, (epochs - start_epoch)), mode='constant', constant_values=0)
val_kl = np.pad(loss_log['val_kl'], (0, (epochs - start_epoch)), mode='constant', constant_values=0)
best_val_loss = checkpoint['best_val_loss']
cpt_patience = 0
cur_best_epoch = start_epoch
best_state_dict = self.model.state_dict()
best_optim_dict = optimizer.state_dict()
for epoch in range((start_epoch + 1), epochs):
start_time = datetime.datetime.now()
if (((epoch % 10) == 0) and (kl_warm < 1)):
kl_warm = ((epoch // 10) * 0.2)
logger.info('KL warm-up, anneal coeff: {}'.format(kl_warm))
for (_, batch_data) in enumerate(train_dataloader):
batch_data = batch_data.to(self.device)
if (self.dataset_name == 'WSJ0'):
batch_data = batch_data.permute(2, 0, 1)
recon_batch_data = torch.exp(self.model(batch_data))
loss_recon = loss_ISD(batch_data, recon_batch_data)
elif (self.dataset_name == 'H36M'):
batch_data = (batch_data.permute(1, 0, 2) / 1000)
recon_batch_data = self.model(batch_data)
loss_recon = loss_MPJPE((batch_data * 1000), (recon_batch_data * 1000))
(seq_len, bs, _) = self.model.z_mean.shape
loss_recon = (loss_recon / (seq_len * bs))
if (self.model_name == 'DSAE'):
loss_kl_z = loss_KLD(self.model.z_mean, self.model.z_logvar, self.model.z_mean_p, self.model.z_logvar_p)
loss_kl_v = loss_KLD(self.model.v_mean, self.model.v_logvar, self.model.v_mean_p, self.model.v_logvar_p)
loss_kl = (loss_kl_z + loss_kl_v)
else:
loss_kl = loss_KLD(self.model.z_mean, self.model.z_logvar, self.model.z_mean_p, self.model.z_logvar_p)
loss_kl = (((kl_warm * beta) * loss_kl) / (seq_len * bs))
loss_tot = (loss_recon + loss_kl)
optimizer.zero_grad()
loss_tot.backward()
optimizer.step()
train_loss[epoch] += (loss_tot.item() * bs)
train_recon[epoch] += (loss_recon.item() * bs)
train_kl[epoch] += (loss_kl.item() * bs)
for (_, batch_data) in enumerate(val_dataloader):
batch_data = batch_data.to(self.device)
if (self.dataset_name == 'WSJ0'):
batch_data = batch_data.permute(2, 0, 1)
recon_batch_data = torch.exp(self.model(batch_data))
loss_recon = loss_ISD(batch_data, recon_batch_data)
elif (self.dataset_name == 'H36M'):
batch_data = (batch_data.permute(1, 0, 2) / 1000)
recon_batch_data = self.model(batch_data)
loss_recon = loss_MPJPE((batch_data * 1000), (recon_batch_data * 1000))
(seq_len, bs, _) = self.model.z_mean.shape
loss_recon = (loss_recon / (seq_len * bs))
if (self.model_name == 'DSAE'):
loss_kl_z = loss_KLD(self.model.z_mean, self.model.z_logvar, self.model.z_mean_p, self.model.z_logvar_p)
loss_kl_v = loss_KLD(self.model.v_mean, self.model.v_logvar, self.model.v_mean_p, self.model.v_logvar_p)
loss_kl = (loss_kl_z + loss_kl_v)
else:
loss_kl = loss_KLD(self.model.z_mean, self.model.z_logvar, self.model.z_mean_p, self.model.z_logvar_p)
loss_kl = (((kl_warm * beta) * loss_kl) / (seq_len * bs))
loss_tot = (loss_recon + loss_kl)
val_loss[epoch] += (loss_tot.item() * bs)
val_recon[epoch] += (loss_recon.item() * bs)
val_kl[epoch] += (loss_kl.item() * bs)
train_loss[epoch] = (train_loss[epoch] / train_num)
val_loss[epoch] = (val_loss[epoch] / val_num)
train_recon[epoch] = (train_recon[epoch] / train_num)
train_kl[epoch] = (train_kl[epoch] / train_num)
val_recon[epoch] = (val_recon[epoch] / val_num)
val_kl[epoch] = (val_kl[epoch] / val_num)
if ((val_loss[epoch] < best_val_loss) or (kl_warm < 1)):
best_val_loss = val_loss[epoch]
cpt_patience = 0
best_state_dict = self.model.state_dict()
best_optim_dict = optimizer.state_dict()
cur_best_epoch = epoch
else:
cpt_patience += 1
end_time = datetime.datetime.now()
interval = ((end_time - start_time).seconds / 60)
logger.info('Epoch: {} training time {:.2f}m'.format(epoch, interval))
logger.info('Train => tot: {:.2f} recon {:.2f} KL {:.2f} Val => tot: {:.2f} recon {:.2f} KL {:.2f}'.format(train_loss[epoch], train_recon[epoch], train_kl[epoch], val_loss[epoch], val_recon[epoch], val_kl[epoch]))
if ((cpt_patience == early_stop_patience) and (kl_warm >= 1.0)):
logger.info('Early stop patience achieved')
break
if ((epoch % save_frequency) == 0):
loss_log = {'train_loss': train_loss[:(cur_best_epoch + 1)], 'val_loss': val_loss[:(cur_best_epoch + 1)], 'train_recon': train_recon[:(cur_best_epoch + 1)], 'train_kl': train_kl[:(cur_best_epoch + 1)], 'val_recon': val_recon[:(cur_best_epoch + 1)], 'val_kl': val_kl[:(cur_best_epoch + 1)]}
save_file = os.path.join(save_dir, (self.model_name + '_checkpoint.pt'))
torch.save({'epoch': cur_best_epoch, 'best_val_loss': best_val_loss, 'cpt_patience': cpt_patience, 'model_state_dict': best_state_dict, 'optim_state_dict': best_optim_dict, 'loss_log': loss_log}, save_file)
logger.info('Epoch: {} ===> checkpoint stored with current best epoch: {}'.format(epoch, cur_best_epoch))
save_file = os.path.join(save_dir, (((self.model_name + '_final_epoch') + str(cur_best_epoch)) + '.pt'))
torch.save(best_state_dict, save_file)
train_loss = train_loss[:(epoch + 1)]
val_loss = val_loss[:(epoch + 1)]
train_recon = train_recon[:(epoch + 1)]
train_kl = train_kl[:(epoch + 1)]
val_recon = val_recon[:(epoch + 1)]
val_kl = val_kl[:(epoch + 1)]
loss_file = os.path.join(save_dir, 'loss_model.pckl')
with open(loss_file, 'wb') as f:
pickle.dump([train_loss, val_loss, train_recon, train_kl, val_recon, val_kl], f)
plt.clf()
fig = plt.figure(figsize=(8, 6))
plt.rcParams['font.size'] = 12
plt.plot(train_loss, label='training loss')
plt.plot(val_loss, label='validation loss')
plt.legend(fontsize=16, title=self.model_name, title_fontsize=20)
plt.xlabel('epochs', fontdict={'size': 16})
plt.ylabel('loss', fontdict={'size': 16})
fig_file = os.path.join(save_dir, 'loss_{}.png'.format(tag))
plt.savefig(fig_file)
plt.clf()
fig = plt.figure(figsize=(8, 6))
plt.rcParams['font.size'] = 12
plt.plot(train_recon, label='Training')
plt.plot(val_recon, label='Validation')
plt.legend(fontsize=16, title='{}: Recon. Loss'.format(self.model_name), title_fontsize=20)
plt.xlabel('epochs', fontdict={'size': 16})
plt.ylabel('loss', fontdict={'size': 16})
fig_file = os.path.join(save_dir, 'loss_recon_{}.png'.format(tag))
plt.savefig(fig_file)
plt.clf()
fig = plt.figure(figsize=(8, 6))
plt.rcParams['font.size'] = 12
plt.plot(train_kl, label='Training')
plt.plot(val_kl, label='Validation')
plt.legend(fontsize=16, title='{}: KL Divergence'.format(self.model_name), title_fontsize=20)
plt.xlabel('epochs', fontdict={'size': 16})
plt.ylabel('loss', fontdict={'size': 16})
fig_file = os.path.join(save_dir, 'loss_KLD_{}.png'.format(tag))
plt.savefig(fig_file) |
class LayoutLMConfig(BertConfig):
model_type = 'layoutlm'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, gradient_checkpointing=False, max_2d_position_embeddings=1024, **kwargs):
super().__init__(vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, gradient_checkpointing=gradient_checkpointing, **kwargs)
self.max_2d_position_embeddings = max_2d_position_embeddings |
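# Minimal hedged usage of the config above: everything except the extra 2-D position
# table falls through to the BertConfig defaults.
config = LayoutLMConfig(max_2d_position_embeddings=1024)
assert config.model_type == 'layoutlm'
assert config.max_2d_position_embeddings == 1024 |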
def gptneox_context_default_params() -> gptneox_context_params:
return _lib.gptneox_context_default_params() |
(version='2.0')
class Postprocess(object):
def __init__(self, postprocess_cls, name='user_postprocess', **kwargs):
self.postprocess_cls = postprocess_cls
self.name = name
self.kwargs = kwargs |
def main(args, root='root'):
print('If this is your first time running this program:\n 1. Make an account at archive.org\n 2. On command line, run:\n pip install internetarchive\n ia configure')
print(('User supplied arguments:\n' + str(args)))
path = os.path.expanduser(args['path'])
if ((path[(- 1)] != '/') or (not os.path.isdir(path))):
print((('ERROR: We only accept directories in this script so there must be a trailing slash /. Try:\n python costar_block_stacking_ia_upload.py --path ' + str(path)) + '/\nExiting.'))
return
debug = True
if args['upload']:
debug = False
        print('\n\nWARNING: ATTEMPTING A REAL UPLOAD TO THE INTERNET ARCHIVE. THIS IS NOT A TEST.\n\nWe are uploading the data to the test_collection, which will only store files for 30 days.\nWhen the upload is complete, email , and they will move your item to a permanent collection.\nSee for details.\n')
else:
print('Performing test run.')
print(('debug: ' + str(debug)))
txt_path = os.path.join(path, args['exclude_files_txt'])
if os.path.isfile(txt_path):
glob_list = np.genfromtxt(txt_path, dtype='str', delimiter='\n')
print('Imported {} glob rules from {}'.format(len(glob_list), txt_path))
else:
with open(txt_path, 'w') as f:
print('Created exclude file txt at {}'.format(txt_path))
glob_list = []
excluded_list = []
for s in glob_list:
excluded_list += glob(s)
print('Selected {} files to be excluded'.format(len(excluded_list)))
if (not args['from_csv']):
filenames = []
include_ext = args['include_ext']
print('Selecting files with extensions: \n{}'.format(str(include_ext)))
for (root, _, files) in os.walk(path):
for filename in files:
if ((args['files_hash_csv'] in filename) or (filename in excluded_list)):
continue
if any([(ext in filename) for ext in include_ext]):
rel_dir = os.path.relpath(root, path)
if (rel_dir == '.'):
rel_dir = ''
filenames.append(os.path.join(rel_dir, filename))
if (len(filenames) == 0):
raise RuntimeError('No matching files found! Are you sure the path is correct? {}'.format(path))
print('Counted {} matching files.'.format(len(filenames)))
else:
txt_path = os.path.join(path, args['from_csv'])
if (not os.path.isfile(txt_path)):
raise ValueError('Attempted to read in filenames from a csv file, but the input file is not a file:\n{}'.format(txt_path))
filenames = np.genfromtxt(txt_path, dtype='str', delimiter=', ')
for filename in filenames:
file_path = os.path.join(path, filename)
if (not os.path.isfile(file_path)):
raise ValueError('A filename read from CSV file is not a valid file:\n{}'.format(file_path))
print('Read {} files from {}'.format(len(filenames), txt_path))
list_of_csv = glob(('*' + args['files_hash_csv']))
if list_of_csv:
csv_path = max(list_of_csv, key=os.path.getctime)
print('Loading existing filename hash CSV file: \n{}'.format(csv_path))
file_hash_listing = np.genfromtxt(csv_path, dtype='str', delimiter=', ')
else:
print('No pre-existing filename hash CSV file found. Creating a new filename hash CSV file.')
file_hash_listing = np.column_stack([filenames, (['not_uploaded_yet'] * len(filenames))])
csv_path = timeStamped(args['files_hash_csv'])
list_of_csv += [csv_path]
if (file_hash_listing.shape[0] < len(filenames)):
diff = (len(filenames) - file_hash_listing.shape[0])
print('Adding {} new files to the CSV file...'.format(diff))
for filename in filenames:
if (filename in file_hash_listing):
continue
print('Added {}'.format(filename))
file_hash_listing = np.append(file_hash_listing, [[filename, 'not_uploaded_yet']], axis=0)
elif (file_hash_listing.shape[0] > len(filenames)):
print('File count in CSV({}) is greater than actual file count({})!'.format(file_hash_listing.shape[0], len(filenames)))
print('Creating a new filename hash CSV file: \n{}'.format(csv_path))
file_hash_listing = np.column_stack([filenames, (['not_uploaded_yet'] * len(filenames))])
save_file_hash_as_csv(csv_path, file_hash_listing)
item = internetarchive.get_item('johns_hopkins_costar_dataset', debug=debug)
if args['verify']:
keys_and_md5 = {}
for server_file in item.files:
keys_and_md5[server_file['name']] = server_file['md5']
mismatch_files = []
for (key, md5) in file_hash_listing:
try:
server_md5 = keys_and_md5[key]
with open(os.path.join(path, key), 'rb') as f:
current_hash = internetarchive.utils.get_md5(f)
                if (md5 != server_md5):
                    print('Local md5 for {} does not match server md5!'.format(key))
                    mismatch_files += [key]  # append the key itself, not its individual characters
                elif (md5 != current_hash):
                    print('Local md5 for {} has been changed!'.format(key))
                    mismatch_files += [key]
            except KeyError:
                print('{} is not on the server!'.format(key))
                mismatch_files += [key]
        print('Verify result: {} files are missing from the server or have mismatched hashes'.format(len(mismatch_files)))
    md = dict(collection='test_collection', title='The Johns Hopkins CoSTAR Robotics Dataset', version='v0.4', contributor='Andrew Hundt, Varun Jain, Chris Paxton, Chunting Jiao, Chia-Hung Lin, and Gregory D. Hager', creator='Andrew Hundt <>', credits='\n Andrew Hundt, Varun Jain, Chris Paxton, Chunting Jiao, Chia-Hung Lin, and Gregory D. Hager<br>\n The Johns Hopkins University<br>\n <a href=" Interaction and Robotics Laboratory</a><br>\n This material is based upon work supported by the National Science Foundation under NRI Grant Award No. 1637949.\n ', date='2018-10-19', description='\n Stack blocks like a champion! The CoSTAR Block Stacking Dataset includes a\n real robot trying to stack colored children\'s blocks more than 10,000 times\n in a scene with challenging lighting and a movable bin obstacle which must\n be avoided. This dataset is especially well suited to the benchmarking and\n comparison of deep learning algorithms.<br>\n Visit the <a href=\' Dataset Website</a> for more info.<br>\n <b>If you use the dataset, please cite our paper introducing it:</b>\n <a href=\' Frankenstein\'s Creature to Stack: HyperTree Architecture Search</a>\n\n Andrew Hundt, Varun Jain, Chris Paxton, Chunting Jiao, Chia-Hung Lin, and Gregory D. Hager<br>\n The Johns Hopkins University<br>\n <a href=" Interaction and Robotics Laboratory</a><br>\n This material is based upon work supported by the National Science Foundation under NRI Grant Award No. 1637949.\n ', license='', mediatype='data', noindex='True')
print('Uploading {} files in the following directory:\n{}'.format(len(filenames), str(path)))
(success_count, failed_count, skip_count, changed_count) = (0, 0, 0, 0)
changed_files = []
hash_csv_idx = (- 1)
results_url = []
results_path_url = []
pb = tqdm(range(len(filenames)))
for i in pb:
(file_path, md5_hash) = file_hash_listing[i]
pb.write('Uploading {}'.format(file_path))
if (args['files_hash_csv'] in file_path):
hash_csv_idx = i
continue
if ((not args['verify']) and (md5_hash != 'not_uploaded_yet')):
with open(os.path.join(path, file_path), 'rb') as f:
current_hash = internetarchive.utils.get_md5(f)
if (current_hash == md5_hash):
skip_count += 1
pb.write('Skipping {} because it has been uploaded'.format(file_path))
continue
elif args['replace_changed']:
pb.write('Reuploading {} because the md5 hash has been changed locally'.format(file_path))
else:
pb.write('The md5 hash has been changed locally for {}'.format(file_path))
changed_count += 1
changed_files += [file_path]
if (args['verify'] and (file_path not in mismatch_files)):
skip_count += 1
            pb.write('Skipping {} because it has been verified to be on the server'.format(file_path))
continue
resp = item.upload_file(os.path.join(path, file_path), key=file_path, metadata=md, verify=True, checksum=True, retries=10, retries_sleep=30, queue_derive=False, debug=debug)
if debug:
pb.write('[DEBUG] item key = {}'.format(file_path))
if resp.url:
results_url.append(resp.url)
results_path_url.append(resp.path_url)
else:
pb.write('{} is already on the server.'.format(file_path))
with open(os.path.join(path, file_path), 'rb') as f:
md5_hash = internetarchive.utils.get_md5(f)
success_count += 1
elif (resp.status_code is None):
if (not args['verify']):
pb.write('{} is already on the server.'.format(file_path))
elif (args['verify'] and (file_path in mismatch_files)):
raise RuntimeError('Empty response, but {} is changed or was not on server!'.format(file_path))
with open(os.path.join(path, file_path), 'rb') as f:
md5_hash = internetarchive.utils.get_md5(f)
skip_count += 1
elif (resp.status_code != 200):
pb.write('Upload failed for {}, status code = {}'.format(file_path, resp.status_code))
failed_count += 1
else:
results_url.append(resp.request.url)
results_path_url.append(resp.request.path_url)
with open(os.path.join(path, file_path), 'rb') as f:
md5_hash = internetarchive.utils.get_md5(f)
success_count += 1
file_hash_listing[i] = np.array([file_path, md5_hash])
if (((success_count + 1) % 10) == 0):
pb.write(timeStamped(('[%d] Check point, saving csv' % i)))
save_file_hash_as_csv(csv_path, file_hash_listing)
print('Uploaded {} files. Skipped {} files. {} files failed to upload.'.format(success_count, skip_count, failed_count))
    if ((not args['replace_changed']) and (changed_count != 0)):
print('Local hash changed for {} files. Use --replace_changed to upload these files'.format(changed_count))
changed_hash_file_txt = os.path.join(path, 'changed_hash_files.txt')
with open(changed_hash_file_txt, 'w') as f:
for file_path in changed_files:
f.write('{}\n'.format(file_path))
print('Changed files have been saved as {}'.format(changed_hash_file_txt))
print('Total file count {}, expected file count {}'.format((((success_count + skip_count) + failed_count) + changed_count), len(filenames)))
print('Saving file hash CSV file for uploading:\n{}'.format(csv_path))
with open(csv_path, 'rb') as f:
md5_hash = internetarchive.utils.get_md5(f)
file_hash_listing[hash_csv_idx][1] = md5_hash
save_file_hash_as_csv(csv_path, file_hash_listing)
resp = item.upload_file(csv_path, key=args['files_hash_csv'], metadata=md, verify=True, checksum=True, retries=10, retries_sleep=30, queue_derive=False, debug=debug)
if debug:
print('[DEBUG] item key = {}'.format(args['files_hash_csv']))
elif (resp.status_code != 200):
print('Upload failed for {}, status code = {}'.format(csv_path, resp.status_code))
failed_count += 1
else:
results_url.append(resp.request.url)
results_path_url.append(resp.request.path_url)
print('Upload finished, printing the results:')
    server_urls = list(results_url)
    local_urls = list(results_path_url)
if debug:
debug_str = '_debug'
else:
debug_str = ''
prefix = timeStamped(('internet_archive_uploaded' + debug_str))
server_txt = (prefix + '_server_urls.txt')
local_txt = (prefix + '_local_path_urls.txt')
with open(server_txt, mode='w') as set_file:
set_file.write('\n'.join(server_urls))
with open(local_txt, mode='w') as set_file:
set_file.write('\n'.join(local_urls))
print(('-' * 80))
print(('local_urls:' + str(local_urls)))
print(('-' * 80))
print(('server_urls:' + str(server_urls)))
print(('-' * 80))
print(((('internet archive upload complete! file lists are in: \n' + str(server_txt)) + '\n') + str(local_txt)))
if (failed_count != 0):
print('{} files failed to upload! Re-run the script to try again'.format(failed_count)) |
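# Hedged sketch: the upload script above relies on two helpers, timeStamped and
# save_file_hash_as_csv, that are defined elsewhere in the original file. These
# are plausible minimal implementations, not the authors' exact code.
import datetime

def timeStamped(fname, fmt='%Y-%m-%d-%H-%M-%S_{fname}'):
    # prefix a filename with the current timestamp, e.g. for checkpoint CSVs
    return datetime.datetime.now().strftime(fmt).format(fname=fname)

def save_file_hash_as_csv(csv_path, file_hash_listing):
    # write (filename, md5) rows with the same ', ' delimiter that
    # np.genfromtxt(..., delimiter=', ') expects when the CSV is read back
    with open(csv_path, 'w') as f:
        for (filename, md5) in file_hash_listing:
            f.write('{}, {}\n'.format(filename, md5)) |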
class PixelMultiplexer(nn.Module):
encoder_cls: Type[nn.Module]
network_cls: Type[nn.Module]
latent_dim: int
stop_gradient: bool = False
pixel_keys: Tuple[(str, ...)] = ('pixels',)
depth_keys: Tuple[(str, ...)] = ()
    @nn.compact  # required: submodules (encoders, Dense, LayerNorm) are created inline in __call__
    def __call__(self, observations: Union[(FrozenDict, Dict)], actions: Optional[jnp.ndarray]=None, training: bool=False) -> jnp.ndarray:
observations = FrozenDict(observations)
if (len(self.depth_keys) == 0):
depth_keys = ([None] * len(self.pixel_keys))
else:
depth_keys = self.depth_keys
xs = []
for (i, (pixel_key, depth_key)) in enumerate(zip(self.pixel_keys, depth_keys)):
x = (observations[pixel_key].astype(jnp.float32) / 255.0)
if (depth_key is not None):
x = jnp.concatenate([x, observations[depth_key]], axis=(- 2))
x = jnp.reshape(x, (*x.shape[:(- 2)], (- 1)))
x = self.encoder_cls(name=f'encoder_{i}')(x)
if self.stop_gradient:
x = jax.lax.stop_gradient(x)
x = nn.Dense(self.latent_dim, kernel_init=default_init())(x)
x = nn.LayerNorm()(x)
x = nn.tanh(x)
xs.append(x)
x = jnp.concatenate(xs, axis=(- 1))
if ('state' in observations):
y = nn.Dense(self.latent_dim, kernel_init=default_init())(observations['state'])
y = nn.LayerNorm()(y)
y = nn.tanh(y)
x = jnp.concatenate([x, y], axis=(- 1))
if (actions is None):
return self.network_cls()(x, training)
else:
return self.network_cls()(x, actions, training) |
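# Hedged initialization sketch for PixelMultiplexer above. TinyEncoder and
# TinyHead are hypothetical stand-ins for the real encoder/network classes, and
# default_init is assumed to resolve to a standard Flax initializer (the
# original defines its own elsewhere).
import jax
import jax.numpy as jnp
import flax.linen as nn

default_init = nn.initializers.xavier_uniform  # assumption, see note above

class TinyEncoder(nn.Module):
    @nn.compact
    def __call__(self, x):
        x = x.reshape((*x.shape[:-3], -1))  # flatten H, W, C into one feature axis
        return nn.Dense(128)(x)

class TinyHead(nn.Module):
    @nn.compact
    def __call__(self, x, training=False):
        return nn.Dense(1)(x)

model = PixelMultiplexer(encoder_cls=TinyEncoder, network_cls=TinyHead, latent_dim=64)
# pixels carry a trailing frame-stack axis; the module folds (C, stack) together
obs = {'pixels': jnp.zeros((64, 64, 3, 1), jnp.uint8), 'state': jnp.zeros((7,))}
params = model.init(jax.random.PRNGKey(0), obs)
out = model.apply(params, obs)  # shape (1,) |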
def _mcfg(**kwargs):
cfg = dict(se_ratio=0.0, bottle_ratio=1.0, stem_width=32)
cfg.update(**kwargs)
return cfg |
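# Hedged usage sketch: _mcfg above merges keyword overrides into a small set of
# model-config defaults; keys beyond the three defaults pass straight through.
cfg = _mcfg(se_ratio=0.25, depth=12)
assert cfg == dict(se_ratio=0.25, bottle_ratio=1.0, stem_width=32, depth=12) |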
_materialize('torch')
class Linear(UnaryOpBase):
in_dtypes = [(DType.float32,)]
out_dtypes = [(DType.float32,)]
def __init__(self, ifeat: Union[(int, z3.ExprRef)], ofeat: Union[(int, z3.ExprRef)]):
super().__init__()
self.ifeat = ifeat
self.ofeat = ofeat
self.inp_ranks = [rank_from(1)]
self.out_ranks = [rank_from(1)]
def type_transfer(self, input_shapes: List[AbsTensor]) -> List[AbsTensor]:
assert (len(input_shapes) == 1), 'Linear only takes one input, but got {}'.format(len(input_shapes))
return [AbsTensor(shape=[*input_shapes[0].shape[:(- 1)], self.ofeat], dtype=DType.float32)]
def requires(self, input_shapes: List[AbsTensor]) -> List[z3.ExprRef]:
ConstraintCheck.true((input_shapes[0].ndims >= 1))
return [nnsmith_ge(self.ifeat, 1), nnsmith_ge(self.ofeat, 1), nnsmith_eq(input_shapes[0].shape[(- 1)], self.ifeat)]
def deduct_inp_ranks_and_dtype(self, out_abs_tensor: List[AbsTensor]) -> List[Tuple[(int, DType)]]:
return [(out_abs_tensor[0].ndims, DType.float32)] |
class Task():
def __init__(self):
self.cond = Condition()
self.is_complete = False
self.response = None
    def done(self):
        logger.info('task is done')
        self.cond.acquire()
        self.is_complete = True  # set the predicate before notifying, otherwise wait_for_done blocks forever
        self.cond.notify()
        self.cond.release()
def wait_for_done(self):
self.cond.acquire()
self.cond.wait_for(predicate=self.get_task_status, timeout=None)
self.cond.release()
def set_prompt(self, prompt):
self.prompt = prompt
def set_steps(self, num_inference_steps):
self.num_inference_steps = num_inference_steps
def set_scale(self, guidance_scale):
self.guidance_scale = guidance_scale
def set_seed(self, seed):
self.seed = seed
def set_source_image(self, source_img):
self.source_img = source_img
def set_strength(self, strength):
self.strength = strength
def set_task_type(self, task_type):
self.task_type = task_type
def set_start_time(self, time):
self.start_time = time
def get_task_status(self):
return self.is_complete |
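# Hedged usage sketch for Task above: a worker thread fills in the response and
# signals completion while the caller blocks in wait_for_done(). Assumes
# Condition comes from threading and logger is a configured logging.Logger in
# the original module.
import threading
import time

task = Task()
task.set_prompt('a watercolor robot')

def worker():
    time.sleep(0.1)   # stand-in for the actual generation work
    task.response = 'result'
    task.done()       # sets is_complete and notifies the waiter

threading.Thread(target=worker).start()
task.wait_for_done()  # returns once the is_complete predicate is True
print(task.response) |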
def check_if_activity_exists_and_time_less_than(group, activity):
relevant_activity_idxs = np.where((group[activity_col] == activity))[0]
if (len(relevant_activity_idxs) > 0):
idx = relevant_activity_idxs[0]
if (group['timesincelastevent'].iloc[idx] <= (28 * 1440)):
group[label_col] = pos_label
return group[:idx]
else:
group[label_col] = neg_label
return group[:idx]
else:
group[label_col] = neg_label
return group |
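# Hedged usage sketch: the labeling function above is written for
# pandas.groupby().apply() over one case at a time. activity_col, label_col,
# pos_label and neg_label are module-level names in the original; the values
# below are assumptions for illustration. The 28 * 1440 threshold compares
# 'timesincelastevent' (minutes) against 28 days.
import pandas as pd

activity_col, label_col = 'Activity', 'label'
pos_label, neg_label = 'deviant', 'regular'

log = pd.DataFrame({
    'Case ID': ['c1', 'c1', 'c1'],
    'Activity': ['A_Create', 'A_Submit', 'A_Cancelled'],
    'timesincelastevent': [0.0, 60.0, 2 * 1440.0],
})
# the cancelled activity occurs within 28 days, so the case is labeled
# pos_label and the trace is truncated just before that event
labeled = log.groupby('Case ID', group_keys=False).apply(
    check_if_activity_exists_and_time_less_than, 'A_Cancelled') |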
def collate_molgraphs(data):
if (len(data[0]) == 3):
(smiles, graphs, labels) = map(list, zip(*data))
else:
(smiles, graphs, labels, masks) = map(list, zip(*data))
bg = dgl.batch(graphs)
bg.set_n_initializer(dgl.init.zero_initializer)
bg.set_e_initializer(dgl.init.zero_initializer)
labels = torch.stack(labels, dim=0)
if (len(data[0]) == 3):
masks = torch.ones(labels.shape)
else:
masks = torch.stack(masks, dim=0)
return (smiles, bg, labels, masks) |
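# Hedged usage sketch: collate_molgraphs is meant as a collate_fn for a torch
# DataLoader over (smiles, dgl_graph, label) tuples; 4-tuples with masks are
# also supported. The two-molecule toy dataset below is illustrative only.
import dgl
import torch
from torch.utils.data import DataLoader

g = dgl.graph(([0, 1], [1, 0]))  # toy 2-atom molecule graph
data = [('CC', g, torch.zeros(1)), ('CO', g, torch.ones(1))]
loader = DataLoader(data, batch_size=2, collate_fn=collate_molgraphs)
smiles, bg, labels, masks = next(iter(loader))  # bg is one batched DGLGraph |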
def strictly_positive(val):
val = float(val)
if (not (val > 0)):
raise argparse.ArgumentTypeError('Should be strictly positive.')
return val |
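# Usage sketch: strictly_positive plugs into argparse as a type, so invalid
# values are rejected at parse time with a readable error message.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=strictly_positive, default=1e-3)
args = parser.parse_args(['--lr', '0.01'])   # ok
# parser.parse_args(['--lr', '-1'])          # exits: Should be strictly positive. |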
def test_link_func_monoclassification():
predictions = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
with pytest.raises(ValueError):
link_func(predictions, 'monoclassification') |
def load_checkpoint(args, model, optimizer=None, verbose=True):
checkpoint = torch.load(args.resume)
start_epoch = 0
best_acc = 0
if ('epoch' in checkpoint):
start_epoch = checkpoint['epoch']
if ('best_acc' in checkpoint):
best_acc = checkpoint['best_acc']
    model.load_state_dict(checkpoint['state_dict'], strict=False)
if ((optimizer is not None) and ('optimizer' in checkpoint)):
optimizer.load_state_dict(checkpoint['optimizer'])
for state in optimizer.state.values():
for (k, v) in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.to(args.device)
if verbose:
print("=> loading checkpoint '{}' (epoch {})".format(args.resume, start_epoch))
return (model, optimizer, best_acc, start_epoch) |
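# Hedged usage sketch: args only needs .resume (an existing checkpoint path)
# and .device; the tiny model/optimizer here are illustrative stand-ins.
import argparse
import torch
import torch.nn as nn

args = argparse.Namespace(resume='checkpoints/best.pth', device='cpu')
model = nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer, best_acc, start_epoch = load_checkpoint(args, model, optimizer) |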
def get_constellation(node1, node2):
constellation1 = {'permutation1': ((node1[0] > node2[0]) and (node1[1] < node2[1])), 'permutation2': ((node1[0] < node2[0]) and (node1[1] > node2[1]))}
if (constellation1['permutation1'] or constellation1['permutation2']):
return 1
else:
return 2 |
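# Worked example for get_constellation above: the two permutations detect an
# anti-diagonal arrangement (one node up-left / down-right of the other);
# every other arrangement maps to constellation 2.
assert get_constellation((2, 1), (1, 2)) == 1  # node1 is right of and below node2
assert get_constellation((1, 1), (2, 2)) == 2  # nodes lie along the main diagonal |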
class CNN(ModelBase):
    @staticmethod
    def add_config(cfgparser):
super(CNN, CNN).add_config(cfgparser)
cfgparser.add_argument('--n_d', type=int, help='hidden dimension (for CNN: #output channel)')
cfgparser.add_argument('--activation', '--act', type=str, help='activation func')
cfgparser.add_argument('--dropout', type=float, help='dropout prob')
cfgparser.add_argument('--num_layers', type=int, help='number of non-linear layers')
cfgparser.add_argument('--kernel_width', type=str, help='width of kernel')
def __init__(self, embedding_layer, configs):
super(CNN, self).__init__(configs)
self.embedding_layer = embedding_layer
self.embedding = embedding_layer.embedding
self.n_e = embedding_layer.n_d
self.n_d = (configs.n_d or 300)
self.activation = (configs.activation or 'tanh')
self.dropout = (configs.dropout or 0.0)
self.num_layers = (configs.num_layers or 0)
        self.kernel_widths = list(map(int, configs.kernel_width.split(',')))  # materialize: a bare map() iterator would be exhausted mid-zip below
self.use_cuda = configs.cuda
self.dropout_op = nn.Dropout(self.dropout)
Ci = 1
Co = self.n_d
kernel_Ws = self.kernel_widths
D = self.n_e
padding_Ws = map((lambda k: int(((k - 1) / 2.0))), kernel_Ws)
padding_H = 0
convs = []
for (k, w) in zip(kernel_Ws, padding_Ws):
convs.append(nn.Conv2d(Ci, Co, (k, D), padding=(w, padding_H)))
self.convs = nn.ModuleList(convs)
activation_module = get_activation_module(self.activation)
self.seq = seq = nn.Sequential()
self.n_out = (len(self.convs) * self.n_d)
for i in range(self.num_layers):
seq.add_module('linear-{}'.format(i), nn.Linear(self.n_out, self.n_out))
seq.add_module('activation-{}'.format(i), activation_module())
if (self.dropout > 0):
seq.add_module('dropout-{}'.format(i), nn.Dropout(p=configs.dropout))
        for layer in seq:
            try:
                nn.init.xavier_normal_(layer.weight)
                nn.init.constant_(layer.bias, 0.1)
            except AttributeError:
                # activation and dropout modules have no weight/bias to initialize
                pass
self.build_output_op()
def forward(self, batch):
emb = self.embedding(batch)
emb = emb.transpose(0, 1)
assert (emb.dim() == 3)
if (self.dropout > 0):
emb = self.dropout_op(emb)
emb = emb.unsqueeze(1)
outputs = [F.relu(conv(emb)).squeeze(3) for conv in self.convs]
output = torch.cat(outputs, 1)
output = F.max_pool1d(output, output.size(2)).squeeze(2)
if (self.num_layers > 0):
output = self.seq(output)
return output |
def build_model_from_cfg(config_path, checkpoint_path):
from mmdet.models import build_detector
cfg = mmcv.Config.fromfile(config_path)
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
cfg.model.pretrained = None
cfg.data.test.test_mode = True
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
load_checkpoint(model, checkpoint_path, map_location='cpu')
model.cpu().eval()
return model |
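# Hedged usage sketch: both paths are placeholders for a real mmdetection
# config file and a matching trained checkpoint.
model = build_model_from_cfg('configs/retinanet/retinanet_r50_fpn_1x_coco.py',
                             'checkpoints/retinanet_r50_fpn_1x_coco.pth') |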
class Text2TextGenerationPipelineTests(MonoInputPipelineCommonMixin, unittest.TestCase):
pipeline_task = 'text2text-generation'
small_models = ['patrickvonplaten/t5-tiny-random']
large_models = []
invalid_inputs = [4, '<mask>']
mandatory_keys = ['generated_text'] |
class vgg16_oicr(_OICR):
def __init__(self, classes, pretrained=False, class_agnostic=False, summary=None):
self.model_path = 'data/pretrained_model/vgg16_caffe.pth'
self.dout_base_model = 512
self.pretrained = pretrained
self.class_agnostic = class_agnostic
_OICR.__init__(self, classes, class_agnostic, tb=summary)
def _init_modules(self):
vgg = models.vgg16()
if self.pretrained:
print(('Loading pretrained weights from %s' % self.model_path))
state_dict = torch.load(self.model_path)
vgg.load_state_dict({k: v for (k, v) in state_dict.items() if (k in vgg.state_dict())})
vgg.classifier = nn.Sequential(*list(vgg.classifier._modules.values())[:(- 1)])
self.OICR_base = nn.Sequential(*list(vgg.features._modules.values())[:(- 1)])
for layer in range(10):
for p in self.OICR_base[layer].parameters():
p.requires_grad = False
for i in range(24, len(self.OICR_base)):
self.OICR_base[(i - 1)] = self.OICR_base[i]
self.OICR_base = self.OICR_base[:(- 1)]
for i in range(3):
self.OICR_base[(23 + (2 * i))].dilation = (2, 2)
self.OICR_base[(23 + (2 * i))].padding = (2, 2)
self.OICR_top = vgg.classifier
self.midn_score0 = nn.Linear(4096, self.n_classes)
self.midn_score1 = nn.Linear(4096, self.n_classes)
self.ic_score = nn.Linear(4096, (self.n_classes + 1))
self.ic_score1 = nn.Linear(4096, (self.n_classes + 1))
self.ic_score2 = nn.Linear(4096, (self.n_classes + 1))
self.groups = self.get_parameter_groups()
def _head_to_tail(self, pool5):
pool5_flat = pool5.view(pool5.size(0), (- 1))
fc7 = self.OICR_top(pool5_flat)
return fc7 |