class DataLoader():
def __init__(self, module_name, train_bs, eval_bs, device, log):
self.module_name = module_name
split_chars = (lambda x: list(x))
source = Field(tokenize=split_chars, init_token='<sos>', eos_token='<eos>', batch_first=True)
target = Field(tokenize=split_chars, init_token='<sos>', eos_token='<eos>', batch_first=True)
log('Loading FULL datasets ...')
folder = os.path.join(DATASET_TARGET_DIR, module_name)
(train_dataset, eval_dataset, _) = TranslationDataset.splits(path=folder, root=folder, exts=(INPUTS_FILE_ENDING, TARGETS_FILE_ENDING), fields=(source, target), train=TRAIN_FILE_NAME, validation=EVAL_FILE_NAME, test=EVAL_FILE_NAME)
log('Building vocab ...')
source.build_vocab(train_dataset)
target.vocab = source.vocab
log('Creating iterators ...')
train_iterator = Iterator(dataset=train_dataset, batch_size=train_bs, train=True, repeat=True, shuffle=True, device=device)
eval_iterator = Iterator(dataset=eval_dataset, batch_size=eval_bs, train=False, repeat=False, shuffle=False, device=device)
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.train_iterator = train_iterator
self.eval_iterator = eval_iterator
self.source = source
        self.target = target

class TestOurQueue(unittest.TestCase):
def test_simple(self):
q = OurQueue()
q.push(0)
q.push(((0.8 * 3600) * 24))
q.push(((5 * 3600) * 24))
q.push(((40 * 3600) * 24))
self.assertEqual(q.get_counters(((40 * 3600) * 24)), [4, 1, 1, 1, 1])
def test_complex(self):
q = OurQueue()
q.push(0)
q.push(10)
q.push(3599)
q.push(3600)
q.push(3601)
q.push((3600 * 24))
q.push(((3600 * 24) + 1))
q.push(((3600 * 24) * 7))
q.push((((3600 * 24) * 7) + 1))
q.push((((3600 * 24) * 7) * 30))
q.push(((((3600 * 24) * 7) * 30) + 1))
        self.assertEqual(q.get_counters(((((3600 * 24) * 7) * 30) + 1)), [11, 2, 2, 2, 2])

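# A minimal OurQueue sketch that satisfies the tests above. The window
# lengths (all time, 30 days, 1 week, 1 day, 1 hour) are an assumption
# inferred from the expected counters, not taken from the original source.
class OurQueue:
    WINDOWS = [float('inf'), 30 * 24 * 3600, 7 * 24 * 3600, 24 * 3600, 3600]

    def __init__(self):
        self.timestamps = []

    def push(self, t):
        self.timestamps.append(t)

    def get_counters(self, now):
        # Count pushes that fall inside each window ending at `now`.
        return [sum(1 for t in self.timestamps if now - t < w) for w in self.WINDOWS]
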
def download_from_google_drive(file_id, output_dir):
    url = ('https://drive.google.com/uc?id=%s' % file_id)  # URL reconstructed; gdown's standard uc?id= form assumed
output = os.path.join(output_dir, 'tmp.tar.gz')
gdown.download(url, output, quiet=False)
file = tarfile.open(output, 'r:gz')
file.extractall(output_dir)
file.close()
os.remove(output)
target_dir = glob.glob(('%s/*' % output_dir))[0]
files_to_move = glob.glob(('%s/*' % target_dir))
for f in files_to_move:
shutil.move(f, output_dir)
os.rmdir(target_dir)
    print()

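# Hypothetical usage: the id comes from a Drive share link of the form
# https://drive.google.com/file/d/<FILE_ID>/view (the id below is a placeholder).
# download_from_google_drive('1aBcDeFgHiJkLmN', './datasets')
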
def glue_compute_metrics(task_name, preds, labels):
warnings.warn(DEPRECATION_WARNING, FutureWarning)
requires_sklearn(glue_compute_metrics)
assert (len(preds) == len(labels)), f'Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}'
if (task_name == 'cola'):
return {'mcc': matthews_corrcoef(labels, preds)}
elif (task_name == 'sst-2'):
return {'acc': simple_accuracy(preds, labels)}
elif (task_name == 'mrpc'):
return acc_and_f1(preds, labels)
elif (task_name == 'sts-b'):
return pearson_and_spearman(preds, labels)
elif (task_name == 'qqp'):
return acc_and_f1(preds, labels)
elif (task_name == 'mnli'):
return {'mnli/acc': simple_accuracy(preds, labels)}
elif (task_name == 'mnli-mm'):
return {'mnli-mm/acc': simple_accuracy(preds, labels)}
elif (task_name == 'qnli'):
return {'acc': simple_accuracy(preds, labels)}
elif (task_name == 'rte'):
return {'acc': simple_accuracy(preds, labels)}
elif (task_name == 'wnli'):
return {'acc': simple_accuracy(preds, labels)}
elif (task_name == 'hans'):
return {'acc': simple_accuracy(preds, labels)}
else:
        raise KeyError(task_name)

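# For reference, the helpers used above are defined in transformers' GLUE
# metrics module roughly as follows (f1_score comes from scikit-learn,
# pearsonr/spearmanr from scipy.stats):
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score

def simple_accuracy(preds, labels):
    return (preds == labels).mean()

def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {'acc': acc, 'f1': f1, 'acc_and_f1': ((acc + f1) / 2)}

def pearson_and_spearman(preds, labels):
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {'pearson': pearson_corr, 'spearmanr': spearman_corr, 'corr': ((pearson_corr + spearman_corr) / 2)}
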
class Gowalla(BaseData):
def __init__(self, data_root: Optional[str]=None) -> None:
super().__init__('gowalla', data_root)
        self._content = {'num_users': 29858, 'num_items': 40981, 'num_interactions': 1027370, 'train_adj_list': {'upon': [{'filename': 'train.txt', 'md5': '5eec1eb2edb8dd648377d348b8e136cf'}], 'loader': partial(load_from_txt, dtype='int', sep=' ')}, 'test_adj_list': {'upon': [{'filename': 'test.txt', 'md5': 'c04e2c4bcd2389f53ed'}], 'loader': partial(load_from_txt, dtype='int', sep=' ')}}

def basic_blocks(dim, index, layers, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU, norm_layer=GroupNorm, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-05):
blocks = []
for block_idx in range(layers[index]):
block_dpr = ((drop_path_rate * (block_idx + sum(layers[:index]))) / (sum(layers) - 1))
blocks.append(PoolFormerBlock(dim, pool_size=pool_size, mlp_ratio=mlp_ratio, act_layer=act_layer, norm_layer=norm_layer, drop=drop_rate, drop_path=block_dpr, use_layer_scale=use_layer_scale, layer_scale_init_value=layer_scale_init_value))
blocks = nn.Sequential(*blocks)
    return blocks

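# The drop-path rate above scales linearly with global block depth. A small
# worked example (layer counts and rate are illustrative assumptions):
layers, drop_path_rate = [2, 2, 6, 2], 0.1
for index in range(len(layers)):
    stage_rates = [(drop_path_rate * (i + sum(layers[:index]))) / (sum(layers) - 1) for i in range(layers[index])]
    print(index, [round(r, 3) for r in stage_rates])
# index 0 -> [0.0, 0.009], ..., index 3 -> [0.091, 0.1]
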
def slice_data(START, END):
data = load_dataset_foreign(data_name='yelp')
data_pos = data[(data['label'] == 1)].reset_index(drop=True)
data_neg = data[(data['label'] == 0)].reset_index(drop=True)
train = pd.concat([data_pos[START:END], data_neg[START:END]]).reset_index(drop=True)
    return train

@app.route('/stylize/', methods=['POST'])  # route decorator reconstructed; Flask app object assumed
def stylize():
inputs = json.loads(request.data)
session_token = inputs['session_token']
objects = inputs['objects']
object_index = inputs['object_index']
option_index = inputs['option_index']
preview = None
if ('preview' in inputs):
preview = inputs['preview']
backdrop = (None if ('backdrop' not in inputs) else inputs['backdrop'])
guidance_scale = (None if ('guidance_scale' not in inputs) else inputs['guidance_scale'])
if (object_index >= len(objects)):
return (f'object index {object_index} out of range for object list of length {len(objects)}', 500)
if (option_index >= len(objects[object_index]['results'])):
        return (f"option index {option_index} out of range for option list of length {len(objects[object_index]['results'])}", 500)
query = objects[object_index]['query']
shape_path = objects[object_index]['results'][option_index]['model']
shape_info = objects[object_index]['results'][option_index]
input_event = RetrievalSelection(session_token, selection={'query': query, 'shape_path': shape_path, 'shape_info': shape_info, 'object_index': object_index, 'option_index': option_index})
input_event_file = input_event.save_to_event_file(event_file(session_token))
output_event = TexturedObject(session_token).tick()
if shape_path.endswith('.glb'):
logger.info(f'Converting {shape_path}...')
shape_path = glbfile2objfile(shape_path, temp_file(cfg.general.cache_dir, extension='.obj'), transform=(lambda mesh: global_rotation(recenter(resize_to_unit_cube(mesh), center_fn=get_center_bbox), rotation_matrix=np.array([[1, 0, 0], [0, 0, 1], [0, (- 1), 0]]))))
logger.info(f'Done! Saved to {shape_path}.')
object_id = str(ObjectId())
query_cmd_string = (('"' + query) + '"')
cmd = ['python', 'texturize/TexturingInterface.py', f'+texturing.query_instance={query_cmd_string}', f'+texturing.shape_path_instance={str(shape_path)}', f'+texturing.backdrop_instance={str(backdrop)}', f'+texturing.guidance_scale_instance={str(guidance_scale)}', f'+texturing.object_id_instance={str(object_id)}']
try:
subprocess.run(cmd, timeout=cfg.texturing.timeout)
except subprocess.TimeoutExpired:
        logger.info(f'Texturing of {shape_path} expired! Moving on.')
return ('Subprocess call timed out.', 500)
directory_path = os.path.join(cfg.server.mesh_download_root, object_id)
mesh_obj = os.path.join(directory_path, 'mesh', 'mesh.obj')
mesh_mtl = os.path.join(directory_path, 'mesh', 'mesh.mtl')
mesh_alb = os.path.join(directory_path, 'mesh', 'albedo.png')
mesh_output = {'top_directory': directory_path, 'obj': mesh_obj, 'mtl': mesh_mtl, 'albedo': mesh_alb}
if cfg.server.bucket_mesh:
logger.info('Beginning upload procedure to S3 buckets!')
output_id = str(ObjectId())
shutil.make_archive(os.path.join(cfg.general.cache_dir, output_id), 'zip', os.path.join(directory_path, 'mesh'))
logger.info(f'Compressed {directory_path}/mesh.')
download_url = buckets.upload_file(cfg.server.mesh_download_aws_bucket, os.path.join(cfg.general.cache_dir, f'{output_id}.zip'), f'{output_id}.zip')
logger.info(f'Uploaded to {download_url}')
mesh_output['aws_bucket_zip'] = download_url
logger.info('Finished upload procedure to S3 buckets')
output_event.tock().update(mesh_output)
output_event_file = output_event.save_to_event_file(event_file(session_token))
    return jsonify(mesh_output)

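# A hypothetical client call against the /stylize/ endpoint above; the host,
# port and all payload values are placeholders for illustration.
import json
import requests

payload = {
    'session_token': 'abc123',
    'objects': objects,      # object list from a prior retrieval response (placeholder)
    'object_index': 0,
    'option_index': 0,
    'guidance_scale': 7.5,   # optional; the server falls back to None
}
resp = requests.post('http://localhost:5000/stylize/', data=json.dumps(payload))
mesh_paths = resp.json()     # {'top_directory': ..., 'obj': ..., 'mtl': ..., 'albedo': ...}
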
def main(args, model=None) -> FEVERClassifierModule:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
if ((len(os.listdir(args.output_dir)) > 3) and args.do_train):
raise ValueError('Output directory ({}) already exists and is not empty.'.format(args.output_dir))
if (model is None):
model: FEVERClassifierModule = FEVERClassifierModule(args)
logger = True
trainer: pl.Trainer = generic_train(model, args, logging_callback=Seq2SeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric), logger=logger)
pickle_save(model.hparams, (model.output_dir / 'hparams.pkl'))
if (not args.do_predict):
return model
model.hparams.test_checkpoint = ''
checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, '*.ckpt'), recursive=True)))
print(checkpoints)
if checkpoints:
model.hparams.test_checkpoint = checkpoints[(- 1)]
trainer.resume_from_checkpoint = checkpoints[(- 1)]
trainer.logger.log_hyperparams(model.hparams)
trainer.model = model
trainer.test(ckpt_path=checkpoints[(- 1)])
    return model

@register_dataset('kitti_lmdb')  # hypothetical registry decorator; original name unknown
class KittiRawLMDBDataset(KittiRawDataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.image_dbs = {}
self.depth_dbs = {}
self.poses_dbs = {}
self.hints_dbs = {}
self.calib_dbs = {}
self.preload()
def preload(self) -> None:
drives = set((item['seq'] for item in self.items))
for d in drives:
self.image_dbs[f'{d}/image_02'] = kr.load_images(*d.split('/'), 'image_02')
self.image_dbs[f'{d}/image_03'] = kr.load_images(*d.split('/'), 'image_03')
if self.use_hints:
for d in drives:
self.hints_dbs[f'{d}/image_02'] = kr.load_hints(*d.split('/'), 'image_02')
self.hints_dbs[f'{d}/image_03'] = kr.load_hints(*d.split('/'), 'image_03')
if self.use_depth:
if self.use_benchmark:
for d in drives:
self.depth_dbs[f'{d}/image_02'] = kr.load_depths(*d.split('/'), 'image_02')
self.depth_dbs[f'{d}/image_03'] = kr.load_depths(*d.split('/'), 'image_03')
else:
seqs = set((seq.split('/')[0] for seq in drives))
self.calib_dbs = {s: kr.load_calib(s) for s in seqs}
for d in drives:
(s, d2) = d.split('/')
self.depth_dbs[d] = kr.load_velo_depths(s, d2, self.calib_dbs[s])
def parse_items(self) -> tuple[(Path, list[KittiRawItem])]:
file = kr.get_split_file(self.depth_split, self.mode)
lines = [line.split() for line in io.readlines(file)]
items = [{'seq': line[0], 'cam': self.side2cam[line[2]], 'stem': int(line[1])} for line in lines]
return (file, items)
def load_image(self, data: KittiRawItem, offset: int=0) -> Image:
k = f"{(data['stem'] + offset):010}"
kdb = f"{data['seq']}/{data['cam']}"
db = self.image_dbs[kdb]
if (k not in db):
raise FileNotFoundError(f'Could not find specified file "{kdb}/{k}" with "offset={offset!r}"')
image = db[k].resize(self.size, resample=Image.BILINEAR)
return image
def load_depth(self, data: KittiRawItem) -> np.ndarray:
if self.use_benchmark:
k = f"{data['stem']:010}"
kdb = f"{data['seq']}/{data['cam']}"
depth = self.depth_dbs[kdb][k]
else:
k = (f"{data['stem']:010}", int(data['cam'][(- 2):]))
kdb = data['seq']
depth = self.depth_dbs[kdb][k]
depth = skit.resize(depth, (self.h_full, self.w_full), order=0, preserve_range=True, mode='constant')
return depth[(..., None)]
def load_hint(self, data: KittiRawItem) -> np.ndarray:
k = f"{data['stem']:010}"
kdb = f"{data['seq']}/{data['cam']}"
depth = cv2.resize(self.hints_dbs[kdb][k], dsize=self.size, interpolation=cv2.INTER_NEAREST)
        return depth[(..., None)]

def test_core_count(vrblvl=0):
cores = get_core_count(vrblvl)
if (vrblvl > 0):
print('The number of available cores :', cores)
fail = int((cores <= 0))
if (vrblvl > 0):
if (fail == 0):
print('=> Test on get core count passed.')
else:
print('Test on get core count failed!')
    return fail

def _init_weight_goog(m, n='', fix_group_fanout=True):
if isinstance(m, CondConv2d):
fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
if fix_group_fanout:
fan_out //= m.groups
init_weight_fn = get_condconv_initializer((lambda w: nn.init.normal_(w, 0, math.sqrt((2.0 / fan_out)))), m.num_experts, m.weight_shape)
init_weight_fn(m.weight)
if (m.bias is not None):
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Conv2d):
fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
if fix_group_fanout:
fan_out //= m.groups
nn.init.normal_(m.weight, 0, math.sqrt((2.0 / fan_out)))
if (m.bias is not None):
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
fan_out = m.weight.size(0)
fan_in = 0
if ('routing_fn' in n):
fan_in = m.weight.size(1)
init_range = (1.0 / math.sqrt((fan_in + fan_out)))
nn.init.uniform_(m.weight, (- init_range), init_range)
        nn.init.zeros_(m.bias)

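# A sketch of how this initializer is typically applied: walk named modules
# so the module name can be checked for 'routing_fn' (mirrors how timm's
# efficientnet_init_weights drives it; the function name here is assumed).
def init_model_weights(model, init_fn=_init_weight_goog):
    for n, m in model.named_modules():
        init_fn(m, n)
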
@register_metric
def fid10k_full(opts):
opts.dataset_kwargs.update(max_size=None, xflip=False)
fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=10000)
    return dict(fid10k_full=fid)

def get_p_coupler_config(config, flattened):
    return get_coupler_config('p_mu', 'p_sigma', 'p', config, flattened)

@dataclass
class LLMResult():
result: typing.Any
prompt: str
answer: str
duration: float = 0
tokens_query: int = 0
    tokens_response: int = 0

def load_image_resized(fn, sz):
    return cv2.resize(imageio.imread(str(fn)), dsize=(sz, sz), interpolation=cv2.INTER_CUBIC).astype(np.float32)

@DATASETS.register_module  # registry object assumed
class SemanticNuscDataset(Dataset):
NumPointFeatures = 5
CLASSES = 17
def __init__(self, info_path, root_path, cfg=None, pipeline=None, class_names=None, cam_names=None, cam_chan=None, cam_attributes=None, img_resized_shape=None, test_mode=False, sample=False, nsweeps=1, load_interval=1, version='v1.0-trainval', **kwargs):
super().__init__()
self.test_mode = test_mode
self._root_path = root_path
self._class_names = class_names
self._use_img = (cam_names is not None)
if self._use_img:
self._cam_names = cam_names
self._cam_chan = cam_chan
self.img_resized_shape = img_resized_shape
_cam_attributes = {}
for (cam_id, cam_attribute) in cam_attributes.items():
mean_np = np.array(cam_attribute['mean'], dtype=np.float32).reshape(1, 1, 3)
std_np = np.array(cam_attribute['std'], dtype=np.float32).reshape(1, 1, 3)
_cam_attributes[cam_id] = {'mean': mean_np, 'std': std_np}
self._cam_attributes = _cam_attributes
self.learning_map = learning_map
self.load_interval = load_interval
self.sample = sample
self.nsweeps = nsweeps
print('Using {} sweeps'.format(nsweeps))
self._info_path = info_path
self._class_names = class_names
self._num_point_features = (SemanticNuscDataset.NumPointFeatures if (nsweeps == 1) else (SemanticNuscDataset.NumPointFeatures + 1))
self._set_group_flag()
if (pipeline is None):
self.pipeline = None
else:
self.pipeline = Compose(pipeline)
if self.test_mode:
self.nusc = NuScenes(version=version, dataroot=str(self._root_path), verbose=True)
def reset(self):
assert False
def load_infos(self, info_path):
print('==> Using info: {}'.format(info_path))
with open(info_path, 'rb') as f:
_semantic_nusc_infos_all = pickle.load(f)
self._semantic_nusc_infos = _semantic_nusc_infos_all[::self.load_interval]
print('Using seg annotated {} frames out of {} frames'.format(len(self._semantic_nusc_infos), len(_semantic_nusc_infos_all)))
def __len__(self):
if (not hasattr(self, '_semantic_nusc_infos')):
self.load_infos(self._info_path)
return len(self._semantic_nusc_infos)
def _set_group_flag(self):
self.flag = np.ones(len(self), dtype=np.uint8)
def get_sensor_data(self, idx):
info = self._semantic_nusc_infos[idx]
info['dim'] = {'points': self._num_point_features, 'sem_labels': 1}
if self._use_img:
res_cam = {'names': self._cam_names, 'chan': self._cam_chan, 'attributes': self._cam_attributes, 'resized_shape': self.img_resized_shape, 'annotations': None}
else:
res_cam = {}
res = {'lidar': {'type': 'lidar', 'points': None, 'nsweeps': self.nsweeps, 'annotations': None}, 'metadata': {'image_prefix': self._root_path, 'num_point_features': self._num_point_features, 'token': info['token'], 'num_points_of_top_lidar': None}, 'calib': None, 'cam': res_cam, 'mode': ('val' if self.test_mode else 'train'), 'learning_map': self.learning_map}
(data, _) = self.pipeline(res, info)
return data
def __getitem__(self, idx):
return self.get_sensor_data(idx)
def get_anno_for_eval(self, token):
lidar_sd_token = self.nusc.get('sample', token)['data']['LIDAR_TOP']
lidarseg_labels_filename = os.path.join(self.nusc.dataroot, self.nusc.get('lidarseg', lidar_sd_token)['filename'])
point_labels = np.fromfile(lidarseg_labels_filename, dtype=np.uint8).reshape((- 1))
point_labels = np.vectorize(self.learning_map.__getitem__)(point_labels).astype(np.float32)
anno_dict = {'point_sem_labels': point_labels}
return anno_dict
def evaluation(self, detections, output_dir=None, testset=False, **kwargs):
if (not testset):
SemNuScene_label_name = labels_16
unique_label = (np.asarray(sorted(list(SemNuScene_label_name.keys())))[1:] - 1)
unique_label_str = [SemNuScene_label_name[x] for x in (unique_label + 1)]
hist_list = []
for (token, pred_dict) in detections.items():
anno_dict = self.get_anno_for_eval(token)
assert ('point_sem_labels' in anno_dict)
pred_point_sem_labels = pred_dict['pred_point_sem_labels'].numpy()
gt_point_sem_labels = anno_dict['point_sem_labels']
assert (pred_point_sem_labels.shape[0] == gt_point_sem_labels.shape[0]), 'pred_point_sem_labels.shape: {}, gt_point_sem_labels.shape: {}'.format(pred_point_sem_labels.shape, gt_point_sem_labels.shape)
hist_list.append(fast_hist_crop_func(output=pred_point_sem_labels, target=gt_point_sem_labels, unique_label=unique_label))
per_class_ious = per_class_iou_func(sum(hist_list))
miou = np.nanmean(per_class_ious)
detail = {}
result = {'mIoU': (miou * 100)}
for (class_name, class_iou) in zip(unique_label_str, per_class_ious):
result[class_name] = (class_iou * 100)
res = {'results': result, 'detail': detail}
else:
res = None
output_dir_ = (output_dir + '/results_folder/lidarseg/test')
json_dir = (output_dir + '/results_folder/test')
results_dir = (output_dir + '/results_folder')
meta_ = {'meta': {'use_camera': self._use_img, 'use_lidar': True, 'use_radar': False, 'use_map': False, 'use_external': False}}
file_name = (json_dir + '/submission.json')
if (not os.path.exists(os.path.dirname(file_name))):
os.makedirs(os.path.dirname(file_name))
with open(file_name, 'w') as file_object:
json.dump(meta_, file_object)
for (token, pred_dict) in detections.items():
sample = self.nusc.get('sample', token)
lidar_sd_token = sample['data']['LIDAR_TOP']
pred_point_sem_labels = pred_dict['pred_point_sem_labels'].numpy()
bin_file_path = (((output_dir_ + '/') + lidar_sd_token) + '_lidarseg.bin')
if (not os.path.exists(os.path.dirname(bin_file_path))):
os.makedirs(os.path.dirname(bin_file_path))
np.array(pred_point_sem_labels).astype(np.uint8).tofile(bin_file_path)
validate_submission(nusc=self.nusc, results_folder=results_dir, eval_set='test', verbose=True, zip_out=output_dir)
        return (res, None)

def default_collate(batch):
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, torch.Tensor):
out = None
return torch.stack(batch, 0, out=out)
elif ((elem_type.__module__ == 'numpy') and (elem_type.__name__ != 'str_') and (elem_type.__name__ != 'string_')):
elem = batch[0]
if (elem_type.__name__ == 'ndarray'):
if (np_str_obj_array_pattern.search(elem.dtype.str) is not None):
raise TypeError(default_collate_err_msg_format.format(elem.dtype))
return default_collate([torch.as_tensor(b) for b in batch])
elif (elem.shape == ()):
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int_classes):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, container_abcs.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in elem}
elif (isinstance(elem, tuple) and hasattr(elem, '_fields')):
return elem_type(*(default_collate(samples) for samples in zip(*batch)))
elif isinstance(elem, container_abcs.Sequence):
transposed = zip(*batch)
return [default_collate(samples) for samples in transposed]
    else:
        try:
            from maskrcnn_benchmark.structures.bounding_box import BoxList
            if isinstance(elem, BoxList):
                return batch
        except ImportError:
            pass
        raise TypeError(default_collate_err_msg_format.format(elem_type))

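# Hypothetical usage: hand the collate function to a DataLoader so batches
# of dicts / arrays / BoxList-bearing samples are assembled with it
# (`dataset` is a placeholder).
# loader = torch.utils.data.DataLoader(dataset, batch_size=4, collate_fn=default_collate)
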
def spCNN():
filename = sys.argv[1]
config = {}
config['jobs'] = []
job1 = {}
sp_list = [0.3, 0.2, 0.1, 0.05, 0.02, 0.01, 0.007, 0.005]
channels = np.array([32, 32, 64, 64])
factor_list = [1, 2, 4]
for factor in factor_list:
for sp in sp_list:
job = {}
job['eps'] = 0.3
job['alpha'] = 0.01
job['model'] = {}
job['model']['name'] = 'spCNN'
job['model']['hidden_size'] = 20000
c = (channels * factor)
job['model']['channels'] = [int(c[0]), int(c[1]), int(c[2]), int(c[3])]
job['model']['sp1'] = sp
job['model']['sp2'] = sp
job['logfilename'] = './log/MNIST/{}_f{}_sp{}.log'.format(job['model']['name'], factor, sp)
job['savename'] = './models/MNIST/{}_f{}_sp{}.pth'.format(job['model']['name'], factor, sp)
job['epoch'] = 20
job['adv_train'] = False
job['lr'] = 0.01
job['momentum'] = 0.9
job['train_batch_size'] = 200
job['test_batch_size'] = 100
job['n_test_adv'] = 1000
config['jobs'].append(job)
j = json.dumps(config, indent=4)
with open(filename, 'w+') as f:
        f.write(j)

@register_model
def cspresnext50_iabn(pretrained=False, **kwargs):
norm_layer = get_norm_act_layer('iabn')
    return _create_cspnet('cspresnext50_iabn', pretrained=pretrained, norm_layer=norm_layer, **kwargs)

@register_model
def tv_resnet34(pretrained=False, **kwargs):
model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs)
    return _create_resnet('tv_resnet34', pretrained, **model_args)

class SklearnDataModule(LightningDataModule):
name = 'sklearn'
def __init__(self, X, y, x_val=None, y_val=None, x_test=None, y_test=None, val_split=0.2, test_split=0.1, num_workers=0, random_state=1234, shuffle=True, batch_size: int=16, pin_memory=True, drop_last=False, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.num_workers = num_workers
self.batch_size = batch_size
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if shuffle:
(X, y) = sk_shuffle(X, y, random_state=random_state)
val_split = (0 if ((x_val is not None) or (y_val is not None)) else val_split)
test_split = (0 if ((x_test is not None) or (y_test is not None)) else test_split)
hold_out_split = (val_split + test_split)
if (hold_out_split > 0):
val_split = (val_split / hold_out_split)
hold_out_size = math.floor((len(X) * hold_out_split))
(x_holdout, y_holdout) = (X[:hold_out_size], y[:hold_out_size])
test_i_start = int((val_split * hold_out_size))
(x_val_hold_out, y_val_holdout) = (x_holdout[:test_i_start], y_holdout[:test_i_start])
(x_test_hold_out, y_test_holdout) = (x_holdout[test_i_start:], y_holdout[test_i_start:])
(X, y) = (X[hold_out_size:], y[hold_out_size:])
if ((x_val is None) and (y_val is None) and (val_split > 0)):
(x_val, y_val) = (x_val_hold_out, y_val_holdout)
if ((x_test is None) and (y_test is None) and (test_split > 0)):
(x_test, y_test) = (x_test_hold_out, y_test_holdout)
self._init_datasets(X, y, x_val, y_val, x_test, y_test)
def _init_datasets(self, X: np.ndarray, y: np.ndarray, x_val: np.ndarray, y_val: np.ndarray, x_test: np.ndarray, y_test: np.ndarray) -> None:
self.train_dataset = SklearnDataset(X, y)
self.val_dataset = SklearnDataset(x_val, y_val)
self.test_dataset = SklearnDataset(x_test, y_test)
def train_dataloader(self) -> DataLoader:
loader = DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=self.shuffle, num_workers=self.num_workers, drop_last=self.drop_last, pin_memory=self.pin_memory)
return loader
def val_dataloader(self) -> DataLoader:
loader = DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, drop_last=self.drop_last, pin_memory=self.pin_memory)
return loader
def test_dataloader(self) -> DataLoader:
loader = DataLoader(self.test_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, drop_last=self.drop_last, pin_memory=self.pin_memory)
        return loader

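# Example usage with a scikit-learn toy dataset (the dataset choice is an
# illustrative assumption):
from sklearn.datasets import load_diabetes

X, y = load_diabetes(return_X_y=True)
dm = SklearnDataModule(X, y, val_split=0.2, test_split=0.1, batch_size=32)
train_loader = dm.train_dataloader()
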
@unittest.skipIf(not huggingface_hub, 'Requires huggingface_hub install')
class TestHuggingFaceHub(unittest.TestCase):
    @torch.no_grad()
def test_hf_fastspeech2(self):
hf_model_id = 'facebook/fastspeech2-en-ljspeech'
(models, cfg, task) = load_model_ensemble_and_task_from_hf_hub(hf_model_id)
        self.assertTrue((len(models) > 0))

def search_absorbe_tuning_bn(model, prev=None, remove_bn=True, verbose=False):
with torch.no_grad():
for m in model.children():
if (is_fake_bn(m) and is_absorbing(prev) and need_tuning(prev)):
absorb_bn(prev, m.bn, remove_bn=remove_bn, verbose=verbose)
m.forward = m.forward_orig
m.bn = None
search_absorbe_tuning_bn(m, remove_bn=remove_bn, verbose=verbose)
            prev = m

class CLAM_MB(_CLAM_Base):
sizes = {'small': [1024, 512, 256], 'big': [1024, 512, 384], 'multiscale': [2048, 512, 256]}
def __init__(self, size: Union[(str, List[int])]='small', dropout: bool=False, k_sample: int=8, n_classes: int=2, instance_loss_fn: Optional[Callable]=None, subtyping: bool=False, gate: bool=True) -> None:
super().__init__(size=size, dropout=dropout, k_sample=k_sample, n_classes=n_classes, instance_loss_fn=instance_loss_fn, subtyping=subtyping, gate=gate, multi_head_attention=True)
self.classifiers = nn.ModuleList([nn.Linear(self.size[1], 1) for _ in range(n_classes)])
initialize_weights(self)
def _inst_eval(self, A, h, classifier, index):
return super()._inst_eval(A[index], h, classifier)
def _inst_eval_out(self, A, h, classifier, index):
return super()._inst_eval(A[index], h, classifier)
def _logits_from_m(self, M):
logits = torch.empty(1, self.n_classes).float().to(M.device)
for c in range(self.n_classes):
logits[(0, c)] = self.classifiers[c](M[c])
        return logits

def resnetal50(**kwargs):
model = ResNetAL(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
    return model

def read_all_sentences(input_files):
all_sentences = []
for input_file in input_files:
with open(input_file, 'r') as reader:
for line in reader.readlines():
line = line.strip()
if (not line):
continue
else:
all_sentences.append(line)
    return all_sentences

class MassMapsDatasetResized(Dataset):
def __init__(self, root_dir, img_size=64):
dataset_zip = np.load(opj(root_dir, 'cosmo_resize_{}.npz'.format(img_size)))
self.imgs = dataset_zip['imgs']
self.params = dataset_zip['params']
def __len__(self):
return len(self.params)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample = self._ToTensor(self.imgs[idx])
params = torch.from_numpy(self.params[idx].astype('float32'))
return (sample, params)
def _ToTensor(self, x):
        return torch.from_numpy(x.reshape(([1] + list(x.shape))).astype('float32'))

class TestWordlistDataset(TestCase):
def setUp(self):
clear_vocabs()
build_vocabs('data/test.es-fr-en.toy.cog', 'es', 'en')
def test_basic(self):
vocab = get_vocab('es')
dataset = WordlistDataset(vocab.words[1:], 'es')
ans = dataset[0].char_seq
        self.assertListEqual(ans.tolist(), np.asarray(['e', 's', '2', EOW]).tolist())

def get_model_and_tokenizer(name):
global T5_CONFIGS
if (name not in T5_CONFIGS):
T5_CONFIGS[name] = dict()
if ('model' not in T5_CONFIGS[name]):
T5_CONFIGS[name]['model'] = get_model(name)
if ('tokenizer' not in T5_CONFIGS[name]):
T5_CONFIGS[name]['tokenizer'] = get_tokenizer(name)
    return (T5_CONFIGS[name]['model'], T5_CONFIGS[name]['tokenizer'])

def filter_manifest_df(df, is_train_split=False, extra_filters=None, min_n_frames=5, max_n_frames=3000):
filters = {'no speech': (df['audio'] == ''), f'short speech (<{min_n_frames} frames)': (df['n_frames'] < min_n_frames), 'empty sentence': (df['tgt_text'] == '')}
if is_train_split:
filters[f'long speech (>{max_n_frames} frames)'] = (df['n_frames'] > max_n_frames)
if (extra_filters is not None):
filters.update(extra_filters)
invalid = reduce((lambda x, y: (x | y)), filters.values())
valid = (~ invalid)
print((('| ' + ', '.join((f'{n}: {f.sum()}' for (n, f) in filters.items()))) + f', total {invalid.sum()} filtered, {valid.sum()} remained.'))
    return df[valid]

class CPUAdam(torch.optim.Optimizer):
optimizer_id = 0
def __init__(self, params, lr=0.001, bias_correction=True, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, use_fp16_stats=False):
defaults = {'lr': lr, 'bias_correction': bias_correction, 'betas': betas, 'eps': eps, 'weight_decay': weight_decay}
super().__init__(params, defaults)
self.use_fp16_stats = use_fp16_stats
self.FLOAT16_MAX = 65504.0
if (not has_deepspeed):
raise ImportError('Please install DeepSpeed: pip install deepspeed')
self.opt_id = CPUAdam.optimizer_id
CPUAdam.optimizer_id = (CPUAdam.optimizer_id + 1)
self.ds_opt_adam = _get_cpu_adam()
adamw_mode = True
self.ds_opt_adam.create_adam(self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode)
def supports_memory_efficient_fp16(self):
return True
def supports_flat_params(self):
return True
    @torch.no_grad()
def step(self, closure=None):
loss = None
if (closure is not None):
with torch.enable_grad():
loss = closure()
torch.cuda.synchronize()
for (group_id, group) in enumerate(self.param_groups):
for (param_id, p) in enumerate(group['params']):
if (p.grad is None):
continue
state = self.state[p]
if (len(state) == 0):
state['step'] = 0
dtype = (torch.float16 if self.use_fp16_stats else p.data.dtype)
state['exp_avg'] = torch.zeros_like(p.data, dtype=dtype, device='cpu')
state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=dtype, device='cpu')
if self.use_fp16_stats:
assert torch.is_floating_point(p.data)
state['exp_avg_scale'] = 1.0
state['exp_avg_sq_scale'] = 1.0
(exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
p_data_bak = p.data
p.data = p.data.to(dtype=torch.float32, device='cpu')
p.grad.data = p.grad.data.to(dtype=torch.float32, device='cpu')
if self.use_fp16_stats:
exp_avg = (exp_avg.float() * state['exp_avg_scale'])
exp_avg_sq = (exp_avg_sq.float() * state['exp_avg_sq_scale'])
state['step'] += 1
(beta1, beta2) = group['betas']
self.ds_opt_adam.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'], group['weight_decay'], group['bias_correction'], p.data, p.grad.data, exp_avg, exp_avg_sq)
if (p_data_bak.data_ptr() != p.data.data_ptr()):
p_data_bak.copy_(p.data)
p.data = p_data_bak
if self.use_fp16_stats:
def inf_norm(t):
return torch.norm(t, float('inf'))
(state['exp_avg_scale'], state['exp_avg_sq_scale']) = ((1e-08 + (inf_norm(exp_avg) / self.FLOAT16_MAX)), (1e-08 + (inf_norm(exp_avg_sq) / self.FLOAT16_MAX)))
(state['exp_avg'], state['exp_avg_sq']) = ((exp_avg / state['exp_avg_scale']).half(), (exp_avg_sq / state['exp_avg_sq_scale']).half())
        return loss

@dataclass
class DataTrainingArguments():
dataset_name: str = field(metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
dataset_config_name: str = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
train_split_name: str = field(default='train+validation', metadata={'help': "The name of the training data set split to use (via the datasets library). Defaults to 'train+validation'"})
eval_split_name: str = field(default='test', metadata={'help': "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"})
audio_column_name: str = field(default='audio', metadata={'help': "The name of the dataset column containing the audio data. Defaults to 'audio'"})
text_column_name: str = field(default='text', metadata={'help': "The name of the dataset column containing the text data. Defaults to 'text'"})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of validation examples to this value if set.'})
chars_to_ignore: Optional[List[str]] = list_field(default=None, metadata={'help': 'A list of characters to remove from the transcripts.'})
eval_metrics: List[str] = list_field(default=['wer'], metadata={'help': "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"})
    max_duration_in_seconds: float = field(default=20.0, metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
min_duration_in_seconds: float = field(default=0.0, metadata={'help': 'Filter audio files that are shorter than `min_duration_in_seconds` seconds'})
preprocessing_only: bool = field(default=False, metadata={'help': 'Whether to only do data preprocessing and skip training. This is especially useful when data preprocessing errors out in distributed training due to timeout. In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets can consequently be loaded in distributed training'})
    use_auth_token: bool = field(default=False, metadata={'help': 'If :obj:`True`, will use the token generated when running :obj:`transformers-cli login` as HTTP bearer authorization for remote files.'})
unk_token: str = field(default='[UNK]', metadata={'help': 'The unk token for the tokenizer'})
pad_token: str = field(default='[PAD]', metadata={'help': 'The padding token for the tokenizer'})
word_delimiter_token: str = field(default='|', metadata={'help': 'The word delimiter token for the tokenizer'})
    phoneme_language: Optional[str] = field(default=None, metadata={'help': 'The target language that should be passed to the tokenizer for tokenization. Note that this is only relevant if the model classifies the input audio to a sequence of phoneme sequences.'})

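# These fields are normally consumed through HfArgumentParser, e.g. (model
# and training argument classes omitted for brevity):
from transformers import HfArgumentParser

parser = HfArgumentParser(DataTrainingArguments)
(data_args,) = parser.parse_args_into_dataclasses()
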
class ConcatDataset(_ConcatDataset):
def __init__(self, datasets):
super(ConcatDataset, self).__init__(datasets)
self.CLASSES = datasets[0].CLASSES
if hasattr(datasets[0], 'flag'):
flags = []
for i in range(0, len(datasets)):
flags.append(datasets[i].flag)
            self.flag = np.concatenate(flags)

def add_args(parser, cfg, prefix=''):
for (k, v) in cfg.items():
if isinstance(v, str):
parser.add_argument((('--' + prefix) + k))
elif isinstance(v, int):
parser.add_argument((('--' + prefix) + k), type=int)
elif isinstance(v, float):
parser.add_argument((('--' + prefix) + k), type=float)
elif isinstance(v, bool):
parser.add_argument((('--' + prefix) + k), action='store_true')
elif isinstance(v, dict):
            add_args(parser, v, (prefix + k + '.'))  # propagate the outer prefix so deeper nesting keeps its full dotted path
elif isinstance(v, collections_abc.Iterable):
parser.add_argument((('--' + prefix) + k), type=type(v[0]), nargs='+')
else:
            print('cannot parse key {} of type {}'.format((prefix + k), type(v)))
    return parser

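# Worked example (config values assumed): nested dict keys become dotted
# CLI flags and bools become store_true switches.
import argparse

cfg = {'lr': 0.1, 'use_ema': False, 'optimizer': {'name': 'sgd', 'momentum': 0.9}}
parser = argparse.ArgumentParser()
add_args(parser, cfg)
# Registered flags: --lr (float), --use_ema (flag),
# --optimizer.name (str), --optimizer.momentum (float)
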
class MinibatchRlBase(BaseRunner):
_eval = False
def __init__(self, algo, agent, sampler, n_steps, seed=None, affinity=None, log_interval_steps=100000.0):
n_steps = int(n_steps)
log_interval_steps = int(log_interval_steps)
affinity = (dict() if (affinity is None) else affinity)
save__init__args(locals())
def startup(self):
p = psutil.Process()
try:
if ((self.affinity.get('master_cpus', None) is not None) and self.affinity.get('set_affinity', True)):
p.cpu_affinity(self.affinity['master_cpus'])
cpu_affin = p.cpu_affinity()
except AttributeError:
cpu_affin = 'UNAVAILABLE MacOS'
logger.log(f"Runner {getattr(self, 'rank', '')} master CPU affinity: {cpu_affin}.")
if (self.affinity.get('master_torch_threads', None) is not None):
torch.set_num_threads(self.affinity['master_torch_threads'])
logger.log(f"Runner {getattr(self, 'rank', '')} master Torch threads: {torch.get_num_threads()}.")
if (self.seed is None):
self.seed = make_seed()
set_seed(self.seed)
self.rank = rank = getattr(self, 'rank', 0)
self.world_size = world_size = getattr(self, 'world_size', 1)
examples = self.sampler.initialize(agent=self.agent, affinity=self.affinity, seed=(self.seed + 1), bootstrap_value=getattr(self.algo, 'bootstrap_value', False), traj_info_kwargs=self.get_traj_info_kwargs(), rank=rank, world_size=world_size)
self.itr_batch_size = (self.sampler.batch_spec.size * world_size)
n_itr = self.get_n_itr()
self.agent.to_device(self.affinity.get('cuda_idx', None))
if (world_size > 1):
self.agent.data_parallel()
self.algo.initialize(agent=self.agent, n_itr=n_itr, batch_spec=self.sampler.batch_spec, mid_batch_reset=self.sampler.mid_batch_reset, examples=examples, world_size=world_size, rank=rank)
self.initialize_logging()
return n_itr
def get_traj_info_kwargs(self):
return dict(discount=getattr(self.algo, 'discount', 1))
def get_n_itr(self):
log_interval_itrs = max((self.log_interval_steps // self.itr_batch_size), 1)
n_itr = (math.ceil((self.n_steps / self.log_interval_steps)) * log_interval_itrs)
self.log_interval_itrs = log_interval_itrs
self.n_itr = n_itr
logger.log(f'Running {n_itr} iterations of minibatch RL.')
return n_itr
def initialize_logging(self):
self._opt_infos = {k: list() for k in self.algo.opt_info_fields}
self._start_time = self._last_time = time.time()
self._cum_time = 0.0
self._cum_completed_trajs = 0
self._last_update_counter = 0
self._cum_pyflex_steps = 0
def shutdown(self):
logger.log('Training complete.')
self.pbar.stop()
self.sampler.shutdown()
def get_itr_snapshot(self, itr):
return dict(itr=itr, cum_steps=((itr * self.sampler.batch_size) * self.world_size), agent_state_dict=self.agent.state_dict(), optimizer_state_dict=self.algo.optim_state_dict())
def save_itr_snapshot(self, itr):
logger.log('saving snapshot...')
params = self.get_itr_snapshot(itr)
logger.save_itr_params(itr, params)
logger.log('saved')
def store_diagnostics(self, itr, traj_infos, opt_info):
self._cum_completed_trajs += len(traj_infos)
for (k, v) in self._opt_infos.items():
new_v = getattr(opt_info, k, [])
v.extend((new_v if isinstance(new_v, list) else [new_v]))
self.pbar.update(((itr + 1) % self.log_interval_itrs))
def log_diagnostics(self, itr, traj_infos=None, eval_time=0):
if (itr > 0):
self.pbar.stop()
self.save_itr_snapshot(itr)
new_time = time.time()
self._cum_time = (new_time - self._start_time)
train_time_elapsed = ((new_time - self._last_time) - eval_time)
new_updates = (self.algo.update_counter - self._last_update_counter)
new_samples = ((self.sampler.batch_size * self.world_size) * self.log_interval_itrs)
updates_per_second = (float('nan') if (itr == 0) else (new_updates / train_time_elapsed))
samples_per_second = (float('nan') if (itr == 0) else (new_samples / train_time_elapsed))
replay_ratio = (((new_updates * self.algo.batch_size) * self.world_size) / new_samples)
cum_replay_ratio = ((self.algo.batch_size * self.algo.update_counter) / ((itr + 1) * self.sampler.batch_size))
cum_steps = (((itr + 1) * self.sampler.batch_size) * self.world_size)
if self._eval:
logger.record_tabular('CumTrainTime', (self._cum_time - self._cum_eval_time))
logger.record_tabular('Iteration', itr)
logger.record_tabular('CumTime (s)', self._cum_time)
logger.record_tabular('CumSteps', cum_steps)
logger.record_tabular('CumCompletedTrajs', self._cum_completed_trajs)
logger.record_tabular('CumUpdates', self.algo.update_counter)
logger.record_tabular('StepsPerSecond', samples_per_second)
logger.record_tabular('UpdatesPerSecond', updates_per_second)
logger.record_tabular('ReplayRatio', replay_ratio)
logger.record_tabular('CumReplayRatio', cum_replay_ratio)
self._cum_pyflex_steps += sum((getattr(info['env_infos'][(- 1)], 'total_steps') for info in traj_infos))
logger.record_tabular('CumPyflexSteps', self._cum_pyflex_steps)
self._log_infos(traj_infos)
logger.dump_tabular(with_prefix=False)
self._last_time = new_time
self._last_update_counter = self.algo.update_counter
if (itr < (self.n_itr - 1)):
logger.log(f'Optimizing over {self.log_interval_itrs} iterations.')
self.pbar = ProgBarCounter(self.log_interval_itrs)
def _log_infos(self, traj_infos=None):
if (traj_infos is None):
traj_infos = self._traj_infos
if traj_infos:
for k in traj_infos[0]:
if ((not k.startswith('_')) and (k != 'env_infos')):
logger.record_tabular_misc_stat(k, [info[k] for info in traj_infos])
elif (k == 'env_infos'):
env_info = traj_infos[0][k][0]
for field in env_info._fields:
if (field != 'total_steps'):
logger.record_tabular_misc_stat(('info_final_' + field), [getattr(info[k][(- 1)], field) for info in traj_infos])
if self._opt_infos:
for (k, v) in self._opt_infos.items():
logger.record_tabular_misc_stat(k, v)
            self._opt_infos = {k: list() for k in self._opt_infos}

def plot_training(training_losses, validation_losses, learning_rate, gaussian=True, sigma=2, figsize=(8, 6)):
import matplotlib.pyplot as plt
from matplotlib import gridspec
from scipy.ndimage import gaussian_filter
list_len = len(training_losses)
x_range = list(range(1, (list_len + 1)))
fig = plt.figure(figsize=figsize)
grid = gridspec.GridSpec(ncols=2, nrows=1, figure=fig)
subfig1 = fig.add_subplot(grid[(0, 0)])
subfig2 = fig.add_subplot(grid[(0, 1)])
subfigures = fig.get_axes()
for (i, subfig) in enumerate(subfigures, start=1):
subfig.spines['top'].set_visible(False)
subfig.spines['right'].set_visible(False)
if gaussian:
training_losses_gauss = gaussian_filter(training_losses, sigma=sigma)
validation_losses_gauss = gaussian_filter(validation_losses, sigma=sigma)
linestyle_original = '.'
color_original_train = 'lightcoral'
color_original_valid = 'lightgreen'
color_smooth_train = 'red'
color_smooth_valid = 'green'
alpha = 0.25
else:
linestyle_original = '-'
color_original_train = 'red'
color_original_valid = 'green'
alpha = 1.0
subfig1.plot(x_range, training_losses, linestyle_original, color=color_original_train, label='Training', alpha=alpha)
subfig1.plot(x_range, validation_losses, linestyle_original, color=color_original_valid, label='Validation', alpha=alpha)
if gaussian:
subfig1.plot(x_range, training_losses_gauss, '-', color=color_smooth_train, label='Training', alpha=0.75)
subfig1.plot(x_range, validation_losses_gauss, '-', color=color_smooth_valid, label='Validation', alpha=0.75)
subfig1.title.set_text('Training & validation loss')
subfig1.set_xlabel('Epoch')
subfig1.set_ylabel('Loss')
subfig1.legend(loc='upper right')
subfig2.plot(x_range, learning_rate, color='black')
subfig2.title.set_text('Learning rate')
subfig2.set_xlabel('Epoch')
subfig2.set_ylabel('LR')
    return fig

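# Hypothetical usage after a training loop that recorded per-epoch values:
# fig = plot_training(train_losses, val_losses, lrs, gaussian=True, sigma=2)
# fig.savefig('training_curves.png', dpi=150)
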
def main():
(args, cfg) = parse_config()
if (args.launcher == 'none'):
print('None args.launcher', args.launcher)
dist_train = False
total_gpus = 1
else:
print('args.launcher', args.launcher)
(total_gpus, cfg.LOCAL_RANK) = getattr(common_utils, ('init_dist_%s' % args.launcher))(args.tcp_port, args.local_rank, backend='nccl')
dist_train = True
if (args.batch_size is None):
args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
else:
assert ((args.batch_size % total_gpus) == 0), 'Batch size should match the number of gpus'
args.batch_size = (args.batch_size // total_gpus)
args.epochs = (cfg.OPTIMIZATION.NUM_EPOCHS if (args.epochs is None) else args.epochs)
if args.fix_random_seed:
common_utils.set_random_seed(666)
output_dir = ((((cfg.ROOT_DIR / 'output') / cfg.EXP_GROUP_PATH) / cfg.TAG) / args.extra_tag)
ckpt_dir = (output_dir / 'ckpt')
ps_label_dir = (output_dir / 'ps_label')
ps_label_dir.mkdir(parents=True, exist_ok=True)
output_dir.mkdir(parents=True, exist_ok=True)
ckpt_dir.mkdir(parents=True, exist_ok=True)
log_file = (output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S')))
logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
logger.info('Start logging')
gpu_list = (os.environ['CUDA_VISIBLE_DEVICES'] if ('CUDA_VISIBLE_DEVICES' in os.environ.keys()) else 'ALL')
logger.info(('CUDA_VISIBLE_DEVICES=%s' % gpu_list))
if dist_train:
logger.info(('total_batch_size: %d' % (total_gpus * args.batch_size)))
for (key, val) in vars(args).items():
logger.info('{:16} {}'.format(key, val))
log_config_to_file(cfg, logger=logger)
if (cfg.LOCAL_RANK == 0):
os.system(('cp %s %s' % (args.cfg_file, output_dir)))
tb_log = (SummaryWriter(log_dir=str((output_dir / 'tensorboard'))) if (cfg.LOCAL_RANK == 0) else None)
(source_set, source_loader, source_sampler) = build_dataloader(dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, batch_size=args.batch_size, dist=dist_train, workers=args.workers, logger=logger, training=True, merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch, total_epochs=args.epochs)
if cfg.get('MULTI_DB', None):
logger.info('Using Two DataLoader and Merge Loss')
(source_set_2, source_loader_2, source_sampler_2) = build_dataloader(dataset_cfg=cfg.DATA_CONFIG_SRC_2, class_names=cfg.DATA_CONFIG_SRC_2.CLASS_NAMES, batch_size=args.batch_size, dist=dist_train, workers=args.workers, logger=logger, training=True, merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch, total_epochs=args.epochs)
else:
source_set_2 = source_loader_2 = source_sampler_2 = None
model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
if args.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model.cuda()
optimizer = build_optimizer(model, cfg.OPTIMIZATION)
start_epoch = it = 0
last_epoch = (- 1)
if (args.pretrained_model is not None):
model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
if (args.ckpt is not None):
(it, start_epoch) = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
last_epoch = (start_epoch + 1)
else:
ckpt_list = glob.glob(str((ckpt_dir / '*checkpoint_epoch_*.pth')))
if (len(ckpt_list) > 0):
ckpt_list.sort(key=os.path.getmtime)
(it, start_epoch) = model.load_params_with_optimizer(ckpt_list[(- 1)], to_cpu=dist_train, optimizer=optimizer, logger=logger)
last_epoch = (start_epoch + 1)
model.train()
if dist_train:
if cfg.get('MULTI_DB', None):
model = nn.parallel.DistributedDataParallel(model, device_ids=[(cfg.LOCAL_RANK % torch.cuda.device_count())], broadcast_buffers=False, find_unused_parameters=True)
else:
model = nn.parallel.DistributedDataParallel(model, device_ids=[(cfg.LOCAL_RANK % torch.cuda.device_count())])
logger.info(model)
    max_len_dataset = (len(source_loader) if (source_loader_2 is None) else max(len(source_loader), len(source_loader_2)))  # guard: source_loader_2 is None when MULTI_DB is unset
total_iters_each_epoch = (max_len_dataset if (not args.merge_all_iters_to_one_epoch) else (max_len_dataset // args.epochs))
(lr_scheduler, lr_warmup_scheduler) = build_scheduler(optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs, last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION)
if cfg.get('MULTI_DB', None):
train_func = train_multi_db_model
else:
train_func = train_model
logger.info(('Start training %s/%s(%s)' % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag)))
if cfg.get('MULTI_DB', None):
train_func(model, optimizer, source_loader, source_loader_2, model_func=model_fn_decorator(), lr_scheduler=lr_scheduler, optim_cfg=cfg.OPTIMIZATION, start_epoch=start_epoch, total_epochs=args.epochs, start_iter=it, rank=cfg.LOCAL_RANK, tb_log=tb_log, ckpt_save_dir=ckpt_dir, ps_label_dir=ps_label_dir, source_sampler=source_sampler, target_sampler=source_sampler_2, lr_warmup_scheduler=lr_warmup_scheduler, ckpt_save_interval=args.ckpt_save_interval, max_ckpt_save_num=args.max_ckpt_save_num, merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch, logger=logger, ema_model=None)
else:
train_model(model, optimizer, source_loader, model_func=model_fn_decorator(), lr_scheduler=lr_scheduler, optim_cfg=cfg.OPTIMIZATION, start_epoch=start_epoch, total_epochs=args.epochs, start_iter=it, rank=cfg.LOCAL_RANK, tb_log=tb_log, ckpt_save_dir=ckpt_dir, source_sampler=source_sampler, lr_warmup_scheduler=lr_warmup_scheduler, ckpt_save_interval=args.ckpt_save_interval, max_ckpt_save_num=args.max_ckpt_save_num, merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch)
if cfg.get('MULTI_DB', None):
if (hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory):
source_set.clean_shared_memory()
source_set_2.clean_shared_memory()
elif (hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory):
source_set.clean_shared_memory()
logger.info(('End training %s/%s(%s)\n\n\n' % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag)))
logger.info(('Start evaluation %s/%s(%s)' % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag)))
(test_set, test_loader, sampler) = build_dataloader(dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, batch_size=args.batch_size, dist=dist_train, workers=args.workers, logger=logger, training=False)
eval_output_dir = ((output_dir / 'eval') / 'eval_with_train')
eval_output_dir.mkdir(parents=True, exist_ok=True)
args.start_epoch = max((args.epochs - args.num_epochs_to_eval), 0)
repeat_eval_ckpt((model.module if dist_train else model), test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_train)
    logger.info(('End evaluation %s/%s(%s)' % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag)))

def resnet50(pretrained=False, root='~/.encoding/models', **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
from ..models.model_store import get_model_file
model.load_state_dict(torch.load(get_model_file('resnet50', root=root)), strict=False)
    return model

def get_files(root, test=False):
data_root1 = os.path.join('/tmp', root, datasetname[3])
data_root2 = os.path.join('/tmp', root, datasetname[0])
path1 = os.path.join('/tmp', data_root1, normalname[0])
(data, lab) = data_load(path1, axisname=normalname[0], label=0)
for i in tqdm(range(len(dataname1))):
path2 = os.path.join('/tmp', data_root2, dataname1[i])
(data1, lab1) = data_load(path2, dataname1[i], label=label[i])
data += data1
lab += lab1
    return [data, lab]

def learn_halut(l: str, C: int, data_path: str, store_path: str, K: int=16, loop_order: Literal[('im2col', 'kn2col')]='im2col', kernel_size: tuple[(int, int)]=(1, 1), stride: tuple[(int, int)]=(1, 1), padding: tuple[(int, int)]=(0, 0), niter=2, nredo=1, min_points_per_centroid=100, max_points_per_centroid=1000, codebook: int=(- 1)) -> None:
files = glob.glob(((data_path + f'/{l}') + END_STORE_A))
files = [x.split('/')[(- 1)] for x in files]
if (len(files) > 1):
raise Exception('more than one file not supported anymore')
assert (len(files) == 1)
a_numpy = np.load(((data_path + f'/{l}') + END_STORE_A))
save_path = (store_path + f'/{l}_{C}_{K}.npy')
b_numpy = np.load(((data_path + f'/{l}') + END_STORE_B))
_exists = os.path.exists(save_path)
if _exists:
print('already learned')
if (codebook == (- 1)):
return
halut_numpy = None
if _exists:
already_learned = np.load(save_path, allow_pickle=True)
halut_numpy = hm.learn_halut_offline(a_numpy, b_numpy, C, K=K, niter=niter, nredo=nredo, min_points_per_centroid=min_points_per_centroid, max_points_per_centroid=max_points_per_centroid, codebook=codebook, already_learned=already_learned)
elif (loop_order == 'im2col'):
halut_numpy = hm.learn_halut_offline(a_numpy, b_numpy, C, K=K, niter=niter, nredo=nredo, min_points_per_centroid=min_points_per_centroid, max_points_per_centroid=max_points_per_centroid, codebook=codebook)
elif (loop_order == 'kn2col'):
halut_numpy_list = []
lut_list = []
dims_list = []
thresholds_list = []
conv_layer = HalutConv2d(a_numpy.shape[(- 1)], b_numpy.shape[(- 1)], kernel_size, stride, padding)
a_torch = torch.from_numpy(a_numpy)
for k_x in range(kernel_size[0]):
for k_y in range(kernel_size[1]):
input_slice = conv_layer.kn2col_input_slice(a_torch, a_torch.shape[1], a_torch.shape[2], k_x, k_y)
input_slice = input_slice.reshape((- 1), input_slice.shape[(- 1)])
halut_numpy = hm.learn_halut_offline(input_slice.detach().cpu().numpy(), b_numpy[((k_x * kernel_size[0]) + k_y)], C, K=K)
halut_numpy_list.append(halut_numpy)
lut_list.append(halut_numpy[hm.HalutOfflineStorage.LUT])
dims_list.append(halut_numpy[hm.HalutOfflineStorage.DIMS])
thresholds_list.append(halut_numpy[hm.HalutOfflineStorage.THRESHOLDS])
lut = np.array(lut_list)
dims = np.array(dims_list)
thresholds = np.array(thresholds_list)
halut_numpy = halut_numpy_list[0]
halut_numpy[hm.HalutOfflineStorage.LUT] = lut
halut_numpy[hm.HalutOfflineStorage.DIMS] = dims
halut_numpy[hm.HalutOfflineStorage.THRESHOLDS] = thresholds
if (halut_numpy is None):
raise Exception('halut_numpy is None')
print(f'Store in {save_path}: {(halut_numpy.nbytes / (1024 * 1024))} MB')
_exists = os.path.exists(store_path)
if (not _exists):
os.makedirs(store_path)
print(f'created directory {store_path}')
    np.save(save_path, halut_numpy)

def test_accuracy(data_loader, net, num_steps, population_code=False, num_classes=False):
with torch.no_grad():
total = 0
acc = 0
net.eval()
data_loader = iter(data_loader)
for (data, targets) in data_loader:
data = data.to(device)
targets = targets.to(device)
utils.reset(net)
(spk_rec, _) = net(data)
if population_code:
acc += (SF.accuracy_rate(spk_rec.unsqueeze(0), targets, population_code=True, num_classes=10) * spk_rec.size(1))
else:
acc += (SF.accuracy_rate(spk_rec.unsqueeze(0), targets) * spk_rec.size(1))
total += spk_rec.size(1)
        return (acc / total)

def remove_by_name(container, name, name_field='name'):
item = get_by_name(container, name, name_field)
if (item is not None):
        container.remove(item)

def main(args):
torch.manual_seed(3)
np.random.seed(2)
random.seed(2)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if (args.dataset in ['ppi', 'reddit']):
data = load_data(args)
g = data.g
train_mask = g.ndata['train_mask']
val_mask = g.ndata['val_mask']
test_mask = g.ndata['test_mask']
labels = g.ndata['label']
elif (args.dataset in ['ogbn-arxiv', 'ogbn-products']):
data = DglNodePropPredDataset(name=args.dataset)
split_idx = data.get_idx_split()
(g, labels) = data[0]
train_mask = split_idx['train']
val_mask = split_idx['valid']
test_mask = split_idx['test']
else:
path = osp.join('./qgtc_graphs', (args.dataset + '.npz'))
data = QGTC_dataset(path, args.dim, args.n_classes)
g = data.g
train_mask = data.train_mask
val_mask = data.val_mask
test_mask = data.test_mask
train_nid = np.nonzero(train_mask.data.numpy())[0].astype(np.int64)
in_feats = g.ndata['feat'].shape[1]
n_classes = data.num_classes
g = g.long()
cluster_iterator = ClusterIter(args.dataset, g, args.psize, args.batch_size, train_nid, use_pp=False, regular=args.regular)
torch.cuda.set_device(args.gpu)
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
g = g.int().to(args.gpu)
feat_size = g.ndata['feat'].shape[1]
if args.use_PyG:
model = SAGE_PyG(in_feats, args.n_hidden, n_classes, num_layers=(args.n_layers + 2))
elif args.run_GIN:
model = GIN(in_feats, args.n_hidden, n_classes)
else:
model = GraphSAGE(in_feats, args.n_hidden, n_classes, args.n_layers)
model.cuda()
train_nid = torch.from_numpy(train_nid).cuda()
start_time = time.time()
hidden_1 = args.n_hidden
output = args.n_classes
transfering = 0
running_time = 0
W_1 = torch.ones((feat_size, hidden_1)).cuda()
W_2 = torch.ones((hidden_1, hidden_1)).cuda()
W_3 = torch.ones((hidden_1, output)).cuda()
bw_A = 1
bw_X = 1
bw_W = bw_X
bit_W1 = QGTC.val2bit(W_1.cuda(), bw_W, True, False)
bit_W2 = QGTC.val2bit(W_2.cuda(), bw_W, True, False)
bit_W3 = QGTC.val2bit(W_3.cuda(), bw_W, True, True)
layer1_t = 0
layer2_t = 0
layer3_t = 0
cnt = 0
for epoch in tqdm(range(args.n_epochs)):
for (j, cluster) in enumerate(cluster_iterator):
if args.regular:
torch.cuda.synchronize()
t = time.perf_counter()
cluster = cluster.to(torch.cuda.current_device())
torch.cuda.synchronize()
transfering += (time.perf_counter() - t)
torch.cuda.synchronize()
t = time.perf_counter()
if args.use_PyG:
edge_idx = torch.stack([cluster.edges()[0], cluster.edges()[1]], dim=0).long()
model(cluster.ndata['feat'], edge_idx)
else:
model(cluster)
torch.cuda.synchronize()
running_time += (time.perf_counter() - t)
else:
torch.cuda.synchronize()
t = time.perf_counter()
cluster = cluster.cuda()
A = cluster.A.to_dense()
X = cluster.X
torch.cuda.synchronize()
transfering += (time.perf_counter() - t)
torch.cuda.synchronize()
t = time.perf_counter()
if args.use_QGTC:
if args.run_GIN:
bit_A = QGTC.val2bit(A, bw_A, False, False)
bit_X = QGTC.val2bit(X, bw_X, True, False)
bit_output = QGTC.bitMM2Bit(bit_A, bit_X, A.size(0), A.size(0), X.size(1), bw_A, bw_X, bw_X)
bit_output_1 = QGTC.bitMM2Bit(bit_output, bit_W1, A.size(0), X.size(1), W_1.size(1), bw_X, bw_W, bw_X)
bit_output_2 = QGTC.bitMM2Bit(bit_A, bit_output_1, A.size(0), A.size(0), W_1.size(1), bw_A, bw_X, bw_X)
bit_output_3 = QGTC.bitMM2Bit(bit_output_2, bit_W2, A.size(0), W_1.size(1), W_2.size(1), bw_X, bw_W, bw_X)
bit_output_4 = QGTC.bitMM2Bit(bit_A, bit_output_3, A.size(0), A.size(0), W_2.size(1), bw_A, bw_X, bw_X)
float_output = QGTC.bitMM2Int(bit_output_4, bit_W3, A.size(0), W_2.size(1), W_3.size(1), bw_X, bw_W, False)
else:
bit_A = QGTC.val2bit(A, bw_A, False, False)
bit_X = QGTC.val2bit(X, bw_X, True, False)
bit_output = QGTC.bitMM2Bit(bit_X, bit_W1, X.size(0), X.size(1), W_1.size(1), bw_X, bw_W, bw_X)
if args.zerotile_jump:
QGTC.bitMM2Bit_base_cnt(bit_A, bit_output, A.size(0), A.size(1), W_1.size(1), bw_A, bw_X, bw_X)
QGTC.bitMM2Bit_zerojump_cnt(bit_A, bit_output, A.size(0), A.size(1), W_1.size(1), bw_A, bw_X, bw_X)
continue
bit_output = QGTC.bitMM2Bit(bit_X, bit_W1, X.size(0), X.size(1), W_1.size(1), bw_X, bw_W, bw_X)
bit_output_1 = QGTC.bitMM2Bit(bit_A, bit_output, A.size(0), A.size(1), W_1.size(1), bw_A, bw_X, bw_X)
bit_output_2 = QGTC.bitMM2Bit(bit_output_1, bit_W2, A.size(0), W_1.size(1), W_2.size(1), bw_X, bw_W, bw_X)
bit_output_3 = QGTC.bitMM2Bit(bit_A, bit_output_2, A.size(0), A.size(0), W_2.size(1), bw_A, bw_X, bw_X)
bit_output_4 = QGTC.bitMM2Bit(bit_output_3, bit_W3, A.size(0), W_2.size(1), W_3.size(1), bw_X, bw_W, bw_X)
float_output = QGTC.bitMM2Int(bit_A, bit_output_4, A.size(0), A.size(0), W_2.size(1), bw_A, bw_X, False)
torch.cuda.synchronize()
running_time += (time.perf_counter() - t)
cnt += 1
cluster = cluster.cpu()
torch.cuda.synchronize()
end_time = time.time()
print('Trans (ms): {:.3f}, Compute (ms): {:.3f}'.format(((transfering / cnt) * 1000.0), ((running_time / cnt) * 1000.0)))
print('Avg. Epoch: {:.3f} ms'.format((((end_time - start_time) * 1000) / cnt))) |
def get_sos_schema(num_density_layers, hidden_channels, num_polynomials_per_layer, polynomial_degree):
result = [{'type': 'flatten'}]
for i in range(num_density_layers):
if (i > 0):
result.append({'type': 'flip'})
result += [{'type': 'sos', 'hidden_channels': hidden_channels, 'activation': 'tanh', 'num_polynomials': num_polynomials_per_layer, 'polynomial_degree': polynomial_degree}, {'type': 'normalise'}]
return result |
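A small worked example of the schema builder above (argument values are illustrative): two density layers yield a flatten step followed by alternating flip/sos/normalise entries.
schema = get_sos_schema(2, hidden_channels=64, num_polynomials_per_layer=4, polynomial_degree=3)
print([step['type'] for step in schema])  # ['flatten', 'sos', 'normalise', 'flip', 'sos', 'normalise']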
def build_conv_model2():
input0 = helper.make_tensor_value_info('input0', TensorProto.FLOAT, [1, 3, 1, 3])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 1, 3])
X1_weight = generate_input_initializer([3, 3, 1, 1], np.float32, 'X1_weight')
X1_bias = generate_input_initializer([3], np.float32, 'X1_bias')
X3_weight = generate_input_initializer([3, 3, 1, 1], np.float32, 'X3_weight')
X3_bias = generate_input_initializer([3], np.float32, 'X3_bias')
X5_weight = generate_input_initializer([3, 3, 1, 1], np.float32, 'X5_weight')
X5_bias = generate_input_initializer([3], np.float32, 'X5_bias')
relu_node_1 = onnx.helper.make_node('Relu', ['input0'], ['X1'], name='Relu1')
conv_node_1 = onnx.helper.make_node('Conv', ['X1', 'X1_weight', 'X1_bias'], ['X2'], name='Conv1')
relu_node_2 = onnx.helper.make_node('Relu', ['X2'], ['X3'], name='Relu2')
conv_node_2 = onnx.helper.make_node('Conv', ['X3', 'X3_weight', 'X3_bias'], ['X4'], name='Conv2')
conv_node_3 = onnx.helper.make_node('Conv', ['X1', 'X5_weight', 'X5_bias'], ['X5'], name='Conv3')
add_node = onnx.helper.make_node('Add', ['X4', 'X5'], ['output'], name='Add')
graph = helper.make_graph([relu_node_1, conv_node_1, relu_node_2, conv_node_2, conv_node_3, add_node], 'test_graph_1', [input0], [output])
graph.initializer.add().CopyFrom(X1_weight)
graph.initializer.add().CopyFrom(X1_bias)
graph.initializer.add().CopyFrom(X3_weight)
graph.initializer.add().CopyFrom(X3_bias)
graph.initializer.add().CopyFrom(X5_weight)
graph.initializer.add().CopyFrom(X5_bias)
model = helper.make_model(graph, **{'opset_imports': [helper.make_opsetid('', 13)]})
return model |
@MODELS.register_module()
class QueryInst(SparseRCNN):
    """Implementation of `Instances as Queries <https://arxiv.org/abs/2105.01928>`_"""
def __init__(self, backbone: ConfigType, rpn_head: ConfigType, roi_head: ConfigType, train_cfg: ConfigType, test_cfg: ConfigType, neck: OptConfigType=None, data_preprocessor: OptConfigType=None, init_cfg: OptMultiConfig=None) -> None:
super().__init__(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, data_preprocessor=data_preprocessor, init_cfg=init_cfg) |
class BahdanauAttention(AttentionMechanism):
def __init__(self, hidden_size=1, key_size=1, query_size=1):
super().__init__()
self.key_layer = nn.Linear(key_size, hidden_size, bias=False)
self.query_layer = nn.Linear(query_size, hidden_size, bias=False)
self.energy_layer = nn.Linear(hidden_size, 1, bias=False)
self.proj_keys = None
self.proj_query = None
def forward(self, query: Tensor=None, mask: Tensor=None, values: Tensor=None):
self._check_input_shapes_forward(query=query, mask=mask, values=values)
assert (mask is not None), 'mask is required'
assert (self.proj_keys is not None), 'projection keys have to get pre-computed'
self.compute_proj_query(query)
scores = self.energy_layer(torch.tanh((self.proj_query + self.proj_keys)))
scores = scores.squeeze(2).unsqueeze(1)
scores = torch.where(mask, scores, scores.new_full([1], float('-inf')))
alphas = F.softmax(scores, dim=(- 1))
        context = (alphas @ values)
return (context, alphas)
def compute_proj_keys(self, keys: Tensor):
self.proj_keys = self.key_layer(keys)
def compute_proj_query(self, query: Tensor):
self.proj_query = self.query_layer(query)
def _check_input_shapes_forward(self, query: torch.Tensor, mask: torch.Tensor, values: torch.Tensor):
assert (query.shape[0] == values.shape[0] == mask.shape[0])
assert (query.shape[1] == 1 == mask.shape[1])
assert (query.shape[2] == self.query_layer.in_features)
assert (values.shape[2] == self.key_layer.in_features)
assert (mask.shape[2] == values.shape[1])
def __repr__(self):
return 'BahdanauAttention' |
def get_all_dir_names(dir_path):
dir_path = Path(dir_path)
if dir_path.exists():
return sorted([x.name for x in list(scandir(str(dir_path))) if x.is_dir()])
else:
return [] |
class ResidualStack(nn.Module):
def __init__(self, channels):
super().__init__()
self.channels = channels
self.res_1 = nn.Sequential(nn.LeakyReLU(), nn.Conv1d(channels, channels, 3, padding=commons.get_same_padding(3)), nn.LeakyReLU(), nn.Conv1d(channels, channels, 3, padding=commons.get_same_padding(3)))
self.res_2 = nn.Sequential(nn.LeakyReLU(), nn.Conv1d(channels, channels, 3, dilation=3, padding=commons.get_same_padding(3, 3)), nn.LeakyReLU(), nn.Conv1d(channels, channels, 3, padding=commons.get_same_padding(3)))
self.res_3 = nn.Sequential(nn.LeakyReLU(), nn.Conv1d(channels, channels, 3, dilation=9, padding=commons.get_same_padding(3, 9)), nn.LeakyReLU(), nn.Conv1d(channels, channels, 3, padding=commons.get_same_padding(3)))
nn.utils.weight_norm(self.res_1[1])
nn.utils.weight_norm(self.res_1[3])
nn.utils.weight_norm(self.res_2[1])
nn.utils.weight_norm(self.res_2[3])
nn.utils.weight_norm(self.res_3[1])
nn.utils.weight_norm(self.res_3[3])
def forward(self, x):
for l in [self.res_1, self.res_2, self.res_3]:
x_ = l(x)
x = (x + x_)
return x
def remove_weight_norm(self):
nn.utils.remove_weight_norm(self.res_1[1])
nn.utils.remove_weight_norm(self.res_1[3])
nn.utils.remove_weight_norm(self.res_2[1])
nn.utils.remove_weight_norm(self.res_2[3])
nn.utils.remove_weight_norm(self.res_3[1])
nn.utils.remove_weight_norm(self.res_3[3]) |
class EventQuery():
def __init__(self, api_key, prompt_folder: str, num_prompts: int=12):
openai.api_key = api_key
self.setup_msgs = []
system_msgs = []
prompt_assistant_msgs = []
prompt_user_msgs = []
help_msgs = []
if (not os.path.exists(prompt_folder)):
            raise RuntimeError(f'Prompt folder does not exist: {prompt_folder}')
for fn in sorted(os.listdir(prompt_folder)):
file_path = os.path.join(prompt_folder, fn)
if (not os.path.isfile(file_path)):
continue
msg = read_text_file(file_path)
if fn.startswith('system_msg'):
system_msgs.append(msg)
elif fn.startswith('prompt_assistant'):
prompt_assistant_msgs.append(msg)
elif fn.startswith('prompt_user'):
prompt_user_msgs.append(msg)
elif fn.startswith('help_msg'):
help_msgs.append(msg)
for msg in system_msgs:
self.setup_msgs.append({'role': 'system', 'content': msg})
for msg in help_msgs:
self.setup_msgs.append({'role': 'user', 'content': msg})
for (user_msg, assistant_msg) in zip(prompt_user_msgs[:num_prompts], prompt_assistant_msgs[:num_prompts]):
self.setup_msgs.append({'role': 'user', 'content': user_msg})
self.setup_msgs.append({'role': 'assistant', 'content': assistant_msg})
def query(self, msg):
msg_list = (self.setup_msgs + [{'role': 'user', 'content': msg}])
completions = openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=msg_list, stream=False)
print('Usage:', completions['usage']['total_tokens'])
body = completions['choices'][0]['message']['content']
return body |
def revert_imagenet_normalization(sample):
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
mean_tensor = torch.Tensor(mean).view(3, 1, 1).to(sample.device)
std_tensor = torch.Tensor(std).view(3, 1, 1).to(sample.device)
non_normalized_sample = ((sample * std_tensor) + mean_tensor)
return non_normalized_sample |
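A quick round-trip sanity check of the inverse transform above (a sketch; tensor values are illustrative):
import torch
img = torch.rand(3, 8, 8)
mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
normalized = ((img - mean) / std)
restored = revert_imagenet_normalization(normalized)
print(torch.allclose(restored, img, atol=1e-06))  # True: normalization round-trips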
def train(model, predictor, dataset, optimizer, batch_size, device):
model.train()
losses = []
optimizer.zero_grad()
for data in tqdm.notebook.tqdm(torch_geometric.loader.dataloader.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=16), 'train', total=(len(dataset) // batch_size)):
data = data.to(device)
data.x = data.x.nan_to_num((- 1))
h = model(data)
assert (h.isnan().sum() == 0), h
x_i = torch.index_select(h, 0, data.edge_index[0])
x_j = torch.index_select(h, 0, data.edge_index[1])
y_hat = predictor(x_i, x_j)
y = data.y.nan_to_num((- 1))
y = y.long()
loss_f = torch.nn.CrossEntropyLoss(weight=city_class_weights, ignore_index=(- 1))
loss = loss_f(y_hat, y)
loss.backward()
optimizer.step()
optimizer.zero_grad()
losses.append(loss.cpu().item())
return losses |
def get_datetime(time_delta):
days_delta = (time_delta // (24 * 3600))
time_delta = (time_delta % (24 * 3600))
hour_delta = (time_delta // 3600)
time_delta = (time_delta % 3600)
mins_delta = (time_delta // 60)
time_delta = (time_delta % 60)
secs_delta = time_delta
return '{}:{}:{}:{}'.format(days_delta, hour_delta, mins_delta, secs_delta) |
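For example, 90061 seconds is one day, one hour, one minute and one second (assuming get_datetime as defined above):
print(get_datetime(90061))  # '1:1:1:1'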
def add_random_restarts_single_l(lik, n_rand, sd, data_shape):
lik_list = []
for dummy in range(n_rand):
l = lik.copy()
l.initialise_params(sd=sd, data_shape=data_shape)
lik_list.append(l)
return lik_list |
def test_error():
msg = 'Penalty term must be positive'
with pytest.raises(ValueError, match=msg):
LogisticRegression(lambda_1=(- 1)).fit(X, Y1)
with pytest.raises(ValueError, match=msg):
LogisticRegression(lambda_1='test').fit(X, Y1)
for LR in [LogisticRegression]:
msg = 'Tolerance for stopping criteria must be positive'
with pytest.raises(ValueError, match=msg):
LR(tol=(- 1)).fit(X, Y1)
with pytest.raises(ValueError, match=msg):
LR(tol='test').fit(X, Y1)
msg = 'Maximum number of iteration must be positive'
with pytest.raises(ValueError, match=msg):
LR(max_iter=(- 1)).fit(X, Y1)
with pytest.raises(ValueError, match=msg):
LR(max_iter='test').fit(X, Y1) |
@require_torch
class LxmertModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = ((LxmertModel, LxmertForPreTraining, LxmertForQuestionAnswering) if is_torch_available() else ())
test_head_masking = False
test_pruning = False
test_torchscript = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if return_labels:
if (model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values()):
inputs_dict['labels'] = torch.zeros(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
elif (model_class in MODEL_FOR_PRETRAINING_MAPPING.values()):
inputs_dict['labels'] = torch.zeros((self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
return inputs_dict
def setUp(self):
self.model_tester = LxmertModelTester(self)
self.config_tester = ConfigTester(self, config_class=LxmertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_lxmert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lxmert_model(*config_and_inputs)
def test_lxmert_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lxmert_for_question_answering(*config_and_inputs)
def test_lxmert_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lxmert_for_pretraining(*config_and_inputs)
def test_lxmert_question_answering_labels_resize(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.resize_lxmert_num_qa_labels(*config_and_inputs)
def test_model_from_pretrained(self):
for model_name in LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = LxmertModel.from_pretrained(model_name)
model.to(torch_device)
self.assertIsNotNone(model)
def test_attention_outputs(self):
(config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
seq_len = getattr(self.model_tester, 'seq_length', None)
encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', seq_len)
encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)
chunk_length = getattr(self.model_tester, 'chunk_length', None)
if ((chunk_length is not None) and hasattr(self.model_tester, 'num_hashes')):
encoder_seq_length = (encoder_seq_length * self.model_tester.num_hashes)
for model_class in self.all_model_classes:
inputs_dict['output_attentions'] = True
inputs_dict['output_hidden_states'] = False
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
(language_attentions, vision_attentions, cross_encoder_attentions) = (outputs[(- 3)], outputs[(- 2)], outputs[(- 1)])
self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers['language'])
self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers['vision'])
self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers['cross_encoder'])
del inputs_dict['output_attentions']
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
(language_attentions, vision_attentions, cross_encoder_attentions) = (outputs[(- 3)], outputs[(- 2)], outputs[(- 1)])
self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers['language'])
self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers['vision'])
self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers['cross_encoder'])
attentions = [language_attentions, vision_attentions, cross_encoder_attentions]
attention_shapes = [[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], [self.model_tester.num_attention_heads, self.model_tester.num_visual_features, self.model_tester.num_visual_features], [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features]]
for (attention, attention_shape) in zip(attentions, attention_shapes):
self.assertListEqual(list(attention[0].shape[(- 3):]), attention_shape)
out_len = len(outputs)
inputs_dict['output_attentions'] = True
inputs_dict['output_hidden_states'] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual((out_len + 2), len(outputs))
(language_attentions, vision_attentions, cross_encoder_attentions) = (outputs[(- 3)], outputs[(- 2)], outputs[(- 1)])
self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers['language'])
self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers['vision'])
self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers['cross_encoder'])
attentions = [language_attentions, vision_attentions, cross_encoder_attentions]
attention_shapes = [[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], [self.model_tester.num_attention_heads, self.model_tester.num_visual_features, self.model_tester.num_visual_features], [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features]]
for (attention, attention_shape) in zip(attentions, attention_shapes):
self.assertListEqual(list(attention[0].shape[(- 3):]), attention_shape)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
(language_hidden_states, vision_hidden_states) = (outputs[(- 2)], outputs[(- 1)])
self.assertEqual(len(language_hidden_states), (self.model_tester.num_hidden_layers['language'] + 1))
self.assertEqual(len(vision_hidden_states), (self.model_tester.num_hidden_layers['vision'] + 1))
seq_length = self.model_tester.seq_length
num_visual_features = self.model_tester.num_visual_features
self.assertListEqual(list(language_hidden_states[0].shape[(- 2):]), [seq_length, self.model_tester.hidden_size])
self.assertListEqual(list(vision_hidden_states[0].shape[(- 2):]), [num_visual_features, self.model_tester.hidden_size])
(config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict['output_hidden_states'] = True
check_hidden_states_output(inputs_dict, config, model_class)
del inputs_dict['output_hidden_states']
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_retain_grad_hidden_states_attentions(self):
(config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
hidden_states_lang = outputs.language_hidden_states[0]
attentions_lang = outputs.language_attentions[0]
hidden_states_vision = outputs.vision_hidden_states[0]
attentions_vision = outputs.vision_attentions[0]
hidden_states_lang.retain_grad()
attentions_lang.retain_grad()
hidden_states_vision.retain_grad()
attentions_vision.retain_grad()
outputs.language_output.flatten()[0].backward(retain_graph=True)
outputs.vision_output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states_lang.grad)
        self.assertIsNotNone(attentions_lang.grad)
self.assertIsNotNone(hidden_states_vision.grad)
self.assertIsNotNone(attentions_vision.grad) |
class Pattern(object):
def __call__(self, model, *args, **kwargs):
raise NotImplementedError |
def slim_eval_runner(benchmark, vec_input: bool=False, uniform: bool=True, input_dim: int=10, bond_dim: int=10, seq_len: int=100, batch: int=100):
if uniform:
core_tensor = near_eye_init((input_dim, bond_dim, bond_dim))
else:
core_tensor = near_eye_init((seq_len, input_dim, bond_dim, bond_dim))
if vec_input:
fake_data = torch.randn(seq_len, batch, input_dim).abs()
else:
fake_data = torch.randint(input_dim, (seq_len, batch))
bound_vecs = torch.randn(2, bond_dim)
benchmark(slim_eval_fun, fake_data, core_tensor, bound_vecs) |
def get_logger():
logger_name = 'main-logger'
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
fmt = '[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s'
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
file_handler = logging.FileHandler(os.path.join('checkpoints', args.exp_name, (('main-' + str(int(time.time()))) + '.log')))
file_handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(file_handler)
return logger |
def try_to_load_from_cache(repo_id: str, filename: str, cache_dir: Union[(str, Path, None)]=None, revision: Optional[str]=None) -> Optional[str]:
if (revision is None):
revision = 'main'
if (cache_dir is None):
cache_dir = TRANSFORMERS_CACHE
object_id = repo_id.replace('/', '--')
repo_cache = os.path.join(cache_dir, f'models--{object_id}')
if (not os.path.isdir(repo_cache)):
return None
for subfolder in ['refs', 'snapshots']:
if (not os.path.isdir(os.path.join(repo_cache, subfolder))):
return None
cached_refs = os.listdir(os.path.join(repo_cache, 'refs'))
if (revision in cached_refs):
with open(os.path.join(repo_cache, 'refs', revision)) as f:
revision = f.read()
if os.path.isfile(os.path.join(repo_cache, '.no_exist', revision, filename)):
return _CACHED_NO_EXIST
cached_shas = os.listdir(os.path.join(repo_cache, 'snapshots'))
if (revision not in cached_shas):
return None
cached_file = os.path.join(repo_cache, 'snapshots', revision, filename)
return (cached_file if os.path.isfile(cached_file) else None) |
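Hypothetical usage of the cache lookup above (repo name and filename are illustrative; the call returns a local path, _CACHED_NO_EXIST, or None):
path = try_to_load_from_cache('bert-base-uncased', 'config.json')
if ((path is not None) and (path is not _CACHED_NO_EXIST)):
    print('found in cache:', path)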
def parse_args(script):
parser = argparse.ArgumentParser(description=('few-shot script %s' % script))
parser.add_argument('--dataset', default='CUB', help='CUB/miniImagenet/cross/omniglot/cross_char')
parser.add_argument('--model', default='Conv4', help='model: Conv{4|6} / ResNet{10|18|34|50|101}')
parser.add_argument('--method', default='baseline', help='baseline/baseline++/protonet/matchingnet/relationnet{_softmax}/maml{_approx}')
parser.add_argument('--train_n_way', default=5, type=int, help='class num to classify for training')
parser.add_argument('--train_n_shot', default=5, type=int, help='shot num per class to classify for training')
parser.add_argument('--test_n_way', default=5, type=int, help='class num to classify for testing (validation) ')
parser.add_argument('--limit_n_images', default=(- 1), type=int, help='Number of images to use during training.')
parser.add_argument('--limit_n_classes', default=(- 1), type=int, help='Number of classes to use during training')
parser.add_argument('--n_shot', default=5, type=int, help='number of labeled data in each class, same as n_support')
parser.add_argument('--train_aug', action='store_true', help='perform data augmentation or not during training ')
if (script == 'train'):
parser.add_argument('--num_classes', default=200, type=int, help='total number of classes in softmax, only used in baseline')
parser.add_argument('--save_freq', default=50, type=int, help='Save frequency')
parser.add_argument('--start_epoch', default=0, type=int, help='Starting epoch')
parser.add_argument('--stop_epoch', default=(- 1), type=int, help='Stopping epoch')
parser.add_argument('--resume', action='store_true', help='continue from previous trained model with largest epoch')
parser.add_argument('--warmup', action='store_true', help='continue from baseline, neglected if resume is true')
elif (script == 'save_features'):
parser.add_argument('--split', default='novel', help='base/val/novel')
parser.add_argument('--save_iter', default=(- 1), type=int, help='save feature from the model trained in x epoch, use the best model if x is -1')
elif (script == 'test'):
parser.add_argument('--split', default='novel', help='base/val/novel')
parser.add_argument('--save_iter', default=(- 1), type=int, help='saved feature from the model trained in x epoch, use the best model if x is -1')
parser.add_argument('--adaptation', action='store_true', help='further adaptation in test time or not')
else:
raise ValueError('Unknown script')
return parser.parse_args() |
def sample_generator(filename, batch_size=1):
index = 0
    with open(filename, 'r') as f:
        file_size = len(f.readlines())
while True:
        fixed_list = []
        moving_list = []
        with open(filename, 'r') as fsamples:
            for (n, line) in enumerate(fsamples):
                if (n < index):
                    continue
                if (n < (index + batch_size)):
                    split = line.split(',')
                    fixed_list.append(split[0].strip())
                    moving_list.append(split[1].strip())
                else:
                    break
if ((len(fixed_list) == 0) or (len(moving_list) == 0)):
print('Empty List')
if ((index + batch_size) >= file_size):
index = 0
else:
index = (index + batch_size)
(yield get_batch(fixed_list, moving_list)) |
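A hypothetical usage sketch of the generator above ('pairs.csv' is an illustrative file of comma-separated fixed,moving pairs; get_batch is assumed to be defined elsewhere):
gen = sample_generator('pairs.csv', batch_size=4)
batch = next(gen)  # whatever get_batch returns for the first four pairs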
def edit_modelfile(data_, mtype, csvfilename):
list_doc = yaml.load(open('model.yml'), Loader=yaml.Loader)
os.remove('model.yml')
project = list_doc['project']
data = list_doc['data']
print(data)
data['drop'] = ['Unnamed: 0']
data['shuffle'] = True
data['split'] = 0.4
data['target'] = 'class_'
model = list_doc['model']
if (mtype in ['classification', 'c']):
model['algorithms'] = ['AB', 'GB', 'KNN', 'LOGR', 'RF', 'XGB', 'XT']
model['scoring_function'] = 'roc_auc'
model['type'] = 'classification'
elif (mtype in ['regression', 'r']):
model['algorithms'] = ['GBR', 'KNR', 'LR', 'RFR', 'XGBR', 'XTR']
model['scoring_function'] = 'mse'
model['type'] = 'regression'
features = list_doc['features']
pipeline = list_doc['pipeline']
plots = list_doc['plots']
xgboost_ = list_doc['xgboost']
list_doc['project'] = project
list_doc['data'] = data
list_doc['model'] = model
list_doc['features'] = features
list_doc['pipeline'] = pipeline
list_doc['plots'] = plots
print(list_doc)
print('re-writing YAML config file...')
    with open('model.yml', 'w') as file:
        yaml.dump(list_doc, file) |
class MaCowInternalBlock(Flow):
def __init__(self, num_steps, in_channels, kernel_size, hidden_channels, s_channels, factor=2, scale=True, prior_scale=True, inverse=False, coupling_type='conv', slice=None, heads=1, pos_enc=True, dropout=0.0):
super(MaCowInternalBlock, self).__init__(inverse)
num_layers = len(num_steps)
assert (num_layers < factor)
self.layers = nn.ModuleList()
self.priors = nn.ModuleList()
channel_step = (in_channels // factor)
for num_step in num_steps:
layer = [MaCowStep(in_channels, kernel_size, hidden_channels, s_channels, scale=scale, inverse=inverse, coupling_type=coupling_type, slice=slice, heads=heads, pos_enc=pos_enc, dropout=dropout) for _ in range(num_step)]
self.layers.append(nn.ModuleList(layer))
prior = Prior(in_channels, hidden_channels=hidden_channels, s_channels=s_channels, scale=prior_scale, inverse=inverse, factor=factor)
self.priors.append(prior)
in_channels = (in_channels - channel_step)
assert (in_channels == prior.z1_channels)
factor = (factor - 1)
self.z1_channels = in_channels
assert (len(self.layers) == len(self.priors))
def sync(self):
for (layer, prior) in zip(self.layers, self.priors):
for step in layer:
step.sync()
prior.sync()
def forward(self, input: torch.Tensor, s=None) -> Tuple[(torch.Tensor, torch.Tensor)]:
out = input
logdet_accum = input.new_zeros(input.size(0))
outputs = []
for (layer, prior) in zip(self.layers, self.priors):
for step in layer:
(out, logdet) = step.forward(out, s=s)
logdet_accum = (logdet_accum + logdet)
(out, logdet) = prior.forward(out, s=s)
logdet_accum = (logdet_accum + logdet)
(out1, out2) = split2d(out, prior.z1_channels)
outputs.append(out2)
out = out1
outputs.append(out)
outputs.reverse()
out = unsplit2d(outputs)
return (out, logdet_accum)
def backward(self, input: torch.Tensor, s=None) -> Tuple[(torch.Tensor, torch.Tensor)]:
out = input
outputs = []
for prior in self.priors:
(out1, out2) = split2d(out, prior.z1_channels)
outputs.append(out2)
out = out1
logdet_accum = out.new_zeros(out.size(0))
for (layer, prior) in zip(reversed(self.layers), reversed(self.priors)):
out2 = outputs.pop()
out = unsplit2d([out, out2])
(out, logdet) = prior.backward(out, s=s)
logdet_accum = (logdet_accum + logdet)
for step in reversed(layer):
(out, logdet) = step.backward(out, s=s)
logdet_accum = (logdet_accum + logdet)
assert (len(outputs) == 0)
return (out, logdet_accum)
def init(self, data, s=None, init_scale=1.0) -> Tuple[(Tuple[(torch.Tensor, torch.Tensor)], torch.Tensor)]:
out = data
logdet_accum = data.new_zeros(data.size(0))
outputs = []
for (layer, prior) in zip(self.layers, self.priors):
for step in layer:
(out, logdet) = step.init(out, s=s, init_scale=init_scale)
logdet_accum = (logdet_accum + logdet)
(out, logdet) = prior.init(out, s=s, init_scale=init_scale)
logdet_accum = (logdet_accum + logdet)
(out1, out2) = split2d(out, prior.z1_channels)
outputs.append(out2)
out = out1
outputs.append(out)
outputs.reverse()
out = unsplit2d(outputs)
return (out, logdet_accum) |
def parse_data_format(fmt):
    fmt = fmt.upper()
    mace_check((fmt in [e.name for e in DataFormat]), ('unknown data format %s' % fmt))
    return DataFormat[fmt] |
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0, is_train=None):
with tf.variable_scope((scope or 'linear')):
if ((args is None) or (nest.is_sequence(args) and (not args))):
raise ValueError('`args` must be specified')
if (not nest.is_sequence(args)):
args = [args]
flat_args = [flatten(arg, 1) for arg in args]
assert (is_train is not None)
flat_args = [tf.cond(is_train, (lambda : tf.nn.dropout(arg, input_keep_prob)), (lambda : arg)) for arg in flat_args]
flat_out = _linear(flat_args, output_size, bias)
out = reconstruct(flat_out, args[0], 1)
if squeeze:
out = tf.squeeze(out, [(len(args[0].get_shape().as_list()) - 1)])
if wd:
add_wd(wd)
return out |
def run_and_save_graph(predict_net, init_net, tensor_inputs, graph_save_path):
logger.info('Saving graph of ONNX exported model to {} ...'.format(graph_save_path))
save_graph(predict_net, graph_save_path, op_only=False)
logger.info('Running ONNX exported model ...')
with ScopedWS('__ws_tmp__', True) as ws:
ws.RunNetOnce(init_net)
initialized_blobs = set(ws.Blobs())
uninitialized = [inp for inp in predict_net.external_input if (inp not in initialized_blobs)]
for (name, blob) in zip(uninitialized, tensor_inputs):
ws.FeedBlob(name, blob)
try:
ws.RunNetOnce(predict_net)
except RuntimeError as e:
logger.warning('Encountered RuntimeError: \n{}'.format(str(e)))
ws_blobs = {b: ws.FetchBlob(b) for b in ws.Blobs()}
blob_sizes = {b: ws_blobs[b].shape for b in ws_blobs if isinstance(ws_blobs[b], np.ndarray)}
logger.info('Saving graph with blob shapes to {} ...'.format(graph_save_path))
save_graph(predict_net, graph_save_path, op_only=False, blob_sizes=blob_sizes)
return ws_blobs |
class SPIN(nn.Module):
def __init__(self, block, layers):
self.inplanes = 64
super(SPIN, self).__init__()
npose = (24 * 6)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.fc1 = nn.Linear((((512 * block.expansion) + npose) + 13), 1024)
self.drop1 = nn.Dropout()
self.fc2 = nn.Linear(1024, 1024)
self.drop2 = nn.Dropout()
self.decpose = nn.Linear(1024, npose)
self.decshape = nn.Linear(1024, 10)
self.deccam = nn.Linear(1024, 3)
nn.init.xavier_uniform_(self.decpose.weight, gain=0.01)
nn.init.xavier_uniform_(self.decshape.weight, gain=0.01)
nn.init.xavier_uniform_(self.deccam.weight, gain=0.01)
init_pose = torch.zeros(1, npose, dtype=torch.float32)
init_shape = torch.zeros(1, 10, dtype=torch.float32)
init_cam = torch.zeros(1, 3, dtype=torch.float32)
self.register_buffer('init_pose', init_pose)
self.register_buffer('init_shape', init_shape)
self.register_buffer('init_cam', init_cam)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x, init_pose=None, init_shape=None, init_cam=None, n_iter=3):
device = x.device
batch_size = x.shape[0]
if (init_pose is None):
init_pose = self.init_pose.expand(batch_size, (- 1))
if (init_shape is None):
init_shape = self.init_shape.expand(batch_size, (- 1))
if (init_cam is None):
init_cam = self.init_cam.expand(batch_size, (- 1))
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x1 = self.layer1(x)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
xf = self.avgpool(x4)
xf = xf.view(xf.size(0), (- 1))
pred_pose = init_pose
pred_shape = init_shape
pred_cam = init_cam
for i in range(n_iter):
xc = torch.cat([xf, pred_pose, pred_shape, pred_cam], 1)
xc = self.fc1(xc)
xc = self.drop1(xc)
xc = self.fc2(xc)
xc = self.drop2(xc)
pred_pose = (self.decpose(xc) + pred_pose)
pred_shape = (self.decshape(xc) + pred_shape)
pred_cam = (self.deccam(xc) + pred_cam)
pred_rotmat = rot6d_to_rotmat(pred_pose)
pred_rotmat_hom = torch.cat([pred_rotmat.view((- 1), 3, 3), torch.tensor([0, 0, 1], dtype=torch.float32, device=device).view(1, 3, 1).expand((batch_size * 24), (- 1), (- 1))], dim=(- 1))
pred_angle_axis = rotation_matrix_to_angle_axis(pred_rotmat_hom).contiguous().view(batch_size, (- 1))
pred_angle_axis[torch.isnan(pred_angle_axis)] = 0.0
preds = torch.cat([pred_cam, pred_angle_axis, pred_shape], dim=(- 1))
return preds |
def run_minimization_while(energy_fn, R_init, shift, max_grad_thresh=1e-12, max_steps=1000000, **kwargs):
(init, apply) = minimize.fire_descent(jit(energy_fn), shift, dt_start=0.001, dt_max=0.005, **kwargs)
apply = jit(apply)
def get_maxgrad(state):
return jnp.amax(jnp.abs(state.force))
def cond_fn(val):
(state, i) = val
return ((get_maxgrad(state) > max_grad_thresh) & (i < max_steps))
def body_fn(val):
(state, i) = val
return (apply(state), (i + 1))
state = init(R_init)
(state, num_iterations) = lax.while_loop(cond_fn, body_fn, (state, 0))
return (state.position, get_maxgrad(state), (num_iterations + 1)) |
def train_model(datapath, output, appliance, hparams, doplot=None, reload=True):
buildings = appliance['buildings']['train']
name = appliance['name']
params = appliance['hparams']
record_err = np.inf
transform_enabled = appliance.get('normalization', False)
model_type = appliance.get('model', 'ModelPaper')
active_threshold = appliance.get('active_threshold', 0.15)
active_ratio = appliance.get('active_ratio', 0.5)
active_oversample = appliance.get('active_oversample', 2)
transform = None
my_dataset = InMemoryKoreaDataset(datapath, buildings, name, windowsize=params['L'], active_threshold=active_threshold, active_ratio=active_ratio, active_oversample=active_oversample, transform_enabled=transform_enabled)
if transform_enabled:
transform = {'sample_mean': my_dataset.sample_mean, 'sample_std': my_dataset.sample_std, 'target_mean': my_dataset.target_mean, 'target_std': my_dataset.target_std}
print(transform)
total_size = len(my_dataset)
train_size = int((hparams['train_size'] * total_size))
eval_size = (total_size - train_size)
print(' DATASET ')
    print(f'Total size: {total_size}')
    print(f'Train size: {train_size}')
    print(f'Eval size: {eval_size}')
print('')
print(' ARCHITECTURE ')
pprint.pprint(appliance)
print('')
(train_dataset, eval_dataset) = torch.utils.data.random_split(my_dataset, (train_size, eval_size))
filename = os.path.join(output, 'dataset.pt')
save_dataset(transform, train_dataset, eval_dataset, filename)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=hparams['batch_size'], shuffle=True)
eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size=hparams['batch_size'])
model_type = getattr(nilmmodel, model_type)
model = model_type(params['L'], params['F'], params['K'], params['H'])
model = model.to(device)
optimizer = optim.Adam(model.parameters(), hparams['lr'])
scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.9)
if reload:
filename = os.path.join(output, appliance['filename'])
print('')
print('Reloading model: ', filename)
print('')
(transform, record_err) = load_model(filename, model, optimizer)
results = []
start = datetime.now()
for epoch in range(hparams['epochs']):
filename = os.path.join(output, (appliance['filename'] + str(epoch)))
plotfilename = None
if doplot:
plotfilename = filename
err_ = None
try:
(loss, err) = train_single_epoch(epoch, model, train_loader, transform, optimizer, eval_loader, plotfilename)
print('')
print(f'train epoch={epoch} loss={loss:.2f} err={err:.2f}')
print('')
(loss_, err_) = eval_single_epoch(model, eval_loader, transform)
print('')
print(f'eval loss={loss_:.2f} err={err_:.2f}')
print('')
results.append([(epoch, loss, err), (epoch, loss_, err_)])
if (err_ < record_err):
filename = os.path.join(output, appliance['filename'])
save_model(model, optimizer, hparams, appliance, transform, filename, err_)
record_err = err_
except Exception as e:
print(e)
scheduler.step()
end = datetime.now()
    total_seconds = (end - start).total_seconds()
print('')
print(f'Total seconds: {total_seconds}')
print('')
summary(output, results)
return (model, transform) |
class WikipediaDataSet(Dataset):
def __init__(self, root, n_context_sent=1, train=True, high_granularity=False):
root_path = root
print(root_path)
cache_path = get_cache_path(root_path)
print(cache_path)
if (not os.path.exists(cache_path)):
print('loading names of all files')
cache_wiki_filenames(root_path)
self.textfiles = Path(cache_path).read_text().splitlines()
if (len(self.textfiles) == 0):
            raise RuntimeError('Found 0 text files in subfolders of: {}'.format(root))
self.train = train
self.root = root
self.high_granularity = high_granularity
self.n_context_sent = n_context_sent
def __getitem__(self, index):
path = self.textfiles[index]
return read_wiki_file(Path(path), n_context_sent=self.n_context_sent, high_granularity=self.high_granularity)
def __len__(self):
return len(self.textfiles) |
class Encoder_FC(nn.Module):
def __init__(self, modeltype, njoints, nfeats, num_frames, num_classes, translation, pose_rep, glob, glob_rot, latent_dim=256, **kargs):
super().__init__()
self.modeltype = modeltype
self.njoints = njoints
self.nfeats = nfeats
self.num_frames = num_frames
self.num_classes = num_classes
self.translation = translation
self.pose_rep = pose_rep
self.glob = glob
self.glob_rot = glob_rot
self.latent_dim = latent_dim
self.activation = nn.GELU()
self.input_dim = (((self.njoints * self.nfeats) * self.num_frames) + self.num_classes)
self.fully_connected = nn.Sequential(nn.Linear(self.input_dim, 512), nn.GELU(), nn.Linear(512, 256), nn.GELU())
if (self.modeltype == 'cvae'):
self.mu = nn.Linear(256, self.latent_dim)
self.var = nn.Linear(256, self.latent_dim)
else:
self.final = nn.Linear(256, self.latent_dim)
def forward(self, batch):
(x, y) = (batch['x'], batch['y'])
(bs, njoints, feats, nframes) = x.size()
if (((njoints * feats) * nframes) != ((self.njoints * self.nfeats) * self.num_frames)):
raise ValueError('This model is not adapted with this input')
if (len(y.shape) == 1):
y = F.one_hot(y, self.num_classes)
y = y.to(dtype=x.dtype)
x = x.reshape(bs, ((njoints * feats) * nframes))
x = torch.cat((x, y), 1)
x = self.fully_connected(x)
if (self.modeltype == 'cvae'):
return {'mu': self.mu(x), 'logvar': self.var(x)}
else:
return {'z': self.final(x)} |
def loss(labels, logits):
return tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits) |
def testeval(fen, absolute=False) -> float:
piece_vals = {'K': 3, 'Q': 14, 'R': 5, 'B': 3.25, 'N': 3, 'P': 1}
ans = 0.0
tot = 0
for c in fen.split(' ')[0]:
if (not c.isalpha()):
continue
if c.isupper():
ans += piece_vals[c]
tot += piece_vals[c]
else:
ans -= piece_vals[c.upper()]
tot += piece_vals[c.upper()]
v = (ans / tot)
if ((not absolute) and is_black_turn(fen)):
v = (- v)
assert (abs(v) < 1)
return np.tanh((v * 3)) |
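For instance, the standard starting position is materially balanced, so the score is tanh(0) = 0 (a sketch assuming testeval as defined above):
start_fen = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'
print(testeval(start_fen, absolute=True))  # 0.0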
def convert_ordinal_to_binary_preference(preferences: Union[(pd.DataFrame, list[dict[(str, Any)]])], ordinal_preference_key: str='preference', binary_preference_key: str='preference'):
if isinstance(preferences, pd.DataFrame):
is_df = True
else:
is_df = False
preferences = pd.DataFrame.from_records(preferences)
preferences[binary_preference_key] = (((preferences[ordinal_preference_key].round().astype(int) - 1) // 2) + 1)
if (not is_df):
preferences = preferences.to_dict(orient='records')
return preferences |
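A worked example of the bucketing above (values chosen for illustration): ordinal scores 1-2 map to 1, 3-4 to 2, and 5 to 3.
import pandas as pd
prefs = pd.DataFrame({'preference': [1, 2, 3, 4, 5]})
out = convert_ordinal_to_binary_preference(prefs)
print(out['preference'].tolist())  # [1, 1, 2, 2, 3]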
@DATASETS.register_module()  # registry name is an assumption; the original decorator was garbled
class KineticsClipFolderDatasetV2MultiFrames(KineticsClipFolderDatasetV2):
def __init__(self, root, transform=None, split='train_list', sample_num=0):
super(KineticsClipFolderDatasetV2MultiFrames, self).__init__(root, split)
self.transform = transform
self.sample_num = sample_num
assert (self.transform is not None)
def __getitem__(self, item):
(frame_root, frame_num, cls) = self.samples[item]
sample_num = (frame_num if ((self.sample_num <= 0) or (self.sample_num > frame_num)) else self.sample_num)
frame_indices = np.round(np.linspace(1, frame_num, num=sample_num)).astype(np.int64)
frames = torch.cat([self._get_aug_frame(frame_root, frame_indices[i]) for i in range(sample_num)], dim=0)
return (frames, cls) |
def resnet_v1_50(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, reuse=None, scope='resnet_v1_50'):
blocks = [resnet_utils.Block('block1', bottleneck, (([(256, 64, 1)] * 2) + [(256, 64, 2)])), resnet_utils.Block('block2', bottleneck, (([(512, 128, 1)] * 3) + [(512, 128, 2)])), resnet_utils.Block('block3', bottleneck, (([(1024, 256, 1)] * 5) + [(1024, 256, 2)])), resnet_utils.Block('block4', bottleneck, ([(2048, 512, 1)] * 3))]
return resnet_v1(inputs, blocks, num_classes, is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=True, reuse=reuse, scope=scope) |
def create_and_report(model_id, edge_length_threshold, filled, overwrite=False):
import template_ffd.eval.iou as iou
print(iou.get_iou_average(model_id=model_id, edge_length_threshold=edge_length_threshold, filled=filled)) |
def _test_initialization(d, x, name, inertia, frozen, dtype):
assert (d.inertia == inertia)
assert (d.frozen == frozen)
param = getattr(d, name)
if (x is not None):
assert (param.shape[0] == len(x))
assert (param.dtype == dtype)
assert_array_almost_equal(param, x)
else:
assert (param == x) |
def _take_channels(*xs, ignore_channels=None):
if (ignore_channels is None):
return xs
else:
channels = [channel for channel in range(xs[0].shape[1]) if (channel not in ignore_channels)]
xs = [torch.index_select(x, dim=1, index=torch.tensor(channels).to(x.device)) for x in xs]
return xs |
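A minimal usage sketch of the channel filter above (shapes are illustrative):
import torch
x = torch.randn(2, 4, 8, 8)  # (batch, channels, height, width)
(filtered,) = _take_channels(x, ignore_channels=[1, 3])
print(filtered.shape)  # torch.Size([2, 2, 8, 8]): channels 0 and 2 are kept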
class FrozenDict(OrderedDict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for (key, value) in self.items():
setattr(self, key, value)
self.__frozen = True
def __delitem__(self, *args, **kwargs):
raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.')
def setdefault(self, *args, **kwargs):
raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.')
def pop(self, *args, **kwargs):
raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.')
def update(self, *args, **kwargs):
raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.')
def __setattr__(self, name, value):
if (hasattr(self, '__frozen') and self.__frozen):
raise Exception(f'You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.')
super().__setattr__(name, value)
def __setitem__(self, name, value):
if (hasattr(self, '__frozen') and self.__frozen):
            raise Exception(f'You cannot use ``__setitem__`` on a {self.__class__.__name__} instance.')
super().__setitem__(name, value) |
def _load_from_summary(index, config):
    dataframe = pd.read_csv('./train_package/train_summary.csv', index_col=0)
history_string = dataframe.loc[int(index)]['backtest_test_history']
if (not check_input_same(config, json.loads(dataframe.loc[int(index)]['config']))):
raise ValueError('the date of this index is not the same as the default config')
return np.fromstring(history_string, sep=',')[:(- 1)] |
def test_velocity_in_kpcGyr():
(vofid, rofid) = (200.0, 8.0)
assert (numpy.fabs((((2.0 * conversion.velocity_in_kpcGyr(vofid, rofid)) / conversion.velocity_in_kpcGyr((2.0 * vofid), rofid)) - 1.0)) < (10.0 ** (- 10.0))), 'velocity_in_kpcGyr did not work as expected'
assert (numpy.fabs(((conversion.velocity_in_kpcGyr(vofid, rofid) / conversion.velocity_in_kpcGyr(vofid, (2 * rofid))) - 1.0)) < (10.0 ** (- 10.0))), 'velocity_in_kpcGyr did not work as expected'
return None |
def validate(args, device_id, pt, step):
device = ('cpu' if (args.visible_gpus == '-1') else 'cuda')
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info(('Loading checkpoint from %s' % test_from))
checkpoint = torch.load(test_from, map_location=(lambda storage, loc: storage))
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = ExtSummarizer(args, device, checkpoint)
model.eval()
valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False), args.batch_size, device, shuffle=False, is_test=False)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent() |
class TestOptions(BaseOptions):
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser)
parser.add_argument('--phase', type=str, default='test', help='test flag')
parser.add_argument('--phase_anno', type=str, default='test', help='eigen/eigen_test, Annotations file name')
return parser |
class bcolors():
HEADER = '\x1b[95m'
INFO = ' [INFO] | '
OKBLUE = '\x1b[94m[DOWNLOAD] | '
WARNING = '\x1b[93m [WARN] | '
FAIL = '\x1b[91m [ERROR] | '
OKGREEN = '\x1b[92m'
ENDC = '\x1b[0m' |
def tf_hard_intersection_pooler(boxes: TFBoxTensor, mask: tf.Tensor=None, dim: int=0, keepdim: bool=False) -> TFBoxTensor:
box_z = boxes.z
box_Z = boxes.Z
    if (mask is not None):
        # TF tensors do not support in-place masked assignment; push masked
        # entries to -inf/+inf so they cannot win the max/min reductions below.
        box_z = tf.where(mask, (box_z - float('inf')), box_z)
        box_Z = tf.where(mask, (box_Z + float('inf')), box_Z)
    z = tf.math.reduce_max(box_z, axis=dim, keepdims=keepdim)
    Z = tf.math.reduce_min(box_Z, axis=dim, keepdims=keepdim)
return boxes.like_this_from_zZ(z, Z) |
def pretent_to_be_nnUNetTrainer(base, folds=(0, 1, 2, 3, 4)):
for fold in folds:
cur = join(base, ('fold_%d' % fold))
pkl_file = join(cur, 'model_best.model.pkl')
a = load_pickle(pkl_file)
a['name_old'] = deepcopy(a['name'])
a['name'] = 'nnUNetTrainer'
save_pickle(a, pkl_file) |
class UnpairedDataset(data.Dataset):
def __init__(self, opt, im_path, is_val=False):
super().__init__()
self.dir = im_path
self.paths = sorted(make_dataset(self.dir, opt.max_dataset_size))
self.size = len(self.paths)
assert (self.size > 0)
self.transform = transforms.get_transform(opt, for_val=is_val)
self.opt = opt
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path).convert('RGB')
img = self.transform(img)
return {'img': img, 'path': path}
def __len__(self):
return self.size |
def parse_args():
parser = argparse.ArgumentParser(description=' EmbMatch Training')
parser.add_argument('--dataset-path', type=str, default=os.environ.get('SEMCO_DATA_PATH', '/home/inas0003/data'), help='the path to the data folder containing all datasets')
parser.add_argument('--word-vec-path', type=str, default=os.environ.get('SEMCO_WV_PATH', '/home/inas0003/data/numberbatch-en-19.08_128D.dict.pkl'), help='Word vectors (Semantic Embeddings) dict path')
parser.add_argument('--im-size', type=int, default=32, help='default image size to which all images will be resized')
    parser.add_argument('--cropsize', type=int, default=32, help='default cropsize to which all images will be cropped - randomly for train data and centrally for test/valid data')
parser.add_argument('--lam-u', type=float, default=1.0, help='coefficient of unlabeled loss')
parser.add_argument('--mu', type=int, default=3, help='factor of train batch size of unlabeled samples')
parser.add_argument('--thr', type=float, default=0.95, help='pseudo label threshold')
parser.add_argument('--thr-emb', type=float, default=0.7, help='pseudo label cos sim threshold for embedding path')
parser.add_argument('--lambda-emb', type=float, default=3, help='weight of embedding loss')
    parser.add_argument('--eps', type=float, default=None, help='Epsilon for DBSCAN clustering [0-1]; the smaller the eps, the more conservative the label grouping. If left blank, it will be set automatically')
parser.add_argument('--ema-alpha', type=float, default=0.999, help='decay rate for ema module')
parser.add_argument('--parallel', type=bool, default=True, help='If set to True, EmbMatch will run in parallel on all available GPUs, use CUDA_VISIBLE_DEVICES=x,y to limit parallelism to cuda:x and cuda:y only')
parser.add_argument('--no_amp', action='store_true', help='If set, Automatic mixed precision will not be used for training the model.')
parser.add_argument('--no_progress_bar', action='store_true', help='If set, progress bar will not be displayed during training')
    parser.add_argument('--cache-imgs', type=bool, default=False, help='If set to True, images will be cached to memory. In case an SSD is used with 4 workers per gpu, this does not help much')
    parser.add_argument('--model_backbone', type=str, default=None, help='Takes a value from [wres, resnet50, resnet18]; if it is not set, the model backbone will be inferred based on img size')
parser.add_argument('--wres-k', type=int, default=2, help='k parameter for wideresnet model')
parser.add_argument('--no-imgnet-pretrained', action='store_true', help='If set, the backbone model will not be pretrained using imagenet (only applicable for resnet backbones)')
parser.add_argument('--use-pretrained', type=bool, default=False, help='If set to True, the model will be initialised by the state dictionary as per the checkpoint-path argument')
parser.add_argument('--checkpoint-path', type=str, default=None, help='The checkpoint path to be used in case use-pretrained is set to True')
parser.add_argument('--freeze-backbone', type=bool, default=False, help='If set, only the embedding fully connected layer would be unfrozen')
parser.add_argument('--n-epoches', type=int, default=300, help='number of training epoches if below is not passed')
parser.add_argument('--break-epoch', type=int, default=None, help='epoch at which training stops, use this instead of n-epoches if you want to maintain the LR scheduler')
parser.add_argument('--early-stopping-epochs', type=int, default=0, help='number of epochs after which early stopping would happen if no improvement to validation accuracy was witnessed')
    parser.add_argument('--min_wait_before_es', type=int, default=(- 1), help='number of epochs to wait before starting to monitor the best model (for saving the model or for early stopping)')
parser.add_argument('--es-metric', type=str, default='accuracy', help='Early stopping metric can either be accuracy or loss')
parser.add_argument('--n-imgs-per-epoch', type=int, default=(64 * 1024), help='number of training images for each epoch')
parser.add_argument('--batch-size', type=int, default=64, help='batch size for trained data')
parser.add_argument('--lr', type=float, default=0.03, help='learning rate')
parser.add_argument('--num-workers-per-gpu', type=int, default=4, help='Number of workers per gpu')
parser.add_argument('--weight-decay', type=float, default=0.0005, help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum for optimizer')
parser.add_argument('--seed', type=int, default=123, help='seed for random behaviors, no seed if negative')
parser.add_argument('--dataset-name', type=str, default='cifar100', help='Name of dataset (e.g. cifar100)')
parser.add_argument('--train-split-pickle', type=str, default='splits/cifar100_labelled_data_25_seed123.pkl', help='path to pickle file with training split (generate_tst_pkls output)')
parser.add_argument('--valid-split-pickle', type=str, default='splits/cifar100_valid_data.pkl', help='path to pickle file with validation/test split (generate_tst_pkls output)')
parser.add_argument('--classes-pickle', type=str, default='splits/cifar100_classes.pkl', help='path to pickle file with classes (generate_tst_pkls output)')
(args, _) = parser.parse_known_args()
return args |
def prediction(dataset, model, args):
preds = []
golds = []
model.eval()
for j in range(0, len(dataset), args.batch_size):
(src, seg, label, mask_src) = Batch(dataset, j, args.batch_size, args.device).get()
preds += model.predict(src, seg, mask_src)
golds += label.cpu().data.numpy().tolist()
return (f1_score(golds, preds, average='macro'), preds) |
def generate_features(tbl, bins, cross_sizes):
tbl = tbl.cut_bins(columns=count_cols, bins=bins, out_cols=count_cols)
tbl = tbl.cross_columns(cross_cols, cross_sizes)
return tbl |
class LeakyDataset(Dataset):
def __init__(self, traindata, testdata, r, seed=2):
self.r = r
gen = torch.Generator().manual_seed(seed)
len_text = len(testdata)
nb_leak = int((r * len_text))
(testdata, _) = random_split(testdata, [nb_leak, (len_text - nb_leak)], generator=gen)
self.data = torch.utils.data.ConcatDataset([traindata, testdata])
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
(image, label) = self.data[idx]
return (image, label) |
def _create_grid_offsets(size: List[int], stride: int, offset: float, device: torch.device):
(grid_height, grid_width) = size
shifts_x = torch.arange((offset * stride), (grid_width * stride), step=stride, dtype=torch.float32, device=device)
shifts_y = torch.arange((offset * stride), (grid_height * stride), step=stride, dtype=torch.float32, device=device)
(shift_y, shift_x) = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape((- 1))
shift_y = shift_y.reshape((- 1))
return (shift_x, shift_y) |
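A small worked example of the grid offsets above (sizes chosen for illustration):
import torch
(shift_x, shift_y) = _create_grid_offsets([2, 3], stride=4, offset=0.5, device=torch.device('cpu'))
print(shift_x.tolist())  # [2.0, 6.0, 10.0, 2.0, 6.0, 10.0]
print(shift_y.tolist())  # [2.0, 2.0, 2.0, 6.0, 6.0, 6.0]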
class SchedulerType(ExplicitEnum):
LINEAR = 'linear'
COSINE = 'cosine'
COSINE_WITH_RESTARTS = 'cosine_with_restarts'
POLYNOMIAL = 'polynomial'
CONSTANT = 'constant'
CONSTANT_WITH_WARMUP = 'constant_with_warmup'
INVERSE_SQRT = 'inverse_sqrt' |
class MotorModel(object):
def __init__(self, torque_control_enabled=False, kp=1.2, kd=0):
self._torque_control_enabled = torque_control_enabled
self._kp = kp
self._kd = kd
self._resistance = MOTOR_RESISTANCE
self._voltage = MOTOR_VOLTAGE
self._torque_constant = MOTOR_TORQUE_CONSTANT
self._viscous_damping = MOTOR_VISCOUS_DAMPING
self._current_table = [0, 10, 20, 30, 40, 50, 60]
self._torque_table = [0, 1, 1.9, 2.45, 3.0, 3.25, 3.5]
def set_voltage(self, voltage):
self._voltage = voltage
def get_voltage(self):
return self._voltage
def set_viscous_damping(self, viscous_damping):
self._viscous_damping = viscous_damping
def get_viscous_dampling(self):
return self._viscous_damping
def convert_to_torque(self, motor_commands, current_motor_angle, current_motor_velocity):
if self._torque_control_enabled:
pwm = motor_commands
else:
pwm = (((- self._kp) * (current_motor_angle - motor_commands)) - (self._kd * current_motor_velocity))
pwm = np.clip(pwm, (- 1.0), 1.0)
return self._convert_to_torque_from_pwm(pwm, current_motor_velocity)
def _convert_to_torque_from_pwm(self, pwm, current_motor_velocity):
observed_torque = np.clip((self._torque_constant * ((pwm * self._voltage) / self._resistance)), (- OBSERVED_TORQUE_LIMIT), OBSERVED_TORQUE_LIMIT)
voltage_net = np.clip(((pwm * self._voltage) - ((self._torque_constant + self._viscous_damping) * current_motor_velocity)), (- VOLTAGE_CLIPPING), VOLTAGE_CLIPPING)
current = (voltage_net / self._resistance)
current_sign = np.sign(current)
current_magnitude = np.absolute(current)
actual_torque = np.interp(current_magnitude, self._current_table, self._torque_table)
actual_torque = np.multiply(current_sign, actual_torque)
return (actual_torque, observed_torque) |
class CorstemNet(nn.Module):
def __init__(self, input_nc=1, num_classes=2, ngf=32):
super().__init__()
self.in_dim = input_nc
self.out_dim = ngf
self.final_out_dim = num_classes
act_fn = nn.LeakyReLU(0.2, inplace=True)
act_fn_2 = nn.ReLU()
self.down_1 = Conv_residual_conv(self.in_dim, self.out_dim, act_fn)
self.pool_1 = maxpool()
self.down_2 = Conv_residual_conv(self.out_dim, (self.out_dim * 2), act_fn)
self.pool_2 = maxpool()
self.down_3 = Conv_residual_conv((self.out_dim * 2), (self.out_dim * 4), act_fn)
self.pool_3 = maxpool()
self.down_4 = Conv_residual_conv((self.out_dim * 4), (self.out_dim * 8), act_fn)
self.pool_4 = maxpool()
self.bridge = Conv_residual_conv((self.out_dim * 8), (self.out_dim * 16), act_fn)
self.deconv_1 = conv_decod_block((self.out_dim * 16), (self.out_dim * 8), act_fn_2)
self.up_1 = Conv_residual_conv((self.out_dim * 8), (self.out_dim * 8), act_fn_2)
self.deconv_2 = conv_decod_block((self.out_dim * 8), (self.out_dim * 4), act_fn_2)
self.up_2 = Conv_residual_conv((self.out_dim * 4), (self.out_dim * 4), act_fn_2)
self.deconv_3 = conv_decod_block((self.out_dim * 4), (self.out_dim * 2), act_fn_2)
self.up_3 = Conv_residual_conv((self.out_dim * 2), (self.out_dim * 2), act_fn_2)
self.deconv_4 = conv_decod_block((self.out_dim * 2), self.out_dim, act_fn_2)
self.up_4 = Conv_residual_conv(self.out_dim, self.out_dim, act_fn_2)
self.out = nn.Conv2d(self.out_dim, self.final_out_dim, kernel_size=3, stride=1, padding=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
        print(f'Initialized {self.__class__.__name__} successfully')
def forward(self, input):
down_1 = self.down_1(input)
down_2 = self.down_2(self.pool_1(down_1))
down_3 = self.down_3(self.pool_2(down_2))
down_4 = self.down_4(self.pool_3(down_3))
bridge = self.bridge(self.pool_4(down_4))
deconv_1 = self.deconv_1(bridge)
skip_1 = ((deconv_1 + down_4) / 2)
up_1 = self.up_1(skip_1)
deconv_2 = self.deconv_2(up_1)
skip_2 = ((deconv_2 + down_3) / 2)
up_2 = self.up_2(skip_2)
deconv_3 = self.deconv_3(up_2)
skip_3 = ((deconv_3 + down_2) / 2)
up_3 = self.up_3(skip_3)
deconv_4 = self.deconv_4(up_3)
skip_4 = ((deconv_4 + down_1) / 2)
up_4 = self.up_4(skip_4)
return self.out(up_4) |
def meshgrid(batch, height, width, is_homogeneous=True):
x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])), tf.transpose(tf.expand_dims(tf.linspace((- 1.0), 1.0, width), 1), [1, 0]))
y_t = tf.matmul(tf.expand_dims(tf.linspace((- 1.0), 1.0, height), 1), tf.ones(shape=tf.stack([1, width])))
x_t = (((x_t + 1.0) * 0.5) * tf.cast((width - 1), tf.float32))
y_t = (((y_t + 1.0) * 0.5) * tf.cast((height - 1), tf.float32))
if is_homogeneous:
ones = tf.ones_like(x_t)
coords = tf.stack([x_t, y_t, ones], axis=0)
else:
coords = tf.stack([x_t, y_t], axis=0)
coords = tf.tile(tf.expand_dims(coords, 0), [batch, 1, 1, 1])
return coords |
def test_ST3():
orb = orbit.ST3(q_in, K_in, e_in, omega_in, P_in, T0_in, q_out, K_out, e_out, omega_out, P_out, T0_out, gamma, dates)
vels = orb.get_velocities()
(fig, ax) = plt.subplots(nrows=1)
ax.axhline(gamma, color='0.5', ls=':')
ax.plot(dates, vels[0])
ax.plot(dates, vels[1])
ax.plot(dates, vels[2])
ax.set_xlabel('JD')
ax.set_ylabel('$v\\,\\mathrm{km/s}$')
fig.savefig((outdir + 'ST3.png'), dpi=300) |