code stringlengths 101 5.91M |
|---|
def c(dfs, numL, numR):
    """Build a Rule from *dfs* and validate its component counts.

    Asserts the rule has ``numL`` left and ``numR`` right components,
    then runs the shared sanity checks on it.
    """
    rule = Rule.fromDFS(dfs)
    assert rule.numLeftComponents == numL
    assert rule.numRightComponents == numR
    commonChecks(rule)
def bottle3(f, x_tuple):
    """Apply *f* with the leading three dims of each tensor flattened.

    Each tensor in ``x_tuple`` is viewed with its first three dimensions
    merged into one, ``f`` is called on the reshaped tensors, and the
    result is viewed back with the first tensor's original leading three
    dimensions restored.
    """
    sizes = tuple(t.size() for t in x_tuple)
    flattened = [
        t.view(s[0] * s[1] * s[2], *s[3:])
        for t, s in zip(x_tuple, sizes)
    ]
    out = f(*flattened)
    lead = sizes[0]
    return out.view(lead[0], lead[1], lead[2], *out.size()[1:])
class BridgeTowerForImageAndTextRetrieval(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed.

    Instantiation calls ``requires_backends``, which raises with an
    installation hint if the 'torch' backend is unavailable.
    """
    # Backends required for the real implementation of this class.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def polarity(importtext):
    """Return [mean, std, variance] of 3-word-window polarity scores.

    The text is word-tokenized and scored with TextBlob over consecutive
    non-overlapping 3-word windows; summary statistics of those window
    polarities are returned as plain floats.

    Returns [0.0, 0.0, 0.0] when the text has fewer than three tokens
    (previously this produced NaNs via numpy on an empty list).

    Note: the original also computed ``nltk.pos_tag`` and two whole-text
    TextBlob sentiment scores that were never used; that dead (and
    expensive) work has been removed. The loop bound is unchanged since
    ``pos_tag`` preserves token count.
    """
    text = word_tokenize(importtext)
    polaritylist = []
    # Non-overlapping 3-word windows; a trailing partial window is skipped,
    # matching the original `i <= len(tokens) - 3` guard.
    for i in range(0, len(text) - 2, 3):
        words = ' '.join(text[i:i + 3])
        polaritylist.append(TextBlob(words).sentiment[0])
    if not polaritylist:
        return [0.0, 0.0, 0.0]
    return [
        float(np.mean(polaritylist)),
        float(np.std(polaritylist)),
        float(np.var(polaritylist)),
    ]
class PointnetSAModule(PointnetSAModuleMSG):
    """Single-scale PointNet++ set-abstraction module.

    Thin wrapper over the multi-scale-grouping parent: the single
    ``mlp``/``radius``/``nsample`` are wrapped into one-element lists.
    """
    def __init__(self, *, mlp: List[int], npoint: int=None, radius: float=None, nsample: int=None, bn: bool=True, use_xyz: bool=True, pool_method='max_pool'):
        # Delegate to the MSG parent with singleton per-scale lists.
        super().__init__(mlps=[mlp], npoint=npoint, radii=[radius], nsamples=[nsample], bn=bn, use_xyz=use_xyz, pool_method=pool_method)
def train(train_dataloader, ae, optimizer, optimizer_step, cuda_details: gnn_utils.CudaDetails, tb_logger, lambda_value, property_pred_factor):
    """Run one epoch of joint autoencoder + property-predictor training.

    The loss is the negated AE objective plus ``property_pred_factor``
    times the MSE of the property head evaluated on the latent sample
    cached by the AE forward pass. Running averages (loss, timings,
    property MSE) are shown on the tqdm bar; ``tb_logger`` (if given)
    receives the per-batch property MSE.
    """
    loss_meter = gnn_utils.AverageMeter()
    time_meter = gnn_utils.AverageMeter()
    time_on_calc = gnn_utils.AverageMeter()
    prediction_mse_meter = gnn_utils.AverageMeter()
    pre_time = time.time()
    with tqdm.tqdm(train_dataloader, total=len(train_dataloader)) as t:
        for (i, (padded_seq, lengths, order, properties)) in enumerate(t):
            packed_seq = rnn.pack_padded_sequence(padded_seq, lengths, batch_first=True)
            packed_seq = cuda_details.return_cudafied(packed_seq)
            properties = cuda_details.return_cudafied(properties)
            pre_calc_time = time.time()
            # Attach the TB logger only on every 100th batch so the encoder's
            # distribution can emit extra summaries on those steps alone.
            if ((i % 100) == 0):
                ae.encoder.shallow_dist._tb_logger = tb_logger
            ae_obj = ae(packed_seq, lambda_=lambda_value).mean()
            # Property prediction from the latent sample cached by the AE call.
            prediction_of_property = ae.prop_predictor_(ae._last_z_sample_on_obj)
            prop_loss = F.mse_loss(input=prediction_of_property.squeeze(), target=properties.squeeze())
            # Maximize the AE objective => minimize its negation.
            loss = (- ae_obj)
            loss += (property_pred_factor * prop_loss)
            optimizer.zero_grad()
            loss.backward()
            optimizer_step()
            # Detach the logger again so intermediate steps stay silent.
            if ((i % 100) == 0):
                ae.encoder.shallow_dist._tb_logger = None
            loss_meter.update(loss.item(), n=lengths.shape[0])
            time_meter.update((time.time() - pre_time))
            time_on_calc.update((time.time() - pre_calc_time))
            prediction_mse_meter.update(prop_loss.item(), n=lengths.shape[0])
            pre_time = time.time()
            if (tb_logger is not None):
                tb_logger.add_scalar('property_mse', prop_loss.item())
            t.set_postfix(avg_epoch_loss=f'{loss_meter.avg:.4E}', total_time=f'{time_meter.avg:.3E}', calc_time=f'{time_on_calc.avg:.3E}', prop_mse=f'{prediction_mse_meter.avg:.3E}')
# NOTE(review): the next line looks like a mangled decorator invocation
# (e.g. '@register_model_architecture(...)') — confirm against upstream.
_model_architecture('s2ut_conformer', 's2ut_conformer')
def s2ut_conformer_architecture_base(args):
    """Fill in the base hyper-parameter defaults for the s2ut conformer.

    Each attribute is set only if absent on *args*, then the shared s2ut
    base architecture defaults are applied.
    """
    defaults = {
        'attn_type': None,
        'pos_enc_type': 'abs',
        'input_feat_per_channel': 80,
        'input_channels': 1,
        'max_source_positions': 6000,
        'encoder_embed_dim': 256,
        'encoder_ffn_embed_dim': 2048,
        'encoder_attention_heads': 4,
        'dropout': 0.1,
        'encoder_layers': 16,
        'depthwise_conv_kernel_size': 31,
    }
    for key, fallback in defaults.items():
        setattr(args, key, getattr(args, key, fallback))
    s2ut_architecture_base(args)
class LEVIRCDPlus(torch.utils.data.Dataset):
    """LEVIR-CD+ bi-temporal change-detection dataset.

    Expects ``<root>/<split>/A``, ``.../B`` and ``.../label`` folders
    holding identically named PNGs for the two image dates and the
    change mask.
    """
    # Valid values for the ``split`` constructor argument.
    splits = ['train', 'test']

    def __init__(self, root: str='.data/levircd_plus', split: str='train', transform: Compose=Compose([ToTensor()])):
        assert (split in self.splits)
        self.root = root
        self.transform = transform
        self.files = self.load_files(root, split)

    @staticmethod
    def load_files(root: str, split: str):
        """Return a list of dicts with (image1, image2, mask) paths.

        Fix: this was previously an instance method declared without a
        ``self`` parameter, so ``self.load_files(root, split)`` raised
        a TypeError (3 args passed to a 2-arg function); it is now a
        static method.
        """
        files = []
        images = glob(os.path.join(root, split, 'A', '*.png'))
        images = sorted([os.path.basename(image) for image in images])
        for image in images:
            image1 = os.path.join(root, split, 'A', image)
            image2 = os.path.join(root, split, 'B', image)
            mask = os.path.join(root, split, 'label', image)
            files.append(dict(image1=image1, image2=image2, mask=mask))
        return files

    def __len__(self) -> int:
        return len(self.files)

    def __getitem__(self, idx: int) -> Dict:
        files = self.files[idx]
        # Clip labels to {0, 1} — presumably stored masks use 255 for
        # "change"; TODO confirm against the dataset release.
        mask = np.array(Image.open(files['mask']))
        mask = np.clip(mask, 0, 1)
        image1 = np.array(Image.open(files['image1']))
        image2 = np.array(Image.open(files['image2']))
        (image1, image2, mask) = self.transform([image1, image2, mask])
        # Stack the two dates along a new leading dimension.
        x = torch.stack([image1, image2], dim=0)
        return dict(x=x, mask=mask)
def create_FDS_train_subset(args):
    """Write the FDS statistics-update subset of the NYU2 train list.

    Reads the full ``nyu2_train.csv`` index and the precomputed row ids
    in ``FDS_train_subset_id.npy`` from ``args.data_dir``, then writes
    the selected rows to ``nyu2_train_FDS_subset.csv`` (no header/index).
    """
    print('Creating FDS statistics updating subset...')
    data_dir = args.data_dir
    full_index = pd.read_csv(os.path.join(data_dir, 'nyu2_train.csv'), header=None)
    subset_ids = np.load(os.path.join(data_dir, 'FDS_train_subset_id.npy'))
    subset = full_index.iloc[subset_ids]
    subset.to_csv(os.path.join(data_dir, 'nyu2_train_FDS_subset.csv'), index=False, header=False)
def parse_arguments():
    """Parse CLI options.

    ``--input_model`` is optional (defaults to the bundled zfnet512
    ONNX file name); ``--output_model`` is required.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input_model',
        type=str,
        required=False,
        default='zfnet512-12.onnx',
    )
    parser.add_argument('--output_model', type=str, required=True)
    args = parser.parse_args()
    return args
class TestHPO(unittest.TestCase):
    """Tests for the HPO search-space and searcher utilities."""
    # Shared space mixing continuous bounds and discrete (interval) ones.
    search_space = {'learning_rate': SearchSpace((0.0001, 0.001)), 'num_train_epochs': SearchSpace(bound=(20, 100), interval=1), 'weight_decay': SearchSpace((0.0001, 0.001)), 'cooldown_epochs': SearchSpace(bound=(0, 10), interval=1), 'sparsity_warm_epochs': SearchSpace(bound=(0, 5), interval=1), 'per_device_train_batch_size': SearchSpace((5, 20), 1)}
    def test_searcher(self):
        """prepare_hpo must build a working searcher for each strategy."""
        hpo_config = HPOConfig({'num_train_epochs': self.search_space['num_train_epochs'], 'cooldown_epochs': self.search_space['cooldown_epochs']}, searcher='grid')
        searcher = GridSearcher({'num_train_epochs': self.search_space['num_train_epochs'], 'cooldown_epochs': self.search_space['cooldown_epochs']})
        conf_searcher = prepare_hpo(hpo_config)
        self.assertEqual(searcher.__class__, conf_searcher.__class__)
        # Manually built and config-driven grid searchers must produce
        # identical suggestion sequences.
        for _ in range(5):
            self.assertEqual(searcher.suggest(), conf_searcher.suggest())
        hpo_config = HPOConfig(self.search_space, 'random')
        searcher = prepare_hpo(hpo_config)
        for _ in range(5):
            searcher.suggest()
        # Bayesian-optimisation searcher: smoke-test suggest/feedback.
        hpo_config = HPOConfig(self.search_space, 'bo')
        searcher = prepare_hpo(hpo_config)
        params = []
        for _ in range(5):
            searcher.suggest()
            searcher.get_feedback(np.random.random())
        for _ in range(5):
            param = searcher.suggest()
            # Skip duplicates: feedback is only given once per param.
            if (param in params):
                continue
            params.append(param)
            searcher.feedback(param, np.random.random())
        # XGBoost-surrogate searcher.
        hpo_config = HPOConfig(self.search_space, 'xgb', higher_is_better=True, min_train_samples=3)
        searcher = prepare_hpo(hpo_config)
        for _ in range(5):
            searcher.suggest()
            searcher.get_feedback(np.random.random())
        for _ in range(5):
            param = searcher.suggest()
            searcher.feedback(param, np.random.random())
    def test_search_space(self):
        """SearchSpace dispatch plus discrete/continuous value access."""
        ds = DiscreteSearchSpace(bound=[0, 10])
        # SearchSpace with an interval must dispatch to the discrete class.
        get_ds = SearchSpace(bound=[0, 10], interval=1)
        self.assertEqual(ds.__class__, get_ds.__class__)
        self.assertEqual(ds.index(1), ds.get_nth_value(1))
        ds = DiscreteSearchSpace(value=[1, 2, 3, 4])
        self.assertEqual(ds.get_all(), [1, 2, 3, 4])
        # presumably a float bound defaults the interval to 1/100 of the
        # span — confirm against DiscreteSearchSpace's implementation.
        ds = DiscreteSearchSpace(bound=[0.01, 0.1])
        self.assertEqual(ds.interval, 0.01)
        self.assertIn(ds.get_value(), ds.get_all())
        self.assertEqual(ds.get_value(2), ds.get_nth_value(2))
        cs = ContinuousSearchSpace(bound=[0.01, 0.1])
        self.assertTrue((cs.get_value() >= 0.01))
        self.assertTrue((cs.get_value() < 0.1))
    def test_sa(self):
        """Simulated annealing returns one candidate per input point."""
        def f(x):
            return np.mean(np.log((x ** 2)), axis=1)
        points = np.random.randn(5, 6)
        optimizer = SimulatedAnnealingOptimizer(T0=100, Tf=0, alpha=0.9, higher_is_better=True)
        result = optimizer.gen_next_params(f, points)
        optimizer = SimulatedAnnealingOptimizer(T0=1, Tf=0.01, alpha=None, higher_is_better=False)
        result2 = optimizer.gen_next_params(f, points)
        self.assertTrue((len(result) == len(result2)))
def make_agent(id, **kwargs):
    """Instantiate an agent from the registry (by name) or from a class.

    When *id* is a string, its registry entry supplies the agent class
    plus default kwargs; caller kwargs override the defaults and the
    agent is constructed with ``name=id``. Otherwise *id* is treated as
    a callable/class and invoked directly with *kwargs*.
    """
    if not isinstance(id, str):
        return id(**kwargs)
    entry = dict(**_agent_registry[id])
    agent_cls = entry.pop('agent')
    entry.update(kwargs)
    return agent_cls(name=id, **entry)
def get_extensions():
    """Build the C_ROIPooling extension module list for setup().

    Always compiles the local and cpu/ .cpp sources with CppExtension;
    when a CUDA toolchain is detected, switches to CUDAExtension, adds
    the cuda/*.cu sources and defines WITH_CUDA.
    """
    extensions_dir = os.path.dirname(os.path.abspath(__file__))
    main_sources = glob.glob(os.path.join(extensions_dir, '*.cpp'))
    cpu_sources = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp'))
    cuda_sources = glob.glob(os.path.join(extensions_dir, 'cuda', '*.cu'))

    sources = main_sources + cpu_sources
    extension = CppExtension
    define_macros = []
    extra_compile_args = {'cxx': []}

    if torch.cuda.is_available() and CUDA_HOME is not None:
        extension = CUDAExtension
        sources += cuda_sources
        define_macros += [('WITH_CUDA', None)]
        # Standard PyTorch-extension nvcc flags disabling half-precision
        # operator overloads.
        extra_compile_args['nvcc'] = ['-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__']

    sources = [os.path.join(extensions_dir, s) for s in sources]
    return [extension('C_ROIPooling', sources, include_dirs=[extensions_dir], define_macros=define_macros, extra_compile_args=extra_compile_args)]
class HfApi():
    """Minimal HTTP client for the Hugging Face Hub API.

    Every method raises ``requests.HTTPError`` (via ``raise_for_status``)
    on a non-2xx response.
    """
    def __init__(self, endpoint=None):
        # Fall back to the module-level ENDPOINT constant.
        self.endpoint = (endpoint if (endpoint is not None) else ENDPOINT)
    def login(self, username: str, password: str) -> str:
        """Exchange credentials for an API token."""
        path = '{}/api/login'.format(self.endpoint)
        r = requests.post(path, json={'username': username, 'password': password})
        r.raise_for_status()
        d = r.json()
        return d['token']
    def whoami(self, token: str) -> Tuple[(str, List[str])]:
        """Return (username, organizations) for *token*."""
        path = '{}/api/whoami'.format(self.endpoint)
        r = requests.get(path, headers={'authorization': 'Bearer {}'.format(token)})
        r.raise_for_status()
        d = r.json()
        return (d['user'], d['orgs'])
    def logout(self, token: str) -> None:
        """Invalidate *token* server-side."""
        path = '{}/api/logout'.format(self.endpoint)
        r = requests.post(path, headers={'authorization': 'Bearer {}'.format(token)})
        r.raise_for_status()
    def presign(self, token: str, filename: str, organization: Optional[str]=None) -> PresignedUrl:
        """Request a presigned S3 URL for uploading *filename*."""
        path = '{}/api/presign'.format(self.endpoint)
        r = requests.post(path, headers={'authorization': 'Bearer {}'.format(token)}, json={'filename': filename, 'organization': organization})
        r.raise_for_status()
        d = r.json()
        return PresignedUrl(**d)
    def presign_and_upload(self, token: str, filename: str, filepath: str, organization: Optional[str]=None) -> str:
        """Presign then PUT the local file; return its access URL."""
        urls = self.presign(token, filename=filename, organization=organization)
        with open(filepath, 'rb') as f:
            # Wrap the file to report upload progress.
            pf = TqdmProgressFileReader(f)
            # NOTE(review): an empty string is sent for zero-length files —
            # presumably because requests mishandles empty file objects;
            # confirm before changing.
            data = (f if (pf.total_size > 0) else '')
            r = requests.put(urls.write, data=data, headers={'content-type': urls.type})
            r.raise_for_status()
            pf.close()
        return urls.access
    def list_objs(self, token: str, organization: Optional[str]=None) -> List[S3Obj]:
        """List stored objects, optionally scoped to an organization."""
        path = '{}/api/listObjs'.format(self.endpoint)
        params = ({'organization': organization} if (organization is not None) else None)
        r = requests.get(path, params=params, headers={'authorization': 'Bearer {}'.format(token)})
        r.raise_for_status()
        d = r.json()
        return [S3Obj(**x) for x in d]
    def delete_obj(self, token: str, filename: str, organization: Optional[str]=None):
        """Delete a stored object by filename."""
        path = '{}/api/deleteObj'.format(self.endpoint)
        r = requests.delete(path, headers={'authorization': 'Bearer {}'.format(token)}, json={'filename': filename, 'organization': organization})
        r.raise_for_status()
    def model_list(self) -> List[ModelInfo]:
        """Return public model metadata (no auth required)."""
        path = '{}/api/models'.format(self.endpoint)
        r = requests.get(path)
        r.raise_for_status()
        d = r.json()
        return [ModelInfo(**x) for x in d]
def load_dataset(n_jobs, use_gpu, pin_memory, ascending, corpus, audio, text):
    """Create train/dev DataLoaders plus tokenizer info for ASR training.

    Returns (train_loader, dev_loader, feature_dim, vocab_size,
    tokenizer, log_messages).
    """
    print('Prepare dataloader for training/validation')
    (audio_transform, feat_dim) = create_transform(audio.copy())
    tokenizer = load_text_encoder(**text)
    (tr_set, dv_set, tr_loader_bs, dv_loader_bs, mode, data_msg) = create_dataset(tokenizer, ascending, **corpus)
    # Shuffle (and drop the last partial batch) only for normal training;
    # ascending-length (curriculum-style) training keeps the order fixed.
    do_shuffle = (mode == 'train') and (not ascending)
    tr_collate = partial(collect_audio_batch, audio_transform=audio_transform, mode=mode)
    dv_collate = partial(collect_audio_batch, audio_transform=audio_transform, mode='test')
    tr_set = DataLoader(tr_set, batch_size=tr_loader_bs, shuffle=do_shuffle, drop_last=do_shuffle, collate_fn=tr_collate, num_workers=n_jobs, pin_memory=use_gpu)
    dv_set = DataLoader(dv_set, batch_size=dv_loader_bs, shuffle=False, drop_last=False, collate_fn=dv_collate, num_workers=n_jobs, pin_memory=pin_memory)
    data_msg.append('I/O spec. | Audio feature = {}\t| feature dim = {}\t| Token type = {}\t| Vocab size = {}'.format(audio['feat_type'], feat_dim, tokenizer.token_type, tokenizer.vocab_size))
    return (tr_set, dv_set, feat_dim, tokenizer.vocab_size, tokenizer, data_msg)
class ModelCriterionConfig(FairseqDataclass):
    """Configuration for a criterion whose losses come from the model."""
    # Per-loss-term scaling factors, keyed by loss name.
    loss_weights: Dict[(str, float)] = field(default_factory=dict, metadata={'help': 'weights for the loss terms'})
    # Extra model-output keys to include in logging output.
    log_keys: List[str] = field(default_factory=list, metadata={'help': 'additional output keys to log'})
def track_parallel_progress(func, tasks, nproc, initializer=None, initargs=None, bar_width=50, chunksize=1, skip_first=False, keep_order=True):
    """Map *func* over *tasks* in a worker pool with a progress bar.

    Args:
        func: callable applied to each task.
        tasks: an iterable with ``len``, or a ``(iterator, task_count)``
            tuple when the iterable has no length.
        nproc: number of worker processes.
        initializer, initargs: forwarded to the pool constructor.
        bar_width: progress-bar width in characters.
        chunksize: tasks dispatched to a worker at a time.
        skip_first: exclude the first ``nproc * chunksize`` results from
            the bar (treat them as warm-up).
        keep_order: use ``imap`` (ordered) instead of ``imap_unordered``.

    Returns:
        List of results, in task order when ``keep_order`` is True.
    """
    if isinstance(tasks, tuple):
        assert (len(tasks) == 2)
        assert isinstance(tasks[0], collections_abc.Iterable)
        assert isinstance(tasks[1], int)
        task_num = tasks[1]
        tasks = tasks[0]
    elif isinstance(tasks, collections_abc.Iterable):
        task_num = len(tasks)
    else:
        raise TypeError('"tasks" must be an iterable object or a (iterator, int) tuple')
    pool = init_pool(nproc, initializer, initargs)
    # With skip_first the bar starts lazily and warm-up results are not
    # counted towards the total.
    start = (not skip_first)
    task_num -= ((nproc * chunksize) * int(skip_first))
    prog_bar = ProgressBar(task_num, bar_width, start)
    results = []
    if keep_order:
        gen = pool.imap(func, tasks, chunksize)
    else:
        gen = pool.imap_unordered(func, tasks, chunksize)
    for result in gen:
        results.append(result)
        if skip_first:
            # Hold back the bar until all warm-up results have arrived,
            # then start it exactly once.
            if (len(results) < (nproc * chunksize)):
                continue
            elif (len(results) == (nproc * chunksize)):
                prog_bar.start()
                continue
        prog_bar.update()
    sys.stdout.write('\n')
    pool.close()
    pool.join()
    return results
class ContinuousInverseModel(nn.Module):
    """MLP inverse-dynamics model: (state, next_state) -> action distribution.

    Produces the parameters of either a tanh-squashed normal ('pyd') or
    a beta ('beta') distribution over actions, chosen by ``dist_impl``.
    """

    def __init__(self, state_size, action_size, log_std_low=(- 10.0), log_std_high=2.0, hidden_size=256, dist_impl='pyd'):
        super().__init__()
        assert dist_impl in ['pyd', 'beta']
        self.fc1 = nn.Linear(state_size * 2, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        # Two parameter vectors per action dimension (e.g. mean and log-std).
        self.fc3 = nn.Linear(hidden_size, 2 * action_size)
        self.log_std_low = log_std_low
        self.log_std_high = log_std_high
        self.apply(weight_init)
        self.dist_impl = dist_impl

    def forward(self, state, next_state):
        # Concatenate the two states along the feature axis.
        hidden = torch.cat((state, next_state), dim=-1)
        hidden = F.relu(self.fc1(hidden))
        hidden = F.relu(self.fc2(hidden))
        params = self.fc3(hidden)
        if self.dist_impl == 'pyd':
            return distributions.create_tanh_normal(params, self.log_std_low, self.log_std_high)
        if self.dist_impl == 'beta':
            return distributions.create_beta(params)
def test_glorot_normal_receptive_field():
    """GlorotNormal samples with a 3-tuple shape stay near mean 0, std ~0.1."""
    from lasagne.init import GlorotNormal
    sample = GlorotNormal().sample((50, 50, 2))
    mean = sample.mean()
    std = sample.std()
    assert -0.01 < mean < 0.01
    assert 0.09 < std < 0.11
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4) with skip.

    ``downsample`` (if given) projects the input so the residual matches
    the expanded channel count / spatial stride.
    """

    # Channel expansion factor of the final 1x1 convolution.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Residual connection (projected when shapes would not match).
        identity = x if self.downsample is None else self.downsample(x)
        out += identity
        return self.relu(out)
def loss_G_fn(P, D, options, images, gen_images):
    """Generator loss from discriminator scores on augmented fake images.

    Supported ``options['loss']`` values: 'nonsat' (non-saturating GAN),
    'lsgan' (least-squares GAN); anything else falls back to the
    WGAN-style ``-mean(D(fake))``. ``images`` (the real batch) is
    unused here.
    """
    scores = D(P.augment_fn(gen_images))
    loss_kind = options['loss']
    if loss_kind == 'nonsat':
        return F.softplus(-scores).mean()
    if loss_kind == 'lsgan':
        return 0.5 * ((scores - 1.0) ** 2).mean()
    return -scores.mean()
def initialize_double_double_artificial_homotopy(target, start, homogeneous=False, vrblvl=0):
    """Initialize a double-double artificial-parameter homotopy in PHCpack.

    Sets *target* and *start* as the double-double target/start systems,
    then invokes PHCpack job 860 to initialize the homotopy.

    Args:
        target: polynomials of the target system.
        start: polynomials of the start system.
        homogeneous: whether to use homogeneous coordinates.
        vrblvl: verbosity level; values > 0 print diagnostics.

    Returns:
        The PHCpack return value — presumably 0 on success; confirm
        against the phc function's documentation.
    """
    if (vrblvl > 0):
        print('in initialize_double_double_artificial_homotopy', end='')
        print(', homogeneous :', homogeneous)
        print('the target system :')
        for pol in target:
            print(pol)
        print('the start system :')
        for pol in start:
            print(pol)
    set_double_double_target_system(target, vrblvl)
    set_double_double_start_system(start, vrblvl)
    phc = get_phcfun()
    aprc = pointer(c_int32(1))
    # Pack verbosity and the homogeneous flag into the 'b' parameter array.
    bpars = (c_int32 * 2)()
    bpars[0] = c_int32(vrblvl)
    bpars[1] = c_int32(int(homogeneous))
    bpar = pointer(bpars)
    ccc = pointer(c_double(0.0))
    vrb = c_int32(vrblvl)
    if (vrblvl > 0):
        print('-> initialize_double_double_artificial_homotopy calls phc', end='')
    # Job code 860 selects this initialization in the phc dispatcher.
    retval = phc(860, aprc, bpar, ccc, vrb)
    if (vrblvl > 0):
        print(', return value :', retval)
    return retval
class MAP():
    """Mean-Average-Precision-style metric (cut-off ``length``) for
    session-based recommendation evaluation.

    Fixes over the previous version: the ``test``/``pos`` counters are
    initialised in ``__init__`` (previously only ``reset`` created
    them), the builtin name ``sum`` is no longer shadowed, and
    ``add_batch`` uses ``DataFrame.items()`` — ``iteritems()`` was
    removed in pandas 2.0.
    """

    def __init__(self, length=20):
        # Cut-off rank k.
        self.length = length
        # Number of scored events and the accumulated score.
        self.test = 0
        self.pos = 0

    def init(self, train):
        return

    def reset(self):
        self.test = 0
        self.pos = 0

    def skip(self, for_item=0, session=(- 1)):
        pass

    def add_multiple(self, result, next_items, for_item=0, session=0, position=None):
        """Accumulate average precision of ranked *result* vs several targets."""
        last_recall = 0
        ap = 0
        for i in range(self.length):
            recall = self.recall(result[:i].index, next_items)
            precision = self.precision(result[:i].index, next_items)
            ap += precision * (recall - last_recall)
            last_recall = recall
        self.pos += ap
        self.test += 1

    def add(self, result, next_item, for_item=0, session=0, pop_bin=None, position=None):
        """Accumulate the mean reciprocal rank over cut-offs 1..length."""
        total = 0
        for i in range(self.length):
            total += self.mrr(result, next_item, i + 1)
        self.pos += total / self.length
        self.test += 1

    def recall(self, result, next_items):
        """Fraction of *next_items* present in *result*."""
        return len(set(next_items) & set(result)) / len(next_items)

    def precision(self, result, next_items):
        """Hits in *result* divided by the cut-off length."""
        return len(set(next_items) & set(result)) / self.length

    def mrr(self, result, next_item, n):
        """Reciprocal rank of *next_item* within the top-*n* of *result*."""
        res = result[:n]
        if next_item in res.index:
            rank = res.index.get_loc(next_item) + 1
            return 1.0 / rank
        return 0

    def add_batch(self, result, next_item):
        """Score each column (prediction series) of *result* against its target."""
        for i, (part, series) in enumerate(result.items()):
            result.sort_values(part, ascending=False, inplace=True)
            self.add(series, next_item[i])

    def result(self):
        """Return (metric name with cut-off, score)."""
        return ((('' + str(self.length)) + ': '), (self.pos / self.test))
class CollectLayerHistogram(unittest.TestCase):
    """Tests for LayerHistogramCollector over a fake model's tensors."""
    def setUp(self):
        model = BuildFakeModel(width_mult=1)
        (layer_tensor, include_layer) = (OrderedDict(), OrderedDict())
        i = 0
        for (key, value) in model.state_dict().items():
            # Promote 0-d tensors to 1-d so histogramming has an axis.
            if (not value.ndim):
                value = np.expand_dims(value, axis=0)
            # Only the first 201 entries are marked for inclusion; every
            # entry is still recorded in layer_tensor.
            if (i > 200):
                pass
            else:
                include_layer[key] = np.array(value, dtype=np.float32)
            layer_tensor[key] = np.array(value, dtype=np.float32)
            i += 1
        self.layer_histogram_collector = LayerHistogramCollector(num_bins=8001, layer_tensor=layer_tensor, include_layer=include_layer, logger=logger)
    def test_layer_histogram(self):
        """Histograms must exist exactly for included-and-present layers."""
        self.layer_histogram_collector.collect()
        self.assertEqual((self.layer_histogram_collector.layer_tensor.keys() & self.layer_histogram_collector.include_layer.keys()), self.layer_histogram_collector.hist_dict.keys())
def load_tensorrt_plugin():
    """Load the compiled TensorRT custom-op shared library exactly once.

    A no-op when the library is already loaded or the shared object does
    not exist on disk.
    """
    global plugin_is_loaded
    lib_path = get_tensorrt_op_path()
    if plugin_is_loaded or not os.path.exists(lib_path):
        return
    ctypes.CDLL(lib_path)
    plugin_is_loaded = True
class KPFCNN(nn.Module):
    """Kernel-point FCNN for point-cloud segmentation.

    Reuses a KPCNN encoder for downsampling, then builds the decoder
    blocks listed in ``config.architecture`` from the first 'upsample'
    entry onward, finishing with a per-point segmentation head.
    """
    def __init__(self, config):
        super(KPFCNN, self).__init__()
        self.encoder = KPCNN(config)
        self.config = config
        self.blocks = nn.ModuleDict()
        # The decoder starts at the first 'upsample' entry of the architecture.
        start_i = 0
        for (block_i, block) in enumerate(config.architecture):
            if ('upsample' in block):
                start_i = block_i
                break
        # Radius and feature width at the deepest encoder layer.
        layer = (config.num_layers - 1)
        r = ((config.first_subsampling_dl * config.density_parameter) * (2 ** layer))
        in_fdim = (config.first_features_dim * (2 ** layer))
        out_fdim = in_fdim
        block_in_layer = 0
        for (block_i, block) in enumerate(config.architecture[start_i:]):
            is_strided = ('strided' in block)
            # 1.5x input width — presumably accounts for the skip features
            # concatenated after each upsample; confirm against get_block.
            self.blocks[f'layer{layer}/{block}'] = get_block(block, config, int((1.5 * in_fdim)), out_fdim, radius=r, strided=is_strided)
            in_fdim = out_fdim
            block_in_layer += 1
            if ('upsample' in block):
                # Moving one decoder level up: halve feature width and radius.
                out_fdim = (out_fdim // 2)
                r *= 0.5
                layer -= 1
                block_in_layer = 0
        self.blocks['segmentation_head'] = nn.Sequential(nn.Linear(out_fdim, config.first_features_dim), nn.BatchNorm1d(config.first_features_dim, momentum=config.batch_norm_momentum, eps=1e-06), nn.LeakyReLU(negative_slope=0.2), nn.Linear(config.first_features_dim, config.num_classes))
    def forward(self, inputs):
        features = self.feature_extraction(inputs)
        logits = self.segmentation_head(features)
        return logits
    def feature_extraction(self, inputs):
        """Run encoder then decoder; return per-point features."""
        # F holds the per-layer encoder feature maps (coarsest last).
        F = self.encoder.feature_extraction(inputs)
        features = F[(- 1)]
        layer = (self.config.num_layers - 1)
        r = ((self.config.first_subsampling_dl * self.config.density_parameter) * (2 ** layer))
        fdim = (self.config.first_features_dim * (2 ** layer))
        start_i = 0
        for (block_i, block) in enumerate(self.config.architecture):
            if ('upsample' in block):
                start_i = block_i
                break
        for (block_i, block) in enumerate(self.config.architecture[start_i:]):
            block_ops = self.blocks[f'layer{layer}/{block}']
            if ('upsample' in block):
                if (block == 'nearest_upsample'):
                    upsample_indices = inputs['upsamples'][(layer - 1)]
                else:
                    raise ValueError(f'Unknown block type. {block}')
                features = block_ops(upsample_indices, features)
            else:
                # Non-strided blocks query the same layer; strided blocks
                # pool into the next (coarser) layer's points.
                if (block in ['unary', 'simple', 'resnet', 'resnetb']):
                    query_points = inputs['points'][layer]
                    support_points = inputs['points'][layer]
                    neighbors_indices = inputs['neighbors'][layer]
                elif (block in ['simple_strided', 'resnetb_strided', 'resnetb_deformable_strided']):
                    query_points = inputs['points'][(layer + 1)]
                    support_points = inputs['points'][layer]
                    neighbors_indices = inputs['pools'][layer]
                else:
                    raise ValueError(f'Unknown block type. {block}')
                features = block_ops(query_points, support_points, neighbors_indices, features)
            if ('upsample' in block):
                layer -= 1
                r *= 0.5
                fdim = (fdim // 2)
                # Concatenate the encoder skip features of the new layer.
                features = torch.cat((features, F[layer]), dim=1)
        return features
    def segmentation_head(self, features):
        logits = self.blocks['segmentation_head'](features)
        return logits
def InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, **kwargs):
    """Build a 3D adaptation of the Inception-v3 architecture.

    Mirrors the Keras InceptionV3 graph with 3D convolutions/poolings
    (``conv3d_bn`` / ``MaxPooling3D`` / ``AveragePooling3D``).

    Args:
        include_top: include the classification head.
        weights: None, 'imagenet', or a path to a weights file.
        input_tensor / input_shape: model input specification.
        pooling: 'avg' or 'max' global pooling when ``include_top`` is False.
        classes: output classes (must be 1000 for imagenet weights + top).

    Returns:
        A Keras Model.

    Raises:
        ValueError: on an invalid ``weights`` value, or imagenet weights
            with ``include_top`` and ``classes != 1000``.
    """
    global backend, layers, models, keras_utils
    (backend, layers, models, keras_utils) = get_submodules_from_kwargs(kwargs)
    if (not ((weights in {'imagenet', None}) or os.path.exists(weights))):
        raise ValueError('The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.')
    if ((weights == 'imagenet') and include_top and (classes != 1000)):
        raise ValueError('If using `weights` as `"imagenet"` with `include_top` as true, `classes` should be 1000')
    if (input_tensor is None):
        img_input = layers.Input(shape=input_shape)
    elif (not backend.is_keras_tensor(input_tensor)):
        img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor
    # Channel axis of a 5D (batch, d, h, w, c) tensor in channels_last;
    # axis 1 in channels_first.
    if (backend.image_data_format() == 'channels_first'):
        channel_axis = 1
    else:
        channel_axis = 4
    # Stem.
    x = conv3d_bn(img_input, 32, 3, 3, 3, strides=(2, 2, 2), padding='valid')
    x = conv3d_bn(x, 32, 3, 3, 3, padding='valid')
    x = conv3d_bn(x, 64, 3, 3, 3)
    x = layers.MaxPooling3D((3, 3, 3), strides=(2, 2, 2))(x)
    x = conv3d_bn(x, 80, 1, 1, 1, padding='valid')
    x = conv3d_bn(x, 192, 3, 3, 3, padding='valid')
    x = layers.MaxPooling3D((3, 3, 3), strides=(2, 2, 2))(x)
    # Inception block mixed0: 1x1 / 5x5 / double-3x3 / pool branches.
    branch1x1 = conv3d_bn(x, 64, 1, 1, 1)
    branch5x5 = conv3d_bn(x, 48, 1, 1, 1)
    branch5x5 = conv3d_bn(branch5x5, 64, 5, 5, 5)
    branch3x3dbl = conv3d_bn(x, 64, 1, 1, 1)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3)
    branch_pool = layers.AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
    branch_pool = conv3d_bn(branch_pool, 32, 1, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed0')
    # mixed1.
    branch1x1 = conv3d_bn(x, 64, 1, 1, 1)
    branch5x5 = conv3d_bn(x, 48, 1, 1, 1)
    branch5x5 = conv3d_bn(branch5x5, 64, 5, 5, 5)
    branch3x3dbl = conv3d_bn(x, 64, 1, 1, 1)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3)
    branch_pool = layers.AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
    branch_pool = conv3d_bn(branch_pool, 64, 1, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed1')
    # mixed2.
    branch1x1 = conv3d_bn(x, 64, 1, 1, 1)
    branch5x5 = conv3d_bn(x, 48, 1, 1, 1)
    branch5x5 = conv3d_bn(branch5x5, 64, 5, 5, 5)
    branch3x3dbl = conv3d_bn(x, 64, 1, 1, 1)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3)
    branch_pool = layers.AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
    branch_pool = conv3d_bn(branch_pool, 64, 1, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed2')
    # mixed3: strided reduction block.
    branch3x3 = conv3d_bn(x, 384, 3, 3, 3, strides=(2, 2, 2), padding='valid')
    branch3x3dbl = conv3d_bn(x, 64, 1, 1, 1)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3)
    branch3x3dbl = conv3d_bn(branch3x3dbl, 96, 3, 3, 3, strides=(2, 2, 2), padding='valid')
    branch_pool = layers.MaxPooling3D((3, 3, 3), strides=(2, 2, 2))(x)
    x = layers.concatenate([branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3')
    # mixed4: factorized 7x7 branches.
    branch1x1 = conv3d_bn(x, 192, 1, 1, 1)
    branch7x7 = conv3d_bn(x, 128, 1, 1, 1)
    branch7x7 = conv3d_bn(branch7x7, 128, 1, 7, 1)
    branch7x7 = conv3d_bn(branch7x7, 192, 7, 1, 1)
    branch7x7dbl = conv3d_bn(x, 128, 1, 1, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 128, 7, 1, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 128, 1, 7, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 128, 7, 1, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 192, 1, 7, 1)
    branch_pool = layers.AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
    branch_pool = conv3d_bn(branch_pool, 192, 1, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name='mixed4')
    # mixed5 and mixed6.
    for i in range(2):
        branch1x1 = conv3d_bn(x, 192, 1, 1, 1)
        branch7x7 = conv3d_bn(x, 160, 1, 1, 1)
        branch7x7 = conv3d_bn(branch7x7, 160, 1, 7, 1)
        branch7x7 = conv3d_bn(branch7x7, 192, 7, 1, 1)
        branch7x7dbl = conv3d_bn(x, 160, 1, 1, 1)
        branch7x7dbl = conv3d_bn(branch7x7dbl, 160, 7, 1, 1)
        branch7x7dbl = conv3d_bn(branch7x7dbl, 160, 1, 7, 1)
        branch7x7dbl = conv3d_bn(branch7x7dbl, 160, 7, 1, 1)
        branch7x7dbl = conv3d_bn(branch7x7dbl, 192, 1, 7, 1)
        branch_pool = layers.AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
        branch_pool = conv3d_bn(branch_pool, 192, 1, 1, 1)
        x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name=('mixed' + str((5 + i))))
    # mixed7.
    branch1x1 = conv3d_bn(x, 192, 1, 1, 1)
    branch7x7 = conv3d_bn(x, 192, 1, 1, 1)
    branch7x7 = conv3d_bn(branch7x7, 192, 1, 7, 1)
    branch7x7 = conv3d_bn(branch7x7, 192, 7, 1, 1)
    branch7x7dbl = conv3d_bn(x, 192, 1, 1, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 192, 7, 1, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 192, 1, 7, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 192, 7, 1, 1)
    branch7x7dbl = conv3d_bn(branch7x7dbl, 192, 1, 7, 1)
    branch_pool = layers.AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
    branch_pool = conv3d_bn(branch_pool, 192, 1, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name='mixed7')
    # mixed8: strided reduction block.
    branch3x3 = conv3d_bn(x, 192, 1, 1, 1)
    branch3x3 = conv3d_bn(branch3x3, 320, 3, 3, 3, strides=(2, 2, 2), padding='valid')
    branch7x7x3 = conv3d_bn(x, 192, 1, 1, 1)
    branch7x7x3 = conv3d_bn(branch7x7x3, 192, 1, 7, 1)
    branch7x7x3 = conv3d_bn(branch7x7x3, 192, 7, 1, 1)
    branch7x7x3 = conv3d_bn(branch7x7x3, 192, 3, 3, 3, strides=(2, 2, 2), padding='valid')
    branch_pool = layers.MaxPooling3D((3, 3, 3), strides=(2, 2, 2))(x)
    x = layers.concatenate([branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8')
    # mixed9 and mixed10: expanded-filter-bank blocks.
    for i in range(2):
        branch1x1 = conv3d_bn(x, 320, 1, 1, 1)
        branch3x3 = conv3d_bn(x, 384, 1, 1, 1)
        branch3x3_1 = conv3d_bn(branch3x3, 384, 1, 3, 1)
        branch3x3_2 = conv3d_bn(branch3x3, 384, 3, 1, 1)
        branch3x3 = layers.concatenate([branch3x3_1, branch3x3_2], axis=channel_axis, name=('mixed9_' + str(i)))
        branch3x3dbl = conv3d_bn(x, 448, 1, 1, 1)
        branch3x3dbl = conv3d_bn(branch3x3dbl, 384, 3, 3, 3)
        branch3x3dbl_1 = conv3d_bn(branch3x3dbl, 384, 1, 3, 1)
        branch3x3dbl_2 = conv3d_bn(branch3x3dbl, 384, 3, 1, 1)
        branch3x3dbl = layers.concatenate([branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)
        branch_pool = layers.AveragePooling3D((3, 3, 3), strides=(1, 1, 1), padding='same')(x)
        branch_pool = conv3d_bn(branch_pool, 192, 1, 1, 1)
        x = layers.concatenate([branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name=('mixed' + str((9 + i))))
    # Classification head or optional global pooling.
    if include_top:
        x = layers.GlobalAveragePooling3D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='predictions')(x)
    elif (pooling == 'avg'):
        x = layers.GlobalAveragePooling3D()(x)
    elif (pooling == 'max'):
        x = layers.GlobalMaxPooling3D()(x)
    # Build the model from the original source inputs if a tensor was given.
    if (input_tensor is not None):
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    model = models.Model(inputs, x, name='inception_v3')
    # Load pretrained or user-supplied weights.
    if (weights == 'imagenet'):
        if include_top:
            weights_path = keras_utils.get_file('inception_v3_weights_tf_dim_ordering_tf_kernels.h5', WEIGHTS_PATH, cache_subdir='models', file_hash='9a0d58056eeedaa3f26cb7ebd46da564')
        else:
            weights_path = keras_utils.get_file('inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5', WEIGHTS_PATH_NO_TOP, cache_subdir='models', file_hash='bcbd6486424b2319ff4ef7d526e38f63')
        model.load_weights(weights_path)
    elif (weights is not None):
        model.load_weights(weights)
    return model
# NOTE(review): the next line looks like a mangled decorator (e.g.
# '@deprecated(version="2.0")') — as written it is a syntax error;
# confirm against the upstream source.
(version='2.0')
class Optimizers(object):
    """Registry-style lookup of optimizer classes for one framework."""
    def __init__(self, framework):
        assert (framework in ('tensorflow', 'pytorch', 'pytorch_fx')), 'framework support tensorflow pytorch'
        self.optimizers = framework_optimizers[framework]().optimizers
    def __getitem__(self, optimizer_type):
        """Return the optimizer class registered under *optimizer_type*."""
        assert (optimizer_type in self.optimizers.keys()), 'only support optimizers in {}'.format(self.optimizers.keys())
        return self.optimizers[optimizer_type]
    def register(self, name, optimizer_cls):
        """Register a new optimizer class; *name* must be unused."""
        assert (name not in self.optimizers.keys()), 'registered optimizer name already exists.'
        self.optimizers.update({name: optimizer_cls})
class Permute(torch.nn.Module):
    """Module wrapper around ``Tensor.permute`` (result made contiguous)."""

    def __init__(self, dims):
        super().__init__()
        # Target dimension order, e.g. (0, 2, 1).
        self.dims = dims

    def forward(self, input: Tensor) -> Tensor:
        permuted = input.permute(self.dims)
        return permuted.contiguous()
def test_modelcheckpoint_get_state():
    """Pickling a ModelCheckpoint must not persist trainer/model refs.

    Relies on module-level fixtures (``model``, ``X_wide``, ``X_tab``,
    ``target``) defined elsewhere in this test module.
    """
    fpath = 'tests/test_model_functioning/modelcheckpoint/'
    model_checkpoint = ModelCheckpoint(filepath='/'.join([fpath, 'weights_out']), monitor='val_loss')
    trainer = Trainer(model, objective='binary', callbacks=[model_checkpoint], verbose=0)
    trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target, n_epochs=1, batch_size=16)
    # Round-trip through pickle; __getstate__ should strip the heavy refs.
    with open('/'.join([fpath, 'checkpoint.p']), 'wb') as f:
        pickle.dump(model_checkpoint, f)
    with open('/'.join([fpath, 'checkpoint.p']), 'rb') as f:
        model_checkpoint = pickle.load(f)
    self_dict_keys = model_checkpoint.__dict__.keys()
    no_trainer = ('trainer' not in self_dict_keys)
    no_model = ('model' not in self_dict_keys)
    # Clean up the checkpoint directory created by the run.
    shutil.rmtree('tests/test_model_functioning/modelcheckpoint/')
    assert (no_trainer and no_model)
def train(model, loader, optimizer, device, weights):
    """Run one training epoch over ``loader``.

    Parameters
    ----------
    model : module taking ``(x, edge_index)`` and returning log-probabilities.
    loader : iterable of graph batches exposing ``x``, ``edge_index``, ``y``
        and a boolean ``train_mask``.
    optimizer : torch optimizer stepping ``model``'s parameters.
    device : device each batch is moved to via ``data.to(device)``.
    weights : optional per-class weight tensor for the NLL loss.

    Returns
    -------
    tuple of (average loss, accuracy) over all training-mask nodes.
    """
    model.train()
    total_loss = 0
    total_correct = 0
    total_examples = 0
    for data in loader:
        data = data.to(device)
        # Skip batches that contribute no training nodes.
        if (data.train_mask.sum() == 0):
            continue
        optimizer.zero_grad()
        out = model(data.x, data.edge_index)[data.train_mask]
        y = data.y.squeeze(1)[data.train_mask]
        loss = F.nll_loss(out, y, weight=weights)
        loss.backward()
        optimizer.step()
        # Count each masked node exactly once.  The original incremented
        # total_examples twice per batch (via train_mask.sum() AND y.size(0),
        # which are the same count), halving both reported averages.
        num_examples = y.size(0)
        total_loss += (loss.item() * num_examples)
        total_correct += out.argmax(dim=(- 1)).eq(y).sum().item()
        total_examples += num_examples
    return ((total_loss / total_examples), (total_correct / total_examples))
def tune_model(model_type, X_tune, y_tune, X_val, y_val, tree_type=None, scoring='nll', bagging_frac=1.0, gridsearch=True, cond_mean_type='base', n_stopping_rounds=25, in_dir=None, logger=None, verbose=0, n_jobs=1):
    """Tune a probabilistic regressor on (X_tune, y_tune), scored on (X_val, y_val).

    Strategy depends on ``model_type``/``tree_type``: either an exhaustive
    grid search ranked by validation MSE, or the library's native early
    stopping to pick ``n_estimators``.  For 'ibug'/'knn' a second stage tunes
    the instance-based wrapper (k, min_scale, ...).  If ``in_dir`` is given,
    previously saved models are loaded instead of re-tuning.

    Returns a dict with the base model, tuned validation model (and wrapper),
    best hyperparameters, timing, and validation loc/scale predictions.
    """
    start = time.time()
    # Untuned base estimator; every tuned candidate below is a clone of it.
    model = get_model(model_type=model_type, tree_type=tree_type, scoring=scoring, bagging_frac=bagging_frac)
    model_val = None
    tune_dict = {'base_model': model}
    if (in_dir is not None):
        # Reuse previously tuned/saved models instead of re-running the search.
        if logger:
            logger.info(f'''
loading saved validation model from {in_dir}/...''')
        result = np.load(os.path.join(in_dir, 'results.npy'), allow_pickle=True)[()]
        model_val = util.load_model(model_type=tree_type, fp=result['saved_models']['model_val'])
        model_test = util.load_model(model_type=tree_type, fp=result['saved_models']['model_test'])
        tune_dict['model_val'] = model_val
        tune_dict['model_test'] = model_test
        tune_dict['tune_time_model'] = result['timing']['tune_model']
    else:
        param_grid = get_params(model_type=model_type, tree_type=tree_type, n_train=len(X_tune))
        # NOTE(review): reads the module-level ``args`` namespace -- depends on
        # script state outside this function.
        if ((model_type == 'ibug') and (tree_type == 'lgb') and (args.custom_dir == 'ibug_bart')):
            param_grid['n_estimators'] = [10, 50, 100, 200]
        if ((model_type in ['constant', 'ibug', 'pgbm', 'bart', 'cbu', 'knn']) and gridsearch):
            # Exhaustive grid search; candidates ranked by validation MSE (lower wins).
            if logger:
                logger.info('\nmodel: {}, param_grid: {}'.format(model_type, param_grid))
            cv_results = []
            best_score = None
            best_model = None
            best_params = None
            param_dicts = list(util.product_dict(**param_grid))
            for (i, param_dict) in enumerate(param_dicts):
                temp_model = clone(model).set_params(**param_dict).fit(X_tune, y_tune)
                y_val_hat = temp_model.predict(X_val)
                # 'score' is injected into the param dict for ranking/reporting.
                param_dict['score'] = mean_squared_error(y_val, y_val_hat)
                cv_results.append(param_dict)
                if logger:
                    logger.info(f"[{(i + 1):,}/{len(param_dicts):,}] {param_dict}, cum. time: {(time.time() - start):.3f}s, score: {param_dict['score']:.3f}")
                if ((best_score is None) or (param_dict['score'] < best_score)):
                    best_score = param_dict['score']
                    best_model = temp_model
                    best_params = param_dict
            df = pd.DataFrame(cv_results).sort_values('score', ascending=True)
            # Drop the ranking key so best_params holds only real hyperparameters.
            del best_params['score']
            if logger:
                logger.info(f'''
gridsearch results:
{df}''')
            assert (best_model is not None)
            model_val = best_model
        elif (model_type in ['constant', 'ibug', 'knn']):
            # No grid search: use each library's early stopping to pick n_estimators.
            assert (tree_type in ['lgb', 'xgb', 'cb', 'ngboost', 'pgbm'])
            if (tree_type == 'lgb'):
                model_val = clone(model).fit(X_tune, y_tune, eval_set=[(X_val, y_val)], eval_metric='mse', early_stopping_rounds=n_stopping_rounds)
                best_n_estimators = model_val.best_iteration_
            elif (tree_type == 'xgb'):
                model_val = clone(model).fit(X_tune, y_tune, eval_set=[(X_val, y_val)], early_stopping_rounds=n_stopping_rounds)
                best_n_estimators = model_val.best_ntree_limit
            elif (tree_type == 'cb'):
                model_val = clone(model).fit(X_tune, y_tune, eval_set=[(X_val, y_val)], early_stopping_rounds=n_stopping_rounds)
                best_n_estimators = model_val.tree_count_
            elif (tree_type == 'ngboost'):
                model_val = clone(model).fit(X_tune, y_tune, X_val=X_val, Y_val=y_val, early_stopping_rounds=n_stopping_rounds)
                # NGBoost reports the best iteration index, or None if it never improved.
                if (model_val.best_val_loss_itr is None):
                    best_n_estimators = model_val.n_estimators
                else:
                    best_n_estimators = (model_val.best_val_loss_itr + 1)
            elif (tree_type == 'pgbm'):
                model_val = clone(model).fit(X_tune, y_tune, eval_set=(X_val, y_val), early_stopping_rounds=n_stopping_rounds)
                best_n_estimators = model_val.learner_.best_iteration
            else:
                raise ValueError(f'Unknown tree type {tree_type}')
            # Never go below the global floor on ensemble size.
            best_n_estimators = max(best_n_estimators, MIN_NUM_TREES)
            best_params = {'n_estimators': best_n_estimators}
        elif (model_type == 'bart'):
            # BART/CBU have no early stopping here; fit as-is.
            model_val = clone(model).fit(X_tune, y_tune)
            best_params = {}
        elif (model_type == 'cbu'):
            model_val = clone(model).fit(X_tune, y_tune)
            best_params = {}
        elif (model_type == 'pgbm'):
            model_val = clone(model).fit(X_tune, y_tune, eval_set=(X_val, y_val), early_stopping_rounds=n_stopping_rounds)
            best_n_estimators = model_val.learner_.best_iteration
            best_n_estimators = max(best_n_estimators, MIN_NUM_TREES)
            best_params = {'n_estimators': best_n_estimators}
        else:
            assert (model_type == 'ngboost')
            model_val = clone(model).fit(X_tune, y_tune, X_val=X_val, Y_val=y_val, early_stopping_rounds=n_stopping_rounds)
            if (model_val.best_val_loss_itr is None):
                best_n_estimators = model_val.n_estimators
            else:
                best_n_estimators = (model_val.best_val_loss_itr + 1)
            best_n_estimators = max(best_n_estimators, MIN_NUM_TREES)
            best_params = {'n_estimators': best_n_estimators}
        tune_dict['model_val'] = model_val
        tune_dict['best_params'] = best_params
        tune_dict['tune_time_model'] = (time.time() - start)
        if logger:
            logger.info(f'''
best params: {tune_dict['best_params']}''')
            logger.info(f"tune time (model): {tune_dict['tune_time_model']:.3f}s")
    tune_dict['tune_time_extra'] = 0
    if (model_type in ['ibug', 'knn']):
        # Second stage: tune the instance-based wrapper around the tree model.
        best_params_wrapper = {}
        WrapperClass = (IBUGWrapper if (model_type == 'ibug') else KNNWrapper)
        model_val_wrapper = WrapperClass(scoring=scoring, variance_calibration=False, cond_mean_type=cond_mean_type, verbose=verbose, n_jobs=n_jobs, logger=logger)
        if logger:
            logger.info('\nTuning k and min. scale...')
        start = time.time()
        model_val_wrapper = model_val_wrapper.fit(model_val, X_tune, y_tune, X_val=X_val, y_val=y_val)
        best_params_wrapper = {'k': model_val_wrapper.k_, 'min_scale': model_val_wrapper.min_scale_, 'cond_mean_type': model_val_wrapper.cond_mean_type}
        if (model_type == 'knn'):
            best_params_wrapper['max_feat'] = model_val_wrapper.max_feat_
        tune_dict['model_val_wrapper'] = model_val_wrapper
        tune_dict['best_params_wrapper'] = best_params_wrapper
        tune_dict['WrapperClass'] = WrapperClass
        tune_dict['tune_time_extra'] = (time.time() - start)
        if logger:
            logger.info(f'best params (wrapper): {best_params_wrapper}')
            logger.info(f"tune time (extra): {tune_dict['tune_time_extra']:.3f}s")
    # Validation-set predictive distribution (location & scale) for later evaluation.
    if (model_type in ['ibug', 'knn']):
        (loc_val, scale_val) = (model_val_wrapper.loc_val_, model_val_wrapper.scale_val_)
    elif (model_type in ['constant', 'ngboost', 'pgbm', 'bart', 'cbu']):
        (loc_val, scale_val) = get_loc_scale(model_val, model_type, X=X_val, y_train=y_tune)
    tune_dict['loc_val'] = loc_val
    tune_dict['scale_val'] = scale_val
    return tune_dict
class AutoEncoder(nn.Module):
    """Convolutional encoder/decoder over an image concatenated with a
    prototype, whose reconstruction is scored against a same-class and an
    opposite-class prototype through a shared 1x1 sigmoid head."""

    def __init__(self):
        super(AutoEncoder, self).__init__()
        # 4-channel input (image + prototype); two 2x pools followed by two 2x
        # upsamples, so spatial size is preserved when H and W divide by 4.
        self.autoencoder = nn.Sequential(
            nn.Conv2d(4, 8, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.AvgPool2d(kernel_size=2, stride=2),
            nn.Conv2d(8, 12, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.AvgPool2d(kernel_size=2, stride=2),
            nn.Conv2d(12, 256, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode='nearest'),
            nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode='nearest'),
        )
        # 128 decoded channels + 3 prototype channels -> one sigmoid map.
        self.protocombiner = nn.Sequential(
            nn.Conv2d(131, 1, kernel_size=1, stride=1, padding=0),
            nn.Sigmoid(),
        )

    def forward(self, imgs, same_proto, oppo_proto):
        decoded = self.autoencoder(torch.cat([imgs, same_proto], dim=1))
        same_scored = self.protocombiner(torch.cat([decoded, same_proto], dim=1))
        oppo_scored = self.protocombiner(torch.cat([decoded, oppo_proto], dim=1))
        return (same_scored, oppo_scored)
class SpeedController(PID):
    """PID controller specialized for longitudinal speed tracking."""

    def __init__(self, params: Optional[PIDParam]=None):
        # Fall back to the speed-specific defaults when no params are given.
        params = (SpeedControllerParam() if (params is None) else params)
        super(SpeedController, self).__init__(params)

    @classmethod
    def from_vehicle_params(cls, model_param: ModelParameters) -> 'SpeedController':
        """Build a controller whose setpoint/output limits come from the vehicle model.

        Fixed: the method took ``cls`` but was missing ``@classmethod``, so a
        call on the class (``SpeedController.from_vehicle_params(mp)``) would
        have bound the model parameters to ``cls``.
        """
        params = SpeedControllerParam(setpoint_minmax=model_param.vx_limits, output_minmax=model_param.acc_limits)
        return SpeedController(params)
def load_predictions(pred_path, gt_path, w2i_path):
    """Load raw predictions and ground truth, keeping only bbox predictions
    whose word is actually annotated in the corresponding GT frame.

    :param pred_path: JSON with 'raw_bbox' (per-answer bbox predictions) and
        'ts_answer' (passed through unchanged).
    :param gt_path: JSON list of entries with 'qid', 'answer_idx', 'bbox'.
    :param w2i_path: JSON word -> index vocabulary.
    :return: dict(ts_answer=..., bbox={'{vid}_{qid}_{img_idx:05d}': [[word, score, bbox], ...]}).
    """
    raw_preds = load_json(pred_path)
    gt_data = load_json(gt_path)
    word2idx = load_json(w2i_path)
    idx2word = {i: w for (w, i) in word2idx.items()}
    # qid -> ground-truth answer index / annotated boxes per frame.
    qid2ans = {int(e['qid']): int(e['answer_idx']) for e in gt_data}
    qid2bbox = {int(e['qid']): e['bbox'] for e in gt_data}
    bbox_preds = dict()
    for e in raw_preds['raw_bbox']:
        # Recover the qid from whichever of the 5 answer slots is non-empty.
        qid = None
        for i in range(5):
            if (len(e[str(i)]) > 0):
                qid = e[str(i)][0]['qid']
        assert (qid is not None)
        ans_idx = qid2ans[int(qid)]
        cur_gt_bbox = qid2bbox[int(qid)]
        # Only the predictions for the correct answer are evaluated.
        cur_correct_bbox_preds = e[str(ans_idx)]
        key_template = '{vid_name}_{qid}_{img_idx:05d}'
        for p in cur_correct_bbox_preds:
            # Word ids annotated in this GT frame; labels outside the vocab map to <unk>.
            annotated_word_ids = [(word2idx[clean_label(b['label'])] if (clean_label(b['label']) in word2idx) else word2idx['<unk>']) for b in cur_gt_bbox[str(p['img_idx'])]]
            collected_bbox = []
            for (idx, b) in enumerate(p['bbox']):
                # Keep the box only when the predicted word is among the annotations.
                if (p['word'] in annotated_word_ids):
                    collected_bbox.append([idx2word[p['word']], float(p['pred'][idx]), b])
            key_str = key_template.format(vid_name=p['vid_name'], qid=qid, img_idx=p['img_idx'])
            if (key_str not in bbox_preds):
                bbox_preds[key_str] = []
            bbox_preds[key_str].extend(collected_bbox)
    preds = dict(ts_answer=raw_preds['ts_answer'], bbox=bbox_preds)
    return preds
def tryLoad(name, default=None):
    """Return ``user_config.<name>`` if the module and attribute exist.

    Fixed: the original returned ``None`` when ``user_config`` could not be
    imported, silently ignoring the caller's ``default``; both failure modes
    (missing module, missing attribute) now fall back to ``default``.  The
    bare ``except`` is also narrowed to ``ImportError``.
    """
    try:
        import user_config
    except ImportError:
        return default
    # getattr-with-default collapses the hasattr/getattr pair into one lookup.
    return getattr(user_config, name, default)
class MaskedSeparableConv2D(_MaskedConv):
    """Separable 2-D convolution whose depthwise and pointwise kernels are
    element-wise masked for incremental multi-task learning.

    Each kernel has a companion integer ``mask`` variable; at build time the
    mask is converted into a boolean weighting so the forward pass only uses
    weights visible to ``self.task_id`` (see ``build``).
    """

    def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1), depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer='global_uniform', pointwise_initializer='global_uniform', depthwise_regularizer=None, pointwise_regularizer=None, bias_initializer=init_ops.zeros_initializer(), bias_regularizer=None, activity_regularizer=None, trainable=True, name=None, task_id=1, **kwargs):
        super(MaskedSeparableConv2D, self).__init__(rank=2, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, bias_initializer=bias_initializer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, trainable=trainable, name=name, task_id=task_id, **kwargs)
        # Separable-specific hyperparameters not handled by the base class.
        self.depth_multiplier = depth_multiplier
        self.depthwise_initializer = depthwise_initializer
        self.pointwise_initializer = pointwise_initializer
        self.depthwise_regularizer = depthwise_regularizer
        self.pointwise_regularizer = pointwise_regularizer

    def build(self, input_shape):
        """Create kernels, masks and thresholds, then register them in the
        masking collections."""
        input_shape = tensor_shape.TensorShape(input_shape)
        channel_axis = (1 if (self.data_format == 'channels_first') else (- 1))
        if (input_shape[channel_axis].value is None):
            raise ValueError('The channel dimension of the inputs should be defined. Found `None`.')
        input_dim = input_shape[channel_axis].value
        # depthwise: (kh, kw, in, mult); pointwise: (1, 1, in*mult, filters).
        depthwise_kernel_shape = (self.kernel_size + (input_dim, self.depth_multiplier))
        pointwise_kernel_shape = (((1,) * self.rank) + ((self.depth_multiplier * input_dim), self.filters))
        # Integer masks (task ownership) plus trainable kernels and per-kernel thresholds.
        self.depthwise_mask = self.add_variable(name='depthwise_mask', shape=depthwise_kernel_shape, initializer=init_ops.zeros_initializer(), trainable=False, dtype=tf.int8)
        self.depthwise_kernel = self.add_variable(name='depthwise_kernel', shape=depthwise_kernel_shape, initializer=self.depthwise_initializer, regularizer=self.depthwise_regularizer, trainable=True, dtype=self.dtype)
        self.depthwise_threshold = self.add_variable(name='depthwise_threshold', shape=[], initializer=init_ops.zeros_initializer(), trainable=False, dtype=self.dtype)
        self.pointwise_mask = self.add_variable(name='pointwise_mask', shape=pointwise_kernel_shape, initializer=init_ops.zeros_initializer(), trainable=False, dtype=tf.int8)
        self.pointwise_kernel = self.add_variable(name='pointwise_kernel', shape=pointwise_kernel_shape, initializer=self.pointwise_initializer, regularizer=self.pointwise_regularizer, trainable=True, dtype=self.dtype)
        self.pointwise_threshold = self.add_variable(name='pointwise_threshold', shape=[], initializer=init_ops.zeros_initializer(), trainable=False, dtype=self.dtype)
        # Optionally open a fraction of mask locations for the current task;
        # presumably change_mask(_and_weight) reassigns mask ids (and resets
        # weights) according to FLAGS.open_ratio -- behavior defined elsewhere.
        if (FLAGS.reset_weights_in_new_locations and FLAGS.open_ratio):
            depthwise_conditional_op = change_mask_and_weight(self.depthwise_mask, self.depthwise_kernel, self.task_id, FLAGS.open_ratio, FLAGS.cell_scope_to_be_assigned_current_task_id)
        elif FLAGS.open_ratio:
            depthwise_conditional_op = change_mask(self.depthwise_mask, self.task_id, FLAGS.open_ratio, FLAGS.cell_scope_to_be_assigned_current_task_id)
        else:
            depthwise_conditional_op = control_flow_ops.no_op()
        if (FLAGS.reset_weights_in_new_locations and FLAGS.open_ratio):
            pointwise_conditional_op = change_mask_and_weight(self.pointwise_mask, self.pointwise_kernel, self.task_id, FLAGS.open_ratio, FLAGS.cell_scope_to_be_assigned_current_task_id)
        elif FLAGS.open_ratio:
            pointwise_conditional_op = change_mask(self.pointwise_mask, self.task_id, FLAGS.open_ratio, FLAGS.cell_scope_to_be_assigned_current_task_id)
        else:
            pointwise_conditional_op = control_flow_ops.no_op()
        # Masked kernels must be computed after any mask-update ops above.
        with tf.control_dependencies([depthwise_conditional_op, pointwise_conditional_op]):
            if FLAGS.share_only_task_1:
                # Visible weights: owned by task 1 or by the current task only.
                depthwise_boolean_mask = tf.cast(tf.logical_or(tf.equal(tf.identity(self.depthwise_mask), 1), tf.equal(tf.identity(self.depthwise_mask), self.task_id)), dtype=tf.float32)
                pointwise_boolean_mask = tf.cast(tf.logical_or(tf.equal(tf.identity(self.pointwise_mask), 1), tf.equal(tf.identity(self.pointwise_mask), self.task_id)), dtype=tf.float32)
            else:
                # Visible weights: owned by any task id in [1, task_id].
                depthwise_boolean_mask = tf.cast(tf.logical_and(tf.greater_equal(tf.identity(self.depthwise_mask), 1), tf.less_equal(tf.identity(self.depthwise_mask), self.task_id)), dtype=tf.float32)
                pointwise_boolean_mask = tf.cast(tf.logical_and(tf.greater_equal(tf.identity(self.pointwise_mask), 1), tf.less_equal(tf.identity(self.pointwise_mask), self.task_id)), dtype=tf.float32)
            self.masked_depthwise_kernel = math_ops.multiply(depthwise_boolean_mask, self.depthwise_kernel, MASKED_WEIGHT_NAME)
            self.masked_pointwise_kernel = math_ops.multiply(pointwise_boolean_mask, self.pointwise_kernel, MASKED_WEIGHT_NAME)
        # Register once per layer (guarded by membership check on the mask).
        if (self.depthwise_mask not in ops.get_collection_ref(MASK_COLLECTION)):
            ops.add_to_collection(MASK_COLLECTION, self.depthwise_mask)
            ops.add_to_collection(MASK_COLLECTION, self.pointwise_mask)
            ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_depthwise_kernel)
            ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_pointwise_kernel)
            ops.add_to_collection(THRESHOLD_COLLECTION, self.depthwise_threshold)
            ops.add_to_collection(THRESHOLD_COLLECTION, self.pointwise_threshold)
            ops.add_to_collection(WEIGHT_COLLECTION, self.depthwise_kernel)
            ops.add_to_collection(WEIGHT_COLLECTION, self.pointwise_kernel)
        if self.use_bias:
            # Bias is per-task: create it under a task-specific variable scope.
            original_scope = self._scope
            with tf.variable_scope('task_{}'.format(self.task_id)) as scope:
                self._scope = scope
                self.bias = self.add_variable(name='bias', shape=(self.filters,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, trainable=True, dtype=self.dtype)
            self._scope = original_scope
        else:
            self.bias = None
        self.input_spec = base.InputSpec(ndim=(self.rank + 2), axes={channel_axis: input_dim})
        self.built = True

    def call(self, inputs):
        """Apply the masked separable convolution (plus bias/activation)."""
        if (self.data_format == 'channels_last'):
            strides = (((1,) + self.strides) + (1,))
        else:
            strides = ((1, 1) + self.strides)
        outputs = nn.separable_conv2d(inputs, self.masked_depthwise_kernel, self.masked_pointwise_kernel, strides=strides, padding=self.padding.upper(), rate=self.dilation_rate, data_format=utils.convert_data_format(self.data_format, ndim=4))
        if (self.bias is not None):
            if (self.data_format == 'channels_first'):
                # self.rank is fixed to 2 in __init__, so only the rank == 2
                # path is live; the other branches mirror the generic template.
                if (self.rank == 1):
                    bias = array_ops.reshape(self.bias, (1, self.filters, 1))
                    outputs += bias
                if (self.rank == 2):
                    outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
                if (self.rank == 3):
                    # bias_add has no 5-D NCHW support: fold W*D, add, unfold.
                    outputs_shape = outputs.shape.as_list()
                    outputs_4d = array_ops.reshape(outputs, [outputs_shape[0], outputs_shape[1], (outputs_shape[2] * outputs_shape[3]), outputs_shape[4]])
                    outputs_4d = nn.bias_add(outputs_4d, self.bias, data_format='NCHW')
                    outputs = array_ops.reshape(outputs_4d, outputs_shape)
            else:
                outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
        if (self.activation is not None):
            return self.activation(outputs)
        return outputs
def default_install_dir(target_abi):
    """Return the on-device scratch directory used by mace_run for ``target_abi``."""
    # Android ABIs need a path under /data/local/tmp; everything else uses /tmp.
    if target_abi in ('armeabi-v7a', 'arm64-v8a'):
        return '/data/local/tmp/mace_run'
    return '/tmp/mace_run'
class PruningCallbacks(BaseCallbacks):
    """Callbacks that drive pruners over a PyTorch or Keras model.

    Builds pruners from the pruning config, registers their hook methods,
    and reports the resulting sparsity at the end of training.
    """

    def __init__(self, conf=None, model=None):
        super(PruningCallbacks, self).__init__(conf=conf, model=model)
        # Per-pruner configuration dicts parsed from the user config.
        self.pruners_info = process_config(self.conf)
        self.pruners = []
        self._generate_pruners()
        self.generate_hooks()

    def on_train_end(self):
        """Run pruner end-of-training hooks, then log the achieved sparsity."""
        for on_train_end_hook in self.hooks_dict['on_train_end']:
            on_train_end_hook()
        if ((self.conf.framework == 'pytorch') and isinstance(self.model.model, torch.nn.Module)):
            get_sparsity_ratio(self.pruners, self.model)
        elif ((self.conf.framework == 'keras') and isinstance(self.model.model, tf.keras.Model)):
            get_sparsity_ratio_tf(self.pruners, self.model)

    def __repr__(self):
        return 'Pruning Callbacks'

    def generate_hooks(self):
        """Register every hook method that any pruner implements."""
        for pruner in self.pruners:
            for key in self.hooks.keys():
                if hasattr(pruner, key):
                    self.register_hook(key, getattr(pruner, key))

    def _generate_pruners(self):
        """Instantiate one pruner per config entry, resolving target modules."""
        if ((self.conf.framework == 'pytorch') and isinstance(self.model.model, torch.nn.Module)):
            from .pruner.model_slim.pattern_analyzer import SelfMHASearcher
            for info in self.pruners_info:
                if ('mha' in info['pattern']):
                    # Multi-head-attention pruning: locate MHA modules first.
                    pa_obj = SelfMHASearcher(self.model.model)
                    (modules, _) = pa_obj.search(split_qkv_ffn=False)
                    modules = pa_obj.obtain_mha_module(modules)
                    modules = pa_obj.from_layer_name_to_object(modules)
                    if (len(modules) == 0):
                        logger.warning('one pruner hooks no mha modules, please have a check')
                    self.pruners.append(get_pruner(info, modules))
                else:
                    modules = parse_to_prune(info, self.model.model)
                    if (modules == {}):
                        logger.warning('one pruner hooks no layers, please have a check')
                    self.pruners.append(get_pruner(info, modules))
                    # Record which layers each pruner ended up hooking.
                    info['modules'] = [key for key in modules.keys()]
                    info['len_of_modules'] = len(info['modules'])
                    logger.info(info)
        elif ((self.conf.framework == 'keras') and isinstance(self.model.model, tf.keras.Model)):
            from tensorflow.python.ops.numpy_ops import np_config
            # Keras pruners rely on NumPy-style tensor behavior.
            np_config.enable_numpy_behavior()
            for info in self.pruners_info:
                modules = parse_to_prune_tf(info, self.model.model)
                if (modules == {}):
                    logger.warning('one pruner hooks no layers, please have a check')
                self.pruners.append(get_pruner(info, modules, 'keras'))
                info['modules'] = [key for key in modules.keys()]
                info['len_of_modules'] = len(info['modules'])
                logger.info(info)
        else:
            assert False, 'now only support {}'.format(PRUNERS.keys())
def witness_set_of_hypersurface(nvar, hpol, precision='d'):
    """Compute a witness set of the hypersurface ``hpol`` in ``nvar`` variables.

    ``precision`` selects the arithmetic: 'd' (double), 'dd' (double double),
    or 'qd' (quad double).  Returns ``(system, solutions)``, or ``None``
    after printing a message when the precision is not recognized.
    """
    tags = {'d': 'standard', 'dd': 'dobldobl', 'qd': 'quaddobl'}
    if precision not in tags:
        print('wrong argument for precision')
        return None
    tag = tags[precision]
    # Resolve the precision-specific phcpy entry points by name.
    from phcpy import phcpy2c3, interface
    witset = getattr(phcpy2c3, 'py2c_{0}_witset_of_hypersurface'.format(tag))
    witset(nvar, len(hpol), hpol)
    load_system = getattr(interface, 'load_{0}_system'.format(tag))
    load_solutions = getattr(interface, 'load_{0}_solutions'.format(tag))
    return (load_system(), load_solutions())
class MeshRenderer(nn.Module):
    """Differentiable mesh rasterizer (nvdiffrast) producing mask, depth and
    optional per-pixel interpolated feature maps."""

    def __init__(self, rasterize_fov, znear=0.1, zfar=10, rasterize_size=224):
        super(MeshRenderer, self).__init__()
        # Half-extent of the near plane for the given field of view.
        x = (np.tan(np.deg2rad((rasterize_fov * 0.5))) * znear)
        # NDC projection with the y/z axes flipped to match the rasterizer.
        self.ndc_proj = torch.tensor(ndc_projection(x=x, n=znear, f=zfar)).matmul(torch.diag(torch.tensor([1.0, (- 1), (- 1), 1])))
        self.rasterize_size = rasterize_size
        # GL context is created lazily on the first forward (needs a device).
        self.glctx = None

    def forward(self, vertex, tri, feat=None):
        """Rasterize ``tri`` over ``vertex`` and return (mask, depth, image).

        ``vertex`` is (B, N, 3) or (B, N, 4); ``tri`` is a shared (F, 3) index
        tensor or a per-batch list of index tensors; ``feat`` optionally holds
        per-vertex features to interpolate.  ``image`` is None without ``feat``.
        """
        device = vertex.device
        rsize = int(self.rasterize_size)
        ndc_proj = self.ndc_proj.to(device)
        # Promote to homogeneous coordinates when only xyz is given.
        if (vertex.shape[(- 1)] == 3):
            vertex = torch.cat([vertex, torch.ones([*vertex.shape[:2], 1]).to(device)], dim=(- 1))
        vertex[(..., 1)] = (- vertex[(..., 1)])
        # Project to clip space.  The original line read ``(vertex ndc_proj.t())``,
        # a syntax error; the matrix product was clearly intended.
        vertex_ndc = (vertex @ ndc_proj.t())
        if (self.glctx is None):
            self.glctx = dr.RasterizeGLContext(device=device)
            print(('create glctx on device cuda:%d' % device.index))
        ranges = None
        if (isinstance(tri, List) or (len(tri.shape) == 3)):
            # Heterogeneous meshes: flatten into one buffer with per-mesh ranges.
            vum = vertex_ndc.shape[1]
            fnum = torch.tensor([f.shape[0] for f in tri]).unsqueeze(1).to(device)
            fstartidx = (torch.cumsum(fnum, dim=0) - fnum)
            ranges = torch.cat([fstartidx, fnum], axis=1).type(torch.int32).cpu()
            # Offset each mesh's indices into the concatenated vertex buffer.
            for i in range(tri.shape[0]):
                tri[i] = (tri[i] + (i * vum))
            vertex_ndc = torch.cat(vertex_ndc, dim=0)
            tri = torch.cat(tri, dim=0)
        tri = tri.type(torch.int32).contiguous()
        (rast_out, _) = dr.rasterize(self.glctx, vertex_ndc.contiguous(), tri, resolution=[rsize, rsize], ranges=ranges)
        # Interpolate the (flipped) z coordinate to get per-pixel depth.
        (depth, _) = dr.interpolate(vertex.reshape([(- 1), 4])[(..., 2)].unsqueeze(1).contiguous(), rast_out, tri)
        depth = depth.permute(0, 3, 1, 2)
        # Coverage mask from the triangle-id channel; zero out background depth.
        mask = (rast_out[(..., 3)] > 0).float().unsqueeze(1)
        depth = (mask * depth)
        image = None
        if (feat is not None):
            (image, _) = dr.interpolate(feat, rast_out, tri)
            image = image.permute(0, 3, 1, 2)
            image = (mask * image)
        return (mask, depth, image)
def test_func(x):
print('Running test_func')
p = mp.current_process()
y = ((x * x) if (p.runner is None) else x)
print(y) |
def blh2xyz(latitude, longitude, height):
    """Convert geodetic coordinates (degrees, metres) to ECEF (X, Y, Z).

    Relies on module-level ellipsoid semi-axes ``A`` (major) and ``B`` (minor).
    """
    lat_rad = math.radians(latitude)
    lon_rad = math.radians(longitude)
    # First eccentricity and prime-vertical radius of curvature.
    ecc = math.sqrt(1 - (B ** 2) / (A ** 2))
    prime_vertical = A / math.sqrt(1 - (ecc ** 2) * (math.sin(lat_rad) ** 2))
    cos_lat = math.cos(lat_rad)
    x = (prime_vertical + height) * cos_lat * math.cos(lon_rad)
    y = (prime_vertical + height) * cos_lat * math.sin(lon_rad)
    z = (prime_vertical * (1 - (ecc ** 2)) + height) * math.sin(lat_rad)
    return (x, y, z)
def interpolate_img(img, coords, order=3, mode='nearest', cval=0.0):
    """Resample ``img`` at fractional coordinates via spline interpolation.

    Thin wrapper over ``map_coordinates`` (presumably
    ``scipy.ndimage.map_coordinates`` -- confirm against the file's imports)
    that changes the default boundary ``mode`` to 'nearest'.

    :param img: input array.
    :param coords: coordinate array of shape (img.ndim, ...) per map_coordinates.
    :param order: spline order (3 = cubic).
    :param mode: out-of-bounds handling strategy.
    :param cval: fill value used when ``mode == 'constant'``.
    :return: interpolated values shaped like ``coords`` minus its first axis.
    """
    return map_coordinates(img, coords, order=order, mode=mode, cval=cval)
class ResNet(nn.Module):
    """torchvision ResNet backbone wrapper for person re-identification.

    Two operating modes:
      * ``FCN=True``: part-based branch -- the stride of layer4 is removed,
        the feature map is softly partitioned into ``num_parts`` regions via
        a learned 1x1 mask, and each part (plus a global feature) feeds its
        own (dropout, linear, BN, AngleLinear) head.
      * ``FCN=False``: standard global-pooled embedding with optional
        bottleneck (``num_features``), normalization, dropout and classifier.
    """
    # Supported depths -> torchvision constructors.
    __factory = {18: torchvision.models.resnet18, 34: torchvision.models.resnet34, 50: torchvision.models.resnet50, 101: torchvision.models.resnet101, 152: torchvision.models.resnet152}

    def __init__(self, depth, pretrained=True, cut_at_pooling=False, num_features=0, norm=False, dropout=0, num_classes=0, FCN=False, T=1, dim=256, num_parts=6):
        super(ResNet, self).__init__()
        self.depth = depth
        self.pretrained = pretrained
        self.cut_at_pooling = cut_at_pooling
        self.FCN = FCN
        self.T = T
        self.reduce_dim = dim
        self.num_parts = num_parts
        if (depth not in ResNet.__factory):
            raise KeyError('Unsupported depth:', depth)
        self.base = ResNet.__factory[depth](pretrained=pretrained)
        if self.FCN:
            # Remove layer4's stride so the final feature map stays large.
            self.base.layer4[0].conv2.stride = (1, 1)
            self.base.layer4[0].downsample[0].stride = (1, 1)
            self.num_features = num_features
            self.num_classes = num_classes
            self.dropout = dropout
            # One head per part plus one for the global feature.
            self.instance = nn.ModuleList()
            for i in range((self.num_parts + 1)):
                local_conv = nn.Linear(2048, self.num_features, bias=False)
                init.kaiming_normal_(local_conv.weight, mode='fan_out')
                local_bn = nn.BatchNorm1d(self.num_features)
                init.constant_(local_bn.weight, 1)
                init.constant_(local_bn.bias, 0)
                fc = AngleLinear(self.num_features, self.num_classes)
                self.instance.append(nn.Sequential(nn.Dropout(self.dropout), local_conv, local_bn, fc))
            self.drop = nn.Dropout(self.dropout)
            # 1x1 conv producing per-part soft assignment scores.
            self.local_mask = nn.Conv2d(self.reduce_dim, self.num_parts, kernel_size=1, bias=True)
            init.kaiming_normal_(self.local_mask.weight, mode='fan_out')
            init.constant_(self.local_mask.bias, 0)
        elif (not self.cut_at_pooling):
            self.num_features = num_features
            self.norm = norm
            self.dropout = dropout
            self.has_embedding = (num_features > 0)
            self.num_classes = num_classes
            out_planes = self.base.fc.in_features
            if self.has_embedding:
                # Bottleneck projecting pooled features to num_features dims.
                self.feat = nn.Linear(out_planes, self.num_features, bias=False)
                self.feat_bn = nn.BatchNorm1d(self.num_features)
                init.kaiming_normal_(self.feat.weight, mode='fan_out')
                init.constant_(self.feat_bn.weight, 1)
                init.constant_(self.feat_bn.bias, 0)
            else:
                self.num_features = out_planes
            if (self.dropout > 0):
                self.drop = nn.Dropout(self.dropout)
            if (self.num_classes > 0):
                self.classifier = nn.Linear(self.num_features, self.num_classes)
                init.normal_(self.classifier.weight, std=0.001)
                init.constant_(self.classifier.bias, 0)
        if (not self.pretrained):
            self.reset_params()

    def forward(self, inputs, part_labels=None):
        """Run the backbone (up to avgpool) and the mode-specific head."""
        x = inputs
        if (part_labels is None):
            # Default labels 0..5 over 24 rows (4 rows per part).
            # NOTE(review): part_labels is built (and forced onto CUDA) but
            # never consumed below -- confirm whether it is still needed.
            tmp = torch.FloatTensor(range(1, 25))
            tmp = ((tmp - 0.1) / 4).int()
            part_labels = tmp.unsqueeze(0).expand(inputs.size(0), tmp.size(0))
            part_labels = torch.autograd.Variable(part_labels.cuda())
        for (name, module) in self.base._modules.items():
            # Stop before the backbone's own avgpool/fc.
            if (name == 'avgpool'):
                break
            x = module(x)
        if self.cut_at_pooling:
            return x
        if self.FCN:
            T = self.T
            y = self.drop(x).unsqueeze(1)
            # Channel-pool 2048 -> reduce_dim while collapsing the 8 columns.
            stride = (2048 // self.reduce_dim)
            y = F.avg_pool3d(y, kernel_size=(stride, 1, 8), stride=(stride, 1, 8)).squeeze(1)
            # assumes the final feature map is 24x8 (e.g. 384x128 input) -- TODO confirm
            x_global = F.avg_pool2d(x, (24, 8))
            local_score = self.local_mask(y)
            local_score = local_score.squeeze(3)
            # Soft part assignment over height (detached from the gradient).
            score = F.softmax((1 * local_score.detach()), 1)
            pscore = score.sum(2)
            score = (score / pscore.unsqueeze(2).expand_as(score))
            (bb, cc, hh, ww) = x.size()
            # Score-weighted average of backbone features per part.
            feat = (x.unsqueeze(2).expand(bb, cc, self.num_parts, hh, ww) * score.unsqueeze(1).unsqueeze(4).expand(bb, cc, self.num_parts, hh, ww))
            feat = feat.sum(4).sum(3).unsqueeze(3)
            x = feat
            out0 = x.view(x.size(0), (- 1))
            # NOTE(review): the flattened view above is immediately overwritten
            # by the L2-normalized tensor.
            out0 = (x / torch.clamp(x.norm(2, 1).unsqueeze(1).expand_as(x), min=1e-12))
            # Per-part tensors plus the global feature, one head each.
            x_list = list(x.chunk(x.size(2), 2))
            x_list.append(x_global)
            c = []
            for (tensor, branch) in zip(x_list, self.instance):
                tensor = tensor.contiguous().view(tensor.size(0), (- 1))
                c.append(branch(tensor))
            ps = local_score
            return (out0, c, ps, pscore)
        x = F.avg_pool2d(x, x.size()[2:])
        x = x.view(x.size(0), (- 1))
        out1 = x
        # NOTE(review): out1 is overwritten here and never used afterwards.
        out1 = (x / x.norm(2, 1).unsqueeze(1).expand_as(x))
        if self.has_embedding:
            x = self.feat(x)
            x = self.feat_bn(x)
        # L2-normalized embedding returned alongside the (optional) logits.
        out2 = (x / x.norm(2, 1).unsqueeze(1).expand_as(x))
        if self.norm:
            x = (x / x.norm(2, 1).unsqueeze(1).expand_as(x))
        if (self.dropout > 0):
            x = self.drop(x)
        if (self.num_classes > 0):
            x = self.classifier(x)
        return (out2, x)

    def reset_params(self):
        """He/constant/normal initialization for conv, BN and linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if (m.bias is not None):
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if (m.bias is not None):
                    init.constant_(m.bias, 0)
def load_results(sent_lst, tokenizer):
    """Collect infilling predictions for each (obs1, obs2) pair in ``sent_lst``.

    For each example it locates the generated-output JSON file via the
    module-level ``INPUT_PATH``/``SPLIT`` glob patterns, strips the obs1
    prefix and obs2 suffix tokens from each generated sequence, and keeps the
    middle (infilled) text.  Results are also dumped to
    'full_diff_test_outputs_aug.json'.
    """
    full_result_dict = {}
    failed_instances = []  # NOTE(review): never populated -- kept for parity.
    found_idx = []
    sent_lst_lst = list(sent_lst.items())
    for (idx, (key, val)) in enumerate(sent_lst_lst):
        # Batched files may already have filled indices ahead of the loop.
        if (idx in full_result_dict.keys()):
            continue
        word_lst1 = [x.text for x in tokenizer(val['obs1'])]
        word_lst2 = [x.text for x in tokenizer(val['obs2'])]
        target_file = f'{INPUT_PATH}_*_{SPLIT}_{idx}.json'
        file_lst = glob.glob(target_file)
        try:
            assert (len(file_lst) == 1)
        except:
            # Fall back to the batched naming scheme (no split component).
            print('the file must have existed in a batched version')
            target_file = f'{INPUT_PATH}_*_{idx}.json'
            file_lst = glob.glob(target_file)
            print(file_lst, target_file)
        print(file_lst)
        target_file = file_lst[0]
        if ('x128' in target_file):
            # One-example files: each line is a JSON list; take the first sample.
            infill_lst = []
            with open(target_file, 'r') as f:
                for line in f:
                    example = json.loads(line)[0]
                    # Drop the obs1 prefix and obs2 suffix tokens.
                    infill_ = example.split()[len(word_lst1):(- len(word_lst2))]
                    infill_ = ' '.join(infill_)
                    infill_lst.append(infill_)
            result_dict = {'pred_samples': infill_lst, 'sample': None, 'obs1': val['obs1'], 'obs2': val['obs2']}
            full_result_dict[idx] = result_dict
        else:
            # Batched file: each line is "{(index, template): [samples, ...]}".
            with open(target_file, 'r') as f:
                for line in f:
                    example = ast.literal_eval(line.strip())
                    (index, template) = list(example.keys())[0]
                    print(index, idx)
                    # Skip lines for examples already processed earlier.
                    if (int(index) < int(idx)):
                        continue
                    assert (int(index) == int(idx))
                    found_idx.append(idx)
                    example = list(example.values())[0]
                    # NOTE(review): re-binds the outer loop's ``val`` -- the
                    # per-line example, not the enumerate target.
                    (kk, val) = sent_lst_lst[idx]
                    word_lst1 = [x.text for x in tokenizer(val['obs1'])]
                    word_lst2 = [x.text for x in tokenizer(val['obs2'])]
                    infill_lst = [' '.join(xx.split()[len(word_lst1):(- len(word_lst2))]) for xx in example]
                    result_dict = {'pred_samples': infill_lst, 'sample': None, 'obs1': val['obs1'], 'obs2': val['obs2']}
                    full_result_dict[idx] = result_dict
                    # Advance so consecutive lines map to consecutive examples.
                    idx += 1
    with open('full_diff_test_outputs_aug.json', 'w') as f:
        json.dump(full_result_dict, f)
    return full_result_dict
def mood(sentence, **kwargs):
    """Classify the grammatical mood of ``sentence``.

    Accepts a pre-parsed sentence object or a raw string (parsed with
    pattern.en when available).  Moods are checked from most to least
    specific, falling back to INDICATIVE.
    """
    if isinstance(sentence, str):
        try:
            from pattern.en import parse, Sentence
            sentence = Sentence(parse(sentence))
        except ImportError:
            # Without pattern.en the raw string is passed through unchanged.
            pass
    # First matching predicate wins; order encodes specificity.
    for predicate, label in ((imperative, IMPERATIVE), (conditional, CONDITIONAL), (subjunctive, SUBJUNCTIVE)):
        if predicate(sentence, **kwargs):
            return label
    return INDICATIVE
class Attribute(JsonSerializer):
    """Typed name/value pair serialized through the ``JsonSerializer`` base."""

    def __init__(self, name: str, attribute_type: str, value: Any):
        super().__init__()
        # Store the descriptor triple verbatim.
        self.name, self.attribute_type, self.value = name, attribute_type, value
class Generic_MIL_Dataset(Generic_WSI_Classification_Dataset):
    """WSI classification dataset serving per-slide bag features either from
    serialized ``.pt`` tensors or from ``.h5`` files (which also carry patch
    coordinates)."""

    def __init__(self, data_dir, **kwargs):
        super().__init__(**kwargs)
        # data_dir may be a single path or a {source_name: path} mapping.
        self.data_dir = data_dir
        self.use_h5 = False

    def load_from_h5(self, toggle):
        # Switch between .pt loading (False) and .h5 loading (True).
        self.use_h5 = toggle

    def __getitem__(self, idx):
        import h5py
        slide_id = self.slide_data['slide'][idx]
        label = self.slide_data['label'][idx]
        if (type(self.data_dir) == dict):
            # Multi-source datasets route each slide to its own directory.
            source = self.slide_data['source'][idx]
            data_dir = self.data_dir[source]
        else:
            data_dir = self.data_dir
        if (not self.use_h5):
            if self.data_dir:
                full_path = os.path.join(data_dir, f'{slide_id}.pt')
                features = torch.load(full_path)
                # NOTE(review): self.lasthalf is not set in this class --
                # presumably the parent defines it; when true, only the second
                # 1024-dim chunk of each feature vector is kept.
                if self.lasthalf:
                    features = torch.split(features, 1024, dim=1)[1]
                return (features, label)
            else:
                # Without a data_dir only identifiers are returned.
                return (slide_id, label)
        else:
            full_path = os.path.join(data_dir, 'h5_files', f'{slide_id}.h5')
            with h5py.File(full_path, 'r') as hdf5_file:
                features = hdf5_file['features'][:]
                coords = hdf5_file['coords'][:]
            if self.lasthalf:
                # NOTE(review): ``features`` is a NumPy array here and has no
                # ``.split`` method -- this branch likely raises
                # AttributeError; probably meant torch.split on the converted
                # tensor, mirroring the .pt branch.  Confirm before relying on it.
                features = torch.from_numpy(features.split(2)[1])
            else:
                features = torch.from_numpy(features)
            return (features, label, coords)
class PowerTransformerComponent(Rescaling, AutotabularPreprocessingAlgorithm):
    """Rescaling component wrapping sklearn's ``PowerTransformer``, fitted
    in place (``copy=False``)."""

    def __init__(self, random_state: Optional[np.random.RandomState]=None):
        # NOTE(review): random_state is accepted but never stored, and the
        # parent __init__ is not invoked -- confirm this matches the file's
        # other preprocessing components.
        from sklearn.preprocessing import PowerTransformer
        self.preprocessor = PowerTransformer(copy=False)

    def get_properties(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> Dict[(str, Optional[Union[(str, int, bool, Tuple)]])]:
        # Static capability metadata describing what data this component handles.
        # NOTE(review): defined without ``self``/``@staticmethod`` -- only safe
        # to call on the class object itself (cls.get_properties()); confirm
        # callers never invoke it on an instance.
        return {'shortname': 'PowerTransformer', 'name': 'PowerTransformer', 'handles_missing_values': False, 'handles_nominal_values': False, 'handles_numerical_features': True, 'prefers_data_scaled': False, 'prefers_data_normalized': False, 'handles_regression': True, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': True, 'handles_multioutput': True, 'is_deterministic': True, 'handles_sparse': False, 'handles_dense': True, 'input': (DENSE, UNSIGNED_DATA), 'output': (INPUT,), 'preferred_dtype': None}
class Timer():
    """Accumulates wall-clock time per named key and reports per-call averages."""

    def __init__(self, keys):
        # Keys are fixed up front; reset() builds the bookkeeping dicts.
        self.keys = keys
        self.n = {}
        self.running_time = {}
        self.total_time = {}
        self.reset()

    def start(self, key):
        """Mark ``key`` as running from now."""
        self.running_time[key] = time.time()
        return self

    def stop(self, key):
        """Stop ``key`` and add the elapsed interval to its running total.

        Fixed: elapsed time is now accumulated with ``+=``.  The original
        overwrote ``total_time[key]`` with only the last interval while still
        incrementing ``n[key]``, so ``value()`` under-reported the average
        after the first start/stop cycle.
        """
        self.total_time[key] += (time.time() - self.running_time[key])
        self.n[key] += 1
        self.running_time[key] = None
        return self

    def reset(self):
        """Zero all counters and clear running marks."""
        for k in self.keys:
            self.total_time[k] = 0
            self.running_time[k] = None
            self.n[k] = 0
        return self

    def value(self):
        """Return {key: average seconds per start/stop cycle}; raises if a key was never stopped."""
        vals = {}
        for k in self.keys:
            if (self.n[k] == 0):
                raise ValueError('Trying to divide by zero in TimeMeter')
            else:
                vals[k] = (self.total_time[k] / self.n[k])
        return vals
def _loads(s, *, fix_imports=True, encoding='ASCII', errors='strict', buffers=None):
if isinstance(s, str):
raise TypeError("Can't load pickle from unicode string")
file = io.BytesIO(s)
return _Unpickler(file, fix_imports=fix_imports, buffers=buffers, encoding=encoding, errors=errors).load() |
def main():
    """Generate one SDXL image per prompt listed in a JSON file and save it."""
    parser = argparse.ArgumentParser(description='Diffuser Pipeline for processing images with prompts.')
    parser.add_argument('--prompt_path', type=str, default='dataset/animal.json', help='Path to the JSON file containing prompts.')
    parser.add_argument('--save_path', type=str, default='train_set/animal/', help='Path to save processed images.')
    args = parser.parse_args()
    # Load SDXL base in fp16 and move it onto the GPU.
    pipe = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0', torch_dtype=torch.float16, use_safetensors=True, variant='fp16')
    pipe.to('cuda')
    with open(args.prompt_path, 'r') as f:
        data = json.load(f)
    for info in data:
        # Output filename is the second path component of the record's image field.
        filename = info['image'].split('/')[1]
        generated = pipe(prompt=info['prompt']).images[0]
        generated.save(args.save_path + filename)
class Task(NamedTuple):
    """Immutable specification of one video-processing job."""
    # Identifier / basename of the video.
    video_name: str
    # Path to the source video file.
    video_path: str
    # Destination path for the processed output.
    out_path: str
    # Frame range to process — presumably [min_frame, max_frame]; confirm inclusivity.
    min_frame: int
    max_frame: int
    # Desired output frame rate and number of frames.
    target_fps: float
    target_num_frames: int
    # Source dimensions; max_height presumably caps downscaling — TODO confirm.
    width: int
    height: int
    max_height: int
def evaluate(args):
    """Evaluate a trained SNLI model on a pickled test dataset and print accuracy."""
    with open(args.data, 'rb') as f:
        test_dataset: SNLIDataset = pickle.load(f)
    word_vocab = test_dataset.word_vocab
    label_vocab = test_dataset.label_vocab
    model = SNLIModel(num_classes=len(label_vocab), num_words=len(word_vocab),
                      word_dim=args.word_dim, hidden_dim=args.hidden_dim,
                      clf_hidden_dim=args.clf_hidden_dim,
                      clf_num_layers=args.clf_num_layers,
                      use_leaf_rnn=args.leaf_rnn,
                      intra_attention=args.intra_attention,
                      use_batchnorm=args.batchnorm,
                      dropout_prob=args.dropout,
                      bidirectional=args.bidirectional)
    # Report parameter counts with and without the embedding table.
    num_params = sum(np.prod(p.size()) for p in model.parameters())
    num_embedding_params = np.prod(model.word_embedding.weight.size())
    print(f'# of parameters: {num_params}')
    print(f'# of word embedding parameters: {num_embedding_params}')
    print(f'# of parameters (excluding word embeddings): {num_params - num_embedding_params}')
    model.load_state_dict(torch.load(args.model, map_location='cpu'))
    model.eval()
    model.to(args.device)
    torch.set_grad_enabled(False)
    test_data_loader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, collate_fn=test_dataset.collate)
    num_correct = 0
    num_data = len(test_dataset)
    for batch in test_data_loader:
        # Move every tensor of the batch onto the evaluation device.
        pre = batch['pre'].to(args.device)
        hyp = batch['hyp'].to(args.device)
        pre_length = batch['pre_length'].to(args.device)
        hyp_length = batch['hyp_length'].to(args.device)
        label = batch['label'].to(args.device)
        logits = model(pre=pre, pre_length=pre_length, hyp=hyp, hyp_length=hyp_length)
        predictions = logits.max(1)[1]
        num_correct += torch.eq(label, predictions).long().sum().item()
    print(f'# data: {num_data}')
    print(f'# correct: {num_correct}')
    print(f'Accuracy: {num_correct / num_data:.4f}')
# NOTE(review): this call previously discarded its return value, so the model
# was never registered; restored as a decorator — confirm `_model` is the
# fairseq register_model helper.
@_model('s2t_transformer_w2v2')
class S2TTransformerModelW2V2(FairseqEncoderDecoderModel):
    """Speech-to-text Transformer whose encoder consumes wav2vec 2.0 features."""

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser.

        BUG FIX: declared @staticmethod (no `self` parameter existed), matching
        the fairseq model API.
        """
        parser.add_argument('--conv-kernel-sizes', type=str, metavar='N', help='kernel sizes of Conv1d subsampling layers')
        parser.add_argument('--conv-channels', type=int, metavar='N', help='# of channels in Conv1d subsampling layers')
        parser.add_argument('--activation-fn', type=str, default='relu', choices=utils.get_available_activation_fns(), help='activation function to use')
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
        parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
        parser.add_argument('--translation-encoder-layers', type=int, metavar='N', help='num translation encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads')
        parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
        parser.add_argument('--layernorm-embedding', action='store_true', help='add layernorm to embedding')
        parser.add_argument('--no-scale-embedding', action='store_true', help='if True, dont scale embeddings')
        parser.add_argument('--load-pretrained-mt-encoder-decoder-from', type=str, metavar='STR', help='model to take mt encoder/decoder weights from (for initialization)')
        parser.add_argument('--encoder-freezing-updates', type=int, metavar='N', help='freeze encoder for first N updates')
        parser.add_argument('--mixup', action='store_true', help='if mix input of translation encoder')
        parser.add_argument('--mixup-arguments', type=str, metavar='STR', help='arguments for adjusting the probability p of mixup')
        parser.add_argument('--w2v2-model-path', type=str, metavar='STR', help='path/to/wav2vec/model')
        parser.add_argument('--freeze-w2v', action='store_true', help='if we want to freeze the w2v features')

    @classmethod
    def build_encoder(cls, args, task=None, embed_tokens=None):
        # BUG FIX: @classmethod added — build_model invokes this via `cls.`.
        return S2TTransformerEncoderW2V2(args, task.target_dictionary, embed_tokens)

    @classmethod
    def build_decoder(cls, args, task, embed_tokens):
        # BUG FIX: @classmethod added — build_model invokes this via `cls.`.
        return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens)

    @classmethod
    def build_model(cls, args, task):
        """Build the model, sharing decoder embeddings with the encoder and
        optionally initialising from a pretrained MT checkpoint."""
        base_architecture(args)

        def build_embedding(dictionary, embed_dim):
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            return Embedding(num_embeddings, embed_dim, padding_idx)

        decoder_embed_tokens = build_embedding(task.target_dictionary, args.decoder_embed_dim)
        # Encoder and decoder share one embedding table.
        encoder_embed_tokens = decoder_embed_tokens
        encoder = cls.build_encoder(args, task, encoder_embed_tokens)
        decoder = cls.build_decoder(args, task, decoder_embed_tokens)
        mt_pretraining_path = getattr(args, 'load_pretrained_mt_encoder_decoder_from', None)
        if (mt_pretraining_path is not None) and Path(mt_pretraining_path).exists():
            # Split the MT checkpoint into encoder/decoder sub-state-dicts by prefix.
            mt_state = checkpoint_utils.load_checkpoint_to_cpu(mt_pretraining_path)
            mt_encoder_state_dict = OrderedDict()
            mt_decoder_state_dict = OrderedDict()
            for key in mt_state['model'].keys():
                if key.startswith('encoder'):
                    subkey = key[len('encoder') + 1:]
                    mt_encoder_state_dict[subkey] = mt_state['model'][key]
                if key.startswith('decoder'):
                    subkey = key[len('decoder') + 1:]
                    mt_decoder_state_dict[subkey] = mt_state['model'][key]
            # strict=False: speech-specific modules have no MT counterpart.
            encoder.load_state_dict(mt_encoder_state_dict, strict=False)
            decoder.load_state_dict(mt_decoder_state_dict, strict=False)
            logger.info(f'loaded pretrained mt encoder and decoder from: {mt_pretraining_path}')
        return cls(encoder, decoder)

    def get_normalized_probs(self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None):
        """Return (log-)probabilities from decoder output, marked batch-first."""
        lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
        lprobs.batch_first = True
        return lprobs

    def forward(self, audio, audio_lengths, source, source_lengths, prev_output_tokens, align_pad, align_lengths):
        """Encode audio+source, then decode conditioned on the encoder output."""
        encoder_out = self.encoder(audio, audio_lengths, source, source_lengths, align_pad, align_lengths)
        decoder_out = self.decoder(prev_output_tokens=prev_output_tokens, encoder_out=encoder_out)
        return decoder_out
class SegNet(nn.Module):
    """SegNet encoder-decoder for pixel-wise classification.

    VGG16-style encoder (13 conv+BN layers across 5 stages, each followed by
    a max-pool that records its indices) mirrored by a decoder that unpools
    with those saved indices and convolves back down to ``label_nbr`` output
    channels. Returns raw per-class logits (no softmax).
    """
    def __init__(self, input_nbr=3, label_nbr=22):
        super(SegNet, self).__init__()
        batchNorm_momentum = 0.1
        # --- Encoder stage 1: input_nbr -> 64 ---
        self.conv11 = nn.Conv2d(input_nbr, 64, kernel_size=3, padding=1)
        self.bn11 = nn.BatchNorm2d(64, momentum=batchNorm_momentum)
        self.conv12 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn12 = nn.BatchNorm2d(64, momentum=batchNorm_momentum)
        # --- Encoder stage 2: 64 -> 128 ---
        self.conv21 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.bn21 = nn.BatchNorm2d(128, momentum=batchNorm_momentum)
        self.conv22 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn22 = nn.BatchNorm2d(128, momentum=batchNorm_momentum)
        # --- Encoder stage 3: 128 -> 256 ---
        self.conv31 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.bn31 = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
        self.conv32 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn32 = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
        self.conv33 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn33 = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
        # --- Encoder stage 4: 256 -> 512 ---
        self.conv41 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.bn41 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv42 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn42 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv43 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn43 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        # --- Encoder stage 5: 512 -> 512 ---
        self.conv51 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn51 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv52 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn52 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv53 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn53 = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        # --- Decoder stage 5 (mirror of encoder stage 5) ---
        self.conv53d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn53d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv52d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn52d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv51d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn51d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        # --- Decoder stage 4: 512 -> 256 ---
        self.conv43d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn43d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv42d = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.bn42d = nn.BatchNorm2d(512, momentum=batchNorm_momentum)
        self.conv41d = nn.Conv2d(512, 256, kernel_size=3, padding=1)
        self.bn41d = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
        # --- Decoder stage 3: 256 -> 128 ---
        self.conv33d = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn33d = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
        self.conv32d = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn32d = nn.BatchNorm2d(256, momentum=batchNorm_momentum)
        self.conv31d = nn.Conv2d(256, 128, kernel_size=3, padding=1)
        self.bn31d = nn.BatchNorm2d(128, momentum=batchNorm_momentum)
        # --- Decoder stage 2: 128 -> 64 ---
        self.conv22d = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn22d = nn.BatchNorm2d(128, momentum=batchNorm_momentum)
        self.conv21d = nn.Conv2d(128, 64, kernel_size=3, padding=1)
        self.bn21d = nn.BatchNorm2d(64, momentum=batchNorm_momentum)
        # --- Decoder stage 1: 64 -> label_nbr (final classifier conv, no BN/ReLU) ---
        self.conv12d = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn12d = nn.BatchNorm2d(64, momentum=batchNorm_momentum)
        self.conv11d = nn.Conv2d(64, label_nbr, kernel_size=3, padding=1)
    def forward(self, x):
        """Run the encoder-decoder; returns (N, label_nbr, H, W) logits.

        Input spatial dims must be divisible by 32 (five 2x poolings).
        """
        # Encoder: conv-bn-relu pairs, pooling with saved indices per stage.
        x11 = F.relu(self.bn11(self.conv11(x)))
        x12 = F.relu(self.bn12(self.conv12(x11)))
        (x1p, id1) = F.max_pool2d(x12, kernel_size=2, stride=2, return_indices=True)
        x21 = F.relu(self.bn21(self.conv21(x1p)))
        x22 = F.relu(self.bn22(self.conv22(x21)))
        (x2p, id2) = F.max_pool2d(x22, kernel_size=2, stride=2, return_indices=True)
        x31 = F.relu(self.bn31(self.conv31(x2p)))
        x32 = F.relu(self.bn32(self.conv32(x31)))
        x33 = F.relu(self.bn33(self.conv33(x32)))
        (x3p, id3) = F.max_pool2d(x33, kernel_size=2, stride=2, return_indices=True)
        x41 = F.relu(self.bn41(self.conv41(x3p)))
        x42 = F.relu(self.bn42(self.conv42(x41)))
        x43 = F.relu(self.bn43(self.conv43(x42)))
        (x4p, id4) = F.max_pool2d(x43, kernel_size=2, stride=2, return_indices=True)
        x51 = F.relu(self.bn51(self.conv51(x4p)))
        x52 = F.relu(self.bn52(self.conv52(x51)))
        x53 = F.relu(self.bn53(self.conv53(x52)))
        (x5p, id5) = F.max_pool2d(x53, kernel_size=2, stride=2, return_indices=True)
        # Decoder: unpool with the matching encoder indices, then conv-bn-relu.
        x5d = F.max_unpool2d(x5p, id5, kernel_size=2, stride=2)
        x53d = F.relu(self.bn53d(self.conv53d(x5d)))
        x52d = F.relu(self.bn52d(self.conv52d(x53d)))
        x51d = F.relu(self.bn51d(self.conv51d(x52d)))
        x4d = F.max_unpool2d(x51d, id4, kernel_size=2, stride=2)
        x43d = F.relu(self.bn43d(self.conv43d(x4d)))
        x42d = F.relu(self.bn42d(self.conv42d(x43d)))
        x41d = F.relu(self.bn41d(self.conv41d(x42d)))
        x3d = F.max_unpool2d(x41d, id3, kernel_size=2, stride=2)
        x33d = F.relu(self.bn33d(self.conv33d(x3d)))
        x32d = F.relu(self.bn32d(self.conv32d(x33d)))
        x31d = F.relu(self.bn31d(self.conv31d(x32d)))
        x2d = F.max_unpool2d(x31d, id2, kernel_size=2, stride=2)
        x22d = F.relu(self.bn22d(self.conv22d(x2d)))
        x21d = F.relu(self.bn21d(self.conv21d(x22d)))
        x1d = F.max_unpool2d(x21d, id1, kernel_size=2, stride=2)
        x12d = F.relu(self.bn12d(self.conv12d(x1d)))
        # Final conv produces class scores; no activation (loss applies softmax).
        x11d = self.conv11d(x12d)
        return x11d
class DataCfg(TypedDict):
    """Configuration schema for a dataset split.

    NOTE(review): ``Optional[...]`` here means "value may be None", not
    "key may be omitted" — TypedDict still requires every key.
    """
    # Dataset type identifier and split mode.
    type: str
    mode: str
    # Sample/crop size — presumably (H, W); confirm ordering against consumers.
    size: Sequence[int]
    # Indices of supporting samples, if any.
    supp_idxs: Optional[Sequence[int]]
    use_depth: Optional[bool]
    use_hints: Optional[bool]
    use_benchmark: Optional[bool]
    use_strong_aug: Optional[bool]
    as_torch: Optional[bool]
    use_aug: Optional[bool]
    log_time: Optional[bool]
    # Nested per-split configurations (self-referential).
    train: Optional['DataCfg']
    val: Optional['DataCfg']
    test: Optional['DataCfg']
def load_local_or_remote_file(filepath, file_type=None):
    """Load a numpy / joblib / pickle object from a local or S3 path.

    Args:
        filepath: local path or S3 URI; resolved to a local file first.
        file_type: NUMPY, JOBLIB, PICKLE, or None to infer from the file
            extension ('.npy' -> NUMPY, anything else -> PICKLE).

    Returns:
        The deserialized object.
    """
    local_path = local_path_from_s3_or_local_path(filepath)
    if file_type is None:
        # Infer the format from the file extension.
        extension = local_path.split('.')[-1]
        file_type = NUMPY if extension == 'npy' else PICKLE
    # BUG FIX: an explicitly passed file_type used to be overwritten with
    # PICKLE, which made the JOBLIB branch below unreachable.
    if file_type == NUMPY:
        # BUG FIX: use context managers so file handles are always closed.
        with open(local_path, 'rb') as f:
            obj = np.load(f)
    elif file_type == JOBLIB:
        obj = joblib.load(local_path)
    else:
        with open(local_path, 'rb') as f:
            obj = pickle.load(f)
    print('loaded', local_path)
    return obj
class FuncNonContiguousArgs():
    """Stub with a ``forward`` whose tensor arguments are not contiguous in
    the signature — appears to be a fixture for argument-ordering logic
    (TODO confirm against callers). The forward itself is a no-op.
    """

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        # Intentionally does nothing.
        return None
def set_severity(args):
    """Resolve ``args.severity`` from the configured severity index.

    Returns True when a severity was assigned, False when the index is out
    of range for the requested KITTI task.
    """
    # Non-KITTI datasets index directly into the generic severities table.
    if args.dataset != 'kitti':
        args.severity = args.robustness_severities[args.severity_idx]
        return True
    # The initial KITTI task carries no corruption severity.
    if args.task == 'initial':
        args.severity = ''
        return True
    task_severities = globals.KITTI_SEVERITIES[args.task]
    if args.severity_idx < len(task_severities):
        args.severity = task_severities[args.severity_idx]
        return True
    return False
def inverse_dict(d):
    """Return the inverse mapping of *d* (values become keys, keys values).

    Values must be hashable and pairwise distinct, otherwise the mapping
    is not invertible.
    """
    # BUG FIX: the original asserted uniqueness of the *keys*, which is
    # always true for a dict; invertibility requires unique *values*.
    assert len(d) == len(set(d.values()))
    return {v: k for (k, v) in d.items()}
def get_model_list():
    """Extract the model-list section of the repo README as one string.

    Lines between the "Transformers currently provides ..." header and the
    "Want to contribute a new model?" footer are collected; wrapped
    continuation lines are merged back into their numbered entry.
    """
    _start_prompt = ' Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(os.path.join(REPO_PATH, 'README.md'), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Advance to the first line after the section header.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    result = []
    current_line = ''
    end_index = start_index
    while not lines[end_index].startswith(_end_prompt):
        entry = lines[end_index]
        if entry.startswith('1.'):
            # New numbered item: flush the previous accumulated entry.
            if len(current_line) > 1:
                result.append(current_line)
            current_line = entry
        elif len(entry) > 1:
            # Continuation line: drop the previous trailing newline and merge.
            current_line = f'{current_line[:-1]} {entry.lstrip()}'
        end_index += 1
    if len(current_line) > 1:
        result.append(current_line)
    return ''.join(result)
class EagerModeCtx():
    """Context manager that temporarily forces TensorFlow functions to run
    eagerly (or compiled), restoring the previous global mode on exit.
    """
    def __init__(self, eagerly: bool) -> None:
        assert isinstance(eagerly, bool), f'argument eagerly should not be {eagerly.__class__}. It must be a boolean.'
        # Desired eager-execution setting while inside the context.
        self.eagerly = eagerly
    def __enter__(self) -> None:
        # Remember the current global mode so __exit__ can restore it.
        self.old_mode = tf.config.functions_run_eagerly()
        tf.config.run_functions_eagerly(self.eagerly)
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        # Restore regardless of whether the body raised.
        tf.config.run_functions_eagerly(self.old_mode)
def parse_args():
    """Parse command-line options for the evaluation run and normalise them."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_name', default='gsm8k', type=str)
    parser.add_argument('--model_name_or_path', default='gpt-4', type=str)
    parser.add_argument('--prompt_type', default='pal', type=str)
    parser.add_argument('--split', default='test', type=str)
    parser.add_argument('--num_test_sample', default=-1, type=int)
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--start', default=0, type=int)
    parser.add_argument('--end', default=-1, type=int)
    parser.add_argument('--temperature', default=0, type=float)
    parser.add_argument('--n_sampling', default=1, type=int)
    parser.add_argument('--top_p', default=0.95, type=float)
    parser.add_argument('--shuffle', action='store_true')
    parser.add_argument('--use_train_prompt_format', action='store_true')
    parser.add_argument('--code_concat', action='store_true')
    parser.add_argument('--code_exec_warning', action='store_true')
    parser.add_argument('--max_func_call', default=4, type=int)
    parser.add_argument('--max_code_fix_retries', default=4, type=int)
    parser.add_argument('--verbose', action='store_true')
    args = parser.parse_args()
    # Greedy decoding (temperature 0) disables nucleus sampling.
    if args.temperature == 0:
        args.top_p = 1
    # Never spend more than half the function-call budget on code fixes.
    args.max_code_fix_retries = min(args.max_code_fix_retries, int(args.max_func_call / 2))
    # Cumulative-reasoning prompting needs a larger call budget.
    if args.prompt_type in ['cr']:
        args.max_func_call = max(args.max_func_call, 10)
    return args
class AlexDagRnn(nn.Module):
    """DAG-RNN over instruction sequences: token embeddings feed
    per-instruction encodings combined by an LSTM.

    NOTE(review): ``embed_instr`` builds ``srcs`` but never uses or returns
    it, and ``forward`` only initialises an empty dict — this class looks
    unfinished; confirm against the original repository before relying on it.
    """
    def __init__(self, embedding_size, hidden_size):
        super(AlexDagRnn, self).__init__()
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        # Vocabulary size comes from the shared symbol dictionary; the +1
        # presumably reserves an extra token id — TODO confirm.
        (sym_dict, _) = ut.get_sym_dict()
        self.max_tok = (len(sym_dict) + 1)
        embedding = nn.Embedding(self.max_tok, self.embedding_size)
        # Uniform init in [-0.5/dim, 0.5/dim] (word2vec-style range).
        initrange = (0.5 / self.embedding_size)
        embedding.weight.data.uniform_((- initrange), initrange)
        self.final_embeddings = embedding
        self.linear_token_1 = nn.Linear(self.embedding_size, self.hidden_size)
        self.linear_token_2 = nn.Linear((3 * self.hidden_size), self.hidden_size)
        self.lstm_ins = nn.LSTM(self.hidden_size, self.hidden_size)
    def embed_instr(self, instr):
        # Embed the opcode token.
        op = self.final_embeddings(instr.opcode)
        # Embed each source operand plus a zero padding vector.
        # NOTE(review): min(src, self.max_tok) can still index out of range,
        # since valid embedding indices are 0..max_tok-1 — confirm operand ids.
        srcs = ([self.final_embeddings(min(src, self.max_tok)) for src in instr.srcs] + [torch.zeros(self.embedding_size)])
    def forward(self, datum):
        # NOTE(review): appears incomplete — returns None implicitly.
        embeddings = {}
def get_tag_device(args: object) -> str:
    """Describe the CUDA devices selected by ``args.cudaid`` as a display string.

    Returns an empty string when CUDA is unavailable; falls back to a
    'lost' message when a configured id exceeds the detected GPU count.
    """
    tag = ''
    if torch.cuda.is_available():
        # One nvidia-smi output line per installed GPU.
        gpu_lines = subprocess.run(['nvidia-smi', '--list-gpus'], stdout=subprocess.PIPE).stdout.decode('utf-8').split('\n')
        try:
            selected = args.cudaid.split(',')
            tag = 'CUDA devices: \n'
            for cid in selected:
                tag += 'ID: {} - {} \n'.format(cid, gpu_lines[int(cid)])
        except IndexError:
            tag = 'CUDA devices: lost.'
    return tag
# NOTE(review): the bare `_model` expression above this function previously
# had no effect; restored as a decorator so the constructor registers —
# confirm `_model` is the timm-style register_model helper.
@_model
def ese_vovnet19b_dw(pretrained=False, **kwargs):
    """Build the VoVNet-19b variant with depthwise convs and eSE attention."""
    return _create_vovnet('ese_vovnet19b_dw', pretrained=pretrained, **kwargs)
def get_user_config(usr_config_path, default_config=None):
    """Load a user config file and merge it on top of a base configuration.

    Args:
        usr_config_path: path to the user's config file.
        default_config: base config to merge into; the framework default is
            used when None.

    Returns:
        The merged, conditionally-resolved configuration.
    """
    base = get_default_config() if default_config is None else default_config
    # User settings override the base in place.
    base.merge_from_other_cfg(get_config_from_file(usr_config_path))
    return get_conditional_config(base)
def calibrate(prompt_model: PromptForClassification, dataloader: PromptDataLoader) -> torch.Tensor:
    """Compute the mean pre-verbalizer logits over *dataloader*.

    Used for contextual calibration: the returned vector is the average
    logit vector the model produces on the calibration batches.
    """
    collected = []
    prompt_model.eval()
    for batch in tqdm(dataloader, desc='ContextCali'):
        batch = batch.to(prompt_model.device)
        # Detach immediately — no gradients are needed for calibration.
        collected.append(prompt_model.forward_without_verbalize(batch).detach())
    return torch.cat(collected, dim=0).mean(dim=0)
def create_lmsm_network(outfname_train, outfname_deploy, source_train, source_test, softmax_weight, use_OLE, batch_size_train, lambda_, num_classes=10):
    """Instantiate train/deploy prototxt files from the CIFAR network template.

    Fills the template placeholders (_OLE_COMMENT_, _BATCH_SIZE_TRAIN_, ...)
    for the training network and writes both files.

    NOTE(review): the deploy file is written without any substitution and
    `num_classes` is unused, matching the original behaviour — confirm intent.
    """
    # BUG FIX: the template was previously opened twice and neither file
    # handle was closed; read it once with a context manager.
    with open('model/cifar_network.prototxt', 'r') as f:
        template = f.read()
    template_deploy = template
    template_train = template
    # Enable (empty) or comment out ('#') the OLE-loss layers.
    if use_OLE:
        template_train = template_train.replace('_OLE_COMMENT_', '')
    else:
        template_train = template_train.replace('_OLE_COMMENT_', '#')
    template_train = template_train.replace('_BATCH_SIZE_TRAIN_', str(batch_size_train))
    template_train = template_train.replace('_SOURCE_TRAIN_', source_train)
    template_train = template_train.replace('_SOURCE_TEST_', source_test)
    template_train = template_train.replace('_SOFTMAX_WEIGHT_', str(softmax_weight))
    template_train = template_train.replace('_LAMBDA_', str(lambda_))
    write_to_file(outfname_train, template_train)
    write_to_file(outfname_deploy, template_deploy)
def pca_sub_df(df, task, ref_depth):
    """Filter *df* to rows at *ref_depth* involving the best-performing seed.

    The best seed is the one with the highest average accuracy (dims=0) at
    the reference layer, looked up from the pickled scores file.

    NOTE(review): the `task` parameter is unused and `probe_task` /
    `scores_path` come from the enclosing scope — possibly a bug; left
    unchanged to preserve behaviour.
    """
    # BUG FIX: the pickle file handle was previously opened and never closed.
    with open(scores_path, 'rb') as f:
        data_dict = pkl.load(f)
    accs = [get_acc(data_dict, probe_task, seed, layer=ref_depth, dims=0, run='average') for seed in REF_SEEDS]
    acc_dict = dict(zip(REF_SEEDS, accs))
    best_seed = max(acc_dict, key=acc_dict.get)
    # Keep rows where both layers are the reference depth and either seed
    # is the best one.
    sub_df = df[(((df.layer1 == ref_depth) & (df.layer2 == ref_depth)) & ((df.seed1 == best_seed) | (df.seed2 == best_seed)))]
    return sub_df
def register_images(output_map, grapher, prefix='train'):
    """Recursively log image tensors from *output_map* to *grapher*.

    Runs only on the distributed rank-0 process. Keys containing 'img' are
    treated as image batches; dict values are traversed recursively.
    """
    if (args.distributed_rank != 0) or (grapher is None):
        return
    for (k, v) in output_map.items():
        if isinstance(v, dict):
            register_images(v, grapher, prefix=prefix)
            # BUG FIX: previously fell through and, for keys containing
            # 'img', passed the dict itself to make_grid.
            continue
        # 'img' is a substring of 'imgs', so a single check covers both.
        if 'img' in k:
            # Strip the trailing '_<suffix>' segment from the key for display.
            key_name = '-'.join(k.split('_')[0:(- 1)])
            img = torchvision.utils.make_grid(v, normalize=True, scale_each=True)
            grapher.add_image('{}_{}'.format(prefix, key_name), img.detach(), global_step=0)
class BidirectionalLSTM(nn.Module):
    """One bidirectional LSTM layer followed by dropout and a linear
    projection from the concatenated directions down to ``nOut`` features.
    """

    def __init__(self, nIn, nHidden, nOut, dropout=0.2):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True, batch_first=True)
        # Both directions are concatenated, hence 2 * nHidden input features.
        self.embedding = nn.Linear(nHidden * 2, nOut)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, input, lengths=None):
        # `lengths` is accepted for interface compatibility but not used.
        recurrent, _ = self.rnn(input)
        return self.embedding(self.dropout(recurrent))
# NOTE(review): the bare `_module()` call previously discarded its result;
# restored as a decorator so the backbone registers — confirm `_module` is
# the registry's register_module helper.
@_module()
class ResNeXt2(ResNet2):
    """ResNeXt backbone variant with frozen weights and an adversarial patch.

    All parameters are frozen; a patch tensor is loaded from ``patch_path``.
    """
    # Depth -> (block type, stage block counts).
    arch_settings = {50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}

    def __init__(self, groups=1, base_width=4, patch_path=None, **kwargs):
        # groups/base_width must be set before the parent builds the layers.
        self.groups = groups
        self.base_width = base_width
        super(ResNeXt2, self).__init__(**kwargs)
        self.patch_path = patch_path
        # Freeze the whole backbone; only the patch is meant to adapt.
        for p in self.parameters():
            p.requires_grad = False
        self.patch = get_patch_tensor(self.patch_path, self.training)

    def make_res_layer(self, **kwargs):
        """Build a residual stage with the configured grouped convolutions."""
        return ResLayer(groups=self.groups, base_width=self.base_width, base_channels=self.base_channels, **kwargs)
def search_similar(s1, dlist=DATASET_IDS, MAX_SIMILARS=10):
    """Rank the datasets in *dlist* by similarity to *s1*, best first.

    Returns up to MAX_SIMILARS + 1 (dataset, score) pairs; the extra slot
    allows for *s1* matching itself. Zero/falsy scores are excluded.
    """
    # PERF FIX: compute each similarity once — the original evaluated
    # similarity(s1, s2) twice per candidate (filter and value).
    similars = {}
    for s2 in dlist:
        score = similarity(s1, s2)
        if score:
            similars[s2] = score
    top_match = Counter(similars).most_common(MAX_SIMILARS + 1)
    return top_match
def _ada_boost_hp_space(name_func, base_estimator=None, n_estimators=None, learning_rate=None, random_state=None):
    """Assemble the hyperopt search-space dict for an AdaBoost estimator.

    Explicitly supplied values are used as-is; ``None`` selects the default
    prior for that hyper-parameter.
    """
    if n_estimators is None:
        n_estimators = _boosting_n_estimators(name_func('n_estimators'))
    if learning_rate is None:
        learning_rate = _ada_boost_learning_rate(name_func('learning_rate'))
    return dict(
        base_estimator=base_estimator,
        n_estimators=n_estimators,
        learning_rate=learning_rate,
        random_state=_random_state(name_func('rstate'), random_state),
    )
def pageinate(page, maxPage, n):
    """Build a pagination window of up to *n* page numbers centred on *page*.

    The window grows alternately below and above the current page; for long
    page lists the edges are pinned to 1 / maxPage with a -1 sentinel
    marking the gap (rendered as an ellipsis).
    """
    pages = [page]
    lo = page - 1
    hi = page + 1
    # Expand until the window holds n entries or both directions are exhausted.
    while len(pages) < n:
        prefer_low = (2 * page - lo <= hi) or (hi > maxPage)
        if prefer_low and lo > 0:
            pages.append(lo)
            lo -= 1
        elif hi <= maxPage:
            pages.append(hi)
            hi += 1
        else:
            break
    pages.sort()
    # For long paginations, replace the edge slots with 1 / maxPage and
    # mark the jump with -1.
    if maxPage > n and n > 5:
        if pages[0] != 1:
            pages[0] = 1
            pages[1] = -1
        if pages[-1] != maxPage:
            pages[-1] = maxPage
            pages[-2] = -1
    return pages
def get_device():
    """Return the preferred torch device string: 'mps', then 'cuda', then 'cpu'."""
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        # MPS takes priority but is known to be unreliable; warn loudly.
        print("WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues with generations.")
        return 'mps'
    return 'cuda' if torch.cuda.is_available() else 'cpu'
def ignore_buffers_decorator(func):
    """Decorator that strips history buffers from a raw state before *func* runs.

    The wrapped method receives ``raw_state`` as a list of snapshots; only
    the most recent snapshot is kept, and within it the ``parsed_mkt_data``
    and ``parsed_volume_data`` buffers are likewise reduced to their latest
    entry (empty buffers are left untouched).
    """
    from functools import wraps

    # IMPROVEMENT: functools.wraps preserves func's name/docstring on the wrapper.
    @wraps(func)
    def wrapper_ignore_buffers_decorator(self, raw_state):
        # Keep only the newest snapshot from the state history.
        raw_state = raw_state[(- 1)]
        # Reduce the market-data buffer to its latest entry, if non-empty.
        if len(raw_state['parsed_mkt_data']) != 0:
            raw_state['parsed_mkt_data'] = raw_state['parsed_mkt_data'][(- 1)]
        # Same for the volume buffer, which may be absent/empty (falsy).
        if raw_state['parsed_volume_data']:
            raw_state['parsed_volume_data'] = raw_state['parsed_volume_data'][(- 1)]
        return func(self, raw_state)
    return wrapper_ignore_buffers_decorator
class Layer(object):
    """Abstract layer interface; concrete layers must override every method."""

    def _unimplemented(self):
        # Build the standard "not implemented" error mentioning the concrete type.
        return NotImplementedError(str(type(self)) + ' does not implement this method')

    def __init__(self):
        raise self._unimplemented()

    def get_output_shape(self):
        raise self._unimplemented()

    def output(self):
        raise self._unimplemented()

    def reset_params(self):
        raise self._unimplemented()
def get_vehicle_corners_from_dict(state_dict):
    """Return the vehicle's corner points in world coordinates.

    Rotates the body-frame corner offsets by the heading angle and
    translates them to the vehicle centre.

    Args:
        state_dict: mapping with keys 'center-x', 'center-y', 'heading'
            (radians) and 'corners' (an (N, 2) array of body-frame offsets
            — shape assumed from the transpose usage; TODO confirm).

    Returns:
        (N, 2) array of corner positions in the world frame.
    """
    x = state_dict['center-x']
    y = state_dict['center-y']
    psi = state_dict['heading']
    body_shape = state_dict['corners']
    center = np.array([x, y])
    # Standard 2-D rotation matrix for heading psi.
    R = np.array([[np.cos(psi), (- np.sin(psi))], [np.sin(psi), np.cos(psi)]])
    # BUG FIX: the original line `(R body_shape.T).T` was a syntax error —
    # the intended operation is the matrix product.
    corners = (R @ body_shape.T).T
    return (corners + center)
class TrainOptions(BaseOptions):
    """Training-time command-line options, extending the shared BaseOptions."""
    def initialize(self):
        """Register training-specific arguments on the shared parser and mark
        this configuration as a training run."""
        BaseOptions.initialize(self)
        # Logging / checkpoint cadence.
        self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
        self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        self.parser.add_argument('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results')
        self.parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs')
        # Resume / phase selection.
        self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        # Schedule and optimiser hyper-parameters.
        self.parser.add_argument('--niter', type=int, default=200, help='# of iter at starting learning rate')
        self.parser.add_argument('--niter_decay', type=int, default=200, help='# of iter to linearly decay learning rate to zero')
        self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        # GAN-specific options.
        self.parser.add_argument('--TTUR', action='store_true', help='Use TTUR training scheme')
        self.parser.add_argument('--gan_mode', type=str, default='ls', help='(ls|original|hinge)')
        self.parser.add_argument('--pool_size', type=int, default=1, help='the size of image buffer that stores previously generated images')
        self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        # Flag consumed by BaseOptions/downstream code to select train behaviour.
        self.isTrain = True
def main():
    """Save an untrained Conv4 encoder as a 'random init' baseline checkpoint."""
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    print('Used device:', device)
    encoder = CNN_4Layer(in_channels=3).to(device)
    save_path = 'prototransfer/checkpoints/random_init_conv4'
    print('Save path is:', save_path)

    def save_checkpoint(state):
        # Checkpoints live next to save_path with a .pth.tar suffix.
        torch.save(state, save_path + '.pth.tar')

    # Epoch-zero checkpoint with placeholder metrics and no optimizer state.
    save_checkpoint({'epoch': 0, 'n_no_improvement': 0, 'model': encoder.state_dict(), 'optimizer': None, 'scheduler': None, 'loss': np.inf, 'best_loss': np.inf, 'best_accuracy': 0, 'accuracy': 0, 'setup': None})
def ReadFile(tthread, batchInterval):
    """Aggregate the six-component time breakdown for each scheduler variant.

    Reads the breakdown log of every scheduler (GS, BFS, DFS, OPGS, OPBFS,
    OPDFS), sums each of the six breakdown components per scheduler, and
    normalises every column to percentages of that scheduler's total time.

    Returns:
        6x6 list ``y`` where y[i][j] is component i's percentage share for
        scheduler column j (ordering matches the tuple below).

    IMPROVEMENT: the original repeated the same read/accumulate stanza six
    times (once per scheduler); folded into a single loop and files are now
    closed via context managers.
    """
    schedulers = ('GS', 'BFS', 'DFS', 'OPGS', 'OPBFS', 'OPDFS')
    (w, h) = (6, 6)
    y = [[0 for _ in range(w)] for _ in range(h)]
    y_sum = [0 for _ in range(w)]
    inputEvents = (tthread * batchInterval)
    for (col, sched) in enumerate(schedulers):
        path = FILE_FOLER + '/{}/threads = {}/totalEvents = {}'.format(sched, tthread, inputEvents)
        with open(path) as f:
            lines = f.readlines()
        # Skip the header region; data rows start at the located index.
        idx = locateIdx(lines)
        for line in lines[idx:]:
            breakdown_value = line.split('\t')
            print(breakdown_value)
            # Columns 1..6 hold the six breakdown components.
            for i in range(0, 6):
                y[i][col] += float(breakdown_value[(i + 1)])
                y_sum[col] += float(breakdown_value[(i + 1)])
    # Convert each scheduler column to percentages of its total time.
    for i in range(h):
        for j in range(w):
            if (y_sum[j] != 0):
                y[i][j] = ((y[i][j] / y_sum[j]) * 100)
    print(y)
    return y
class WideResNet(nn.Module):
    """Wide ResNet built from three NetworkBlock groups over a 3-channel input.

    Args:
        depth: total network depth; must satisfy (depth - 4) % 6 == 0.
        num_classes: size of the final linear classifier.
        widen_factor: channel multiplier for each group.
        bn_aff: whether BatchNorm layers carry affine parameters.
        shortcut: whether residual shortcuts are enabled inside the blocks.
        dropRate: dropout rate inside the basic blocks.
    """

    def __init__(self, depth, num_classes, widen_factor=1, bn_aff=True, shortcut=True, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, (16 * widen_factor), (32 * widen_factor), (64 * widen_factor)]
        assert (((depth - 4) % 6) == 0)
        # BUG FIX: use integer division — true division produced a float
        # block count; the assert above guarantees exact divisibility.
        n = (depth - 4) // 6
        block = BasicBlock
        self.bn_aff = bn_aff
        self.shortcut = shortcut
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        # Three groups: first at stride 1, the next two downsample by 2.
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, self.bn_aff, self.shortcut, dropRate)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, self.bn_aff, self.shortcut, dropRate)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, self.bn_aff, self.shortcut, dropRate)
        self.bn1 = nn.BatchNorm2d(nChannels[3], affine=self.bn_aff)
        self.relu = nn.ReLU(inplace=True)
        self.linear = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels
        # He-style initialisation for convs; BN weights to 1, biases to 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = (m.kernel_size[0] * m.kernel_size[1]) * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        """Return class logits for input images (expects 8x8 pre-pool features)."""
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # Global 8x8 average pool then flatten for the classifier.
        out = F.avg_pool2d(out, 8)
        out = out.view((- 1), self.nChannels[3])
        return self.linear(out)

    def get_bn_before_relu(self):
        """Return the BN layers preceding each group's first ReLU (distillation hooks)."""
        bn1 = self.block2.layer[0].bn1
        bn2 = self.block3.layer[0].bn1
        bn3 = self.bn1
        return [bn1, bn2, bn3]

    def get_channel_num(self):
        """Channel counts of the three feature groups."""
        return self.nChannels[1:]

    def extract_feature(self, x, preReLU=False):
        """Return ([feat1, feat2, feat3], logits).

        When preReLU is True, each feature is passed through the matching BN
        layer (i.e. the pre-ReLU activations are returned).
        """
        out = self.conv1(x)
        feat1 = self.block1(out)
        feat2 = self.block2(feat1)
        feat3 = self.block3(feat2)
        out = self.relu(self.bn1(feat3))
        out = F.avg_pool2d(out, 8)
        out = out.view((- 1), self.nChannels[3])
        out = self.linear(out)
        if preReLU:
            feat1 = self.block2.layer[0].bn1(feat1)
            feat2 = self.block3.layer[0].bn1(feat2)
            feat3 = self.bn1(feat3)
        return ([feat1, feat2, feat3], out)
def test():
    """Evaluate the global *net* on *testloader*; checkpoint whenever accuracy improves.

    Relies on module-level globals: net, testloader, device, criterion, best_acc.
    Leaves the network back in train mode.
    """
    global best_acc
    net.eval()
    running_loss = 0
    n_correct = 0
    n_seen = 0
    with torch.no_grad():
        for inputs, targets in testloader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            running_loss += criterion(outputs, targets).item()
            preds = outputs.max(1)[1]
            n_seen += targets.size(0)
            n_correct += (preds == targets).sum().item()
    acc = n_correct / n_seen
    print('Test acc: ', acc)
    if acc > best_acc:
        best_acc = acc
        print('Saving..')
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save({'net': net.state_dict(), 'acc': best_acc}, './checkpoint/best_ckpt.pth')
    print('Best acc: ', best_acc)
    net.train()
class OnnxrtModel(Model):
    """ONNX model wrapper built on neural-compressor's ONNXModel.

    NOTE(review): the @property / @staticmethod decorators below were missing
    in the reviewed copy, yet the bodies access these members as plain
    attributes (e.g. `self.domain.domain`, `self.filtered_input_nodes`) and
    call the helpers without a bound `self` — the decorators are restored
    wherever in-file usage proves them. Confirm against the upstream source.
    """

    def __init__(self, path: str) -> None:
        """Wrap the ONNX model file at *path*; the NC handle is created lazily."""
        super().__init__(path)
        # Lazily-constructed neural-compressor ONNXModel handle.
        self._nc_model_instance: Optional[ONNXModel] = None

    @property
    def domain(self) -> Domain:
        """Best-effort guess of the model's domain from graph, node and op names."""
        try:
            input_node_names = {node.name for node in self.nc_model_instance.graph().input}
            node_names = {node.name for node in self.nc_model_instance.nodes()}
            boundary_nodes = [node.name for node in self.nc_model_instance.graph().input]
            boundary_nodes.extend([node.name for node in self.nc_model_instance.graph().output])
            op_names = {node.op_type for node in self.nc_model_instance.nodes()}
        except Exception:
            # Any introspection failure simply means the domain is unknown.
            return Domain()
        nlp_domain = self._check_nlp_domain(input_node_names)
        if (nlp_domain is not None):
            return nlp_domain
        object_detection_domain = self._check_object_detection_domain(node_names, boundary_nodes, op_names)
        if (object_detection_domain is not None):
            return object_detection_domain
        image_recognition_domain = self._check_image_recognition_domain(op_names)
        if (image_recognition_domain is not None):
            return image_recognition_domain
        return Domain()

    @staticmethod
    def _has_any_name_parts(nodes: set, name_parts: List[str]) -> bool:
        """Return True if any name in *nodes* matches any pattern in *name_parts*.

        re.match anchors at the start, so the entries act as prefix regexes.
        """
        matching_names = []
        for node in nodes:
            for partial_name in name_parts:
                search = re.match(partial_name, node)
                if search:
                    matching_names.append(node)
        return bool(matching_names)

    def _check_nlp_domain(self, input_node_names: set) -> Optional[Domain]:
        """NLP when the graph inputs look like BERT-style ids/mask/segment tensors."""
        if self._has_any_name_parts(input_node_names, ['input_ids', 'input_mask', 'segment_ids']):
            return Domain(domain=Domains.NLP.value, domain_flavour=DomainFlavours.NONE.value)
        return None

    def _check_object_detection_domain(self, node_names: set, boundary_nodes: list, op_names: set) -> Optional[Domain]:
        """Object detection when NMS ops or box/score/class/detection names appear."""
        if (self._has_any_name_parts(node_names, ['NonMaxSuppression', 'box', 'score', 'class', 'detection']) or self._has_any_name_parts(op_names, ['NonMaxSuppression']) or self._has_any_name_parts(set(boundary_nodes), ['NonMaxSuppression', 'box', 'score', 'class', 'detection'])):
            return Domain(domain=Domains.OBJECT_DETECTION.value, domain_flavour=DomainFlavours.NONE.value)
        return None

    def _check_image_recognition_domain(self, op_names: set) -> Optional[Domain]:
        """Image recognition when conv/activation op types are present."""
        if self._has_any_name_parts(op_names, ['Conv', 'Relu/Clip']):
            return Domain(domain=Domains.IMAGE_RECOGNITION.value, domain_flavour=DomainFlavours.NONE.value)
        return None

    # NOTE(review): possibly a @property upstream as well; left as a plain
    # method because no in-file attribute access proves it.
    def input_shape(self) -> Shape:
        """Collect the (batch-stripped) input shape(s) for image-recognition models."""
        from google.protobuf.json_format import MessageToDict
        shape = None
        trusted = False
        if (self.domain.domain == Domains.IMAGE_RECOGNITION.value):
            shape_list = []
            for input_node in self.filtered_input_nodes:
                node_dict = MessageToDict(input_node)
                dimensions = node_dict.get('type', {}).get('tensorType', {}).get('shape', {}).get('dim', [])
                input_shape = []
                for dim in dimensions:
                    # Only concrete (non-symbolic) dimensions are collected.
                    if (dim.get('dimValue', None) is not None):
                        input_shape.append(dim['dimValue'])
                shape_list.append(','.join(input_shape))
            if (len(shape_list) == 1):
                shape = shape_list[0]
                shape = remove_number_of_samples_from_shape(shape)
                trusted = True
            if (len(shape_list) > 1):
                adjusted_shapes_list = []
                for sub_shape in shape_list:
                    adjusted_shapes_list.append(remove_number_of_samples_from_shape(sub_shape))
                # Multiple inputs are rendered as "[s1],[s2],...".
                shape = ','.join([(('[' + sub_shape) + ']') for sub_shape in adjusted_shapes_list])
                trusted = True
        return Shape(shape=shape, trusted=trusted)

    def shape_elements_order(self) -> List[str]:
        """Meaning of the shape elements returned by input_shape (channel-first)."""
        return ['channels', 'height', 'width']

    @staticmethod
    def get_framework_name() -> str:
        """Framework identifier for this wrapper."""
        return Frameworks.ONNX.value

    def get_model_graph(self) -> Graph:
        """Read the model into the Graph representation via OnnxrtReader."""
        graph_reader = OnnxrtReader(self)
        return graph_reader.read()

    @property
    def nc_model_instance(self) -> ONNXModel:
        """Lazily-constructed neural-compressor model for this path."""
        self._ensure_nc_model_instance()
        return self._nc_model_instance

    def _ensure_nc_model_instance(self) -> None:
        """Create the NC model handle on first use; no-op afterwards."""
        if (self._nc_model_instance is not None):
            return
        model_name = os.path.splitext(os.path.basename(self.path))[0]
        Logger().get_logger().setLevel(log.level)
        self._nc_model_instance = NCModel(self.path)
        self._nc_model_instance.name = model_name

    @staticmethod
    def supports_path(path: str) -> bool:
        """True when *path* has an .onnx extension."""
        return ('onnx' == get_file_extension(path))

    def guard_requirements_installed(self) -> None:
        """Ensure the onnx runtime stack is importable before use."""
        check_module('onnx')
        check_module('onnxruntime')
        if (sys.version_info < (3, 11)):
            # onnxruntime_extensions is only required below Python 3.11 —
            # presumably unavailable there; confirm.
            check_module('onnxruntime_extensions')

    @property
    def filtered_input_nodes(self) -> List[Any]:
        """Graph inputs with initializer-backed entries (weights) removed."""
        input_nodes = self.nc_model_instance.graph().input
        name_to_input = {}
        for input in input_nodes:
            name_to_input[input.name] = input
        for initializer in self.nc_model_instance.graph().initializer:
            if (initializer.name in name_to_input):
                input_nodes.remove(name_to_input[initializer.name])
        return input_nodes
def merge_dict(user, default):
    """Recursively fill missing keys of *user* from *default*.

    Mutates and returns *user*; non-dict values in *user* win unchanged.
    """
    if isinstance(user, dict) and isinstance(default, dict):
        for key, fallback in default.items():
            user[key] = fallback if key not in user else merge_dict(user[key], fallback)
    return user
class TestDummy(unittest.TestCase):
    """End-to-end smoke test: run each CLI pipeline script on the dummy dataset."""

    def test_encode(self):
        """Each command must exit with status 0 (check_output raises otherwise)."""
        commands = [['python', 'encode.py', '--users', '--items'], ['python', 'lr.py', 'data/dummy/X-ui.npz'], ['python', 'lr.py', '--folds', 'strong', 'data/dummy/X-ui.npz'], ['python', 'fm.py', 'data/dummy/X-ui.npz'], ['python', 'fm.py', '--folds', 'weak', 'data/dummy/X-ui.npz'], ['python', 'sktm.py', '--model', 'irt'], ['python', 'sktm.py', '--model', 'pfa'], ['python', 'sktm.py', '--model', 'iswf']]
        for cmd in commands:
            check_output(cmd)
# NOTE(review): this bare call looks like a decorator that lost its '@' —
# torch.onnx symbolics use `@parse_args('v', 'is', 'is', 'is', 'is')` here;
# confirm against the original source.
_args('v', 'is', 'is', 'is', 'is')
def im2col(g, input, kernel_size, dilation, padding, stride):
    """ONNX symbolic for im2col (torch.nn.Unfold-style patch extraction).

    Args:
        g: ONNX graph-building context used to emit ops.
        input: input tensor value; dims 2 and 3 are read as spatial H and W,
            so this assumes an NCHW-like 4-D layout — TODO confirm.
        kernel_size, dilation, padding, stride: (h, w) integer pairs.

    Returns:
        Graph value holding the reshaped patch tensor.
    """
    # Spatial extents queried from the graph (dims 2 and 3).
    input_h = size(g, input, g.op('Constant', value_t=torch.tensor(2)))
    input_w = size(g, input, g.op('Constant', value_t=torch.tensor(3)))
    (stride_h, stride_w) = (stride[0], stride[1])
    (padding_h, padding_w) = (padding[0], padding[1])
    (dilation_h, dilation_w) = (dilation[0], dilation[1])
    (kernel_h, kernel_w) = (kernel_size[0], kernel_size[1])
    # Gather indices for every sliding block along each spatial dimension.
    blocks_row_indices = _get_im2col_indices_along_dim(g, input_h, kernel_h, dilation_h, padding_h, stride_h)
    blocks_col_indices = _get_im2col_indices_along_dim(g, input_w, kernel_w, dilation_w, padding_w, stride_w)
    output_shape = _get_im2col_output_shape(g, input, kernel_h, kernel_w)
    padded_input = _get_im2col_padded_input(g, input, padding_h, padding_w)
    # Gather patch rows (axis 2) then patch columns (axis 4), bring the two
    # kernel axes next to each other, and flatten to the final output shape.
    output = g.op('Gather', padded_input, blocks_row_indices, axis_i=2)
    output = g.op('Gather', output, blocks_col_indices, axis_i=4)
    output = g.op('Transpose', output, perm_i=[0, 1, 2, 4, 3, 5])
    return g.op('Reshape', output, output_shape)
def plot(json_fname, results_fname, store_plots='', plots_to_latex=''):
    """Plot confidence vs. similarity for spelling-correction outcomes.

    Reads per-error metadata from *json_fname* and per-sentence results from
    *results_fname*, scatter-plots three outcome groups, and optionally saves
    figures (*store_plots* dir) and LaTeX data (*plots_to_latex* dir). Also
    writes two diagnostic text files next to the inputs.

    BUGFIX: the original leaked every file handle it opened (json, results,
    and both diagnostic writers); all opens are now context-managed.
    """
    name = '.'.join(json_fname.split('.')[:(- 1)])
    with open(json_fname, 'r', encoding='utf-8') as jf:
        data = json.loads(jf.read())
    truely_detected_and_truely_corrected = [[], []]
    truely_detected_and_falsely_corrected = [[], []]
    falsely_detected = [[], []]
    count_of_absence_of_correct_chars = [0, 0]
    with open(results_fname, 'r', encoding='utf-8') as fi, \
            open(f'{name}_falsely_detected.txt', 'w', encoding='utf-8') as w3, \
            open(f'{name}_falsely_corrected.txt', 'w', encoding='utf-8') as w4:
        for (line, entry) in zip(fi, data):
            (origin_num, wrong_sent, correct_sent, predict_sent, num) = line.strip().split('\t')
            pos_to_error = dict([(e['error_position'], e) for e in entry['errors']])
            for (pos, (w, c, p)) in enumerate(zip(wrong_sent, correct_sent, predict_sent)):
                if ((w != c) and (w != p)):
                    # True detection: the model changed a genuinely wrong char.
                    e = pos_to_error[pos]
                    assert (e['corrected_to'] == p)
                    if (c != p):
                        # ...but corrected it to the wrong character.
                        candidates = dict(sorted(list(e['candidates'].items()), reverse=True, key=(lambda it: it[1]))[:5])
                        absent = 'no'
                        if (c not in candidates):
                            count_of_absence_of_correct_chars[0] += 1
                            absent = 'yes'
                        truely_detected_and_falsely_corrected[0].append(e['confidence'])
                        truely_detected_and_falsely_corrected[1].append(e['similarity'])
                        w4.write(('\t'.join([wrong_sent, f'pos={pos}', f'w={w}', f'c={c}', f'p={p}', f"sim={e['similarity']}", f'absent={absent}']) + '\n'))
                    else:
                        truely_detected_and_truely_corrected[0].append(e['confidence'])
                        truely_detected_and_truely_corrected[1].append(e['similarity'])
                elif ((w == c) and (w != p)):
                    # False detection: the model changed an already-correct char.
                    e = pos_to_error[pos]
                    candidates = dict(sorted(list(e['candidates'].items()), reverse=True, key=(lambda it: it[1]))[:5])
                    absent = 'no'
                    if (c not in candidates):
                        count_of_absence_of_correct_chars[1] += 1
                        absent = 'yes'
                    falsely_detected[0].append(e['confidence'])
                    falsely_detected[1].append(e['similarity'])
                    w3.write(('\t'.join([wrong_sent, f'pos={pos}', f'w={w}', f'c={c}', f'p={p}', f"sim={e['similarity']}", f'absent={absent}']) + '\n'))
    print(f'In {len(truely_detected_and_falsely_corrected[0])} falsely corrected characters, {count_of_absence_of_correct_chars[0]} are because of absent correct candidates.')
    print(f'In {len(falsely_detected[0])} falsely detected characters, {count_of_absence_of_correct_chars[1]} are because of absent correct candidates.')
    plt.plot(truely_detected_and_truely_corrected[0], truely_detected_and_truely_corrected[1], 'ro', truely_detected_and_falsely_corrected[0], truely_detected_and_falsely_corrected[1], 'bo', falsely_detected[0], falsely_detected[1], 'x')
    plt.axis([0.0, 1.0, 0.0, 1.0])
    plt.show()
    if plots_to_latex:
        produce_latex(truely_detected_and_truely_corrected, truely_detected_and_falsely_corrected, falsely_detected, os.path.join(plots_to_latex, f'{name}_latex.txt'))
    if store_plots:
        # NOTE(review): savefig after plt.show() may produce an empty figure on
        # some matplotlib backends — confirm the intended ordering.
        axes = plt.gca()
        plt.savefig(os.path.join(store_plots, f'{name}.png'))
        axes.set_xlim([0.95, 1])
        axes.set_ylim([0.0, 0.6])
        plt.savefig(os.path.join(store_plots, f'{name}2.png'))
def process_single_fragment(fragment_id, color_files, depth_files, n_files, n_fragments, config):
    """Build, optimize, and fuse the pose graph and point cloud for one RGB-D fragment."""
    # Camera intrinsics: explicit file if configured, PrimeSense defaults otherwise.
    if config['path_intrinsic']:
        cam = o3d.io.read_pinhole_camera_intrinsic(config['path_intrinsic'])
    else:
        cam = o3d.camera.PinholeCameraIntrinsic(o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
    # Frame window [start, end) covered by this fragment.
    start = fragment_id * config['n_frames_per_fragment']
    end = min(start + config['n_frames_per_fragment'], n_files)
    make_posegraph_for_fragment(config['path_dataset'], start, end, color_files, depth_files, fragment_id, n_fragments, cam, with_opencv, config)
    optimize_posegraph_for_fragment(config['path_dataset'], fragment_id, config)
    make_pointcloud_for_fragment(config['path_dataset'], color_files, depth_files, fragment_id, n_fragments, cam, config)
class AdvWeightPerturb(object):
    """Adversarial weight perturbation helper using a proxy copy of the model."""

    def __init__(self, model, proxy, proxy_optim, gamma):
        """Store the target model, its proxy copy, the proxy optimizer, and the scale gamma."""
        super(AdvWeightPerturb, self).__init__()
        self.model = model
        self.proxy = proxy
        self.proxy_optim = proxy_optim
        self.gamma = gamma

    def calc_awp(self, inputs_adv, targets):
        """Take one ascent step on the proxy and return the resulting weight difference."""
        self.proxy.load_state_dict(self.model.state_dict())
        self.proxy.train()
        # Negating the loss turns the optimizer's descent step into ascent.
        loss = -F.cross_entropy(self.proxy(inputs_adv), targets)
        self.proxy_optim.zero_grad()
        loss.backward()
        self.proxy_optim.step()
        return diff_in_weights(self.model, self.proxy)

    def perturb(self, diff):
        """Apply the weight perturbation, scaled by gamma."""
        add_into_weights(self.model, diff, coeff=1.0 * self.gamma)

    def restore(self, diff):
        """Undo a previously applied perturbation."""
        add_into_weights(self.model, diff, coeff=-1.0 * self.gamma)
class ParetoSetModel(torch.nn.Module):
    """MLP mapping an n_obj-dim preference vector to an n_dim decision vector in [0, 1]."""

    def __init__(self, n_dim, n_obj):
        """Two 256-unit hidden layers; output dimensionality is n_dim."""
        super(ParetoSetModel, self).__init__()
        self.n_dim = n_dim
        self.n_obj = n_obj
        self.fc1 = nn.Linear(self.n_obj, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, self.n_dim)

    def forward(self, pref):
        """ReLU MLP followed by a sigmoid; result is promoted to float64."""
        hidden = torch.relu(self.fc1(pref))
        hidden = torch.relu(self.fc2(hidden))
        solution = torch.sigmoid(self.fc3(hidden))
        return solution.to(torch.float64)
class Warmup(Scheduler):
    """Thin adapter exposing BigDL's Warmup LR scheduler through this Scheduler interface."""
    def __init__(self, delta: float) -> None:
        """Create the wrapped scheduler.

        *delta* is forwarded unchanged to BigDL's Warmup (presumably the
        per-step learning-rate increment — confirm in the BigDL docs).
        """
        # Imported lazily so bigdl is only required when this scheduler is used.
        from bigdl.dllib.optim.optimizer import Warmup as BWarmup
        self.scheduler = BWarmup(delta)
    def get_scheduler(self) -> 'optimizer.Warmup':
        """Return the underlying BigDL scheduler instance."""
        return self.scheduler
def line_bounding_2D_activation(x_minus, x_plus, y_minus, y_plus, tanh=True):
    """Linear bounds for the product act(x) * sigmoid(y) over the input box.

    Returns (alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u).
    NOTE(review): the exact bound semantics depend on
    getConvenientGeneralActivationBound — confirm against its definition.
    """
    # Linear bounds of sigmoid over [y_minus, y_plus].
    kl, bl, ku, bu = getConvenientGeneralActivationBound(y_minus, y_plus, 'sigmoid')
    if tanh:
        act_lo, act_hi = torch.tanh(x_minus), torch.tanh(x_plus)
    else:
        act_lo, act_hi = x_minus, x_plus
    # Sign indicators of the activation at the box corners: the sign decides
    # whether the lower or upper sigmoid bound applies.
    sign_lo = (act_lo >= 0).float()
    sign_hi = (act_hi >= 0).float()
    alpha_l = torch.zeros(x_minus.shape, device=x_minus.device)
    alpha_u = torch.zeros(x_plus.shape, device=x_minus.device)
    beta_l = sign_lo * act_lo * kl + (1 - sign_lo) * act_lo * ku
    gamma_l = sign_lo * act_lo * bl + (1 - sign_lo) * act_lo * bu
    beta_u = sign_hi * act_hi * ku + (1 - sign_hi) * act_hi * kl
    gamma_u = sign_hi * act_hi * bu + (1 - sign_hi) * act_hi * bl
    return (alpha_l, beta_l, gamma_l, alpha_u, beta_u, gamma_u)
def test_text_envs():
    """Smoke-test VideoRecorder on a text-based env; always remove the video file."""
    env = gym.make('FrozenLake-v0')
    recorder = VideoRecorder(env)
    try:
        env.reset()
        recorder.capture_frame()
        recorder.close()
    finally:
        os.remove(recorder.path)
def data(ndata=100, baseline=1, freq=10, sigma=1.0, **kwargs):
    """Generate a noisy, irregularly sampled transit light curve.

    Returns (t, y, dy): sorted sample times over [0, baseline], model flux
    with Gaussian noise added, and the constant per-point uncertainty.
    """
    times = baseline * np.sort(np.random.rand(ndata))
    flux = transit_model(times, freq, **kwargs)
    errors = sigma * np.ones_like(times)
    flux += errors * np.random.randn(len(times))
    return (times, flux, errors)
def save_tokenizer(tokenizer, path):
    """Serialize *tokenizer* to *path* with pickle and report the location."""
    with open(path, 'wb') as out:
        pickle.dump(tokenizer, out)
    print('tokenizer saved in {}'.format(path))
# NOTE(review): the following lines are HuggingFace dataset-viewer page
# residue, not Python source; commented out so the file stays parseable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.