code stringlengths 101 5.91M |
|---|
def to_device(obj: object, device: str) -> None:
    """Move every ``torch.Tensor`` attribute of ``obj`` to ``device`` in place.

    Non-tensor attributes are left untouched.  ``non_blocking=True`` lets
    host-to-device copies overlap with compute when the source tensor lives
    in pinned memory.
    """
    # Snapshot the items first: setattr() mutates obj.__dict__, which is the
    # very dict vars(obj) returns, so iterating it live is fragile.
    for key, val in list(vars(obj).items()):
        if isinstance(val, torch.Tensor):
            setattr(obj, key, val.to(device=device, non_blocking=True))
class FakeLandscape(flexs.Landscape):
    """Test double for a FLEXS landscape: returns random fitness values.

    NOTE(review): relies on a module-level ``rng``; scores are one random
    float per input sequence, and the sequence content is ignored entirely.
    """

    def _fitness_function(self, sequences):
        # One random value per sequence; shape matches len(sequences).
        return rng.random(size=len(sequences))
def save_model(path, model, epoch, optimizer=None):
    """Write a training checkpoint to ``path``.

    The checkpoint always contains the epoch number and the model weights;
    the optimizer state is included only when an optimizer is supplied.
    """
    checkpoint = {'epoch': epoch, 'model_state': model.state_dict()}
    if optimizer is not None:
        checkpoint['optimizer_state'] = optimizer.state_dict()
    torch.save(checkpoint, path)
def processInputStreamData(obj):
    """Toggle the global ``myStatus`` between 'open' and 'close' on a 'close' command.

    ``obj`` is expected to be a mapping; nothing happens unless it carries an
    'attributes' entry with a nested 'command'.  The status is printed before
    and after any toggle.
    """
    global myStatus
    if 'attributes' not in obj:
        return
    attributes = obj['attributes']
    if 'command' not in attributes:
        return
    print(myStatus)
    if attributes['command']['value'] == 'close':
        # Flip whichever state we are currently in.
        if myStatus == 'open':
            myStatus = 'close'
        elif myStatus == 'close':
            myStatus = 'open'
    print(myStatus)
def load_dataset():
    """Populate the global train/dev/test splits from the files named in ``args``.

    Side effects: appends to the global ``train_data``/``dev_data``/``test_data``
    lists, and rewrites ``trfreq`` so each relation maps to a sampling weight
    of ``train_size / (count * n_relations)``.

    Fix: the original iterated bare ``open(...)`` calls and never closed the
    file handles; each file is now read inside a ``with`` block.
    """
    global train_data, dev_data, test_data, trfreq
    trace('load train')
    with open(args.train_file) as f:
        for line in f:
            (h, r, t) = parse_line(line)
            train_data.append((h, r, t))
            trfreq[r] += 1
    train_data = list(train_data)
    for r in trfreq:
        # Convert raw counts into inverse-frequency sampling weights.
        trfreq[r] = (args.train_size / (float(trfreq[r]) * len(trfreq)))
    trace('load dev')
    with open(args.dev_file) as f:
        for line in f:
            # Dev/test lines additionally carry a label l.
            (h, r, t, l) = parse_line(line)
            dev_data.append((h, r, t, l))
    trace('dev size:', len(dev_data))
    trace('load test')
    with open(args.test_file) as f:
        for line in f:
            (h, r, t, l) = parse_line(line)
            test_data.append((h, r, t, l))
    trace('test size:', len(test_data))
def torchPSNR(tar_img, prd_img):
    """Peak signal-to-noise ratio between two images, both clamped to [0, 1].

    Returns a 0-dim tensor; +inf when the images are identical.
    """
    diff = (torch.clamp(prd_img, 0, 1) - torch.clamp(tar_img, 0, 1))
    rmse = (diff ** 2).mean().sqrt()
    return (20 * torch.log10((1 / rmse)))
def detect_initials(text):
    """Find pairs of initials such as 'J.K.' or 'J. K.' in ``text``.

    Returns the list of matched substrings (empty when there are none).

    Fix: the trailing ``[m for m in match]`` was an identity copy of the
    findall result and has been removed; the pattern is now a raw string.
    """
    pattern = r'[A-Z]\. ?[A-Z]\.'
    return re.findall(pattern, text)
class NSEM_3D_AdjointTests(unittest.TestCase):
    """Adjoint-consistency tests (Jvec vs. Jtvec) for the 3D natural-source EM problem.

    Each case builds a conductivity model via ``halfSpace(0.01)`` or
    ``random(0.01)`` and asserts ``JvecAdjointTest`` passes for one receiver
    component ('xx', 'xy', ..., 'Tip') at tolerance 0.1.  The location
    variants additionally exercise per-location / single-receiver setups.
    """

    def test_JvecAdjoint_zxx(self):
        self.assertTrue(JvecAdjointTest(nsem.utils.test_utils.halfSpace(0.01), 'xx', 0.1))

    def test_JvecAdjoint_zxy(self):
        self.assertTrue(JvecAdjointTest(nsem.utils.test_utils.halfSpace(0.01), 'xy', 0.1))

    def test_JvecAdjoint_zyx(self):
        self.assertTrue(JvecAdjointTest(nsem.utils.test_utils.halfSpace(0.01), 'yx', 0.1))

    def test_JvecAdjoint_zyy(self):
        self.assertTrue(JvecAdjointTest(nsem.utils.test_utils.halfSpace(0.01), 'yy', 0.1))

    def test_JvecAdjoint_tzx(self):
        self.assertTrue(JvecAdjointTest(nsem.utils.test_utils.halfSpace(0.01), 'zx', 0.1))

    def test_JvecAdjoint_tzy(self):
        self.assertTrue(JvecAdjointTest(nsem.utils.test_utils.halfSpace(0.01), 'zy', 0.1))

    def test_JvecAdjoint_All(self):
        self.assertTrue(JvecAdjointTest(nsem.utils.test_utils.random(0.01), 'All', 0.1))

    def test_JvecAdjoint_Imp(self):
        self.assertTrue(JvecAdjointTest(nsem.utils.test_utils.random(0.01), 'Imp', 0.1))

    def test_JvecAdjoint_Res(self):
        self.assertTrue(JvecAdjointTest(nsem.utils.test_utils.random(0.01), 'Res', 0.1))

    def test_JvecAdjoint_location_e_b(self):
        self.assertTrue(JvecAdjointTest(nsem.utils.test_utils.random(0.01), 'Res', 0.1, testLocations=True, testSingle=False))

    def test_JvecAdjoint_location_single(self):
        self.assertTrue(JvecAdjointTest(nsem.utils.test_utils.random(0.01), 'Res', 0.1, testLocations=True, testSingle=True))

    def test_JvecAdjoint_location_single_all(self):
        self.assertTrue(JvecAdjointTest(nsem.utils.test_utils.random(0.01), 'All', 0.1, testLocations=True, testSingle=True))

    def test_JvecAdjoint_location_single_imp(self):
        self.assertTrue(JvecAdjointTest(nsem.utils.test_utils.random(0.01), 'Imp', 0.1, testLocations=True, testSingle=True))

    def test_JvecAdjoint_location_single_tip(self):
        self.assertTrue(JvecAdjointTest(nsem.utils.test_utils.random(0.01), 'Tip', 0.1, testLocations=True, testSingle=True))
def CmtyEvolutionJson(Json, sizesContV, cContV, edges):
    """Thin wrapper delegating to the SWIG binding ``_snap.CmtyEvolutionJson``.

    Parameter semantics are defined by the underlying SNAP C++ routine
    (community-evolution export to JSON) — see the SNAP documentation.
    """
    return _snap.CmtyEvolutionJson(Json, sizesContV, cContV, edges)
def execute(prob: Chunk, min_distance: float=15.0, threshold_rel: float=0.3):
    """Detect peak points in a probability-map chunk.

    Returns a ``PointCloud`` of detected points (carrying the chunk's voxel
    size), or ``None`` when ``prob`` is ``None``.

    NOTE(review): uint8 inputs are assumed to encode probabilities scaled by
    255 — confirm with the producer of ``prob``.
    """
    if (prob is None):
        print('get None probability map!')
        return None
    # threshold_rel must be a strict fraction in (0, 1); asserts vanish
    # under `python -O`, so callers should validate upstream too.
    assert (threshold_rel > 0.0)
    assert (threshold_rel < 1.0)
    if np.issubdtype(prob.dtype, np.uint8):
        # Rescale 8-bit maps to float32 in [0, 1] before peak detection.
        prob = prob.astype(np.float32)
        prob /= 255.0
    prob = ProbabilityMap.from_chunk(prob)
    (points, _) = prob.detect_points(min_distance=min_distance, threshold_rel=threshold_rel)
    points = PointCloud(points, prob.voxel_size)
    return points
def _get_num_outputs_entry(name: str, opts: Dict[(str, Any)]) -> Tuple[(int, int)]:
    """Return ``(feature_dim, ndim)`` for a RETURNN Tensor built from ``opts``."""
    from returnn.tensor import Tensor
    tensor = Tensor(name, **opts)
    dim = tensor.dim
    if not dim:
        # No explicit dim: fall back to the last static shape entry,
        # or 0 when the shape is empty.
        dim = tensor.shape[(- 1)] if tensor.shape else 0
    return (dim, len(tensor.shape))
class Integrator():
    """Accumulates scalar metrics across steps and logs their averages.

    Values are summed per key in :meth:`add_tensor`; :meth:`finalize` divides
    by the update count, optionally all-reduces across ranks, and forwards
    the result to the logger on rank 0.
    """

    def __init__(self, logger: 'TensorboardLogger', distributed: bool = True):
        # Running sums and per-key update counts.
        self.values = {}
        self.counts = {}
        self.hooks = []
        self.logger = logger
        self.distributed = distributed
        if distributed:
            self.local_rank = torch.distributed.get_rank()
            self.world_size = torch.distributed.get_world_size()
        else:
            # Fix: the original queried torch.distributed unconditionally,
            # which raises when no process group has been initialized.
            self.local_rank = 0
            self.world_size = 1

    def add_tensor(self, key: str, tensor: torch.Tensor):
        """Accumulate a scalar under ``key`` (tensor inputs use their mean)."""
        if isinstance(tensor, (int, float)):
            value = tensor
        else:
            value = tensor.mean().item()
        if key not in self.values:
            self.counts[key] = 1
            self.values[key] = value
        else:
            self.counts[key] += 1
            self.values[key] += value

    def add_dict(self, tensor_dict: Dict[str, torch.Tensor]):
        """Accumulate every entry of ``tensor_dict`` via :meth:`add_tensor`."""
        for k, v in tensor_dict.items():
            self.add_tensor(k, v)

    def add_hook(self, hook: Callable[[torch.Tensor], Tuple[str, torch.Tensor]]):
        """Register one hook (or a list of hooks) to run during finalize()."""
        if isinstance(hook, list):
            self.hooks.extend(hook)
        else:
            self.hooks.append(hook)

    def reset_except_hooks(self):
        """Clear accumulated values/counts but keep registered hooks."""
        self.values = {}
        self.counts = {}

    def finalize(self, exp_id: str, prefix: str, it: int) -> None:
        """Run hooks, average each key, reduce across ranks, and log on rank 0."""
        for hook in self.hooks:
            (k, v) = hook(self.values)
            self.add_tensor(k, v)
        outputs = {}
        for k, v in self.values.items():
            # Keys prefixed with 'hide' are accumulated but never logged.
            if k[:4] == 'hide':
                continue
            avg = v / self.counts[k]
            if self.distributed:
                avg = torch.tensor(avg).cuda()
                torch.distributed.reduce(avg, dst=0)
                if self.local_rank == 0:
                    # Only rank 0 holds the reduced sum; normalize it.
                    avg = (avg / self.world_size).cpu().item()
                    outputs[k] = avg
            else:
                outputs[k] = avg
        if (not self.distributed) or (self.local_rank == 0):
            self.logger.log_metrics(exp_id, prefix, outputs, it)
def fricas_console():
    """Launch an interactive FriCAS console.

    Raises ``RuntimeError`` when Sage is not running in a terminal (e.g.
    inside a notebook), directing the user to the %%fricas magic instead.
    """
    from sage.repl.rich_output.display_manager import get_display_manager
    if (not get_display_manager().is_in_terminal()):
        raise RuntimeError('Can use the console only in the terminal. Try %%fricas magics instead.')
    # '-nox' presumably starts FriCAS without its X11 interface — see the
    # FriCAS command-line documentation.
    os.system('fricas -nox')
def register_types_ns3_Hash(module):
    """Register the ns3::Hash namespace with PyBindGen (auto-generated binding code).

    Adds the ``Hash::Implementation`` class, aliases for the 32/64-bit hash
    function-pointer types (value, pointer, and reference forms), then
    recurses into the nested ``Function`` namespace.
    """
    root_module = module.get_root()
    module.add_class('Implementation', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t const )', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t const )*', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t const )&', u'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t const )', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t const )*', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t const )&', u'ns3::Hash::Hash64Function_ptr&')
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)
class TestHuggingFaceTokenizer():
    """Tests that cached HuggingFace tokenizers produce expected token counts."""

    TEST_PROMPT: str = 'The Center for Research on Foundation Models (CRFM) is an interdisciplinary initiative born out of the Stanford Institute for Human-Centered Artificial Intelligence (HAI) that aims to make fundamental advances in the study, development, and deployment of foundation models.'

    # Fix: this helper is called as TestHuggingFaceTokenizer.verify_get_tokenizer(...)
    # without an instance, so it should be declared a staticmethod.
    @staticmethod
    def verify_get_tokenizer(tokenizer_name: str, expected_num_tokens: int, pretrained_model_name_or_path: Optional[str]=None):
        """Load a tokenizer, check it is cached, and check the prompt's token count."""
        wrapped_tokenizer = HuggingFaceTokenizer.get_tokenizer(helm_tokenizer_name=tokenizer_name, pretrained_model_name_or_path=(pretrained_model_name_or_path or tokenizer_name))
        assert (tokenizer_name in HuggingFaceTokenizer._tokenizers), 'Tokenizer should be cached'
        with wrapped_tokenizer as tokenizer:
            assert (len(tokenizer.encode(TestHuggingFaceTokenizer.TEST_PROMPT)) == expected_num_tokens)

    def test_get_tokenizer_gpt2(self):
        TestHuggingFaceTokenizer.verify_get_tokenizer('huggingface/gpt2', 51, pretrained_model_name_or_path='gpt2')

    def test_get_tokenizer_gptj(self):
        TestHuggingFaceTokenizer.verify_get_tokenizer('EleutherAI/gpt-j-6B', 51)

    def test_get_tokenizer_gptneox(self):
        TestHuggingFaceTokenizer.verify_get_tokenizer('EleutherAI/gpt-neox-20b', 52)

    def test_get_tokenizer_bloom(self):
        TestHuggingFaceTokenizer.verify_get_tokenizer('bigscience/bloom', 51)

    def test_get_tokenizer_t0pp(self):
        TestHuggingFaceTokenizer.verify_get_tokenizer('bigscience/T0pp', 58)

    def test_get_tokenizer_t511b(self):
        TestHuggingFaceTokenizer.verify_get_tokenizer('google/t5-11b', 58, pretrained_model_name_or_path='t5-11b')

    def test_get_tokenizer_ul2(self):
        TestHuggingFaceTokenizer.verify_get_tokenizer('google/ul2', 58)

    def test_get_santacoder(self):
        TestHuggingFaceTokenizer.verify_get_tokenizer('bigcode/santacoder', 62)

    def test_get_clip_tokenizer(self):
        TestHuggingFaceTokenizer.verify_get_tokenizer('openai/clip-vit-large-patch14', 50)

    def test_gpt2_tokenize_eos(self):
        """The GPT-2 EOS token must round-trip through encode/decode as id 50256."""
        eos_token: str = '<|endoftext|>'
        wrapped_tokenizer = HuggingFaceTokenizer.get_tokenizer('huggingface/gpt2', pretrained_model_name_or_path='gpt2')
        with wrapped_tokenizer as tokenizer:
            token_ids = tokenizer.encode(eos_token)
            assert (singleton(token_ids) == 50256)
            assert (tokenizer.decode(token_ids) == eos_token)
def generate_shell(model_name: str, shape_list: List[List[int]], workspace_root: str, suf: str='pt'):
    """Render the conversion shell script and write it to ``<workspace_root>/convert.sh``.

    The script body comes from the module-level ``sh_template``, filled with
    the model name, a bracketed shape string, and the checkpoint suffix.
    """
    shape_str = ','.join(shape_list_to_str(shape_list))
    sh = sh_template.format(model_name=model_name, shape_str=f'[{shape_str}]', suf=suf)
    # Fix: the file name is a constant; the original wrapped it in a
    # pointless f-string with no placeholders.
    with open(os.path.join(workspace_root, 'convert.sh'), 'w') as w:
        w.write(sh)
.timeout(120)
.parametrize('model_name', list_models(exclude_filters=(EXCLUDE_FILTERS + ['dla*'])))
.parametrize('batch_size', [2])
def test_model_backward(model_name, batch_size):
    """Smoke-test the backward pass: every parameter must receive a gradient.

    Builds the model untrained with 42 output classes, runs one forward and
    backward step on random input, then checks gradient coverage, output
    shape, and the absence of NaNs.
    """
    model = create_model(model_name, pretrained=False, num_classes=42)
    num_params = sum([x.numel() for x in model.parameters()])
    model.eval()
    input_size = model.default_cfg['input_size']
    if any([(x > MAX_BWD_SIZE) for x in input_size]):
        # Cap each dimension so the backward pass stays affordable in CI.
        input_size = tuple([min(x, MAX_BWD_SIZE) for x in input_size])
    inputs = torch.randn((batch_size, *input_size))
    outputs = model(inputs)
    outputs.mean().backward()
    for (n, x) in model.named_parameters():
        assert (x.grad is not None), f'No gradient for {n}'
    # Count only parameters that actually got a gradient; must equal the total.
    num_grad = sum([x.grad.numel() for x in model.parameters() if (x.grad is not None)])
    assert (outputs.shape[(- 1)] == 42)
    assert (num_params == num_grad), 'Some parameters are missing gradients'
    assert (not torch.isnan(outputs).any()), 'Output included NaNs'
def get_prog():
    """Return the program name to display in usage/help text.

    Resolves to the basename of ``sys.argv[0]``, substituting
    ``<python> -m pip`` when invoked via ``-m`` or ``-c``; falls back to
    the plain string 'pip' when argv is unusable.
    """
    try:
        prog = os.path.basename(sys.argv[0])
        if prog in ('__main__.py', '-c'):
            return ('%s -m pip' % sys.executable)
        return prog
    except (AttributeError, TypeError, IndexError):
        # argv missing, malformed, or empty — use the generic name.
        return 'pip'
def drn_a_50(pretrained=False, **kwargs):
    """Construct a DRN-A-50 model (ResNet-50 layout: [3, 4, 6, 3] Bottleneck stages).

    When ``pretrained`` is True the weights are initialized from the
    'resnet50' URL in ``model_urls`` via ``model_zoo``.
    """
    model = DRN_A(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model
class PKLDFDatasetForGen(Dataset):
    """Dataset over a pickled pandas DataFrame of ddG-annotated sequences.

    Supports a train/valid split by ``train_ratio`` and an optional low-ddG
    subset selection via ``data_subset`` (a fraction in (0, 1] of the rows
    with the smallest ddG).
    """

    def __init__(self, data_file: typing.Union[(str, Path)], in_memory: bool=False, split: str='train', train_ratio: float=1, train_data_file: str='250K_ddG_split/train_ddG.pkl', data_subset='full'):
        # NOTE(review): `train_data_file` is accepted but never used in this
        # constructor — confirm whether it is consumed elsewhere.
        data_file = Path(data_file)
        if (not data_file.exists()):
            raise FileNotFoundError(data_file)
        df = pd.read_pickle(data_file)
        if (train_ratio != 1):
            # NOTE(review): despite the name, sort_index() sorts — it does
            # not shuffle; the split is therefore deterministic by index.
            shuffled_df = df.sort_index()
            train_num_samples = int((len(shuffled_df) * train_ratio))
            if (split == 'train'):
                final_df = shuffled_df.iloc[:train_num_samples]
            elif (split == 'valid'):
                final_df = shuffled_df.iloc[train_num_samples:]
            else:
                final_df = df
        else:
            final_df = df
        if (data_subset != 'full'):
            # Keep only the fraction of rows with the lowest ddG values.
            ddG_sorted_final_df = final_df.sort_values(by='ddG', ascending=True)
            train_subset_num_samples = int((data_subset * len(ddG_sorted_final_df)))
            final_df = ddG_sorted_final_df.iloc[:train_subset_num_samples]
        print('split: ', split)
        print('data_file: ', data_file)
        print('len(final_df): ', len(final_df))
        self.df = final_df
        num_examples = len(final_df)
        self._num_examples = num_examples
        if in_memory:
            # Lazy per-index cache, populated on first access in __getitem__.
            cache = ([None] * num_examples)
            self._cache = cache
        self._in_memory = in_memory

    def __len__(self) -> int:
        return self._num_examples

    def __getitem__(self, index: int):
        """Return a dict with ddG, input/label sequences, and a string id."""
        if (not (0 <= index < self._num_examples)):
            raise IndexError(index)
        if (self._in_memory and (self._cache[index] is not None)):
            item = self._cache[index]
        else:
            row = self.df.iloc[index]
            item = {}
            item['ddG'] = row['ddG']
            # Generation setup: the mutant sequence serves as both input and label.
            item['input_ids'] = row['MT_seq']
            item['labels'] = row['MT_seq']
            item['id'] = str(index)
            if self._in_memory:
                self._cache[index] = item
        return item
def get_parser(**parser_kwargs):
    """Build the command-line parser for training/eval runs.

    Extra keyword arguments are forwarded to ``argparse.ArgumentParser``.
    """
    def str2bool(v):
        # argparse-friendly boolean conversion accepting common spellings.
        if isinstance(v, bool):
            return v
        lowered = v.lower()
        if lowered in ('yes', 'true', 't', 'y', '1'):
            return True
        if lowered in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')

    parser = argparse.ArgumentParser(**parser_kwargs)
    parser.add_argument('-n', '--name', type=str, const=True, default='',
                        nargs='?', help='postfix for logdir')
    parser.add_argument('-r', '--resume', type=str, const=True, default='',
                        nargs='?', help='resume from logdir or checkpoint in logdir')
    parser.add_argument('-b', '--base', nargs='*', metavar='base_config.yaml',
                        help='paths to base configs. Loaded from left-to-right. Parameters can be overwritten or added with command-line options of the form `--key value`.',
                        default=[])
    parser.add_argument('-t', '--train', type=str2bool, const=True, default=False,
                        nargs='?', help='train')
    parser.add_argument('--no-test', type=str2bool, const=True, default=False,
                        nargs='?', help='disable test')
    parser.add_argument('-p', '--project',
                        help='name of new or path to existing project')
    parser.add_argument('-d', '--debug', type=str2bool, nargs='?', const=True,
                        default=False, help='enable post-mortem debugging')
    parser.add_argument('-s', '--seed', type=int, default=23,
                        help='seed for seed_everything')
    parser.add_argument('-f', '--postfix', type=str, default='',
                        help='post-postfix for default name')
    parser.add_argument('-l', '--logdir', type=str, default='logs',
                        help='directory for logging dat shit')
    parser.add_argument('--scale_lr', type=str2bool, nargs='?', const=True,
                        default=True, help='scale base-lr by ngpu * batch_size * n_accumulate')
    return parser
def load_model_from_config(config, sd):
    """Instantiate a model from ``config``, load state dict ``sd``, and return it on GPU in eval mode."""
    model = instantiate_from_config(config)
    # strict=False: tolerate missing/unexpected keys in the checkpoint.
    model.load_state_dict(sd, strict=False)
    model.cuda()
    model.eval()
    return model
def build_storm(model, base_learning_rate, parameters=None, max_gradient_norm=None, allow_lr_injection=False, **kwargs):
    """Create a STORM optimizer and wrap it via ``_build``.

    Extra keyword arguments are forwarded to ``StormOptimizer``.
    NOTE(review): ``parameters`` is accepted but never used here — confirm
    whether ``_build`` derives the parameter list from ``model`` itself.
    """
    storm_optimizer = StormOptimizer(lr=base_learning_rate, **kwargs)
    return _build(model, storm_optimizer, max_gradient_norm=max_gradient_norm, allow_lr_injection=allow_lr_injection)
def calculate_video_results(output_buffer, video_id, test_results, class_names):
    """Average per-clip scores for one video and record its top-10 predictions.

    Mutates ``test_results['results'][video_id]`` with a list of
    ``{'label', 'score'}`` dicts, best first.
    """
    clip_scores = torch.stack(output_buffer)
    mean_scores = torch.mean(clip_scores, dim=0)
    (top_scores, top_locs) = torch.topk(mean_scores, k=10)
    ranked = [
        {'label': class_names[loc], 'score': score}
        for (score, loc) in zip(top_scores, top_locs)
    ]
    test_results['results'][video_id] = ranked
class DCVAE():
    """Deep convolutional variational autoencoder built on Keras.

    Wraps encoder/decoder construction, training (single- and multi-GPU),
    and sampling.  Callback behavior (early stopping, LR scheduling,
    checkpointing) is configured in the constructor.
    """

    def __init__(self, input_shape=(45, 45, 2), act='sigmoid', KernelDim=(2, 2, 3, 3), latent_dim=200, opt=RMSprop(), isTerminal=False, filepath=None, multi_GPU=0, hidden_dim=1024, filters=(2, 64, 64, 64), strides=(1, 2, 1, 1), dropout=0, epochs_drop=20):
        # NOTE(review): `opt=RMSprop()` is a mutable default evaluated once
        # at class-definition time, so all instances share one optimizer
        # object unless the caller passes their own.
        self.epochs_drop = epochs_drop
        self.act = act
        self.multi_GPU = multi_GPU
        self.opt = opt
        self.KernelDim = KernelDim
        self.model = None
        self.input_shape = input_shape
        self.latent_dim = latent_dim
        self.hidden_dim = hidden_dim
        self.filters = filters
        self.strides = strides
        self.dropout = dropout
        self.earlystopper = EarlyStopping(patience=10, verbose=0)
        self.reduce_lr = ReduceLROnPlateau(factor=0.5, patience=5, min_lr=5e-07, verbose=1)
        self.scheduler = True
        self.learningRateScheduler = LearningRateScheduler(self.step_decay, verbose=0)
        self.filepath = filepath
        if (self.filepath is None):
            self.ModelCheck = []
        else:
            self.ModelCheck = [ModelCheckpoint(self.filepath, verbose=0, save_best_only=True, save_weights_only=True, period=1)]
        if isTerminal:
            nt = []
        else:
            # Notebook sessions get a TQDM progress-bar callback.
            nt = [TQDMNotebookCallback()]
        if self.scheduler:
            self.listCall = (([self.earlystopper, self.reduce_lr, self.learningRateScheduler] + self.ModelCheck) + nt)
        else:
            self.listCall = (([self.earlystopper, self.reduce_lr] + self.ModelCheck) + nt)

    def step_decay(self, epoch):
        """LR schedule: multiply the current LR by 0.8 every `epochs_drop` epochs."""
        self.initial_lrate = K.eval(self.model.optimizer.lr)
        drop = 0.8
        if (((1 + epoch) % self.epochs_drop) == 0):
            lrate = (self.initial_lrate * drop)
        else:
            lrate = self.initial_lrate
        return lrate

    def acc_pred(self, y_true, y_pred):
        """Per-element accuracy metric: argmax agreement along the last axis."""
        return K.cast(K.equal(K.argmax(y_true, axis=(- 1)), K.argmax(y_pred, axis=(- 1))), K.floatx())

    def fit(self, x_train, x_v=None, num_epochs=1, batch_size=100, val_split=None, reset_model=True, verbose=0):
        """Train the autoencoder on in-memory data (x is both input and target).

        Uses `(x_v, x_v)` as validation data unless `val_split` is given.
        Reloads the best checkpointed weights after training when a
        filepath was configured.
        """
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        if reset_model:
            self._set_model()
        if (self.multi_GPU == 0):
            self.model.compile(optimizer=self.opt, loss=self._vae_loss, metrics=[self.acc_pred])
            if (val_split is None):
                self.history = self.model.fit(x_train, x_train, epochs=self.num_epochs, batch_size=self.batch_size, verbose=verbose, shuffle=True, callbacks=self.listCall, validation_data=(x_v, x_v))
            else:
                self.history = self.model.fit(x_train, x_train, epochs=self.num_epochs, batch_size=self.batch_size, verbose=verbose, shuffle=True, callbacks=self.listCall, validation_split=val_split)
        else:
            # Replicate the model across GPUs before compiling/fitting.
            self.modelGPU = multi_gpu_model(self.model, gpus=self.multi_GPU)
            self.modelGPU.compile(optimizer=self.opt, loss=self._vae_loss, metrics=[self.acc_pred])
            if (val_split is None):
                self.history = self.modelGPU.fit(x_train, x_train, epochs=self.num_epochs, batch_size=self.batch_size, verbose=verbose, shuffle=True, callbacks=self.listCall, validation_data=(x_v, x_v))
            else:
                self.history = self.modelGPU.fit(x_train, x_train, epochs=self.num_epochs, batch_size=self.batch_size, verbose=verbose, shuffle=True, callbacks=self.listCall, validation_split=val_split)
        if (self.filepath is not None):
            self.model.load_weights(self.filepath)

    def fit_generator(self, x_train, num_epochs=1, batch_size=100, reset_model=True, verbose=0, steps_per_epoch=100, val_set=None, validation_steps=None):
        """Train from a generator; multi-GPU is not supported on this path."""
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        if reset_model:
            self._set_model()
        if (self.multi_GPU == 0):
            self.model.compile(optimizer=self.opt, loss=self._vae_loss, metrics=[self.acc_pred])
            self.history = self.model.fit_generator(x_train, steps_per_epoch=steps_per_epoch, epochs=self.num_epochs, verbose=verbose, validation_data=val_set, validation_steps=validation_steps, callbacks=self.listCall, workers=0)
        else:
            print("Function 'multi_gpu_model' not found")
        if (self.filepath is not None):
            self.model.load_weights(self.filepath)

    def _set_model(self):
        """Build encoder, sampling layer, decoder, standalone generator, and compile."""
        print('Setting up model...')
        inputs = Input(batch_shape=((None,) + self.input_shape))
        self.inputs = inputs
        # Encoder: stack of Conv2D blocks defined by filters/KernelDim/strides.
        for i in range(len(self.filters)):
            if (i == 0):
                Q = Conv2D(self.filters[i], (self.KernelDim[i], self.KernelDim[i]), strides=(self.strides[i], self.strides[i]), padding='same', activation='relu')(inputs)
            else:
                Q = Conv2D(self.filters[i], (self.KernelDim[i], self.KernelDim[i]), padding='same', activation='relu', strides=(self.strides[i], self.strides[i]))(Q)
        Q_4 = Flatten()
        Q_5 = Dense(self.hidden_dim, activation='relu')
        Q_6 = Dropout(self.dropout)
        Q_z_mean = Dense(self.latent_dim)
        Q_z_log_var = Dense(self.latent_dim)
        flat = Q_4(Q)
        dp = Q_5(flat)
        hidden = Q_6(dp)
        z_mean = Q_z_mean(hidden)
        z_log_var = Q_z_log_var(hidden)
        self.encoder = Model(inputs, z_mean)
        # Reparameterization trick: sample z from (z_mean, z_log_var).
        encoding = Lambda(self._sampling_normal, output_shape=(self.latent_dim,))([z_mean, z_log_var])
        self.encoding = encoding
        out_shape = (int(np.ceil((self.input_shape[0] / np.prod(self.strides)))), int(np.ceil((self.input_shape[1] / np.prod(self.strides)))), self.filters[(- 1)])
        G_0 = Dense(self.hidden_dim, activation='relu')
        G_d = Dropout(self.dropout)
        G_1 = Dense(np.prod(out_shape), activation='relu')
        G_2 = Reshape(out_shape)
        # Decoder: transpose-convolutions mirroring the encoder stack in reverse.
        G = []
        for i in range(len(self.filters)):
            if (i == 0):
                G_ = Conv2DTranspose(self.filters[(- 1)], (self.KernelDim[(- 1)], self.KernelDim[(- 1)]), strides=(self.strides[(- 1)], self.strides[(- 1)]), padding='same', activation='relu')
            else:
                G_ = Conv2DTranspose(self.filters[((- i) - 1)], (self.KernelDim[((- i) - 1)], self.KernelDim[((- i) - 1)]), padding='same', activation='relu', strides=(self.strides[((- i) - 1)], self.strides[((- i) - 1)]))
            G.append(G_)
        G_5_ = BilinearUpsampling(output_size=(self.input_shape[0], self.input_shape[1]))
        G_6 = Conv2D(self.input_shape[2], (2, 2), padding='same', strides=(1, 1), activation=self.act, name='generated')
        x = G_0(encoding)
        x = G_d(x)
        x = G_1(x)
        x = G_2(x)
        for i in range(len(G)):
            x = G[i](x)
        x = G_5_(x)
        generated = G_6(x)
        self.model = Model(inputs, generated)
        # Standalone generator reusing the decoder layers, fed from latent input.
        # NOTE(review): the dropout layer G_d is skipped on this path.
        inputs_G = Input(batch_shape=(None, self.latent_dim))
        x = G_0(inputs_G)
        x = G_1(x)
        x = G_2(x)
        for i in range(len(self.filters)):
            x = G[i](x)
        x = G_5_(x)
        generated_G = G_6(x)
        self.generator = Model(inputs_G, generated_G)
        self.z_mean = z_mean
        self.z_log_var = z_log_var
        self.model.compile(optimizer=self.opt, loss=self._vae_loss)
        self.generator.compile(optimizer=self.opt, loss='mse')
        self.model.summary()
        print('Completed model setup.')

    def Encoder(self, x_test):
        """Return the latent means z_mean for `x_test`."""
        return self.encoder.predict(x_test)

    def Decoder(self, x_test, binary=False):
        """Reconstruct `x_test`; with `binary`, return argmax over the last axis."""
        if binary:
            return np.argmax(self.model.predict(x_test), axis=(- 1))
        return self.model.predict(x_test)

    def generate(self, number_latent_sample=20, std=1, binary=False):
        """Decode samples drawn from N(0, std^2) in latent space."""
        latent_sample = np.random.normal(0, std, size=(number_latent_sample, self.latent_dim))
        if binary:
            return np.argmax(self.generator.predict(latent_sample), axis=(- 1))
        return self.generator.predict(latent_sample)

    def _vae_loss(self, x, x_generated):
        """VAE loss: pixel-scaled binary cross-entropy plus the KL term."""
        x = K.flatten(x)
        x_generated = K.flatten(x_generated)
        reconstruction_loss = ((self.input_shape[0] * self.input_shape[1]) * binary_crossentropy(x, x_generated))
        kl_normal_loss = kl_normal(self.z_mean, self.z_log_var)
        kl_disc_loss = 0
        return ((reconstruction_loss + kl_normal_loss) + kl_disc_loss)

    def _sampling_normal(self, args):
        """Lambda-layer helper: draw z ~ N(z_mean, exp(z_log_var))."""
        (z_mean, z_log_var) = args
        return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))
def _get_ade_instances_meta():
    """Build the instance-segmentation metadata dict for the ADE categories.

    Maps each dataset category id to a contiguous index and collects the
    category names, in the order they appear in ``ADE_CATEGORIES``.
    """
    thing_ids = [cat['id'] for cat in ADE_CATEGORIES]
    # The ADE instance split is expected to define exactly 100 thing classes.
    assert (len(thing_ids) == 100), len(thing_ids)
    id_to_contiguous = {cat_id: idx for (idx, cat_id) in enumerate(thing_ids)}
    thing_classes = [cat['name'] for cat in ADE_CATEGORIES]
    return {
        'thing_dataset_id_to_contiguous_id': id_to_contiguous,
        'thing_classes': thing_classes,
    }
class Pickup_Soup(BaseScriptPeriod):
    """Scripted behavior: pick up a dish, then collect a soup from a pot.

    Stage 1 fetches a dish (or grabs a ready soup directly when one is
    available on a counter); stage 2 carries the dish to a pot ('P') to
    receive the soup.  Done once the player is holding a soup.
    """

    def __init__(self, random_dish=True, random_soup=True):
        super().__init__(period_name='Pickup_Soup')
        self.random_dish = random_dish
        self.random_soup = random_soup
        # __stage: 1 = acquiring a dish, 2 = exchanging it for soup.
        self.__stage = 1
        self.__current_period = Pickup_Object(obj='dish', terrain_type='XOTPDS', random_put=True, random_pos=self.random_dish)

    def reset(self, mdp, state, player_idx):
        """Restart at stage 1, preferring a soup already sitting on a counter."""
        self.__stage = 1
        if utils.exists(mdp, state, player_idx, terrain_type='X', obj='soup'):
            self.__current_period = Pickup_Object(obj='soup', terrain_type='XP', random_put=True, random_pos=self.random_soup)
        else:
            self.__current_period = Pickup_Object(obj='dish', terrain_type='XOTPDS', random_put=True, random_pos=self.random_dish)

    def step(self, mdp, state, player_idx):
        """Advance the current sub-period, switching stages when stage 1 completes."""
        player = state.players[player_idx]
        if (self.__stage == 1):
            if self.__current_period.done(mdp, state, player_idx):
                # Stage 1 must end with the player holding a dish.
                assert (player.has_object() and (player.get_object().name == 'dish'))
                self.__stage = 2
                # Fall through: the freshly created stage-2 period is stepped
                # immediately by the return below.
                self.__current_period = Put_Object(terrain_type='P', random_put=self.random_soup, obj=['soup', 'cooking_soup'])
            else:
                return self.__current_period.step(mdp, state, player_idx)
        return self.__current_period.step(mdp, state, player_idx)

    def done(self, mdp, state, player_idx):
        """True once the player is holding a soup."""
        player = state.players[player_idx]
        return (player.has_object() and (player.get_object().name == 'soup'))
def compute_sst2_metrics(result_dict, labels, predictions):
    """Score sentiment predictions against labels and update ``result_dict``.

    Sentiment strings are extracted from between the '<|sentiment|>' and
    '<|endofsentiment|>' markers.  Adds accuracy plus macro-averaged
    precision/recall/F-score.
    """
    def _extract(text):
        # Take the text between the sentiment markers.
        return text.split('<|sentiment|>')[(- 1)].split('<|endofsentiment|>')[0].strip()

    gold = []
    guess = []
    correct = 0
    for (true, pred) in zip(labels, predictions):
        g = _extract(true)
        p = _extract(pred)
        gold.append(g)
        guess.append(p)
        correct += int(g == p)
    all_acc = (correct / len(gold))
    # Cross-check the manual accuracy against sklearn's.
    acc = accuracy_score(gold, guess)
    assert (acc == all_acc)
    (prec, recall, fscore, _) = precision_recall_fscore_support(gold, guess, average='macro')
    # NOTE(review): 'sentiment_fscorce' is a typo but is kept — downstream
    # consumers read this exact key.
    result_dict.update({'sentiment_acc': all_acc, 'sentiment_prec': prec, 'sentiment_recall': recall, 'sentiment_fscorce': fscore})
    return result_dict
class Bottleneck(_Bottleneck):
    """ResNet bottleneck block extended with SAC and RFP support (DetectoRS-style).

    ``sac`` optionally replaces conv2 with a switchable-atrous-convolution
    layer; ``rfp_inplanes`` optionally adds a 1x1 conv that fuses a
    recursive-feature-pyramid feature into the block output.
    """

    expansion = 4

    def __init__(self, inplanes, planes, rfp_inplanes=None, sac=None, **kwargs):
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
        assert ((sac is None) or isinstance(sac, dict))
        self.sac = sac
        self.with_sac = (sac is not None)
        if self.with_sac:
            # Rebuild conv2 with the SAC conv config, same geometry as the base block.
            self.conv2 = build_conv_layer(self.sac, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False)
        self.rfp_inplanes = rfp_inplanes
        if self.rfp_inplanes:
            self.rfp_conv = build_conv_layer(None, self.rfp_inplanes, (planes * self.expansion), 1, stride=1, bias=True)
            self.init_weights()

    def init_weights(self):
        # Zero-init the RFP conv so fusion starts as an identity contribution.
        if self.rfp_inplanes:
            constant_init(self.rfp_conv, 0)

    def rfp_forward(self, x, rfp_feat):
        """Forward pass that additionally fuses the RFP feature before the final ReLU."""
        def _inner_forward(x):
            # Standard bottleneck: 1x1 -> 3x3 -> 1x1 with optional plugins,
            # residual added before activation.
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if (self.downsample is not None):
                identity = self.downsample(x)
            out += identity
            return out
        if (self.with_cp and x.requires_grad):
            # Gradient checkpointing to trade compute for memory.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        if self.rfp_inplanes:
            rfp_feat = self.rfp_conv(rfp_feat)
            out = (out + rfp_feat)
        out = self.relu(out)
        return out
def test_changestats_comparison():
    """Sanity-check `is_same_changestat` on plain functions and partials.

    Covers: identical functions, distinct functions, partials equal/unequal
    in their bound argument, and partials over different base functions.
    """
    print('testing changestats comparison...')
    assert is_same_changestat(changeContagion, changeContagion)
    assert (not is_same_changestat(changeContagion, changeLogContagion))
    assert is_same_changestat(partial(changeoOc, 'age'), partial(changeoOc, 'age'))
    assert (not is_same_changestat(partial(changeoOc, 'age'), partial(changeoOc, 'height')))
    assert is_same_changestat(partial(changeGWActivity, log(2.0)), partial(changeGWActivity, log(2.0)))
    assert (not is_same_changestat(partial(changeGWActivity, log(2.0)), partial(changeGWActivity, 2.0)))
    assert is_same_changestat(partial(changeBipartiteActivity, MODE_A), partial(changeBipartiteActivity, MODE_A))
    assert (not is_same_changestat(partial(changeBipartiteActivity, MODE_A), partial(changeBipartiteActivity, MODE_B)))
    assert (not is_same_changestat(partial(changeBipartiteActivity, MODE_A), partial(changeBipartiteDensity, MODE_A)))
    print('OK')
def fidelity(teacher, student, X):
    """Agreement between the student's predictions and the teacher's outputs on X.

    Scored with the module's `accuracy` function, treating the teacher's
    outputs as the target labels.
    """
    y_target = teacher(X)
    y_pred = student.predict(X)
    return accuracy(y_target, y_pred)
def update_config(config, args):
    """Overlay CLI arguments onto a (yacs-style) config, then freeze it.

    Loads the file referenced by ``args.cfg`` first, then applies each
    provided CLI option.  NOTE(review): options are gated on truthiness, so
    falsy values (0, empty string, False) are silently ignored — e.g.
    ``--batch-size 0`` would not take effect.
    """
    _update_config_from_file(config, args.cfg)
    config.defrost()
    if args.opts:
        # Free-form KEY VALUE overrides take precedence over the file.
        config.merge_from_list(args.opts)
    if args.batch_size:
        config.DATA.BATCH_SIZE = args.batch_size
    if args.data_path:
        config.DATA.DATA_PATH = args.data_path
    if args.zip:
        config.DATA.ZIP_MODE = True
    if args.cache_mode:
        config.DATA.CACHE_MODE = args.cache_mode
    if args.pretrained:
        config.MODEL.PRETRAINED = args.pretrained
    if args.resume:
        config.MODEL.RESUME = args.resume
    if args.accumulation_steps:
        config.TRAIN.ACCUMULATION_STEPS = args.accumulation_steps
    if args.use_checkpoint:
        config.TRAIN.USE_CHECKPOINT = True
    if args.amp_opt_level:
        config.AMP_OPT_LEVEL = args.amp_opt_level
    if args.output:
        config.OUTPUT = args.output
    if args.tag:
        config.TAG = args.tag
    if args.eval:
        config.EVAL_MODE = True
    if args.throughput:
        config.THROUGHPUT_MODE = True
    config.LOCAL_RANK = args.local_rank
    # Final output dir: <output>/<model name>/<tag>.
    config.OUTPUT = os.path.join(config.OUTPUT, config.MODEL.NAME, config.TAG)
    config.freeze()
def _eval(ind):
    """Evaluate an individual on the normalised Rastrigin task (2 features).

    Fitness values below the 0.9 quality threshold are zeroed out.
    """
    (fitness, features) = illumination_rastrigin_normalised(ind, nb_features=2)
    if fitness[0] < 0.9:
        fitness[0] = 0.0
    return (fitness, features)
class StateManager():
    def __init__(self, entity_manager: EntityManager, task_config: TaskConfig, entity_function_path=None):
        """Set up the state manager.

        ``entity_function_path``, when given, is a path to a Python file that
        is loaded as a module and consulted first when resolving entity/task
        functions by name.
        """
        self.task_config = task_config
        self.entity_manager = entity_manager
        # NOTE(review): 'addtional_ef' is a typo for 'additional_ef', but the
        # attribute is referenced throughout the class — renaming needs a
        # coordinated change.
        self.addtional_ef = None
        if entity_function_path:
            spec = importlib.util.spec_from_file_location('additional_entity_function', entity_function_path)
            self.addtional_ef = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(self.addtional_ef)
        self.reset()
    def reset(self):
        """Clear per-conversation state: per-task turn counters and the max-turn flag."""
        self.task_turns = defaultdict(int)
        self.exceed_max_turn_flag = False
    def update_and_get_states(self, ctx):
        """Refresh ``ctx.cur_states`` from the tree manager and run finish hooks.

        Mutates ``ctx.cur_states`` in place: current task/entity, confirmation
        flags, task stack, previous-task bookkeeping, and the agent action
        type.  Triggers the task-finish function when the previous task just
        completed or the turn budget was exceeded.
        """
        states = ctx.cur_states
        tree_manager = ctx.tree_manager
        states.cur_task = tree_manager.cur_task
        states.new_task = None
        states.cur_entity_name = tree_manager.cur_entity
        log.info(f'cur_entity_name = {states.cur_entity_name}')
        states.cur_entity_types = self.entity_manager.get_entity_types(states.cur_entity_name)
        # Keep a pending confirm_continue only while there is no current task.
        if (not ((not states.cur_task) and states.confirm_continue)):
            states.confirm_continue = tree_manager.finish
        if (states.cur_task and states.cur_entity_name and (states.cur_task in self.task_config) and (states.cur_entity_name in self.task_config[states.cur_task].entities)):
            task_entity_config = self.task_config[states.cur_task].entities[states.cur_entity_name]
            states.need_confirm_entity = task_entity_config.confirm
            states.need_confirm_retrieved_entity = task_entity_config.confirm_retrieved
        else:
            # Fall back to the TaskEntity defaults when no per-task config exists.
            states.need_confirm_entity = TaskEntity._OPTIONAL_ATTRIBUTES['confirm']
            states.need_confirm_retrieved_entity = TaskEntity._OPTIONAL_ATTRIBUTES['confirm_retrieved']
        states.entity_methods = self.entity_manager.get_extraction_methods(states.cur_entity_name)
        states.task_stack = tree_manager.task_stack
        states.exceed_max_turn = self.exceed_max_turn_flag
        states.prev_tasks = tree_manager.prev_tasks
        states.prev_tasks_success = tree_manager.prev_tasks_success
        states.prev_task_finished = tree_manager.prev_task_finished
        if (states.prev_task_finished or (states.exceed_max_turn and states.prev_tasks)):
            self._task_finish_function(ctx)
            self.exceed_max_turn_flag = False
        # Spelling mode only persists while the same entity is in focus.
        if (states.spell_entity != states.cur_entity_name):
            states.spell_entity = None
        states.agent_action_type = (tree_manager.cur_node.tag if tree_manager.cur_node else None)
def _get_entity_or_task_function(self, func_name):
func = None
url = None
if func_name:
try:
func = None
if self.addtional_ef:
func = getattr(self.addtional_ef, func_name, None)
if (not func):
func = getattr(ef, func_name)
except AttributeError:
url = func_name
return (func, url)
def _execute_entity_or_task_function(ctx, func, url):
try:
if url:
res = entity_api_call(url, ctx.collected_entities, **ctx.cur_states.to_dictionary())
else:
res = func(ctx.collected_entities, **ctx.cur_states.to_dictionary())
except TimeoutError as e:
log.warning(f'function call timeout: {e}')
res = resp(False, 'Service time out')
except Exception as e:
log.warning(f'function call exception: {e}')
res = resp(False, "We couldn't handle your request")
return res
    def _task_finish_function(self, ctx):
        """Run the configured finish-function of the most recently completed task.

        Only fires when the task succeeded; a failing finish-function flips
        the success flag, while a successful one stores its message for the
        response.
        """
        if ctx.cur_states.prev_tasks_success[0]:
            func_name = self.task_config[ctx.cur_states.prev_tasks[0]].task_finish_function
            if func_name:
                (func, url) = self._get_entity_or_task_function(func_name)
                res = self._execute_entity_or_task_function(ctx, func, url)
                if (not res['success']):
                    ctx.cur_states.prev_tasks_success[0] = False
                else:
                    ctx.cur_states.prev_task_finish_func_response = res['msg']
def receive_info_from_policy(self, ctx):
    """Consume the policy's output for this turn.

    Resets turn counters for finished tasks, bumps the current task's
    counter, routes a new task or an entity update to its handler, and
    force-finishes tasks that blew their turn budget.
    """
    states = ctx.cur_states
    tree_manager = ctx.tree_manager
    # Any just-finished tasks get their turn counters zeroed.
    if tree_manager.prev_tasks:
        for finished in tree_manager.prev_tasks:
            self.task_turns[finished] = 0
        tree_manager.reset_prev_task()
    if states.cur_task:
        self.task_turns[states.cur_task] += 1
    # A brand-new task takes precedence over an entity update.
    if states.new_task:
        self.new_task(states.new_task, tree_manager)
    elif ctx.update_entity['entity']:
        self.leaf_node_handler(ctx)
    # Abort tasks that exceeded their configured turn budget.
    if states.cur_task and self.task_turns[states.cur_task] > self.task_config[states.cur_task].max_turns:
        self.force_cur_task_finish(tree_manager)
        self.exceed_max_turn_flag = True
def new_task(self, task_name, tree_manager):
    """Register ``task_name`` with the tree manager and advance the tree."""
    tree_manager.set_task(task_name)
    tree_manager.traverse()
def leaf_node_handler(self, ctx):
    # Handle a newly extracted entity value for the current leaf node of the
    # task tree: record the value, then either react to a rejection sentinel,
    # store it directly (simple node), or run the node's function and update
    # the tree according to the node type (verify/inform/update/api/...).
    states = ctx.cur_states
    tree_manager = ctx.tree_manager
    # Invariants: there must be an active task/node/entity, and the update
    # must target the entity the tree is currently waiting on.
    assert tree_manager.cur_task
    assert tree_manager.cur_node
    assert tree_manager.cur_entity
    assert states.agent_action_type
    assert (tree_manager.cur_entity == ctx.update_entity['entity'])
    func_name = self.task_config[states.cur_task].entities[states.cur_entity_name]['function']
    (func, url) = self._get_entity_or_task_function(func_name)
    new_entity_name = ctx.update_entity['entity']
    new_entity_value = extract_value_from_entity(ctx.update_entity['value'])
    ctx.collected_entities[new_entity_name] = new_entity_value
    log.info(f'collected_entities = {ctx.collected_entities}')
    log.info(f'func_name = {func_name}')
    log.info(f'leaf_node_handler() = {ctx.update_entity}')
    if (ctx.update_entity['value'] == 'WRONG INFO!'):
        # Sentinel value: the user rejected the extracted info; mark the
        # entity wrong and move on without calling any function.
        states.last_wrong_entity = tree_manager.cur_entity
        states.last_verified_entity = None
        states.last_verified_task = None
        tree_manager.update_entity(ctx.update_entity['value'], status=False)
        tree_manager.traverse()
    elif (states.agent_action_type == tree_manager.task_tree.simple):
        # Simple nodes store the display value directly; no function call.
        states.simple_resp = extract_display_value_from_entity(ctx.update_entity['value'])
        tree_manager.update_entity(states.simple_resp, status=True)
        tree_manager.traverse()
    else:
        # All remaining node types run the configured function first.
        res = self._execute_entity_or_task_function(ctx, func, url)
        states.last_wrong_entity = None
        if (states.agent_action_type == tree_manager.task_tree.verify):
            verify_status = res['success']
            states.verify_resp = res['msg']
            if verify_status:
                tree_manager.update_entity(ctx.update_entity['value'], status=True)
                states.last_verified_entity = tree_manager.cur_entity
                states.last_verified_task = tree_manager.cur_task
                states.last_wrong_entity = None
                tree_manager.traverse()
            else:
                states.last_wrong_entity = tree_manager.cur_entity
                states.last_verified_entity = None
                states.last_verified_task = None
                # Offer spelling only once per entity, only if the entity
                # supports it, and only for non-text (voice) bots.
                if ((not states.spell_entity) and ('spelling' in states.entity_methods) and (not ctx.bot_config.text_bot)):
                    states.spell_entity = states.cur_entity_name
                else:
                    tree_manager.update_entity(ctx.update_entity['value'], status=False)
                    tree_manager.traverse()
        # The remaining node types share one pattern: stash the message on the
        # matching *_resp state slot, update the entity with the function's
        # success flag, and advance the tree.
        elif (states.agent_action_type == tree_manager.task_tree.inform):
            states.inform_resp = res['msg']
            tree_manager.update_entity(states.inform_resp, status=res['success'])
            tree_manager.traverse()
        elif (states.agent_action_type == tree_manager.task_tree.update):
            states.update_resp = res['msg']
            tree_manager.update_entity(states.update_resp, status=res['success'])
            tree_manager.traverse()
        elif (states.agent_action_type == tree_manager.task_tree.api):
            states.api_resp = res['msg']
            tree_manager.update_entity(states.api_resp, status=res['success'])
            tree_manager.traverse()
        elif (states.agent_action_type == tree_manager.task_tree.query):
            states.query_resp = res['msg']
            tree_manager.update_entity(states.query_resp, status=res['success'])
            tree_manager.traverse()
        elif (states.agent_action_type == tree_manager.task_tree.insert):
            states.insert_resp = res['msg']
            tree_manager.update_entity(states.insert_resp, status=res['success'])
            tree_manager.traverse()
        elif (states.agent_action_type == tree_manager.task_tree.delete):
            states.delete_resp = res['msg']
            tree_manager.update_entity(states.delete_resp, status=res['success'])
            tree_manager.traverse()
def force_cur_task_finish(self, tree_manager):
    """Abort the in-progress task unconditionally (e.g. turn budget hit)."""
    tree_manager.force_finish_task()
def get_learning_rate(optim, name=None):
    """Return the learning rate of one of the optimizer's param groups.

    Args:
        optim: optimizer exposing ``param_groups`` (a list of dicts).
        name: optional ``'name'`` entry of the param group to look up.
            When omitted, the first group's rate is returned.

    Returns:
        The learning rate, or ``None`` when ``name`` matches no group.

    Fix: previously any non-None ``name`` silently fell off the end of the
    function and returned None, making the parameter dead; the matching
    group is now actually searched for.
    """
    if name is None:
        return optim.param_groups[0]['lr']
    for group in optim.param_groups:
        if group.get('name') == name:
            return group['lr']
    return None  # preserved fallback: unknown names still yield None
def test_suppress_warnings_forwarding():
    """Each forwarding rule must forward exactly two warnings to the outer recorder."""
    def warn_other_module():
        # Raise the warning from inside numpy's own module so the
        # 'module'-based rule can tell the two origins apart.
        def warn(arr):
            warnings.warn('Some warning', stacklevel=2)
            return arr
        np.apply_along_axis(warn, 0, [0])

    # 'always': every emission is forwarded.
    with suppress_warnings() as outer:
        outer.record()
        with suppress_warnings('always'):
            for _ in range(2):
                warnings.warn('Some warning')
        assert_equal(len(outer.log), 2)

    # 'location': one forward per distinct source line.
    with suppress_warnings() as outer:
        outer.record()
        with suppress_warnings('location'):
            for _ in range(2):
                warnings.warn('Some warning')
            warnings.warn('Some warning')
        assert_equal(len(outer.log), 2)

    # 'module': one forward per emitting module.
    with suppress_warnings() as outer:
        outer.record()
        with suppress_warnings('module'):
            for _ in range(2):
                warnings.warn('Some warning')
            warnings.warn('Some warning')
            warn_other_module()
        assert_equal(len(outer.log), 2)

    # 'once': one forward per distinct message.
    with suppress_warnings() as outer:
        outer.record()
        with suppress_warnings('once'):
            for _ in range(2):
                warnings.warn('Some warning')
            warnings.warn('Some other warning')
            warn_other_module()
        assert_equal(len(outer.log), 2)
def load_subtensor(ndata, seeds, labels, input_nodes, device):
    """Slice every node-data field (except 'labels') down to ``input_nodes``,
    move the slices to ``device``, and return them with the seed labels."""
    def fetch(key):
        return th.IntTensor(np.array(ndata[key][input_nodes]))
    input_text = {key: fetch(key).to(device) for key in ndata if key != 'labels'}
    return (input_text, labels[seeds].to(device))
@_function_dispatch(_all_dispatcher)
def all(a, axis=None, out=None, keepdims=np._NoValue):
    """Test whether all array elements along a given axis evaluate to True.

    Thin dispatcher-aware wrapper over ``_wrapreduction`` with
    ``np.logical_and``.  Bug fix: the dispatch decorator was written as a
    bare call (its returned decorator was discarded), so ``all`` was never
    registered with the dispatch machinery; restored the ``@`` application.
    """
    return _wrapreduction(a, np.logical_and, 'all', axis, None, out, keepdims=keepdims)
def query_virtuoso(q):
    """Run SPARQL query ``q`` against the configured Virtuoso named graph."""
    store = sparqlstore.SPARQLUpdateStore(virtuoso_address)
    graph = rdflib.ConjunctiveGraph(store)
    # The same endpoint serves both query and update operations.
    graph.open((virtuoso_address, virtuoso_address))
    named_graph = graph.get_context(rdflib.URIRef(virtuoso_graph_uri))
    return named_graph.query(q)
def arg_parse():
    """Build and parse the MMSB command-line arguments."""
    cli = argparse.ArgumentParser(description='MMSB arguments.')
    cli.add_argument('--dataset', dest='dataset', help='Input dataset.')
    cli.add_argument('--K', dest='K', type=int, help='Number of blocks.')
    cli.add_argument('--samples-per-G', dest='samples', type=int, help='Number of samples for every graph.')
    cli.set_defaults(dataset='community', K=4, samples=1)
    return cli.parse_args()
def replace_ImageToTensor(pipelines):
    """Return a deep-copied pipeline list where every 'ImageToTensor' step is
    swapped for 'DefaultFormatBundle', recursing into MultiScaleFlipAug."""
    pipelines = copy.deepcopy(pipelines)
    for idx, step in enumerate(pipelines):
        step_type = step['type']
        if step_type == 'MultiScaleFlipAug':
            assert 'transforms' in step
            step['transforms'] = replace_ImageToTensor(step['transforms'])
        elif step_type == 'ImageToTensor':
            warnings.warn('"ImageToTensor" pipeline is replaced by "DefaultFormatBundle" for batch inference. It is recommended to manually replace it in the test data pipeline in your config file.', UserWarning)
            pipelines[idx] = {'type': 'DefaultFormatBundle'}
    return pipelines
def test_record_fields_int32():
    """A one-field int32 RecordType must round-trip through the datashape parser."""
    record = RecordType([NumpyType('int32')], ['one'])
    parsed = ak.types.from_datashape(str(record), highlevel=False)
    assert str(parsed) == str(record)
# NOTE(review): this bare tuple looks like the argument list of a stripped
# Dash decorator (likely `@app.callback(...)`) — confirm against the original.
(Output('clustering-summary', 'children'), [Input('cluster-attribute-table', 'data')])
def clustering_summary(data):
    # Dash callback body: summarize the log-clustering results for display.
    # `data` holds the attribute table's rows; an empty table yields an
    # empty Div instead of bogus zero counts.
    if (len(data) == 0):
        return html.Div()
    result_table = log_clustering.result_table
    total_loglines = result_table.shape[0]  # one row per logline
    total_num_cluster = len(result_table['cluster_id'].unique())
    return html.Div([html.P('Total Number Of Loglines: {}'.format(total_loglines)), html.P('Total Number Of Log Clusters: {}'.format(total_num_cluster))])
def add_visualizer_callback(callbacks: list[Callback], config: (DictConfig | ListConfig)) -> None:
    """Configure visualization settings and register the visualizer callbacks.

    Handles the deprecated ``log_images_to`` options for DictConfig-style
    configs, fills in default ``visualization`` settings where missing, then
    appends an ImageVisualizerCallback and a MetricVisualizerCallback when
    any visualization output is enabled.

    Bug fix: the default visualization dict used the key ``show_image``
    while every reader below accesses ``show_images``; the default is now
    spelled ``show_images`` so the later access cannot fail.
    """
    assert isinstance(config, (DictConfig, Namespace))
    if isinstance(config, DictConfig):
        if (('log_images_to' in config.project.keys()) and (len(config.project.log_images_to) > 0)) or (('log_images_to' in config.logging.keys()) and (len(config.logging.log_images_to) > 0)):
            warnings.warn('log_images_to parameter is deprecated and will be removed in version 0.4.0 Please use the visualization.log_images and visualization.save_images parameters instead.')
            if 'visualization' not in config.keys():
                # show_images (not show_image): must match the reads below.
                config['visualization'] = dict(log_images=False, save_images=False, show_images=False, image_save_path=None)
            if 'local' in config.project.log_images_to:
                config.visualization['save_images'] = True
            if ('local' not in config.project.log_images_to) or (len(config.project.log_images_to) > 1):
                config.visualization['log_images'] = True
        config.visualization.task = config.dataset.task
        config.visualization.inputs_are_normalized = not (config.model.normalization_method == 'none')
    else:
        config.visualization.task = config.data.init_args.task
        config.visualization.inputs_are_normalized = not (config.post_processing.normalization_method == 'none')
    if config.visualization.log_images or config.visualization.save_images or config.visualization.show_images:
        image_save_path = config.visualization.image_save_path or (config.project.path + '/images')
        for callback in (ImageVisualizerCallback, MetricVisualizerCallback):
            callbacks.append(callback(task=config.visualization.task, mode=config.visualization.mode, image_save_path=image_save_path, inputs_are_normalized=config.visualization.inputs_are_normalized, show_images=config.visualization.show_images, log_images=config.visualization.log_images, save_images=config.visualization.save_images))
class Helper(HelperBase):
    """Keras weight-list helper: incremental averaging plus npz (de)serialization."""

    def __init__(self):
        self.name = 'kerashelper'
        super().__init__()

    def increment_average(self, model, model_next, num_examples, total_examples):
        """Layer-wise weighted running average of two weight lists."""
        fraction = num_examples / total_examples
        return [
            (fraction * model_next[i]) + ((1 - fraction) * model[i])
            for i in range(len(model))
        ]

    def increment_average_add(self, model, model_next, num_examples, total_examples):
        """Additive form of the weighted average, via numpy broadcasting."""
        delta = np.array(model_next) - np.array(model)
        return np.add(model, (num_examples * delta) / total_examples)

    def save(self, weights, path=None):
        """Write the weight list to a compressed .npz file; return the path used."""
        if not path:
            path = self.get_tmp_path()
        np.savez_compressed(path, **{str(i): w for i, w in enumerate(weights)})
        return path

    def load(self, fh):
        """Read a weight list back from an .npz file handle/path, in index order."""
        archive = np.load(fh)
        return [archive[str(i)] for i in range(len(archive.files))]
def test_observers_clear(short_test_case):
    """clear_observers() must drop every registered observer."""
    tracer = ExecutionTracer()
    tracer.current_thread_identifier = threading.current_thread().ident
    executor = TestCaseExecutor(tracer)
    watcher = MagicMock()
    executor.add_observer(watcher)
    assert executor._observers == [watcher]
    executor.clear_observers()
    assert executor._observers == []
def _load_checkpoint(args, model):
if (args.pretrained_model == 'swin-b-1k'):
path = os.path.join(ROOT_DIR, '../checkpoints/swin_base_patch4_window7_224.pth')
elif (args.pretrained_model == 'swin-b-22k'):
path = os.path.join(ROOT_DIR, '../checkpoints/swin_base_patch4_window7_224_22k.pth')
else:
raise NotImplementedError
checkpoint = torch.load(path, map_location='cpu')
if ('module' in checkpoint):
checkpoint = checkpoint['module']
if ('model' in checkpoint):
model.load_state_dict(checkpoint['model'])
elif ('state_dict' in checkpoint):
model.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint)
return model |
def _transform_month(result_str: str, month_token: str, month: int) -> str:
result = deepcopy(result_str)
if (month_token != ''):
if (month == (- 1)):
if (len(month_token) == 3):
result = result.replace(month_token, '---')
elif (len(month_token) == 5):
result = result.replace(month_token, '-----')
elif (len(month_token) == 2):
result = result.replace(month_token, '--')
elif (len(month_token) == 1):
result = result.replace(month_token, '-')
elif (len(month_token) == 2):
if (month < 10):
result = result.replace(month_token, f'{0}{month}', 1)
else:
result = result.replace(month_token, str(month), 1)
elif (len(month_token) == 3):
result = result.replace(month_token, TEXT_MONTHS[(month - 1)][0], 1)
elif (len(month_token) == 5):
result = result.replace(month_token, TEXT_MONTHS[(month - 1)][1], 1)
else:
result = result.replace(month_token, str(month), 1)
return result |
def get_probabilities(lps, references, mapping):
    """Convert token logprobs into a distribution over the reference options.

    For each reference, its mapped prefix is looked up in ``lps`` both with
    and without a leading space (taking the max).  References with no hit
    share the leftover probability mass, capped at the smallest observed
    probability.
    """
    min_prob = np.exp(np.min(list(lps.values())))
    remaining_prob = max(0, 1 - sum([np.exp(v) for v in lps.values()]))
    dist = []
    misses = []
    for ref in references:
        prefix = mapping[ref]
        hits = [lps[k] for k in (f' {prefix}', prefix) if k in lps]
        misses.append(len(hits) == 0)
        dist.append(np.max(hits) if len(hits) else None)
    n_misses = sum(misses)
    if n_misses > 0:
        fill = np.log(min(min_prob, remaining_prob / n_misses))
        dist = [fill if d is None else d for d in dist]
    probs_unnorm = np.array([np.exp(v) for v in dist])
    return {
        'logprobs': dist,
        'probs_unnorm': probs_unnorm,
        'probs_norm': probs_unnorm / np.sum(probs_unnorm),
        'misses': misses,
    }
def glibc_version_string():
    """Return the glibc version string, trying confstr first, then ctypes."""
    version = glibc_version_string_confstr()
    if version:
        return version
    return glibc_version_string_ctypes()
def test_columnar_convert_selected_columns_missing():
    """A frame lacking the selected columns must raise a descriptive ValueError."""
    expected = "some_name\\['x'\\]: expected 'before', 'same' columns, found:"
    converter = ColumnarConverter(
        name='some_name',
        default_type='foo',
        type_column=None,
        column_defaults={},
        selected_columns={'before': 'after', 'same': 'same'},
        transform_columns={},
    )
    with pytest.raises(ValueError, match=expected):
        converter.convert({'x': _EMPTY_DF})
def test_patchset_get_patch_by_values(patchset):
    # Lookup by tuple key and by list key should both return a truthy patch.
    # NOTE(review): the first two assertions are byte-identical duplicates —
    # possibly a collapsed `patchset[2100, 800]` vs `patchset[(2100, 800)]`
    # syntax pair from the original source; confirm against upstream.
    assert patchset[(2100, 800)]
    assert patchset[(2100, 800)]
    assert patchset[[2100, 800]]
def get_keras_tpc() -> tp.TargetPlatformCapabilities:
    """Build the Keras TPC for the IMX500 power-of-two quantization model."""
    return generate_keras_tpc(name='imx500_pot_tpc_keras_tpc', tp_model=get_tp_model())
def relu_flops_counter_hook(module, input, output):
    """Forward hook: account one op per output element for a ReLU layer."""
    module.__flops__ += int(output.numel())
class Container():
    """Renderable report container.

    Wraps a Jinja template plus its Context and can save to disk, display
    inline in a notebook, or open itself in a browser tab.
    """

    def __init__(self, to_render: Dict[(str, Any)], visual_type: str, cfg: Config) -> None:
        self.context = Context(**to_render)
        # Cache-busting token embedded in the rendered page.
        self.context.rnd = random.randint(0, 9999)
        if visual_type in GRID_VISUAL_TYPES:
            self.template_base = ENV_LOADER.get_template('grid_base.html')
        elif visual_type in TAB_VISUAL_TYPES:
            # Insight highlighting is configurable except for missing_impact.
            self.context.highlight = False if visual_type == 'missing_impact' else cfg.insight.enable
            if to_render.get('tabledata'):
                self.context.meta.insert(0, 'Stats')
            if to_render.get('value_table'):
                self.context.meta.append('Value Table')
            template_name = 'tab_base_corr.html' if visual_type == 'correlation_impact' else 'tab_base.html'
            self.template_base = ENV_LOADER.get_template(template_name)
        else:
            raise TypeError(f'Unsupported Visual Type: {visual_type}.')

    def save(self, filename: str) -> None:
        """Render the report and write it to ``filename``."""
        with open(filename, 'w', encoding='utf-8') as out:
            out.write(self.template_base.render(context=self.context))

    def _repr_html_(self) -> str:
        """Jupyter rich-display hook: the rendered HTML."""
        return self.template_base.render(context=self.context)

    def show(self) -> None:
        """Display inline when running in a notebook; warn on stderr otherwise."""
        if not is_notebook():
            print("The plot will not show in a notebook environment, please try 'show_browser' if you want to open it in browser", file=sys.stderr)
        try:
            from IPython.display import HTML, display
            display(HTML(self._repr_html_()))
        except ImportError:
            # IPython is optional; silently skip outside notebook setups.
            pass

    def show_browser(self) -> None:
        """Render to a temp .html file and open it in the default browser."""
        with NamedTemporaryFile(suffix='.html', delete=False) as tmpf:
            pass
        with open(tmpf.name, 'w', encoding='utf-8') as out:
            out.write(self.template_base.render(context=self.context))
        webbrowser.open_new_tab(f'file://{tmpf.name}')
def test__rollback_changes_end(default_test_case):
    """Rolling back trailing statements restores equality with the clone."""
    for value in (5, 10, 15):
        default_test_case.add_statement(stmt.IntPrimitiveStatement(default_test_case, value))
    snapshot = default_test_case.clone()
    default_test_case.add_statement(stmt.FloatPrimitiveStatement(default_test_case, 7.5), 3)
    assert snapshot != default_test_case
    tf.TestFactory._rollback_changes(default_test_case, snapshot.size(), 3)
    assert snapshot == default_test_case
class AmazonReviewPolarity(XiangZhangDataset):
    """Amazon review polarity dataset (Xiang Zhang's CSV release)."""
    # Directory name of this dataset inside the downloaded archive.
    dirname = 'amazon_review_polarity_csv'
    # Column layout of the raw CSV files.
    columns = ['rating', 'subject', 'body']
def get_dataset(eval_dataset, data_path, split, audio_embs):
    """Instantiate the evaluation dataset named by ``eval_dataset``.

    Raises:
        ValueError: for an unrecognized dataset name.  (Previously the code
            printed 'error' and then crashed with UnboundLocalError on the
            ``return dataset`` line.)
    """
    if eval_dataset == 'mtat':
        dataset = MTAT_Dataset(data_path, split, audio_embs)
    elif eval_dataset == 'gtzan':
        dataset = GTZAN_Dataset(data_path, split, audio_embs)
    elif eval_dataset == 'fma':
        dataset = FMA_Dataset(data_path, split, audio_embs)
    elif eval_dataset == 'kvt':
        dataset = KVT_Dataset(data_path, split, audio_embs)
    elif eval_dataset == 'openmic':
        dataset = OPENMIC_Dataset(data_path, split, audio_embs)
    elif eval_dataset == 'emotify':
        dataset = EMOTIFY_Dataset(data_path, split, audio_embs)
    elif 'mtg' in eval_dataset:
        # All MTG-Jamendo variants share one dataset class.
        dataset = MTG_Dataset(data_path, split, audio_embs, eval_dataset)
    else:
        raise ValueError(f'Unknown eval_dataset: {eval_dataset!r}')
    return dataset
class ExFileObject(object):
    """Read-only file-like object for one member of a tar archive.

    Wraps a ``_FileInFile`` window over the archive's underlying file object
    and layers a small read-ahead buffer on top for ``readline`` support.
    """
    # Chunk size used when scanning for newlines in readline().
    blocksize = 1024

    def __init__(self, tarfile, tarinfo):
        self.fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data, tarinfo.size, tarinfo.sparse)
        self.name = tarinfo.name
        self.mode = 'r'
        self.closed = False
        self.size = tarinfo.size
        # Logical read position, tracked independently of self.fileobj.
        self.position = 0
        # Bytes read ahead of `position` while looking for line breaks.
        self.buffer = b''

    def readable(self):
        return True

    def writable(self):
        return False

    def seekable(self):
        return self.fileobj.seekable()

    def read(self, size=None):
        # Read at most `size` bytes (everything remaining if size is None),
        # serving from the readline buffer before touching the file object.
        if self.closed:
            raise ValueError('I/O operation on closed file')
        buf = b''
        if self.buffer:
            if (size is None):
                buf = self.buffer
                self.buffer = b''
            else:
                buf = self.buffer[:size]
                self.buffer = self.buffer[size:]
        if (size is None):
            buf += self.fileobj.read()
        else:
            buf += self.fileobj.read((size - len(buf)))
        self.position += len(buf)
        return buf
    # read1() may return fewer bytes than requested; plain read() satisfies that.
    read1 = read

    def readline(self, size=(- 1)):
        # Read one line (keeping any overshoot in self.buffer); `size` caps
        # the number of bytes returned when it is not -1.
        if self.closed:
            raise ValueError('I/O operation on closed file')
        pos = (self.buffer.find(b'\n') + 1)
        if (pos == 0):
            # No newline buffered yet: pull chunks until one appears or EOF.
            while True:
                buf = self.fileobj.read(self.blocksize)
                self.buffer += buf
                if ((not buf) or (b'\n' in buf)):
                    pos = (self.buffer.find(b'\n') + 1)
                    if (pos == 0):
                        # EOF with no newline: hand back everything buffered.
                        pos = len(self.buffer)
                    break
        if (size != (- 1)):
            pos = min(size, pos)
        buf = self.buffer[:pos]
        self.buffer = self.buffer[pos:]
        self.position += len(buf)
        return buf

    def readlines(self):
        # Collect all remaining lines into a list.
        result = []
        while True:
            line = self.readline()
            if (not line):
                break
            result.append(line)
        return result

    def tell(self):
        # Current logical position within the member.
        if self.closed:
            raise ValueError('I/O operation on closed file')
        return self.position

    def seek(self, pos, whence=os.SEEK_SET):
        # Seek, clamping the target into [0, size]; any buffered line data is
        # discarded because it no longer matches the new position.
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if (whence == os.SEEK_SET):
            self.position = min(max(pos, 0), self.size)
        elif (whence == os.SEEK_CUR):
            if (pos < 0):
                self.position = max((self.position + pos), 0)
            else:
                self.position = min((self.position + pos), self.size)
        elif (whence == os.SEEK_END):
            self.position = max(min((self.size + pos), self.size), 0)
        else:
            raise ValueError('Invalid argument')
        self.buffer = b''
        self.fileobj.seek(self.position)

    def close(self):
        # Only marks this wrapper closed; the archive's file object is shared.
        self.closed = True

    def __iter__(self):
        # Yield lines until EOF.
        while True:
            line = self.readline()
            if (not line):
                break
            (yield line)
def resnet101_StoDepth_lineardecay(pretrained=False, prob_0_L=None, multFlag=True, **kwargs):
    """Build a ResNet-101 with stochastic depth (linearly decayed survival prob).

    Args:
        pretrained: load ImageNet weights from the model zoo when True.
        prob_0_L: [survival prob at first layer, at last layer]; defaults to
            [1, 0.5].  (Was a mutable default argument — now a None sentinel.)
        multFlag: passed through to the stochastic-depth blocks.

    Returns:
        The constructed model.
    """
    if prob_0_L is None:
        prob_0_L = [1, 0.5]
    model = ResNet_StoDepth_lineardecay(StoDepth_Bottleneck, prob_0_L, multFlag, [3, 4, 23, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return model
class _GenericTest(object):
def _test_equal(self, a, b):
self._assert_func(a, b)
def _test_not_equal(self, a, b):
with assert_raises(AssertionError):
self._assert_func(a, b)
def test_array_rank1_eq(self):
a = np.array([1, 2])
b = np.array([1, 2])
self._test_equal(a, b)
def test_array_rank1_noteq(self):
a = np.array([1, 2])
b = np.array([2, 2])
self._test_not_equal(a, b)
def test_array_rank2_eq(self):
a = np.array([[1, 2], [3, 4]])
b = np.array([[1, 2], [3, 4]])
self._test_equal(a, b)
def test_array_diffshape(self):
a = np.array([1, 2])
b = np.array([[1, 2], [1, 2]])
self._test_not_equal(a, b)
def test_objarray(self):
a = np.array([1, 1], dtype=object)
self._test_equal(a, 1)
def test_array_likes(self):
self._test_equal([1, 2, 3], (1, 2, 3)) |
def ConvertSubGraph_PDirNet_PDirNet(InGraph, NIdV, RenumberNodes=False):
    """SWIG wrapper: extract the sub-graph of ``InGraph`` induced by ``NIdV``."""
    result = _snap.ConvertSubGraph_PDirNet_PDirNet(InGraph, NIdV, RenumberNodes)
    return result
def ranking_eval(qrels, run, output_dir, measurements, output_file='eval_bm25_aggregate_overlap.txt'):
    """Evaluate a retrieval run with pytrec_eval.

    Prints per-query measure values and writes aggregated values to
    ``output_dir/output_file``.

    Fixes: the aggregation previously relied on the leaked ``query_measures``
    loop variable — a NameError on an empty result set, and only the *last*
    query's measure names were used.  The measure set is now collected
    explicitly across all queries, and the duplicated print/write formatting
    helpers are merged into one.
    """
    evaluator = pytrec_eval.RelevanceEvaluator(qrels, measurements)
    results = evaluator.evaluate(run)

    def format_line(measure, scope, value):
        # Shared layout for both console and file output.
        return '{:25s}{:8s}{:.4f}'.format(measure, scope, value)

    for query_id, query_measures in sorted(results.items()):
        for measure, value in sorted(query_measures.items()):
            print(format_line(measure, query_id, value))
    # Union of measure names across all queries (not just the last one).
    all_measures = sorted({m for qm in results.values() for m in qm})
    with open(os.path.join(output_dir, output_file), 'w') as output:
        for measure in all_measures:
            aggregate = pytrec_eval.compute_aggregated_measure(measure, [qm[measure] for qm in results.values()])
            output.write(format_line(measure, 'all', aggregate) + '\n')
@_MASK_OUTPUTS.register('mask_deconv_output')
class Mask_deconv_output(nn.Module):
    """Mask head output: 2x deconv upsampling followed by per-class 1x1 logits.

    Bug fix: the registry decorator was written as a bare statement whose
    returned decorator was discarded, so the class never got registered;
    restored the ``@`` application.
    """

    def __init__(self, dim_in):
        super(Mask_deconv_output, self).__init__()
        num_classes = cfg.MODEL.NUM_CLASSES
        self.mask_deconv = nn.ConvTranspose2d(dim_in, dim_in, 2, 2, 0)
        self.mask_fcn_logits = nn.Conv2d(dim_in, num_classes, 1, 1, 0)
        # He init for the deconv (it feeds a ReLU); small-std normal for logits.
        nn.init.kaiming_normal_(self.mask_deconv.weight, mode='fan_out', nonlinearity='relu')
        if self.mask_deconv.bias is not None:
            nn.init.zeros_(self.mask_deconv.bias)
        nn.init.normal_(self.mask_fcn_logits.weight, std=0.001)
        if self.mask_fcn_logits.bias is not None:
            nn.init.constant_(self.mask_fcn_logits.bias, 0)

    def forward(self, x):
        x = F.relu(self.mask_deconv(x))
        return self.mask_fcn_logits(x)
class OmniNet(nn.Module):
    """Multi-task, multi-modal network: a central CNP core fed by image and
    language peripherals, with task-specific decoder heads."""

    def __init__(self, config=None, gpu_id=(- 1), dropout=None):
        super(OmniNet, self).__init__()
        if config is None:
            (cc, pc, d) = self.__defaultconf__()
        else:
            (cc, pc, d) = config
        if dropout is not None:
            # One override applies to both the core and peripheral configs.
            cc['dropout'] = dropout
            pc['dropout'] = dropout
        self.gpu_id = gpu_id
        tasks = {'PENN': pc['penn_output_classes'], 'HMDB': pc['hmdb_output_classes'], 'IMAGE_CAPTION': pc['english_language_output_vocab'], 'VQA': pc['vqa_output_vocab']}
        self.cnp = CNP(tasks, conf=cc, domains=d, gpu_id=gpu_id)
        self.image_input_perph = ImageInputPeripheral(output_dim=cc['input_dim'], dropout=pc['dropout'], freeze_layers=True)
        self.english_language_perph = LanguagePeripheral(vocab_size=pc['english_language_input_vocab'], embed_dim=pc['english_language_input_embed'], output_dim=cc['input_dim'], lang='en', gpu_id=gpu_id, dropout=pc['dropout'])
        self.german_language_perph = LanguagePeripheral(vocab_size=pc['german_language_input_vocab'], embed_dim=pc['german_language_input_embed'], output_dim=cc['input_dim'], lang='de', gpu_id=gpu_id)

    def reset(self, batch_size):
        """Reset the CNP core's state for a new batch."""
        self.cnp.reset(batch_size)

    def encode_videos(self, videos, domain='IMAGE'):
        """Encode video frames through the image peripheral into the core."""
        video_encodings = self.image_input_perph.encode(videos)
        self.cnp.encode(video_encodings, domain=domain)

    def encode_images(self, images, domain='IMAGE'):
        """Encode still images through the image peripheral into the core."""
        image_encodings = self.image_input_perph.encode(images)
        self.cnp.encode(image_encodings, domain=domain)

    def encode_englishtexts(self, texts, domain='ENGLISH'):
        """Embed English sentences (with pad mask) into the core."""
        (sent_encodings, input_pad_mask) = self.english_language_perph.embed_sentences(texts)
        self.cnp.encode(sent_encodings, pad_mask=input_pad_mask, domain=domain)

    def decode_from_targets(self, task, targets, target_pad_mask=None):
        """Teacher-forced decoding against provided targets."""
        return self.cnp.decode(task, targets=targets, pad_mask=target_pad_mask)

    def decode_greedy(self, task, num_steps):
        """Greedy decoding for ``num_steps`` steps with no targets."""
        return self.cnp.decode(task, targets=None, num_steps=num_steps)

    def save(self, checkpoint_dir, iterations):
        """Persist the state dict under ``checkpoint_dir/<iterations>/model.pth``.

        Fix: replaced the os.stat probe + bare ``except`` + os.mkdir dance
        with ``os.makedirs(..., exist_ok=True)``.
        """
        save_dir = os.path.join(checkpoint_dir, str(iterations))
        os.makedirs(save_dir, exist_ok=True)
        torch.save(self.state_dict(), os.path.join(save_dir, 'model.pth'))
        print(('Model saved, iterations: %d' % iterations))

    def restore(self, checkpoint_dir, iterations):
        """Load weights saved by :meth:`save`, skipping shape-mismatched keys."""
        save_dir = os.path.join(checkpoint_dir, str(iterations), 'model.pth')
        pretrained_dict = torch.load(save_dir)
        model_dict = self.state_dict()
        pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if ((k in model_dict) and (model_dict[k].shape == pretrained_dict[k].shape))}
        self.load_state_dict(pretrained_dict, strict=False)
        print(('Restored existing model with iterations: %d' % iterations))

    def restore_file(self, file):
        """Load weights from an explicit checkpoint file (same key filtering)."""
        pretrained_dict = torch.load(file)
        model_dict = self.state_dict()
        pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if ((k in model_dict) and (model_dict[k].shape == pretrained_dict[k].shape))}
        self.load_state_dict(pretrained_dict, strict=False)

    @staticmethod
    def __defaultconf__():
        """Return the default (core-config, peripheral-config, domains) triple.

        Bug fix: this was a zero-argument instance method, so the
        ``self.__defaultconf__()`` call in ``__init__`` raised TypeError;
        it is now a staticmethod.
        """
        cnp_conf = {'input_dim': 512, 'control_dim': 32, 'output_dim': 512, 'spatial_dim': 512, 'temporal_dim': 512, 'temporal_n_layers': 6, 'temporal_n_heads': 8, 'temporal_d_k': 64, 'temporal_d_v': 64, 'temporal_hidden_dim': 2048, 'decoder_dim': 512, 'decoder_n_layers': 6, 'decoder_n_heads': 8, 'decoder_d_k': 64, 'decoder_d_v': 64, 'decoder_hidden_dim': 2048, 'max_seq_len': 500, 'output_embedding_dim': 300, 'dropout': 0.1}
        perph_conf = {'german_language_input_vocab': 25000, 'german_language_input_embed': 300, 'english_language_input_vocab': 25000, 'english_language_input_embed': 300, 'english_language_output_vocab': 25000, 'german_language_output_vocab': 25000, 'dropout': 0.1, 'vqa_output_vocab': 3500, 'hmdb_output_classes': 52, 'penn_output_classes': 48}
        domains = ['ENGLISH', 'GERMAN', 'IMAGE']
        return (cnp_conf, perph_conf, domains)
class BaseModel(ABC):
    """Abstract base for transition-based constituency parsing models.

    Subclasses implement the state primitives (word queues, constituent and
    transition stacks); this class supplies batched parsing drivers on top.
    """

    def __init__(self, transition_scheme, unary_limit, reverse_sentence, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._transition_scheme = transition_scheme
        self._unary_limit = unary_limit
        self._reverse_sentence = reverse_sentence

    def initial_word_queues(self, tagged_word_lists):
        """Build one word queue per sentence from (word, tag) lists (subclass hook)."""

    def initial_transitions(self):
        """Return the initial transition stack (subclass hook)."""

    def initial_constituents(self):
        """Return the initial constituent stack (subclass hook)."""

    def get_word(self, word_node):
        """Extract the word from a word-queue node (subclass hook)."""

    def transform_word_to_constituent(self, state):
        """Turn the next queued word into a constituent (subclass hook)."""

    def dummy_constituent(self, dummy):
        """Wrap a placeholder constituent (subclass hook)."""

    def build_constituents(self, labels, children_lists):
        """Combine child lists under labels into new constituents (subclass hook)."""

    def push_constituents(self, constituent_stacks, constituents):
        """Push constituents onto their stacks (subclass hook)."""

    def get_top_constituent(self, constituents):
        """Peek the top constituent of a stack (subclass hook)."""

    def push_transitions(self, transition_stacks, transitions):
        """Push transitions onto their stacks (subclass hook)."""

    def get_top_transition(self, transitions):
        """Peek the most recent transition (subclass hook)."""

    def get_root_labels(self):
        """Labels accepted at the root of a finished parse."""
        return ('ROOT',)

    def unary_limit(self):
        """Limit used when deciding legality of unary transitions."""
        return self._unary_limit

    def transition_scheme(self):
        """The TransitionScheme this model was built with."""
        return self._transition_scheme

    def has_unary_transitions(self):
        """True iff the scheme includes explicit unary transitions."""
        return (self._transition_scheme is TransitionScheme.TOP_DOWN_UNARY)

    def is_top_down(self):
        """True for any of the top-down transition scheme variants."""
        return ((self._transition_scheme is TransitionScheme.TOP_DOWN) or (self._transition_scheme is TransitionScheme.TOP_DOWN_UNARY) or (self._transition_scheme is TransitionScheme.TOP_DOWN_COMPOUND))

    def reverse_sentence(self):
        """True if sentences are parsed right-to-left."""
        return self._reverse_sentence

    def predict(self, states, is_legal=True):
        """Predict next transitions for a batch of states (LSTM subclass only)."""
        raise NotImplementedError('LSTMModel can predict, but SimpleModel cannot')

    def weighted_choice(self, states):
        """Sample next transitions for a batch of states (LSTM subclass only)."""
        raise NotImplementedError('LSTMModel can weighted_choice, but SimpleModel cannot')

    def predict_gold(self, states, is_legal=True):
        """Return the next transition from each state's gold sequence.

        With is_legal=True each gold transition is checked against the state
        and an inconsistency raises RuntimeError.  The (None, transitions,
        None) shape mirrors predict()'s (scores, transitions, ...) contract.
        """
        transitions = [y.gold_sequence[y.num_transitions()] for y in states]
        if is_legal:
            for (trans, state) in zip(transitions, states):
                if (not trans.is_legal(state, self)):
                    raise RuntimeError('Transition {}:{} was not legal in a transition sequence:\nOriginal tree: {}\nTransitions: {}'.format(state.num_transitions(), trans, state.gold_tree, state.gold_sequence))
        return (None, transitions, None)

    def initial_state_from_preterminals(self, preterminal_lists, gold_trees):
        """Create fresh parser States from per-sentence preterminal lists,
        optionally attaching gold trees afterwards."""
        word_queues = self.initial_word_queues(preterminal_lists)
        transitions = self.initial_transitions()
        constituents = self.initial_constituents()
        # sentence_length = len(wq) - 2: the queue apparently carries two
        # extra sentinel entries — TODO confirm against initial_word_queues.
        states = [State(sentence_length=(len(wq) - 2), num_opens=0, word_queue=wq, gold_tree=None, gold_sequence=None, transitions=transitions, constituents=constituents, word_position=0, score=0.0) for (idx, wq) in enumerate(word_queues)]
        if gold_trees:
            states = [state._replace(gold_tree=gold_tree) for (gold_tree, state) in zip(gold_trees, states)]
        return states

    def initial_state_from_words(self, word_lists):
        """Build initial states from raw (word, tag) pairs (no gold trees)."""
        preterminal_lists = [[Tree(tag, Tree(word)) for (word, tag) in words] for words in word_lists]
        return self.initial_state_from_preterminals(preterminal_lists, gold_trees=None)

    def initial_state_from_gold_trees(self, trees):
        """Build initial states from gold trees, keeping the trees attached."""
        preterminal_lists = [[Tree(pt.label, Tree(pt.children[0].label)) for pt in tree.yield_preterminals()] for tree in trees]
        return self.initial_state_from_preterminals(preterminal_lists, gold_trees=trees)

    def build_batch_from_trees(self, batch_size, data_iterator):
        """Pull up to batch_size gold trees from the iterator as fresh states."""
        state_batch = []
        for _ in range(batch_size):
            gold_tree = next(data_iterator, None)
            if (gold_tree is None):
                break
            state_batch.append(gold_tree)
        if (len(state_batch) > 0):
            state_batch = self.initial_state_from_gold_trees(state_batch)
        return state_batch

    def build_batch_from_trees_with_gold_sequence(self, batch_size, data_iterator):
        """Like build_batch_from_trees, but also attach gold transition sequences."""
        state_batch = self.build_batch_from_trees(batch_size, data_iterator)
        if (len(state_batch) == 0):
            return state_batch
        gold_sequences = transition_sequence.build_treebank([state.gold_tree for state in state_batch], self.transition_scheme(), self.reverse_sentence())
        state_batch = [state._replace(gold_sequence=sequence) for (state, sequence) in zip(state_batch, gold_sequences)]
        return state_batch

    def build_batch_from_tagged_words(self, batch_size, data_iterator):
        """Pull up to batch_size tagged sentences from the iterator as states."""
        state_batch = []
        for _ in range(batch_size):
            sentence = next(data_iterator, None)
            if (sentence is None):
                break
            state_batch.append(sentence)
        if (len(state_batch) > 0):
            state_batch = self.initial_state_from_words(state_batch)
        return state_batch

    def parse_sentences(self, data_iterator, build_batch_fn, batch_size, transition_choice, keep_state=False, keep_constituents=False, keep_scores=False):
        """Batched parsing driver.

        Repeatedly applies transitions chosen by ``transition_choice`` to a
        rolling batch of states, harvesting finished parses and refilling the
        batch from ``data_iterator`` (via the "horizon" iterator) so the
        batch stays full.  Results are unsorted back into input order.
        """
        treebank = []
        treebank_indices = []
        state_batch = build_batch_fn(batch_size, data_iterator)
        batch_indices = list(range(len(state_batch)))
        horizon_iterator = iter([])
        if keep_constituents:
            constituents = defaultdict(list)
        while (len(state_batch) > 0):
            (pred_scores, transitions, scores) = transition_choice(state_batch)
            if (keep_scores and (scores is not None)):
                # Accumulate per-state scores as transitions are applied.
                state_batch = [state._replace(score=(state.score + score)) for (state, score) in zip(state_batch, scores)]
            state_batch = parse_transitions.bulk_apply(self, state_batch, transitions)
            if keep_constituents:
                for (t_idx, transition) in enumerate(transitions):
                    if isinstance(transition, CloseConstituent):
                        # Record each completed constituent per input sentence.
                        constituents[batch_indices[t_idx]].append(state_batch[t_idx].constituents.value.value)
            remove = set()
            for (idx, state) in enumerate(state_batch):
                if state.finished(self):
                    predicted_tree = state.get_tree(self)
                    if self.reverse_sentence():
                        predicted_tree = predicted_tree.reverse()
                    gold_tree = state.gold_tree
                    treebank.append(ParseResult(gold_tree, [ScoredTree(predicted_tree, state.score)], (state if keep_state else None), (constituents[batch_indices[idx]] if keep_constituents else None)))
                    treebank_indices.append(batch_indices[idx])
                    remove.add(idx)
            if (len(remove) > 0):
                state_batch = [state for (idx, state) in enumerate(state_batch) if (idx not in remove)]
                batch_indices = [batch_idx for (idx, batch_idx) in enumerate(batch_indices) if (idx not in remove)]
            # Refill the batch from the horizon iterator / data iterator.
            for _ in range((batch_size - len(state_batch))):
                horizon_state = next(horizon_iterator, None)
                if (not horizon_state):
                    horizon_batch = build_batch_fn(batch_size, data_iterator)
                    if (len(horizon_batch) == 0):
                        break
                    horizon_iterator = iter(horizon_batch)
                    horizon_state = next(horizon_iterator, None)
                state_batch.append(horizon_state)
                # Fresh global index so results can be unsorted at the end.
                batch_indices.append((len(treebank) + len(state_batch)))
        treebank = utils.unsort(treebank, treebank_indices)
        return treebank

    def parse_sentences_no_grad(self, data_iterator, build_batch_fn, batch_size, transition_choice, keep_state=False, keep_constituents=False, keep_scores=False):
        """parse_sentences() wrapped in torch.no_grad() for inference."""
        with torch.no_grad():
            return self.parse_sentences(data_iterator, build_batch_fn, batch_size, transition_choice, keep_state, keep_constituents, keep_scores)

    def analyze_trees(self, trees, batch_size=None, keep_state=True, keep_constituents=True, keep_scores=True):
        """Replay gold transition sequences over ``trees`` and return results."""
        if (batch_size is None):
            batch_size = self.args['eval_batch_size']
        tree_iterator = iter(trees)
        treebank = self.parse_sentences(tree_iterator, self.build_batch_from_trees_with_gold_sequence, batch_size, self.predict_gold, keep_state, keep_constituents, keep_scores=keep_scores)
        return treebank

    def parse_tagged_words(self, words, batch_size):
        """Parse pre-tagged sentences and return just the predicted trees."""
        logger.debug('Processing %d sentences', len(words))
        self.eval()
        sentence_iterator = iter(words)
        treebank = self.parse_sentences_no_grad(sentence_iterator, self.build_batch_from_tagged_words, batch_size, self.predict, keep_state=False, keep_constituents=False)
        results = [t.predictions[0].tree for t in treebank]
        return results
def AnyBut(s):
    """Return a regexp matching any single character NOT in string *s*."""
    # Complement the character set: bracketing the excluded ranges with
    # sentinels that span the whole code-point space inverts the set.
    complement = chars_to_ranges(s)
    complement.insert(0, -maxint)
    complement.append(maxint)
    re = CodeRanges(complement)
    re.str = 'AnyBut(%s)' % repr(s)
    return re
def _linear(raw, input, weight, bias=None):
    """Hook for torch.nn.functional.linear: delegates the computation to *raw*
    while mirroring the op into the Caffe net as an InnerProduct layer.
    """
    x = raw(input, weight, bias)
    fc_name = log.add_layer(name='fc')
    blob_names = log.add_blobs([x], name='fc_blob')
    fc_layer = caffe_net.Layer_param(name=fc_name, type='InnerProduct', bottom=[log.blobs(input)], top=blob_names)
    # Output feature count is the result tensor's second dimension.
    fc_layer.fc_param(x.size()[1], has_bias=(bias is not None))
    if bias is None:
        fc_layer.add_data(weight.cpu().data.numpy())
    else:
        fc_layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
    log.cnet.add_layer(fc_layer)
    return x
def get_size(file_dir):
    """Return the size in bytes of the first file found in *file_dir*.

    Returns 0 (after logging the error) when the directory is empty or
    missing, or when the file cannot be stat'ed.
    """
    try:
        # glob may return an empty list; [0] then raises IndexError.
        file_name = glob.glob(os.path.join(file_dir, '*'))[0]
        return os.stat(file_name).st_size
    except (IndexError, OSError):
        # Narrowed from a bare `except:` so unrelated bugs (e.g. TypeError
        # from a bad argument) are no longer silently swallowed.
        logging.exception(f'error getting file from: {file_dir}')
        return 0
def make_dataset(dir, class_to_idx):
    """Walk *dir* (one subfolder per class) and return (path, class_index)
    pairs for every image file, in sorted order.
    """
    dir = os.path.expanduser(dir)
    samples = []
    for target in sorted(os.listdir(dir)):
        class_dir = os.path.join(dir, target)
        if not os.path.isdir(class_dir):
            # Skip stray top-level files; only class folders count.
            continue
        for root, _, fnames in sorted(os.walk(class_dir)):
            samples.extend(
                (os.path.join(root, fname), class_to_idx[target])
                for fname in sorted(fnames)
                if is_image_file(fname)
            )
    return samples
def binomial_coefficients(n):
    """Return {(k, n-k): C(n, k)} for 0 <= k <= n.

    Each coefficient is stored under both symmetric keys, so
    d[(k, n - k)] == d[(n - k, k)] == C(n, k).
    """
    n = py_scalar_to_element(n)
    d = {(0, n): 1, (n, 0): 1}
    coeff = 1
    for k in range(1, n // 2 + 1):
        # C(n, k) = C(n, k-1) * (n - k + 1) / k, exact in integer arithmetic.
        coeff = coeff * (n - k + 1) // k
        d[(k, n - k)] = d[(n - k, k)] = coeff
    return d
def _isnamedtupleinstance(x):
t = type(x)
b = t.__bases__
if ((len(b) != 1) or (b[0] != tuple)):
return False
f = getattr(t, '_fields', None)
if (not isinstance(f, tuple)):
return False
return all((isinstance(n, str) for n in f)) |
class VideoQACollator(object):
    """Batch collator for video question answering.

    For multiple-choice tasks ('action'/'transition') each question is
    expanded into n_options "<question> <option>" strings; otherwise the raw
    question string is tokenized on its own.
    """

    def __init__(self, tokenizer, max_length=20, task_type='action', n_options=5):
        self.tokenizer = tokenizer      # tokenizer exposing batch_encode_plus
        self.max_length = max_length    # padding/truncation length in tokens
        self.task_type = task_type
        self.n_options = n_options      # answer candidates per question

    def collate_batch(self, batch):
        visual_inputs = default_collate([item['vid'] for item in batch])
        text_examples = flat_list_of_lists([item['examples'] for item in batch])
        n_examples_list = [item['n_examples'] for item in batch]
        if self.task_type in ['action', 'transition']:
            # One "<question> <option>" string per candidate answer.
            text_str_list = flat_list_of_lists(
                [[(ex['q_str'] + ' ') + ex['options_str_list'][i] for i in range(self.n_options)] for ex in text_examples]
            )
        else:
            text_str_list = [ex['q_str'] for ex in text_examples]
        batch_enc = self.tokenizer.batch_encode_plus(text_str_list, max_length=self.max_length, padding='max_length', return_tensors='pt', truncation=True)
        if text_examples[0]['label'] is None:
            # Unlabeled (e.g. test) split.
            labels = None
        else:
            labels = default_collate([int(ex['label']) for ex in text_examples])
        return dict(
            visual_inputs=visual_inputs,
            text_input_ids=batch_enc.input_ids,
            text_input_mask=batch_enc.attention_mask,
            question_ids=[ex['question_id'] for ex in text_examples],
            labels=labels,
            n_examples_list=n_examples_list,
        )
# NOTE(review): the leading `.parametrize(...)` lines look like
# `@pytest.mark.parametrize` decorators whose `@pytest.mark` prefix was lost
# during extraction — confirm against the original test file.
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('num_inputs', [2, 3, 5])
def test_add_n_double_backward(num_inputs, seed, ctx, func_name):
    """Check second-order gradients of F.add_n over 2/3/5 random float32 inputs."""
    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    shape0 = [2, 3, 4]
    inputs = []
    for i in range(num_inputs):
        inputs.append(rng.randn(*shape0).astype(np.float32))
    backward_function_tester(rng, F.add_n, inputs=inputs, func_args=[], func_kwargs={}, atol_accum=0.05, dstep=0.001, ctx=ctx)
class Issue15WarmUpSupportTest(ReBenchTestCase):
    """Regression tests for issue #15: support for warm-up iterations."""

    def setUp(self):
        super(Issue15WarmUpSupportTest, self).setUp()
        self._set_path(__file__)

    def _load_runs(self):
        # Both tests read the same fixture config; build its run list here.
        cnf = Configurator(load_config(self._path + '/issue_15.conf'), DataStore(self.ui), self.ui, data_file=self._tmp_file)
        return list(cnf.get_runs())

    def test_run_id_indicates_warm_up_iterations_required(self):
        runs = self._load_runs()
        self.assertGreaterEqual(len(runs), 1)
        self.assertTrue(runs[0].requires_warmup())
        self.assertGreater(runs[0].warmup_iterations, 0)

    def test_warm_up_results_should_be_ignored(self):
        runs = self._load_runs()
        # Before execution: no data points, 13 warm-up iterations configured.
        self.assertEqual(runs[0].get_number_of_data_points(), 0)
        self.assertEqual(runs[0].warmup_iterations, 13)
        ex = Executor([runs[0]], False, TestDummyUI())
        ex.execute()
        # After execution only the 10 measured (non-warm-up) points remain.
        self.assertEqual(runs[0].get_number_of_data_points(), 10)
def compute_statistics(text_dir, target_dir, output_file=None):
    """Collect per-document quote statistics and aggregate them.

    For each text file in *text_dir*, loads the matching
    `<target_dir><doc_name>.json` annotations (skipping documents without
    one) and accumulates get_file_stats results, finally delegating to
    process_results.
    """
    files = utils.get_files_from_folder(text_dir)
    files_data = []
    files_indexes = []
    for doc_name in files:
        text = utils.preprocess_text(files[doc_name])
        # NOTE: plain concatenation — target_dir is expected to end with a
        # path separator.
        json_file = (target_dir + doc_name) + '.json'
        if not os.path.exists(json_file):
            # No annotation file for this document; skip it.
            continue
        # Fix: the original leaked the file handle via json.load(open(...)).
        with open(json_file, encoding='mac-roman') as fh:
            quote_objects = json.load(fh)
        file_data = get_file_stats(text, quote_objects)
        files_data.append(file_data)
        files_indexes.append(doc_name)
    return process_results(files_data, files_indexes, output_file)
def add_datetime(func):
    """Decorator that prints a bold grey '[YYYY-mm-dd HH:MM:SS] ' prefix
    immediately before each call to *func*.

    Fix: applies functools.wraps so the wrapped function keeps its
    __name__/__doc__ for logging and introspection.
    """
    from functools import wraps  # local import keeps the decorator self-contained

    @wraps(func)
    def wrapper(*args, **kwargs):
        datetime_str = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # end='' so the wrapped function's own output follows on the same line.
        print(grey('[{}] '.format(datetime_str), bold=True), end='')
        return func(*args, **kwargs)
    return wrapper
# NOTE(review): the bare string line below appears to be the argument of a
# stripped `@mock.patch(...)` decorator — confirm against the original file.
('utils.config_util.__get_default')
def test_overrides_default_values(get_default_mock):
    """Command-line --java-options must override the configured default."""
    # The mocked default supplies two JVM options only for the 'java-options' key.
    get_default_mock.side_effect = (lambda key, default: (['Xmx6144M', 'd64'] if (key == 'java-options') else default))
    parser = _get_command_line_parser(['valid-detector'], [], [])
    result = parser.parse_args(['run', 'ex1', 'valid-detector', '--java-options', 'agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005'])
    assert_equals(['agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005'], result.java_options)
def cummean(x: np.array) -> np.array:
    """Cumulative mean of *x*, ignoring NaN entries.

    An all-NaN input yields an array of ones (original contract); positions
    whose running count of valid entries is zero produce 0 via the masked
    divide.
    """
    if np.isnan(x).all():
        return np.ones(len(x))
    running_total = np.nancumsum(x.astype(float))
    running_count = np.cumsum(~np.isnan(x))
    return np.divide(
        running_total,
        running_count,
        out=np.zeros_like(running_total),
        where=(running_count != 0),
    )
def make_td3_agent(base_config=spinning_up_td3_config, args=Namespace(env='InvertedPendulum-v2', tb='', prefix='td3', parent_folder='/tmp/mrl', layers=(256, 256), num_envs=None), agent_name_attrs=['env', 'seed', 'tb'], **kwargs):
    """Build a TD3 agent config by starting from the DDPG config, swapping the
    algorithm module for TD3 and adding the twin (second) critic.

    NOTE(review): `args` (Namespace) and `agent_name_attrs` (list) are mutable
    default arguments shared across calls — safe only if make_ddpg_agent
    never mutates them; confirm.
    """
    config = make_ddpg_agent(base_config, args, agent_name_attrs, **kwargs)
    # Replace DDPG's algorithm module with TD3.
    del config.module_algorithm
    config.module_algorithm = TD3()
    # Optional layer norm, toggled by an (optional) args.layer_norm flag.
    layer_norm = (nn.LayerNorm if (hasattr(args, 'layer_norm') and args.layer_norm) else nn.Identity)
    e = config.module_eval_env
    # Twin critic: input is state (+ goal) concatenated with action, scalar output.
    config.module_critic2 = PytorchModel('critic2', (lambda : Critic(FCBody(((e.state_dim + e.goal_dim) + e.action_dim), args.layers, layer_norm, make_activ(config.activ), False), 1, False)))
    return config
class SRWLOptA(SRWLOpt):
    """Optical aperture/obstruction element.

    NOTE(review): per SRW convention, _shape is presumably 'r' (rectangular)
    or 'c' (circular) and _ap_or_ob is 'a' (aperture) or 'o' (obstruction) —
    confirm against the SRW documentation.
    """

    def __init__(self, _shape='r', _ap_or_ob='a', _Dx=0, _Dy=0, _x=0, _y=0):
        self.shape = _shape
        self.ap_or_ob = _ap_or_ob
        # Transverse dimensions of the element.
        (self.Dx, self.Dy) = (_Dx, _Dy)
        # Transverse center position of the element.
        (self.x, self.y) = (_x, _y)
def get_predictions_single(model_def, weights):
    """Load *weights* into *model_def*, wrap it with D4 test-time augmentation,
    and return the stacked predictions over the module-level test_loader.
    """
    model_def.load_state_dict(torch.load(weights))
    # Average predictions over the 8 dihedral (D4) flips/rotations.
    model = tta.SegmentationTTAWrapper(model_def, tta.aliases.d4_transform(), merge_mode='mean')
    model.to(device)
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    model.eval()
    batch_outputs = []
    with torch.no_grad():
        for batch in tqdm(test_loader):
            images = batch['image'].to(device)
            batch_outputs.append(model(images).detach().cpu().numpy())
    return np.concatenate(batch_outputs, axis=0)
class CustomDatasetDataLoader():
    """Wraps a dataset class (looked up by opt.dataset_mode) in a
    multi-threaded torch DataLoader, capped at opt.max_dataset_size samples
    per epoch.
    """

    def __init__(self, opt):
        self.opt = opt
        dataset_class = find_dataset_using_name(opt.dataset_mode)
        self.dataset = dataset_class(opt)
        print('dataset [%s] was created' % type(self.dataset).__name__)
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batch_size,
            shuffle=not opt.serial_batches,  # serial_batches => deterministic order
            num_workers=int(opt.num_threads),
            drop_last=True,
        )

    def set_epoch(self, epoch):
        # Some datasets vary their sampling per epoch; expose the counter.
        self.dataset.current_epoch = epoch

    def load_data(self):
        return self

    def __len__(self):
        # Report at most max_dataset_size samples.
        return min(len(self.dataset), self.opt.max_dataset_size)

    def __iter__(self):
        # Stop once max_dataset_size samples have been yielded.
        for batch_index, batch in enumerate(self.dataloader):
            if batch_index * self.opt.batch_size >= self.opt.max_dataset_size:
                break
            yield batch
def install_lib_sig_segfault():
    """Best-effort: preload glibc's libSegFault so crashes print a backtrace.

    Never raises: any failure (library missing, load error) is reported on
    stdout and swallowed, since this is purely a debugging aid.
    """
    try:
        # libSegFault reads this env var; 'all' traps every fatal signal.
        os.environ.setdefault('SEGFAULT_SIGNALS', 'all')
        import ctypes
        import ctypes.util
        lib_path = ctypes.util.find_library('SegFault')
        assert lib_path, 'libSegFault not found'
        ctypes.CDLL(lib_path)
        print('Installed libSegFault.so.')
    except Exception as exc:
        print(('installLibSigSegfault exception: %s' % exc))
class CComplexType(CNumericType):
    """C-level complex number type parameterized by its real component type.

    Generates per-specialization helper names (e.g. '__Pyx_c_conj_float')
    via *funcsuffix*, and lazily exposes .real/.imag/.conjugate through a
    fake extern class scope built in attributes_known().
    """
    is_complex = 1
    to_py_function = '__pyx_PyComplex_FromComplex'
    has_attributes = 1
    # Lazily-created attribute scope; see attributes_known().
    scope = None
    def __init__(self, real_type):
        # Resolve internal typedefs so the specialization is based on the
        # underlying C type; external typedefs are kept as-is.
        while (real_type.is_typedef and (not real_type.typedef_is_external)):
            real_type = real_type.typedef_base_type
        self.funcsuffix = ('_%s' % real_type.specialization_name())
        if real_type.is_float:
            self.math_h_modifier = real_type.math_h_modifier
        else:
            # Integer-based complex types have no math.h suffix.
            self.math_h_modifier = '_UNUSED'
        self.real_type = real_type
        # rank + 0.5 slots the complex type just above its real counterpart
        # in the numeric promotion order.
        CNumericType.__init__(self, (real_type.rank + 0.5), real_type.signed)
        # Cache of specialized operator helper names; see lookup_op().
        self.binops = {}
        self.from_parts = ('%s_from_parts' % self.specialization_name())
        self.default_value = ('%s(0, 0)' % self.from_parts)
    def __eq__(self, other):
        # Two complex types are equal iff their real component types are.
        if (isinstance(self, CComplexType) and isinstance(other, CComplexType)):
            return (self.real_type == other.real_type)
        else:
            return False
    def __ne__(self, other):
        if (isinstance(self, CComplexType) and isinstance(other, CComplexType)):
            return (self.real_type != other.real_type)
        else:
            return True
    def __lt__(self, other):
        # Ordering delegates to the real component types; non-complex
        # comparands sort as "not less" (arbitrary but deterministic).
        if (isinstance(self, CComplexType) and isinstance(other, CComplexType)):
            return (self.real_type < other.real_type)
        else:
            return False
    def __hash__(self):
        # Bit-inverted so a complex type never collides with its real type.
        return (~ hash(self.real_type))
    def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0):
        if (pyrex or for_display):
            # Pyrex-level spelling: "<real type> complex".
            real_code = self.real_type.declaration_code('', for_display, dll_linkage, pyrex)
            base_code = ('%s complex' % real_code)
        else:
            base_code = public_decl(self.sign_and_name(), dll_linkage)
        return self.base_declaration_code(base_code, entity_code)
    def sign_and_name(self):
        real_type_name = self.real_type.specialization_name()
        # Normalize multi-word / macro type names into single identifiers.
        real_type_name = real_type_name.replace('long__double', 'long_double')
        real_type_name = real_type_name.replace('PY_LONG_LONG', 'long_long')
        return ((Naming.type_prefix + real_type_name) + '_complex')
    def assignable_from(self, src_type):
        # Externally typedef'ed numeric types may have an unknown layout, so
        # refuse direct assignment from them.
        if ((not src_type.is_complex) and src_type.is_numeric and src_type.is_typedef and src_type.typedef_is_external):
            return False
        elif src_type.is_pyobject:
            return True
        else:
            return super(CComplexType, self).assignable_from(src_type)
    def assignable_from_resolved_type(self, src_type):
        # Accept complex (component-compatible), plain numeric, or error type.
        return ((src_type.is_complex and self.real_type.assignable_from_resolved_type(src_type.real_type)) or (src_type.is_numeric and self.real_type.assignable_from_resolved_type(src_type)) or (src_type is error_type))
    def attributes_known(self):
        if (self.scope is None):
            # Build the fake extern scope exposing .real/.imag/.conjugate()
            # on first use.
            from . import Symtab
            self.scope = scope = Symtab.CClassScope('', None, visibility='extern')
            scope.parent_type = self
            scope.directives = {}
            scope.declare_var('real', self.real_type, None, cname='real', is_cdef=True)
            scope.declare_var('imag', self.real_type, None, cname='imag', is_cdef=True)
            scope.declare_cfunction('conjugate', CFuncType(self, [CFuncTypeArg('self', self, None)], nogil=True), pos=None, defining=1, cname=('__Pyx_c_conj%s' % self.funcsuffix))
        return True
    def _utility_code_context(self):
        # Template substitutions for the Complex.c utility code.
        return {'type': self.empty_declaration_code(), 'type_name': self.specialization_name(), 'real_type': self.real_type.empty_declaration_code(), 'func_suffix': self.funcsuffix, 'm': self.math_h_modifier, 'is_float': int(self.real_type.is_float)}
    def create_declaration_utility_code(self, env):
        # Header and declarations are shared; arithmetic is specialized.
        env.use_utility_code(UtilityCode.load_cached('Header', 'Complex.c'))
        env.use_utility_code(UtilityCode.load_cached('RealImag', 'Complex.c'))
        env.use_utility_code(TempitaUtilityCode.load_cached('Declarations', 'Complex.c', self._utility_code_context()))
        env.use_utility_code(TempitaUtilityCode.load_cached('Arithmetic', 'Complex.c', self._utility_code_context()))
        return True
    def can_coerce_to_pyobject(self, env):
        return True
    def can_coerce_from_pyobject(self, env):
        return True
    def create_to_py_utility_code(self, env):
        env.use_utility_code(UtilityCode.load_cached('ToPy', 'Complex.c'))
        return True
    def create_from_py_utility_code(self, env):
        env.use_utility_code(TempitaUtilityCode.load_cached('FromPy', 'Complex.c', self._utility_code_context()))
        self.from_py_function = ('__Pyx_PyComplex_As_' + self.specialization_name())
        return True
    def lookup_op(self, nargs, op):
        """Return the specialized C helper name for *op*, or None if unsupported.

        Results are memoized in self.binops.
        """
        try:
            return self.binops[(nargs, op)]
        except KeyError:
            pass
        try:
            op_name = complex_ops[(nargs, op)]
            self.binops[(nargs, op)] = func_name = ('__Pyx_c_%s%s' % (op_name, self.funcsuffix))
            return func_name
        except KeyError:
            # Operator not defined for complex types.
            return None
    def unary_op(self, op):
        return self.lookup_op(1, op)
    def binary_op(self, op):
        return self.lookup_op(2, op)
    def py_type_name(self):
        return 'complex'
    def cast_code(self, expr_code):
        # Complex values cannot be C-cast; pass the expression through.
        return expr_code
class MinWeight(BaseEliminationOrder):
    """Elimination ordering using the min-weight heuristic: a node's cost is
    the product of the cardinalities of its neighbors in the moralized graph.
    """

    def cost(self, node):
        cardinalities = [self.bayesian_model.get_cardinality(neighbor) for neighbor in self.moralized_model.neighbors(node)]
        return np.prod(cardinalities)
class HardwareConfig():
    """Hardware resource settings for a run (structured-config style).

    NOTE(review): MISSING is presumably omegaconf/hydra's mandatory-value
    sentinel, making n_cpu/n_gpu required fields — confirm where MISSING is
    imported from.
    """
    # Number of CPU cores to use (must be supplied by the final config).
    n_cpu: int = MISSING
    # Number of GPUs to use (must be supplied by the final config).
    n_gpu: int = MISSING
    # Environments simulated per rollout worker.
    n_envs_per_worker: int = 2
class BertAdam(Optimizer):
    """Adam variant used for BERT: weight decay is applied directly to the
    parameters (decoupled from the moment estimates) and no bias correction
    is performed; the learning rate is scaled by a warmup schedule.
    """
    def __init__(self, params, lr=required, warmup=(- 1), t_total=(- 1), schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-06, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
        # Validate hyperparameters up front with explicit error messages.
        if ((lr is not required) and (lr < 0.0)):
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if ((not isinstance(schedule, _LRSchedule)) and (schedule not in SCHEDULES)):
            raise ValueError('Invalid schedule parameter: {}'.format(schedule))
        if (not (0.0 <= b1 < 1.0)):
            raise ValueError('Invalid b1 parameter: {} - should be in [0.0, 1.0['.format(b1))
        if (not (0.0 <= b2 < 1.0)):
            raise ValueError('Invalid b2 parameter: {} - should be in [0.0, 1.0['.format(b2))
        if (not (e >= 0.0)):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e))
        if (not isinstance(schedule, _LRSchedule)):
            # String name: instantiate the named schedule with warmup/t_total.
            schedule_type = SCHEDULES[schedule]
            schedule = schedule_type(warmup=warmup, t_total=t_total)
        elif ((warmup != (- 1)) or (t_total != (- 1))):
            # A ready-made schedule object carries its own warmup/t_total.
            logger.warning('warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. Please specify custom warmup and t_total in _LRSchedule object.')
        defaults = dict(lr=lr, schedule=schedule, b1=b1, b2=b2, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)
    def get_lr(self):
        """Return the current schedule-scaled learning rate per parameter."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if (len(state) == 0):
                    # No step taken yet anywhere: report [0].
                    return [0]
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])
                lr.append(lr_scheduled)
        return lr
    def step(self, closure=None):
        """Perform a single optimization step.

        closure: optional callable that re-evaluates the model and returns
        the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                if (len(state) == 0):
                    # Lazy state init: step counter plus first/second moments.
                    state['step'] = 0
                    state['next_m'] = torch.zeros_like(p.data)
                    state['next_v'] = torch.zeros_like(p.data)
                (next_m, next_v) = (state['next_m'], state['next_v'])
                (beta1, beta2) = (group['b1'], group['b2'])
                if (group['max_grad_norm'] > 0):
                    # Per-parameter gradient clipping.
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Exponential moving averages of gradient and squared gradient.
                # NOTE(review): add_(scalar, tensor) / addcmul_(scalar, t, t)
                # use the legacy positional overload removed in newer PyTorch
                # — confirm the pinned torch version supports it.
                next_m.mul_(beta1).add_((1 - beta1), grad)
                next_v.mul_(beta2).addcmul_((1 - beta2), grad, grad)
                # No bias correction, per the original BERT implementation.
                update = (next_m / (next_v.sqrt() + group['e']))
                if (group['weight_decay'] > 0.0):
                    # Decoupled weight decay, applied outside the moments.
                    update += (group['weight_decay'] * p.data)
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])
                update_with_lr = (lr_scheduled * update)
                p.data.add_((- update_with_lr))
                state['step'] += 1
        return loss
# NOTE(review): `_params` below looks like the remnant of a stripped
# `@pytest.mark.parametrize`-style decorator — confirm against the original.
_params
def test_quad_vec_simple_inf(quadrature):
    """quad_vec over (semi-)infinite intervals on 1/(1+x^2) and a shifted-sine
    integrand: checks values, sign flips from integration direction, and
    full_output convergence info.
    """
    def f(x):
        return (1 / (1 + (np.float64(x) ** 2)))
    for epsabs in [0.1, 0.001, 1e-06]:
        if ((quadrature == 'trapezoid') and (epsabs < 0.0001)):
            # The trapezoid rule cannot reach the tightest tolerance; skip.
            continue
        kwargs = dict(norm='max', epsabs=epsabs, quadrature=quadrature)
        # arctan integral: each half-line contributes pi/2, with the sign
        # determined by the integration direction.
        (res, err) = quad_vec(f, 0, np.inf, **kwargs)
        assert_allclose(res, (np.pi / 2), rtol=0, atol=max(epsabs, err))
        (res, err) = quad_vec(f, 0, (- np.inf), **kwargs)
        assert_allclose(res, ((- np.pi) / 2), rtol=0, atol=max(epsabs, err))
        (res, err) = quad_vec(f, (- np.inf), 0, **kwargs)
        assert_allclose(res, (np.pi / 2), rtol=0, atol=max(epsabs, err))
        (res, err) = quad_vec(f, np.inf, 0, **kwargs)
        assert_allclose(res, ((- np.pi) / 2), rtol=0, atol=max(epsabs, err))
        (res, err) = quad_vec(f, (- np.inf), np.inf, **kwargs)
        assert_allclose(res, np.pi, rtol=0, atol=max(epsabs, err))
        (res, err) = quad_vec(f, np.inf, (- np.inf), **kwargs)
        assert_allclose(res, (- np.pi), rtol=0, atol=max(epsabs, err))
        # Degenerate equal-endpoint intervals integrate to zero.
        (res, err) = quad_vec(f, np.inf, np.inf, **kwargs)
        assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))
        (res, err) = quad_vec(f, (- np.inf), (- np.inf), **kwargs)
        assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))
        # Breakpoints must not change the result.
        (res, err) = quad_vec(f, 0, np.inf, points=(1.0, 2.0), **kwargs)
        assert_allclose(res, (np.pi / 2), rtol=0, atol=max(epsabs, err))
    def f(x):
        return (np.sin((x + 2)) / (1 + (x ** 2)))
    exact = ((np.pi / np.e) * np.sin(2))
    epsabs = 1e-05
    (res, err, info) = quad_vec(f, (- np.inf), np.inf, limit=1000, norm='max', epsabs=epsabs, quadrature=quadrature, full_output=True)
    # status == 1 means convergence was achieved.
    assert (info.status == 1)
    assert_allclose(res, exact, rtol=0, atol=max(epsabs, (1.5 * err)))
def test_calc_on_policy_policy_value_estimate():
    """The on-policy value estimate of the random policy must be a float."""
    estimate = OpenBanditDataset.calc_on_policy_policy_value_estimate(behavior_policy='random', campaign='all')
    assert isinstance(estimate, float)
def find_parameters(module):
    """Return the trainable parameters of *module*.

    DataParallel replicas do not expose parameters through .parameters(),
    so for replicas we scan instance __dict__s for gradient-requiring
    tensors instead.
    """
    assert isinstance(module, nn.Module)
    if not getattr(module, '_is_replica', False):
        return list(module.parameters())

    def _tensor_attrs(mod):
        # Gradient-requiring tensors stored directly on the instance.
        return [(name, attr) for (name, attr) in mod.__dict__.items() if (torch.is_tensor(attr) and attr.requires_grad)]

    members = module._named_members(get_members_fn=_tensor_attrs)
    return [tensor for (_, tensor) in members]
class Add2(PythonFunction):
    """Element-wise addition of two inputs as a Python function node.

    The gradient of a sum passes through unchanged to both inputs.
    """

    def __init__(self, ctx=None):
        super(Add2, self).__init__(ctx)

    def name(self):
        return 'PythonAdd2'

    def min_outputs(self):
        return 1

    def grad_depends_output_data(self, i, o):
        # Backward needs only output gradients, never output data.
        return False

    def grad_depends_input_data(self, i, j):
        # d(out)/d(in) is 1, independent of any input data.
        return False

    def setup_impl(self, inputs, outputs):
        # The output adopts the first input's shape.
        outputs[0].reset_shape(inputs[0].shape, True)

    def forward_impl(self, inputs, outputs):
        outputs[0].d = inputs[0].d + inputs[1].d

    def backward_impl(self, inputs, outputs, propagate_down, accum):
        # The upstream gradient flows to each input unchanged; either
        # accumulate into or overwrite the existing gradient buffer.
        for idx in (0, 1):
            if propagate_down[idx]:
                if accum[idx]:
                    inputs[idx].g += outputs[0].g
                else:
                    inputs[idx].g = outputs[0].g
def _zinc(model, num_samples, egc_num_bases, egc_num_heads, aggrs, hidden):
    """Build a ZINC experiment config for the requested model family."""
    # Make sure the ZINC dataset is present before the config references it.
    zinc_data(data_location())
    if model == 'egc':
        return ZincEgcConfig(num_samples=num_samples, softmax=False, sigmoid=False, hardtanh=False, num_bases=egc_num_bases, num_heads=egc_num_heads, aggrs=aggrs, hidden=hidden)
    if model == 'gatv2':
        return ZincGatv2Config(num_samples=num_samples, hidden=hidden)
    raise ValueError
def data_file(*relative_path):
    """Join *relative_path* components under the package's data folder."""
    return os.path.join(data_folder(), *relative_path)
def _add_boundmethod_attribute(name: str, obj: Any, attributes: Dict[(str, Any)], ndarrays: Dict[(str, ndarray)], objects: Dict[(str, object)]) -> Tuple[(Dict, Dict, Dict)]:
attributes[name] = obj()
return (attributes, ndarrays, objects) |
# NOTE(review): `.parametrize(...)` below looks like a stripped
# `@pytest.mark.parametrize` decorator — confirm against the original file.
.parametrize('name', sorted(ADAPTERS_MANAGER.adapters))
def test_adapter_class_has_interface(name):
    """Every registered adapter must satisfy ContainerAdapterProtocol."""
    assert isinstance(ADAPTERS_MANAGER.adapters[name], ContainerAdapterProtocol)
# NOTE(review): `_test()` below looks like the remnant of a stripped
# decorator (presumably an `@fpga_test()`-style marker) — confirm against
# the original file.
_test()
def test_kernels_inside_component_0():
    """FPGA instrumentation test: a 5-array elementwise sum should compile
    into 3 concurrent kernels and report 5 instrumentation events, 2 of
    which are 'Full FPGA' events.
    """
    def kernels_inside_component_0(x: dace.float32[8], y: dace.float32[8], v: dace.float32[8], w: dace.float32[8], z: dace.float32[8]):
        tmp = ((x + y) + v)
        return (tmp + (w + z))
    x = np.random.rand(8).astype(np.float32)
    y = np.random.rand(8).astype(np.float32)
    v = np.random.rand(8).astype(np.float32)
    w = np.random.rand(8).astype(np.float32)
    z = np.random.rand(8).astype(np.float32)
    sdfg = kernels_inside_component_0.to_sdfg()
    sdfg.apply_transformations([FPGATransformSDFG, InlineSDFG])
    # Enable timing on every FPGA kernel state.
    for state in sdfg.states():
        if is_fpga_kernel(sdfg, state):
            state.instrument = dace.InstrumentationType.FPGA
    with config.set_temporary('compiler', 'fpga', 'concurrent_kernel_detection', value=True):
        res = sdfg(x=x, y=y, v=v, w=w, z=z)
    assert (count_kernels(sdfg) == 3)
    assert np.allclose(res, ((((x + y) + v) + w) + z))
    report = sdfg.get_latest_report()
    assert (len(report.durations[(0, 0, (- 1))]) == 5)
    # Exactly two whole-device ('Full FPGA') events are expected.
    full_fpga_events = 0
    for event_name in report.durations[(0, 0, (- 1))]:
        if ('Full FPGA' in event_name):
            full_fpga_events += 1
    assert (full_fpga_events == 2)
    return sdfg
class OpenPoseHead(nn.Module):
    """Multi-stage OpenPose prediction head with two branches per stage:
    branch 1 emits 2*num_classes part-affinity channels, branch 2 emits
    num_classes heatmap channels.  Stages 2..6 refine the concatenation of
    both previous outputs with the backbone feature map.

    Attribute names (model1_1, model1_2, modelx_1_list, modelx_2_list) and
    layer ordering are kept identical so checkpoints remain loadable.
    """

    def __init__(self, num_classes=19, in_channels=128):
        super(OpenPoseHead, self).__init__()
        # Channels entering each refinement stage: features + both branch outputs.
        concat_channels = in_channels + num_classes + (2 * num_classes)

        def stage1_branch(out_channels):
            # Stage-1 branch: three 3x3 convs, a 1x1 expansion, a 1x1 projection.
            return nn.Sequential(
                nn.Conv2d(in_channels, in_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_channels, in_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_channels, in_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_channels, (in_channels * 4), kernel_size=(1, 1), stride=(1, 1)),
                nn.ReLU(inplace=True),
                nn.Conv2d((in_channels * 4), out_channels, kernel_size=(1, 1), stride=(1, 1)),
            )

        def refine_branch(out_channels):
            # Refinement branch: five 7x7 convs, then two 1x1 convs.
            layers = []
            channels = concat_channels
            for _ in range(5):
                layers.append(nn.Conv2d(channels, in_channels, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3)))
                layers.append(nn.ReLU(inplace=True))
                channels = in_channels
            layers.append(nn.Conv2d(in_channels, in_channels, kernel_size=(1, 1), stride=(1, 1)))
            layers.append(nn.ReLU(inplace=True))
            layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=(1, 1), stride=(1, 1)))
            return nn.Sequential(*layers)

        self.model1_1 = stage1_branch(num_classes * 2)
        self.model1_2 = stage1_branch(num_classes)
        self.modelx_1_list = nn.ModuleList()
        self.modelx_2_list = nn.ModuleList()
        for _ in range(5):
            self.modelx_1_list.append(refine_branch(num_classes * 2))
            self.modelx_2_list.append(refine_branch(num_classes))
        self._init_weight()

    def forward(self, x):
        paf_maps = []
        heat_maps = []
        paf = self.model1_1(x)
        heat = self.model1_2(x)
        paf_maps.append(paf)
        heat_maps.append(heat)
        for stage_paf, stage_heat in zip(self.modelx_1_list, self.modelx_2_list):
            # Each refinement stage sees both previous outputs plus features.
            fused = torch.cat([paf, heat, x], 1)
            paf = stage_paf(fused)
            heat = stage_heat(fused)
            paf_maps.append(paf)
            heat_maps.append(heat)
        return ((paf, heat), (heat_maps, paf_maps))

    def _init_weight(self):
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.normal_(module.weight, std=0.01)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0.0)
        # Re-initialize every branch's final conv (matches the original,
        # where these layers were initialized a second time).
        for branch in [self.model1_1, self.model1_2, *self.modelx_1_list, *self.modelx_2_list]:
            nn.init.normal_(branch[-1].weight, std=0.01)
class OffsetPaddleSetABreakoutWorld(RandomOffsetPaddleBreakoutWorld):
    """Deprecated Breakout variant with a fixed paddle-offset range.

    NOTE(review): warnings.warn sits in the class body, so the
    DeprecationWarning fires once at import/class-definition time rather
    than on instantiation — confirm this is intended.
    """
    warnings.warn('This env. parameter was dropped and should no longer be used.', DeprecationWarning)
    # Paddle starting-offset range; units presumably percent of field width
    # (per the base class) — verify.
    offset_range_start = 25
    offset_range_end = 75
def calculate_vggface2_rgb_mean_std(dir, batch_size):
    """Compute per-channel RGB mean and std over an ImageFolder dataset.

    Fix: the original only printed the statistics; they are now also
    returned as (mean, std) so callers can use them programmatically.
    Backward-compatible — previous callers ignored the None return.

    NOTE: averaging per-batch means is exact only when every batch has the
    same size; the last partial batch introduces a small bias.  Kept as-is
    for parity with the original computation.
    """
    dataset = datasets.ImageFolder(dir, transforms.ToTensor())
    dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False)
    (channels_sum, channels_squared_sum, num_batches) = (0, 0, 0)
    for (data, _) in tqdm(dataloader):
        # Mean over batch, height and width, keeping the channel dimension.
        channels_sum += torch.mean(data, dim=[0, 2, 3])
        channels_squared_sum += torch.mean((data ** 2), dim=[0, 2, 3])
        num_batches += 1
    mean = (channels_sum / num_batches)
    # Var = E[X^2] - E[X]^2.
    std = (((channels_squared_sum / num_batches) - (mean ** 2)) ** 0.5)
    print('Mean: {}, Std: {}'.format(mean, std))
    return (mean, std)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.