code stringlengths 101 5.91M |
|---|
def _resolve_handlers(l):
if (not isinstance(l, ConvertingList)):
return l
return [l[i] for i in range(len(l))] |
# NOTE(review): the next line looks like a mangled decorator — in garage
# examples this is @wrap_experiment(snapshot_mode='none'); confirm before use.
_experiment(snapshot_mode='none')
def sac_half_cheetah_batch(ctxt=None, seed=1):
    """Train SAC on HalfCheetah-v2 with the garage framework.

    Args:
        ctxt: Experiment snapshot configuration passed by the experiment wrapper.
        seed (int): Random seed for reproducibility.
    """
    deterministic.set_seed(seed)
    runner = LocalRunner(snapshot_config=ctxt)
    env = GarageEnv(normalize(gym.make('HalfCheetah-v2')))
    # Tanh-squashed Gaussian policy; std bounds follow the SAC paper's (-20, 2) log-std clamp.
    policy = TanhGaussianMLPPolicy(env_spec=env.spec, hidden_sizes=[256, 256], hidden_nonlinearity=nn.ReLU, output_nonlinearity=None, min_std=np.exp((- 20.0)), max_std=np.exp(2.0))
    # Twin Q-functions for clipped double-Q learning.
    qf1 = ContinuousMLPQFunction(env_spec=env.spec, hidden_sizes=[256, 256], hidden_nonlinearity=F.relu)
    qf2 = ContinuousMLPQFunction(env_spec=env.spec, hidden_sizes=[256, 256], hidden_nonlinearity=F.relu)
    replay_buffer = PathBuffer(capacity_in_transitions=int(1000000.0))
    sac = SAC(env_spec=env.spec, policy=policy, qf1=qf1, qf2=qf2, gradient_steps_per_itr=1000, max_path_length=1000, max_eval_path_length=1000, replay_buffer=replay_buffer, min_buffer_size=10000.0, target_update_tau=0.005, discount=0.99, buffer_batch_size=256, reward_scale=1.0, steps_per_epoch=1)
    # Move the algorithm's networks to GPU when available.
    if torch.cuda.is_available():
        set_gpu_mode(True)
    else:
        set_gpu_mode(False)
    sac.to()
    runner.setup(algo=sac, env=env, sampler_cls=LocalSampler)
    runner.train(n_epochs=1000, batch_size=1000)
class Resnet152Triplet(nn.Module):
    """ResNet-152 backbone that maps images to L2-normalized embedding vectors."""

    def __init__(self, embedding_dimension=512, pretrained=False):
        super(Resnet152Triplet, self).__init__()
        self.model = resnet152(pretrained=pretrained)
        # Replace the classification head with a bias-free projection into
        # the embedding space.
        in_features = self.model.fc.in_features
        self.model.fc = nn.Linear(in_features, embedding_dimension, bias=False)

    def forward(self, images):
        # Unit-length embeddings keep triplet/cosine distances well-scaled.
        return F.normalize(self.model(images), p=2, dim=1)
class MpiAdam(object):
    """Adam optimizer whose gradients are summed (optionally averaged) across MPI workers.

    Parameters are read and written as one flat float32 vector via
    U.GetFlat / U.SetFromFlat.
    """
    def __init__(self, var_list, *, beta1=0.9, beta2=0.999, epsilon=1e-08, scale_grad_by_procs=True, comm=None):
        self.var_list = var_list
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        # When True, the Allreduce'd gradient is divided by the worker count
        # (average instead of sum).
        self.scale_grad_by_procs = scale_grad_by_procs
        size = sum((U.numel(v) for v in var_list))
        # First/second moment estimates, stored flat alongside the step count.
        self.m = np.zeros(size, 'float32')
        self.v = np.zeros(size, 'float32')
        self.t = 0
        self.setfromflat = U.SetFromFlat(var_list)
        self.getflat = U.GetFlat(var_list)
        self.comm = (MPI.COMM_WORLD if (comm is None) else comm)
    def update(self, localg, stepsize):
        """Apply one Adam step using the local gradient Allreduce'd across workers."""
        # Every 100 steps, verify workers still hold identical parameters.
        if ((self.t % 100) == 0):
            self.check_synced()
        localg = localg.astype('float32')
        globalg = np.zeros_like(localg)
        self.comm.Allreduce(localg, globalg, op=MPI.SUM)
        if self.scale_grad_by_procs:
            globalg /= self.comm.Get_size()
        self.t += 1
        # Bias-corrected effective step size (standard Adam).
        a = ((stepsize * np.sqrt((1 - (self.beta2 ** self.t)))) / (1 - (self.beta1 ** self.t)))
        self.m = ((self.beta1 * self.m) + ((1 - self.beta1) * globalg))
        self.v = ((self.beta2 * self.v) + ((1 - self.beta2) * (globalg * globalg)))
        step = (((- a) * self.m) / (np.sqrt(self.v) + self.epsilon))
        self.setfromflat((self.getflat() + step))
    def sync(self):
        """Broadcast rank 0's parameters to all workers."""
        theta = self.getflat()
        self.comm.Bcast(theta, root=0)
        self.setfromflat(theta)
    def check_synced(self):
        """Assert (on non-root ranks) that local parameters match rank 0's."""
        if (self.comm.Get_rank() == 0):
            theta = self.getflat()
            self.comm.Bcast(theta, root=0)
        else:
            thetalocal = self.getflat()
            thetaroot = np.empty_like(thetalocal)
            self.comm.Bcast(thetaroot, root=0)
            assert (thetaroot == thetalocal).all(), (thetaroot, thetalocal)
# NOTE(review): the next line looks like a mangled decorator — likely
# @pytest.mark.parametrize('print_changed_only', [True, False]); confirm.
.parametrize('print_changed_only', [True, False])
def test_one_estimator_print_change_only(print_changed_only):
    """The HTML repr of a lone estimator must contain its (escaped) text repr."""
    pca = PCA(n_components=10)
    with config_context(print_changed_only=print_changed_only):
        pca_repr = html.escape(str(pca))
        html_output = estimator_html_repr(pca)
    assert (pca_repr in html_output)
def masked_l2(preds, actuals, mask):
    """Masked mean squared error between preds and actuals.

    The mask is renormalized by its mean so the loss magnitude does not
    depend on how many entries are masked in.

    Fix: the original called tf.nn.l2(preds, actuals), which does not exist
    in any TensorFlow release (tf.nn exposes l2_loss / l2_normalize only) and
    raised AttributeError. The masked-loss pattern used here requires the
    elementwise squared error.
    """
    loss = tf.square(preds - actuals)
    mask = tf.cast(mask, dtype=tf.float32)
    # Scale so that E[mask] == 1 over the batch.
    mask /= tf.reduce_mean(mask)
    loss *= mask
    return tf.reduce_mean(loss)
def plot_all_task_group_box_plots(df_results: pd.DataFrame, score: str, path_to_output_dir: str, model_heads: Optional[List[Tuple[(str, str)]]]=None):
    """Draw one box plot per task group on a 2x2 grid and save the figure.

    Args:
        df_results: Results table; must contain 'model' and 'head' columns.
        score: Metric name used for filtering, colors, titles, and the file name.
        path_to_output_dir: Directory that receives 'taskgroups_boxplot_<score>.png'.
        model_heads: Optional (model, head) pairs to restrict the plot to.

    Returns:
        The matplotlib Figure (already saved and closed).
    """
    (fig, axes) = plt.subplots(2, 2, figsize=(12, 12))
    # NOTE(review): assumes there are at most 4 task groups (2x2 grid) — confirm.
    task_groups: List[str] = list(TASK_GROUP_2_LABELING_FUNCTION.keys())
    for (idx, task_group) in tqdm(enumerate(task_groups)):
        plot_one_task_group_box_plot(df_results, axes.flat[idx], task_group, score, model_heads=model_heads)
    # Build one legend entry per distinct (model, head) pair actually plotted.
    df_ = filter_df(df_results, score=score, model_heads=model_heads)
    legend_n_col: int = 2
    handles = [Patch(facecolor=SCORE_MODEL_HEAD_2_COLOR[score][model][head], edgecolor=SCORE_MODEL_HEAD_2_COLOR[score][model][head], label=f'{MODEL_2_NAME[model]}+{HEAD_2_NAME[head]}') for (model, head) in df_[['model', 'head']].drop_duplicates().itertuples(index=False)]
    fig.legend(handles=handles, loc='lower center', ncol=legend_n_col, fontsize=12)
    fig.suptitle(f'Few-shot v. Full data {score.upper()} by Task Group', fontsize=16)
    plt.tight_layout()
    plt.subplots_adjust(top=0.92, bottom=0.1, hspace=0.25)
    plt.savefig(os.path.join(path_to_output_dir, f'taskgroups_boxplot_{score}.png'), dpi=300)
    plt.close('all')
    return fig
class Germany(Domain):
    """Query domain covering Germany, backed by the GADM level-0 shapefile."""

    def __init__(self):
        Domain.__init__(self)
        try:
            import fiona
            import shapely.geometry
        except ModuleNotFoundError:
            raise ModuleNotFoundError('The Germany domain requires fiona and shapely. Please install fiona and shapely, e.g., using pip.')
        # Fixes:
        # - fiona collections are not Python-2 iterators: `.next()` raised
        #   AttributeError on Python 3; use next(iter(...)) instead.
        # - shapely.geometry.asShape was deprecated in 1.8 and removed in 2.0;
        #   shapely.geometry.shape() is the supported replacement.
        # - the collection is now closed deterministically via `with`.
        with fiona.open('./shape_files/DEU_adm/DEU_adm0.shp') as fiona_collection:
            geometry = next(iter(fiona_collection))['geometry']
            self.b = fiona_collection.bounds
        self.shape = shapely.geometry.shape(geometry)

    def get_query_parameters(self):
        """Bounding-box query parameters (min/max latitude and longitude)."""
        return {'minlatitude': self.b[1], 'minlongitude': self.b[0], 'maxlatitude': self.b[3], 'maxlongitude': self.b[2]}

    def is_in_domain(self, latitude, longitude):
        """True when the point lies inside the Germany polygon, not just its bbox."""
        # Local import: shapely was only imported inside __init__, so the module
        # name is not otherwise guaranteed to be in scope here.
        import shapely.geometry
        return self.shape.contains(shapely.geometry.Point(longitude, latitude))
class LaheyFCompiler(FCompiler):
    """numpy.distutils compiler description for the Lahey/Fujitsu Fortran 95 compiler."""

    compiler_type = 'lahey'
    description = 'Lahey/Fujitsu Fortran 95 Compiler'
    version_pattern = 'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\\s*]*)'
    executables = {
        'version_cmd': ['<F90>', '--version'],
        'compiler_f77': ['lf95', '--fix'],
        'compiler_fix': ['lf95', '--fix'],
        'compiler_f90': ['lf95'],
        'linker_so': ['lf95', '-shared'],
        'archiver': ['ar', '-cr'],
        'ranlib': ['ranlib'],
    }
    module_dir_switch = None
    module_include_switch = None

    def get_flags_opt(self):
        """Optimization flags."""
        return ['-O']

    def get_flags_debug(self):
        """Debug flags, including Lahey's runtime checking."""
        return ['-g', '--chk', '--chkglobal']

    def get_library_dirs(self):
        """Library search path derived from the LAHEY environment variable, if set."""
        lahey_root = os.environ.get('LAHEY')
        if not lahey_root:
            return []
        return [os.path.join(lahey_root, 'lib')]

    def get_libraries(self):
        """Lahey runtime libraries to link against."""
        return ['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']
def inject_inferable_lora(model, lora_path='', unet_replace_modules=None, text_encoder_replace_modules=None, is_extended=False, r=16):
    """Load LoRA weights from lora_path into the model's text encoder and/or UNet.

    Checkpoint files must contain 'text_encoder' or 'unet' in their name
    (e.g. text_encoder.pt, unet.pt). A missing lora_path is silently ignored;
    load errors are printed rather than raised.

    Fix: the module-list defaults were mutable lists shared across calls;
    fresh lists are now created per call (behavior otherwise unchanged).

    NOTE(review): `is_extended` is accepted but never consulted — the UNet
    branch always uses monkeypatch_or_replace_lora_extended. Confirm whether
    it should select between the plain and extended loaders.
    """
    if unet_replace_modules is None:
        unet_replace_modules = ['UNet3DConditionModel']
    if text_encoder_replace_modules is None:
        text_encoder_replace_modules = ['CLIPEncoderLayer']
    from transformers.models.clip import CLIPTextModel
    from diffusers import UNet3DConditionModel

    def is_text_model(f):
        # File targets the text encoder AND the model really has a CLIP text encoder.
        return ('text_encoder' in f) and isinstance(model.text_encoder, CLIPTextModel)

    def is_unet(f):
        return ('unet' in f) and (model.unet.__class__.__name__ == 'UNet3DConditionModel')

    if os.path.exists(lora_path):
        try:
            for f in os.listdir(lora_path):
                if f.endswith('.pt'):
                    lora_file = os.path.join(lora_path, f)
                    if is_text_model(f):
                        monkeypatch_or_replace_lora(model.text_encoder, torch.load(lora_file), target_replace_module=text_encoder_replace_modules, r=r)
                        print('Successfully loaded Text Encoder LoRa.')
                        continue
                    if is_unet(f):
                        monkeypatch_or_replace_lora_extended(model.unet, torch.load(lora_file), target_replace_module=unet_replace_modules, r=r)
                        print('Successfully loaded UNET LoRa.')
                        continue
                    print("Found a .pt file, but doesn't have the correct name format. (unet.pt, text_encoder.pt)")
        except Exception as e:
            print(e)
            print("Couldn't inject LoRA's due to an error.")
def calc_body_body_forces_torques_python_new(bodies, r_vectors, *args, **kwargs):
    """Assign constant torques (alternating in sign) to 'bacteria_constant_torque' bodies.

    Returns a (2 * len(bodies), 3) array: even rows are forces (all zero here),
    odd rows are torques. The torque vector (kwargs['omega_one_roller']) is
    rotated into each body's frame, and its sign flips between successive
    matching bodies.
    """
    num_bodies = len(bodies)
    force_torque_bodies = np.zeros((2 * num_bodies, 3))
    torque = kwargs.get('omega_one_roller')
    sign = 1.0
    for i, body in enumerate(bodies):
        if body.ID != 'bacteria_constant_torque':
            continue
        body_frame_torque = np.dot(body.orientation.rotation_matrix(), torque)
        force_torque_bodies[2 * i + 1] = sign * body_frame_torque
        # Alternate +/- between consecutive matching bodies.
        sign = -sign
    return force_torque_bodies
class GcnHIVNet(HIVNet):
    """HIVNet variant whose graph layers are plain GCN convolutions."""
    def make_graph_layer(self, hidden_dim, layer_idx):
        # layer_idx is intentionally unused: every level gets an identical
        # square (hidden_dim -> hidden_dim) GCNConv.
        return GCNConv(hidden_dim, hidden_dim)
def update_namespace_defs(old_defs: List[List[str]], new_defs: List[List[str]]) -> List[List[str]]:
    """Merge new_defs into old_defs, replacing same-named definitions in place.

    Each new definition either overwrites the matching old definition (matched
    by (type, name)), is moved forward to keep new definitions in their given
    order, or is inserted when no match exists. Returns a new list; old_defs
    is rebound, not mutated.

    Raises:
        Exception: when a non-empty new definition has no recognizable type/name.
    """
    # Position before which all already-processed definitions live; keeps the
    # relative order of new_defs stable in the output.
    next_insert_pos = 0
    for new_def in new_defs:
        if (not new_def):
            continue
        type_and_name = get_def_type_and_name(new_def)
        if (type_and_name is None):
            raise Exception('Cannot find type or name of definition.')
        # Index of the matching old definition, or -1 when absent.
        # (old_def is an (index, def) pair from enumerate, hence old_def[1].)
        old_def_pos = next(filter((lambda old_def: (type_and_name == get_def_type_and_name(old_def[1]))), enumerate(old_defs)), ((- 1), []))[0]
        if (old_def_pos >= next_insert_pos):
            # Match sits at/after the cursor: replace it in place.
            old_defs = ((old_defs[:old_def_pos] + [new_def]) + old_defs[(old_def_pos + 1):])
            next_insert_pos = (old_def_pos + 1)
        elif (old_def_pos >= 0):
            # Match sits before the cursor: remove it there and re-insert the
            # new version at the cursor to preserve new_defs ordering.
            old_defs = (((old_defs[:old_def_pos] + old_defs[(old_def_pos + 1):next_insert_pos]) + [new_def]) + old_defs[next_insert_pos:])
            next_insert_pos += 1
        else:
            # No match: insert the new definition at the cursor.
            old_defs = ((old_defs[:next_insert_pos] + [new_def]) + old_defs[next_insert_pos:])
            next_insert_pos += 1
    return old_defs
def test_shuffle():
    """With shuffle=False, two passes over the train dataloader must yield the
    same first batch (i.e. iteration order is deterministic).

    NOTE(review): despite the name, this exercises the shuffle=False path only —
    confirm whether a shuffle=True counterpart was intended.
    """
    movieLensDataHandler = AEDataHandler('MovieLensSmall', train_data_path, validation_input_data_path, validation_output_data_path, test_input_data_path, test_output_data_path)
    train_dataloader = movieLensDataHandler.get_train_dataloader(shuffle=False)
    # First pass: remember the first batch.
    first = True
    first_batch = None
    for batch in train_dataloader:
        if first:
            first_batch = batch
            first = False
    # Second pass: the first batch must be identical element-wise.
    first = True
    for batch in train_dataloader:
        if first:
            comparison = (batch[0] == first_batch[0])
            assert comparison.all()
            break
def _check_assert(user_ids, item_ids, user_answer, item_answer):
for (idx, item_id) in enumerate(item_ids):
assert (sorted(item_id) == sorted(item_answer[idx]))
assert (sorted(user_ids[idx]) == sorted(user_answer[idx])) |
def covariate_observer(run, intervention):
    """Concatenate the state just before the intervention with the state at
    the intervention time into one flat covariate vector."""
    t = intervention.time
    # Clamp to 0 so t == 0 reuses the initial state as its own predecessor.
    previous = run[max(t - 1, 0)].values()
    current = run[t].values()
    return np.concatenate([previous, current])
def MinMaxScaler(data):
    """Column-wise min-max normalization to [0, 1].

    A small epsilon in the denominator guards constant columns against
    division by zero (constant columns map to ~0).
    """
    col_min = np.min(data, 0)
    col_range = np.max(data, 0) - col_min
    return (data - col_min) / (col_range + 1e-07)
# NOTE(review): the next line looks like a mangled decorator — likely
# @pytest.mark.skipif(...); confirm.
.skipif((environ.get('DB_URL', '') == ''), reason='Skip tests that requires database setup and sql query specified')
def test_read_sql() -> None:
    """Smoke-test lineagex on the bundled 'dependency_example' SQL directory,
    once with a live DB connection and once without."""
    db_url = environ['DB_URL']
    sql = os.path.join(os.getcwd(), 'dependency_example')
    lx = lineagex(sql, 'mimiciii_derived', db_url, 'mimiciii_clinical, public')
    print('dependency test with database connection', lx)
    lx = lineagex(sql=sql, target_schema='mimiciii_derived', search_path_schema='mimiciii_clinical, public')
    print('dependency test without database connection', lx)
def evaluate_weight(dpr_dict, bm25_dict, qrels, mode, weight_dpr, weight_bm25, measurements=None):
    """Evaluate a weighted DPR/BM25 score interpolation and write results to disk.

    Args:
        dpr_dict, bm25_dict: Per-query score dictionaries for each retriever.
        qrels: Relevance judgments.
        mode: Sequence whose [0] and [2] entries name the output directory/file.
        weight_dpr, weight_bm25: Interpolation weights.
        measurements: trec_eval measure names; defaults to recall@1..10 and P@1..10.

    Returns:
        (run, measures) as produced by evaluate_weighting.

    Fix: `measurements` previously defaulted to a mutable set literal shared
    across all calls; it now defaults to None and a fresh set is built per call.

    NOTE(review): the output directory is a hard-coded user-specific path —
    consider promoting it to a parameter.
    """
    if measurements is None:
        measurements = {'recall_1', 'recall_2', 'recall_3', 'recall_4', 'recall_5', 'recall_6', 'recall_7', 'recall_8', 'recall_9', 'recall_10', 'P_1', 'P_2', 'P_3', 'P_4', 'P_5', 'P_6', 'P_7', 'P_8', 'P_9', 'P_10'}
    output_dir2 = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task1/bm25_dpr_legalbert/eval/{}'.format(mode[0])
    output_file = 'eval22_score_{}_{}_weight_dpr_{}_weight_bm25_{}.txt'.format(mode[0], mode[2], weight_dpr, weight_bm25)
    (run, measures) = evaluate_weighting(dpr_dict, bm25_dict, qrels, output_dir2, output_file, weight_dpr, weight_bm25, measurements)
    return (run, measures)
def get_recall(sess):
    """Return 1 when the session's goal item appears among the retrieved
    session items, otherwise 0."""
    return int(get_goal_item(sess) in get_session_items(sess))
def random_frame_sampling(cfg: Dict, total_video_len: int, use_fractional_t: bool=False) -> np.ndarray:
    """Sample cfg['num_frames_per_video'] frame indices from a video.

    Picks a random total time span, a random start offset, and (for >2 frames)
    random interior frames; returns sorted indices as an np.ndarray.

    NOTE(review): range(min_time_diff, max_time_diff) excludes max_time_diff,
    and when min == max the range is empty so random.choice raises IndexError;
    similarly randint's upper bound can go negative for short videos. Confirm
    whether callers guarantee total_video_len is large enough.
    """
    # Need at least (num_frames - 1) steps to fit all frames; cap by video
    # length and an optional configured maximum distance.
    min_time_diff = (cfg['num_frames_per_video'] - 1)
    max_time_diff = min((total_video_len - 1), cfg.get('max_dist', float('inf')))
    if (type(cfg.get('total_dists')) in (list, tuple)):
        # Restrict to an explicit whitelist of allowed spans.
        time_diff_range = [d for d in cfg['total_dists'] if (min_time_diff <= d <= max_time_diff)]
    else:
        time_diff_range = range(min_time_diff, max_time_diff)
    time_diff: int = random.choice(time_diff_range)
    if use_fractional_t:
        # Continuous (sub-frame) start offset.
        offset = (random.random() * ((total_video_len - time_diff) - 1))
    else:
        offset = random.randint(0, ((total_video_len - time_diff) - 1))
    # First and (optionally) last frame pin the chosen span.
    frames_idx = [offset]
    if (cfg['num_frames_per_video'] > 1):
        frames_idx.append((offset + time_diff))
    if (cfg['num_frames_per_video'] > 2):
        # Remaining frames are sampled without replacement strictly inside the span.
        frames_idx.extend([(offset + t) for t in random.sample(range(1, time_diff), k=(cfg['num_frames_per_video'] - 2))])
    frames_idx = sorted(frames_idx)
    return np.array(frames_idx)
def fast_hash(obj):
    """Hash obj using the shared module-level hash state, resetting the state
    afterwards so the next call starts clean."""
    _hash_state.update(obj)
    digest = _hash_state.intdigest()
    _hash_state.reset()
    return digest
class SeparableConv2d_same(nn.Module):
    """Depthwise-separable convolution (depthwise then pointwise), each followed
    by synchronized batch norm, with 'same'-style fixed padding applied before
    the depthwise step."""

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, padding=0):
        super(SeparableConv2d_same, self).__init__()
        # Depthwise: one filter per input channel (groups=inplanes).
        self.depthwise = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation, groups=inplanes, bias=bias)
        self.depthwise_bn = SynchronizedBatchNorm2d(inplanes)
        # Pointwise: 1x1 convolution mixing channels up to `planes`.
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
        self.pointwise_bn = SynchronizedBatchNorm2d(planes)

    def forward(self, x):
        out = fixed_padding(x, self.depthwise.kernel_size[0], rate=self.depthwise.dilation[0])
        out = self.depthwise_bn(self.depthwise(out))
        out = self.pointwise_bn(self.pointwise(out))
        return out
def add_indent_lines(prefix, s):
    """Prepend prefix to the first line of s and indent every following line
    so it aligns under the end of the prefix.

    An empty/falsy s yields just the prefix.
    """
    if not s:
        return prefix
    pad = ' ' * str_visible_len(prefix)
    first, *rest = s.splitlines(True)
    return prefix + first + ''.join(pad + line for line in rest)
def clean(opts):
    """Delete stale checkpoint weight files, keeping only the newest one.

    For every '*checkpoint*' JSON log in opts.ckpt_dir: verify all listed
    weight files exist, unlink every 'latest' entry except the newest (and the
    one marked 'current'), then rewrite the log with only the newest entry.
    """
    logs = glob.glob(os.path.join(opts.ckpt_dir, '*checkpoint*'))
    print(logs)
    for log in logs:
        with open(log, 'r') as log_f:
            log_ = json.load(log_f)
        # Sanity check: every tracked weight file must still exist on disk.
        for fname in log_['latest']:
            fpath = os.path.join(opts.ckpt_dir, ('weights_' + fname))
            assert os.path.exists(fpath), fpath
        # Remove all but the last entry; never remove the 'current' checkpoint.
        to_rm = [l for l in log_['latest'][:(- 1)] if (l != log_['current'])]
        to_kp = log_['latest'][(- 1)]
        for fname in to_rm:
            fpath = os.path.join(opts.ckpt_dir, ('weights_' + fname))
            os.unlink(fpath)
            print('Removed file ', fpath)
        print('Kept file ', os.path.join(opts.ckpt_dir, ('weights_' + to_kp)))
        # Rewrite the log so 'latest' only references the kept checkpoint.
        with open(log, 'w') as log_f:
            log_['latest'] = [log_['latest'][(- 1)]]
            log_f.write(json.dumps(log_, indent=2))
def my_glob(folder, max_depth=3):
    """Yield every path up to max_depth directory levels below folder.

    Generalization of the original hard-coded three-level glob: the default
    max_depth=3 reproduces the original patterns ('folder/*', 'folder/*/*',
    'folder/*/*/*') in the same order; other depths are now supported.
    """
    pattern = f'{folder}/*'
    for _ in range(max_depth):
        yield from glob.glob(pattern)
        pattern += '/*'
def test_sparray_norm():
    """spnorm with ord=1 must agree for coo_array and coo_matrix on both axes."""
    rows = np.array([0, 0, 1, 1])
    cols = np.array([0, 1, 2, 3])
    vals = np.array([4, 5, 7, 9])
    expected_axis0 = np.array([4, 5, 7, 9])
    expected_axis1 = np.array([9, 16])
    for sparse_cls in (scipy.sparse.coo_array, scipy.sparse.coo_matrix):
        sp = sparse_cls((vals, (rows, cols)), shape=(2, 4))
        assert_equal(spnorm(sp, ord=1, axis=0), expected_axis0)
        assert_equal(spnorm(sp, ord=1, axis=1), expected_axis1)
@dataclass(frozen=True)
class GeneralInfo():
    """Immutable summary of the service: version, example queries, and the
    metadata of every available model.

    Fix: the bare '(frozen=True)' line was a syntax error — a mangled
    @dataclass(frozen=True) decorator, restored here (the frozen=True keyword
    matches the dataclass signature; the file's annotated-fields style matches
    dataclass usage).
    """
    version: str
    example_queries: List[Query]
    all_models: List[ModelMetadata]
class UbuntuDataUtils(object):
    """Utilities for the Ubuntu dialog corpus: reading the raw TSV, emitting a
    BERT post-training corpus, pickling tokenized examples, and loading the
    Ubuntu manual knowledge file."""
    def __init__(self, txt_path, bert_pretrained_dir):
        # txt_path is a %-format template; data_type is substituted in read_raw_file.
        self.txt_path = txt_path
        self._bert_tokenizer_init(bert_pretrained_dir)
    def _bert_tokenizer_init(self, bert_pretrained_dir, bert_pretrained='bert-base-uncased'):
        """Load the BERT tokenizer vocab from <dir>/<model>/<model>-vocab.txt."""
        self._bert_tokenizer = tokenization_bert.BertTokenizer(vocab_file=os.path.join(os.path.join(bert_pretrained_dir, bert_pretrained), ('%s-vocab.txt' % bert_pretrained)))
        print('BERT tokenizer init completes')
    def read_raw_file(self, data_type):
        """Read non-empty stripped lines from the raw file for data_type (e.g. train/valid/test)."""
        print('Loading raw txt file...')
        ubuntu_path = (self.txt_path % data_type)
        with open(ubuntu_path, 'r', encoding='utf8') as fr_handle:
            data = [line.strip() for line in fr_handle if (len(line.strip()) > 0)]
            print(('(%s) total number of sentence : %d' % (data_type, len(data))))
        return data
    def make_post_training_corpus(self, data, post_training_path):
        """Write positive dialogs (label '1') as blank-line-separated utterance
        blocks — the document format BERT post-training expects.

        Each input line is: label \t utterance_1 \t ... \t response.
        """
        with open(post_training_path, 'w', encoding='utf-8') as fw_handle:
            cnt = 0
            for document in data:
                dialog_data = document.split('\t')
                # Skip negative (label '0') dialogs.
                if (dialog_data[0] == '0'):
                    continue
                # Context utterances only (the final field is the response).
                for utt in dialog_data[1:(- 1)]:
                    if (len(utt) == 0):
                        continue
                    fw_handle.write((utt.strip() + '\n'))
                fw_handle.write('\n')
                cnt += 1
    def make_examples_pkl(self, data, ubuntu_pkl_path):
        """Tokenize each dialog and append one pickled InputExamples per dialog
        (file opened in append mode: repeated calls accumulate)."""
        with open(ubuntu_pkl_path, 'ab') as pkl_handle:
            for dialog in tqdm(data):
                dialog_data = dialog.split('\t')
                label = dialog_data[0]
                utterances = []
                dialog_len = []
                for utt in dialog_data[1:(- 1)]:
                    utt_tok = self._bert_tokenizer.tokenize(utt)
                    utterances.append(utt_tok)
                    dialog_len.append(len(utt_tok))
                response = self._bert_tokenizer.tokenize(dialog_data[(- 1)])
                pickle.dump(InputExamples(utterances=utterances, response=response, label=int(label), seq_lengths=(dialog_len, len(response))), pkl_handle)
        print(ubuntu_pkl_path, ' save completes!')
    def ubuntu_manual(self):
        """Load 'ubuntu_manual_knowledge.txt' (command \t description per line)
        into a dict; malformed lines are silently skipped."""
        knowledge_path = 'ubuntu_manual_knowledge.txt'
        ubuntu_knowledge_dict = dict()
        ubuntu_man_l = []
        with open(knowledge_path, 'r', encoding='utf-8') as f_handle:
            for line in f_handle:
                ubuntu_man = line.strip().split('\t')
                if (len(ubuntu_man) == 2):
                    ubuntu_knowledge_dict[ubuntu_man[0]] = ubuntu_man[1]
                    ubuntu_man_l.append(ubuntu_man[1])
        print(ubuntu_knowledge_dict.keys())
        print(len(ubuntu_knowledge_dict))
def get_ap_offset(expr: Expression) -> Optional[int]:
    """Return the offset when expr is an AP-relative register offset; otherwise None."""
    reg_and_offset = get_reg_offset(expr)
    if reg_and_offset is None:
        return None
    reg, offset = reg_and_offset
    if reg != Register.AP:
        return None
    return offset
class Adadelta(Optimizer):
    """Adadelta optimizer (https://arxiv.org/abs/1212.5701) using the
    multi-tensor torch._foreach_* kernels.

    Fix: line '_grad()' before step() was a mangled decorator — a bare call to
    an undefined name that raised NameError at class-definition time. Restored
    to @torch.no_grad(), matching the PyTorch optimizer convention (the closure
    is still evaluated under enable_grad()).
    """

    def __init__(self, params, lr=1.0, rho=0.9, eps=1e-06, weight_decay=0):
        # Validate hyperparameters up front so bad configs fail fast.
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= rho <= 1.0)):
            raise ValueError('Invalid rho value: {}'.format(rho))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= weight_decay)):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        defaults = dict(lr=lr, rho=rho, eps=eps, weight_decay=weight_decay)
        super(Adadelta, self).__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: Optional callable that re-evaluates the model and returns
                the loss (evaluated with gradients enabled).

        Returns:
            The loss from the closure, or None.
        """
        loss = None
        if (closure is not None):
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            grads = []
            params_with_grad = []
            states = []
            square_avgs = []
            acc_deltas = []
            (rho, eps) = (group['rho'], group['eps'])
            for p in group['params']:
                if (p.grad is not None):
                    if p.grad.is_sparse:
                        raise RuntimeError('Adadelta does not support sparse gradients')
                    grads.append(p.grad)
                    params_with_grad.append(p)
                    state = self.state[p]
                    # Lazy state initialization on first step for this param.
                    if (len(state) == 0):
                        state['step'] = 0
                        state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        state['acc_delta'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    square_avgs.append(state['square_avg'])
                    acc_deltas.append(state['acc_delta'])
                    state['step'] += 1
                    states.append(state)
            if (group['weight_decay'] != 0):
                # Decoupled L2: grad += weight_decay * param.
                torch._foreach_add_(grads, params_with_grad, alpha=group['weight_decay'])
            # E[g^2] <- rho * E[g^2] + (1 - rho) * g^2
            torch._foreach_mul_(square_avgs, rho)
            torch._foreach_addcmul_(square_avgs, grads, grads, value=(1 - rho))
            # delta = sqrt(E[dx^2] + eps) / sqrt(E[g^2] + eps) * g
            std = torch._foreach_add(square_avgs, eps)
            torch._foreach_sqrt_(std)
            deltas = torch._foreach_add(acc_deltas, eps)
            torch._foreach_sqrt_(deltas)
            torch._foreach_div_(deltas, std)
            torch._foreach_mul_(deltas, grads)
            # param <- param - lr * delta
            torch._foreach_add_(params_with_grad, deltas, alpha=(- group['lr']))
            # E[dx^2] <- rho * E[dx^2] + (1 - rho) * delta^2
            torch._foreach_mul_(acc_deltas, rho)
            torch._foreach_addcmul_(acc_deltas, deltas, deltas, value=(1 - rho))
        return loss
def make_builder(out_file, impl, vocab_size=None):
    """Create a dataset builder for the requested storage implementation.

    'mmap' picks the narrowest integer dtype that fits vocab_size; 'fasta' is
    unsupported; anything else falls back to the plain indexed builder.
    """
    if impl == 'fasta':
        raise NotImplementedError
    if impl == 'mmap':
        return MMapIndexedDatasetBuilder(out_file, dtype=best_fitting_int_dtype(vocab_size))
    return IndexedDatasetBuilder(out_file)
def _pairwise_distances(embeddings, squared=False):
dot_product = torch.matmul(embeddings, embeddings.t())
square_norm = torch.diag(dot_product)
distances = ((square_norm.unsqueeze(0) - (2.0 * dot_product)) + square_norm.unsqueeze(1))
distances[(distances < 0)] = 0
if (not squared):
mask = distances.eq(0).float()
distances = (distances + (mask * 1e-16))
distances = ((1.0 - mask) * torch.sqrt(distances))
return distances |
def train_epoch_with_utterances(batches, model, randomize=True):
    """Run one training epoch over batches; return the mean per-batch loss."""
    if randomize:
        # In-place shuffle: the caller's list order changes.
        random.shuffle(batches)
    progbar = get_progressbar('train ', len(batches))
    progbar.start()
    running_loss = 0.0
    for step, batch in enumerate(batches):
        running_loss += model.train_step(batch)
        progbar.update(step)
    progbar.finish()
    return running_loss / len(batches)
def beat_seq(ts):
    """Build an accent sequence for time signature ts at 16th-note resolution.

    Index 0 gets an extra accent, every beat onset gets +1, and strong beats
    (every 2 or 3 beats depending on duple/triple grouping) get one more.
    """
    beats_per_bar = ts.numerator
    beat_len = 4 / ts.denominator
    steps_per_beat = int(beat_len / 0.25)
    sequence = [0] * (beats_per_bar * steps_per_beat)
    # Downbeat accent.
    sequence[0] += 1
    # Grouping: triple meter wins over duple; otherwise no strong-beat layer.
    if beats_per_bar % 3 == 0:
        grouping = 3
    elif beats_per_bar % 2 == 0:
        grouping = 2
    else:
        grouping = 0
    for step in range(len(sequence)):
        if step % (beat_len / 0.25) == 0:
            sequence[step] += 1
        strong_triple = grouping == 3 and step % ((3 * beat_len) / 0.25) == 0
        strong_duple = grouping == 2 and step % ((2 * beat_len) / 0.25) == 0
        if strong_triple or strong_duple:
            sequence[step] += 1
    return sequence
def fuzzy_parse_action(text):
    """Parse 'Action[argument]' text into an (action_type, argument) tuple.

    Surrounding spaces and trailing dots are stripped first; text that does
    not match the pattern comes back as (stripped_text, '').
    """
    cleaned = text.strip(' ').strip('.')
    match = re.match('^(\\w+)\\[(.+)\\]', cleaned)
    if not match:
        return (cleaned, '')
    return (match.group(1), match.group(2))
def get_last_window_attn_mask(window_size, max_seq_length=800):
    """Causal attention mask limited to the last `window_size` positions.

    Entry (i, j) is 1.0 iff j <= i and i - j <= window_size, else 0.0.

    Args:
        window_size: How far back each position may attend (must be > 0).
        max_seq_length: Side length of the square mask.

    Returns:
        Float tensor of shape (1, 1, max_seq_length, max_seq_length).

    Improvement: replaces the original nested Python loop (O(n^2) iterations —
    640k at the default n=800) and its dead `counter` variable with an
    equivalent vectorized construction.
    """
    assert window_size > 0
    positions = torch.arange(max_seq_length)
    # offset[i, j] = i - j; valid entries are 0 <= offset <= window_size.
    offset = positions.unsqueeze(1) - positions.unsqueeze(0)
    mask = ((offset >= 0) & (offset <= window_size)).float()
    return mask.view(1, 1, max_seq_length, max_seq_length)
def get_model(model_name, config, is_training=True, inference_only=False, num_pass=1, num_node=1, inp=None, label=None, batch_size=None):
    """Look up model_name in MODEL_REGISTRY and build it, optionally wrapped
    for multi-pass (gradient accumulation) or multi-node execution.

    Raises:
        ValueError: when model_name is not registered.
    """
    # Deep-copy the config into an immutable namedtuple view via a JSON
    # round-trip, so the constructed model cannot mutate the caller's config.
    config_dict = dict(config.__dict__)
    config_copy = json.loads(json.dumps(config_dict), object_hook=(lambda d: namedtuple('X', d.keys())(*d.values())))
    key = model_name
    if (batch_size is not None):
        # Split the global batch across passes and nodes.
        batch_size = ((batch_size // num_pass) // num_node)
        log.info('Batch size is set to {}'.format(batch_size), verbose=0)
    if (key not in MODEL_REGISTRY):
        raise ValueError('Unknown model "{}"'.format(key))
    def _get_model(*args, **kwargs):
        # Factory handed to the wrappers so they can build inner models themselves.
        return MODEL_REGISTRY[key](*args, **kwargs)
    if (num_pass > 1):
        return MultiPassModelV2(config_copy, _get_model, is_training=is_training, num_passes=num_pass, batch_size=batch_size, inp=inp, label=label)
    if (num_node > 1):
        # Multi-pass plus multi-node combined is not implemented.
        assert (num_pass == 1), 'Not supported'
        return MultiNodeModel(config_copy, _get_model, is_training=is_training, num_worker=num_node, inp=inp, label=label)
    return _get_model(config_copy, is_training=is_training, inp=inp, label=label, inference_only=inference_only, batch_size=batch_size, apply_grad=True)
def _get_epoch_timings():
    """Collect gtimer stamps for the latest iteration, keyed for logging.

    Returns an OrderedDict of 'time/<name> (s)' entries plus the per-epoch
    sum and the overall total.
    """
    stamp_itrs = gt.get_times().stamps.itrs
    timings = OrderedDict()
    epoch_total = 0
    for name in sorted(stamp_itrs):
        latest = stamp_itrs[name][-1]
        epoch_total += latest
        timings['time/{} (s)'.format(name)] = latest
    timings['time/epoch (s)'] = epoch_total
    timings['time/total (s)'] = gt.get_times().total
    return timings
def process_config(args):
    """Assemble the experiment configuration from layered YAML sources.

    Precedence (lowest to highest): package defaults, per-dataset defaults,
    the user's config file (or a previously saved config in args.save_path),
    then command-line overrides from args. Returns a meta_config dict with
    'config', 'stage_config', and start/stage bookkeeping.
    """
    # Load the raw user config; when given explicitly, also archive a copy
    # next to the outputs so the run is reproducible.
    if (args.config_file is not None):
        with open(args.config_file) as file:
            raw_config = yaml.load(file, Loader=yaml.Loader)
        os.makedirs(args.save_path, exist_ok=True)
        shutil.copy(args.config_file, os.path.join(args.save_path, 'config.yaml'))
    else:
        with open(os.path.join(args.save_path, 'config.yaml')) as file:
            raw_config = yaml.load(file, Loader=yaml.Loader)
    # Package-wide and per-dataset default settings.
    with open(os.path.join(_default_config_path, 'default_config.yaml')) as file:
        defaults = yaml.load(file, Loader=yaml.Loader)
    dataset_default_path = os.path.join(_default_config_path, 'datasets', (raw_config['dataset'] + '.yaml'))
    # NOTE(review): when no dataset defaults file exists, dataset_defaults is
    # never bound and the accesses below raise NameError — confirm every
    # supported dataset ships a defaults file.
    if os.path.exists(dataset_default_path):
        with open(dataset_default_path) as file:
            dataset_defaults = yaml.load(file, Loader=yaml.Loader)
    meta_config = {}
    config = {}
    stage_config = {}
    # --- Basic dataset / IO settings -------------------------------------
    config['dataset_name'] = raw_config['dataset']
    config['save_path'] = args.save_path
    config['image_channels'] = dataset_defaults['image_channels']
    config['image_size'] = raw_config.get('image_size', dataset_defaults.get('image_size'))
    # raw_config overrides dataset defaults, which override package defaults.
    for key in ['enc_gen_fc_layers', 'dis_cla_fc_layers', 'num_workers', 'device', 'growing_dataset']:
        config[key] = raw_config.get(key, dataset_defaults.get(key, defaults[key]))
    # Command-line values beat everything.
    config['device'] = (args.device or config['device'])
    config['data_path'] = (args.data_path or raw_config.get('data_path'))
    config['dataset_args'] = {}
    config['dataset_args'].update(dataset_defaults.get('dataset_args', {}))
    config['dataset_args'].update(raw_config.get('dataset_args', {}))
    # --- Factor bookkeeping ----------------------------------------------
    # All named (non-'unknown') factors defined for this dataset.
    config['all_factors'] = []
    for factor_defaults in dataset_defaults['factors']:
        if (factor_defaults['name'] != 'unknown'):
            config['all_factors'].append(factor_defaults['name'])
    config['plot_config'] = raw_config.get('plot_config', [])
    # Labeled factors: per-factor name, latent size, dropout keep-prob, init.
    config['labeled_factors'] = []
    config['labeled_size'] = []
    config['labeled_keep_prob'] = []
    config['labeled_init'] = []
    if ('labeled_factors' in raw_config):
        for item in raw_config['labeled_factors']:
            if isinstance(item, str):
                # Shorthand: a bare name pulls everything from dataset defaults.
                name = item
                config['labeled_factors'].append(name)
                for factor_defaults in dataset_defaults['factors']:
                    if (factor_defaults['name'] == name):
                        break
                size = factor_defaults['size']
                config['labeled_size'].append(size)
                config['labeled_keep_prob'].append(_make_keep_prob(size, factor_defaults.get('dropout')))
                config['labeled_init'].append(factor_defaults.get('init'))
            else:
                # Dict form: per-field overrides on top of the dataset defaults.
                name = item['name']
                config['labeled_factors'].append(name)
                for factor_defaults in dataset_defaults['factors']:
                    if (factor_defaults['name'] == name):
                        break
                size = item.get('size', factor_defaults['size'])
                config['labeled_size'].append(size)
                config['labeled_keep_prob'].append(_make_keep_prob(size, item.get('dropout', factor_defaults.get('dropout'))))
                config['labeled_init'].append(item.get('init', factor_defaults.get('init')))
    else:
        # No explicit selection: label every non-'unknown' dataset factor.
        for factor_defaults in dataset_defaults['factors']:
            name = factor_defaults['name']
            if (name != 'unknown'):
                config['labeled_factors'].append(name)
                size = factor_defaults['size']
                config['labeled_size'].append(size)
                config['labeled_keep_prob'].append(_make_keep_prob(size, factor_defaults.get('dropout')))
                config['labeled_init'].append(factor_defaults.get('init'))
    # --- Unknown (unlabeled) latent block --------------------------------
    if ('unknown_size' in raw_config):
        config['unknown_size'] = raw_config['unknown_size']
        config['unknown_keep_prob'] = _make_keep_prob(config['unknown_size'], raw_config.get('unknown_dropout', defaults['unknown_dropout']))
    else:
        # Size defaults to the sum of all factors that were not labeled.
        config['unknown_size'] = 0
        unknown_keep_prob = []
        for factor_defaults in dataset_defaults['factors']:
            if (factor_defaults['name'] not in config['labeled_factors']):
                size = factor_defaults['size']
                config['unknown_size'] += size
                unknown_keep_prob.extend(_make_keep_prob(size, factor_defaults.get('dropout')))
        if ('unknown_dropout' in raw_config):
            config['unknown_keep_prob'] = _make_keep_prob(config['unknown_size'], raw_config['unknown_dropout'])
        else:
            config['unknown_keep_prob'] = sorted(unknown_keep_prob, reverse=True)
    # --- Network architecture --------------------------------------------
    config['conv_channels'] = raw_config.get('conv_channels', dataset_defaults.get('conv_channels'))
    if (config['conv_channels'] is None):
        # Derive the number of conv levels by repeatedly halving the image
        # until it is small, then pick the matching default channel list.
        num_levels = 0
        size = config['image_size']
        while (size >= 12):
            size = (((size + 2) // 4) * 2)
            num_levels += 1
        if (size >= 6):
            num_levels += 1
        config['conv_channels'] = defaults['conv_channels'][num_levels]
    else:
        num_levels = len(config['conv_channels'])
    config['conv_layers'] = raw_config.get('conv_layers', dataset_defaults.get('conv_layers', ([1] * num_levels)))
    config['fc_features'] = raw_config.get('fc_features', dataset_defaults.get('fc_features'))
    if (config['fc_features'] is None):
        config['fc_features'] = defaults['fc_features'][min(num_levels, 8)]
    # --- Per-stage settings ----------------------------------------------
    # Batch size shrinks with image size to bound memory use.
    if (config['image_size'] <= 32):
        batch_size = 64
    elif (config['image_size'] <= 512):
        batch_size = (2048 // config['image_size'])
    else:
        batch_size = 4
    for stage in ['stage1', 'classifier', 'stage2', 'lenc']:
        # Layering: defaults(all) < defaults(stage) < dataset(all) <
        # dataset(stage) < raw(all) < raw(stage).
        stage_config[stage] = {'batch_size': batch_size}
        stage_config[stage].update(defaults.get('all_stages', {}))
        stage_config[stage].update(defaults.get(stage, {}))
        stage_config[stage].update(dataset_defaults.get('all_stages', {}))
        stage_config[stage].update(dataset_defaults.get(stage, {}))
        stage_config[stage].update(raw_config.get('all_stages', {}))
        stage_config[stage].update(raw_config.get(stage, {}))
    # --- Sample-grid layout for visualization ----------------------------
    if ('sample_grid_size' in raw_config):
        config['sample_row'] = raw_config['sample_grid_size']
    elif (config['image_size'] <= 42):
        config['sample_row'] = 24
    elif (config['image_size'] <= 128):
        config['sample_row'] = ((512 // config['image_size']) * 2)
    elif (config['image_size'] <= 256):
        config['sample_row'] = 6
    else:
        config['sample_row'] = 4
    meta_config['has_unknown'] = (config['unknown_size'] > 0)
    meta_config['has_labeled'] = (len(config['labeled_factors']) > 0)
    meta_config['has_lenc_stage'] = (meta_config['has_labeled'] and stage_config['stage2']['use_embedding'])
    # Halve the columns when both latent kinds are shown side by side.
    config['sample_col'] = ((config['sample_row'] // 2) if (meta_config['has_unknown'] and meta_config['has_labeled']) else config['sample_row'])
    # --- Resume bookkeeping ----------------------------------------------
    if (args.start_from is not None):
        meta_config['start_stage'] = args.start_from[0]
        meta_config['start_iter'] = (int(args.start_from[1]) if (len(args.start_from) > 1) else 0)
    else:
        meta_config['start_stage'] = None
    meta_config['config'] = config
    meta_config['stage_config'] = stage_config
    return meta_config
def test_list_array():
    """ak.num must report lengths per axis of a regular 3x5x2 list array and
    reject axes beyond the array's depth (both positive and negative)."""
    array = ak.highlevel.Array(np.arange(((3 * 5) * 2)).reshape(3, 5, 2).tolist())
    # Positive axes: outermost length, then per-sublist lengths.
    assert (ak.operations.num(array, axis=0) == 3)
    assert (ak.operations.num(array, axis=1).to_list() == [5, 5, 5])
    assert (ak.operations.num(array, axis=2).to_list() == [[2, 2, 2, 2, 2], [2, 2, 2, 2, 2], [2, 2, 2, 2, 2]])
    with pytest.raises(ValueError) as err:
        assert ak.operations.num(array, axis=3)
    assert ('axis=3 exceeds the depth of this array' in str(err.value))
    # Negative axes mirror the positive ones.
    assert (ak.operations.num(array, axis=(- 1)).to_list() == [[2, 2, 2, 2, 2], [2, 2, 2, 2, 2], [2, 2, 2, 2, 2]])
    assert (ak.operations.num(array, axis=(- 2)).to_list() == [5, 5, 5])
    assert (ak.operations.num(array, axis=(- 3)) == 3)
    with pytest.raises(ValueError) as err:
        assert ak.operations.num(array, axis=(- 4))
    assert ('axis=-4 exceeds the depth of this array' in str(err.value))
# NOTE(review): '_module()' looks like a mangled decorator — in mmdetection this
# class is registered via @DETECTORS.register_module(); confirm before use.
_module()
class MaskRCNN(TwoStageDetector):
    """Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_."""
    def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None):
        # Pure pass-through: all components are assembled by TwoStageDetector.
        super(MaskRCNN, self).__init__(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained)
def _test_ndarray_2d():
    """Exercise 2-D ndarray read/write in both directions: a taichi ndarray
    updated from a numpy array, then the numpy array updated from the ndarray."""
    n = 4
    m = 7
    # NOTE(review): with ti.types.ndarray() annotations this is normally a
    # @ti.kernel — the decorator appears to have been lost; confirm.
    def run(x: ti.types.ndarray(), y: ti.types.ndarray()):
        for i in range(n):
            for j in range(m):
                x[(i, j)] += ((i + j) + y[(i, j)])
    a = ti.ndarray(ti.i32, shape=(n, m))
    for i in range(n):
        for j in range(m):
            a[(i, j)] = (i * j)
    b = np.ones((n, m), dtype=np.int32)
    run(a, b)
    # a = i*j + (i + j) + 1 after the first pass.
    for i in range(n):
        for j in range(m):
            assert (a[(i, j)] == ((((i * j) + i) + j) + 1))
    # Second pass writes into the numpy array using the updated ndarray.
    run(b, a)
    for i in range(n):
        for j in range(m):
            assert (b[(i, j)] == ((i * j) + (((i + j) + 1) * 2)))
def sitk_resample_to_image(image, reference_image, default_value=0.0, interpolator=sitk.sitkLinear, transform=None, output_pixel_type=None):
    """Resample image onto the grid (size/spacing/origin/direction) of reference_image.

    Defaults to an identity transform and the input image's own pixel type;
    voxels outside the input get default_value.
    """
    if transform is None:
        transform = sitk.Transform()
        transform.SetIdentity()
    if output_pixel_type is None:
        output_pixel_type = image.GetPixelID()
    resampler = sitk.ResampleImageFilter()
    resampler.SetInterpolator(interpolator)
    resampler.SetTransform(transform)
    resampler.SetOutputPixelType(output_pixel_type)
    resampler.SetDefaultPixelValue(default_value)
    resampler.SetReferenceImage(reference_image)
    return resampler.Execute(image)
class ConvLinSeq(nn.Module):
    """Convolutional feature extractor followed by a linear head.

    When a Lipschitz constraint k is given it is split evenly (sqrt(k) each)
    between the conv and linear halves so the composition stays k-Lipschitz.
    """
    def __init__(self, input_dims, linear_hidden_dims, conv_hidden_dims, output_dim, kernel_dim, k_lipschitz, p_drop):
        super().__init__()
        if (k_lipschitz is not None):
            # Split the Lipschitz budget across the two sub-networks.
            k_lipschitz = (k_lipschitz ** (1.0 / 2.0))
        self.convolutions = convolution_sequential(input_dims=input_dims, hidden_dims=conv_hidden_dims, output_dim=output_dim, kernel_dim=kernel_dim, k_lipschitz=k_lipschitz, p_drop=p_drop)
        # Linear input size = last conv channel count times the spatial area
        # after len(conv_hidden_dims) halvings of each spatial dimension.
        # NOTE(review): assumes input_dims is (H, W, ...) with H=input_dims[0],
        # W=input_dims[1] and each conv level halves both — confirm against
        # convolution_sequential.
        self.linear = linear_sequential(input_dims=[((conv_hidden_dims[(- 1)] * (input_dims[0] // (2 ** len(conv_hidden_dims)))) * (input_dims[1] // (2 ** len(conv_hidden_dims))))], hidden_dims=linear_hidden_dims, output_dim=output_dim, k_lipschitz=k_lipschitz, p_drop=p_drop)
    def forward(self, input):
        batch_size = input.size(0)
        input = self.convolutions(input)
        # Flatten all conv features per sample before the linear head.
        input = self.linear(input.view(batch_size, (- 1)))
        return input
class EncoderBlock(nn.Module):
    """Transformer encoder block over student-interaction sequences.

    The first block sums exercise/category/elapsed-time/response/position
    embeddings into an interaction embedding; later blocks attend directly
    over the previous block's output.
    """

    def __init__(self, n_heads, n_dims, total_ex, total_cat, seq_len, time_width):
        super(EncoderBlock, self).__init__()
        self.seq_len = seq_len
        self.exercise_embed = nn.Embedding(total_ex, n_dims)
        self.category_embed = nn.Embedding(total_cat, n_dims)
        self.position_embed = nn.Embedding(seq_len, n_dims)
        self.response_embed = nn.Embedding(total_ex, n_dims)
        self.elapsetime_embed = nn.Embedding(time_width, n_dims)
        self.layer_norm = nn.LayerNorm(n_dims)
        self.multihead = MultiHeadWithFFN(n_heads=n_heads, n_dims=n_dims)

    def forward(self, input_e, category, elapse_time, response, first_block=True):
        if not first_block:
            # Later blocks: self-attend over the previous block's output as-is.
            return self.multihead(q_input=input_e, kv_input=input_e)
        ex = self.exercise_embed(input_e)
        cat = self.category_embed(category)
        etime = self.elapsetime_embed(elapse_time)
        resp = self.response_embed(response)
        positions = pos_encode(self.seq_len - 1)
        if config.device == 'cuda':
            positions = positions.cuda()
        pos = self.position_embed(positions)
        interaction = cat + ex + etime + resp + pos
        return self.multihead(q_input=interaction, kv_input=interaction)
def check_string(context, obj, stacklevel=3):
    """Emit a WSGIWarning unless `obj` is exactly `str`.

    Deliberately an exact type check (subclasses rejected).  NOTE(review):
    `stacklevel` is accepted but not forwarded to `warn` -- confirm intent.
    """
    if type(obj) is str:
        return
    warn("'%s' requires strings, got '%s'" % (context, type(obj).__name__), WSGIWarning)
def trainModel(model, trainData, validData, dataset, optim):
    """Train `model` on `trainData`, validate after each epoch, and checkpoint.

    Depends on the module-level `opt` namespace (gpus, epochs, curriculum,
    extra_shuffle, log_interval, start_epoch, save_model, num_classes) and on
    helpers defined elsewhere in this file (`NMTCriterion`,
    `memoryEfficientLoss`, `eval` -- NOTE(review): `eval` here shadows the
    builtin; it must be the project's evaluation function).
    """
    sys.stdout.flush()
    model.train()
    criterion = NMTCriterion(opt.num_classes)
    vocab_size = dataset['dicts']['src'].size()
    start_time = time.time()
    def trainEpoch(epoch):
        # One full pass over trainData; returns (avg loss per word, accuracy).
        if (opt.extra_shuffle and (epoch > opt.curriculum)):
            trainData.shuffle()
        # Batches are visited in random order only once past the curriculum phase.
        batchOrder = torch.randperm(len(trainData))
        (total_loss, total_words, total_num_correct) = (0, 0, 0)
        (report_loss, report_tgt_words, report_src_words, report_num_correct) = (0, 0, 0, 0)
        start = time.time()
        for i in range(len(trainData)):
            batchIdx = (batchOrder[i] if (epoch > opt.curriculum) else i)
            # Drop the last element of the batch tuple -- presumably indices; TODO confirm.
            batch = trainData[batchIdx][:(- 1)]
            src = batch[0]
            # Fold token ids into [0, vocab_size) and build a dense one-hot source tensor.
            inp = (src[0] % vocab_size)
            inp_ = torch.unsqueeze(inp, 2)
            if (len(opt.gpus) >= 1):
                one_hot = Variable(torch.cuda.FloatTensor(src[0].size(0), src[0].size(1), vocab_size).zero_())
            else:
                one_hot = Variable(torch.FloatTensor(src[0].size(0), src[0].size(1), vocab_size).zero_())
            one_hot_scatt = one_hot.scatter_(2, inp_, 1)
            model.zero_grad()
            outputs = model(one_hot_scatt)
            targets = batch[1]
            # memoryEfficientLoss hands back the gradient w.r.t. outputs so we
            # can backpropagate without retaining the full loss graph.
            (loss, gradOutput, num_correct) = memoryEfficientLoss(outputs, targets, model, criterion)
            outputs.backward(gradOutput)
            optim.step()
            num_words = targets.size(1)
            report_loss += loss
            report_num_correct += num_correct
            report_tgt_words += num_words
            report_src_words += sum(batch[0][1])
            total_loss += loss
            total_num_correct += num_correct
            total_words += num_words
            # Fires every opt.log_interval batches (i.e. when i == k*log_interval - 1).
            if ((i % opt.log_interval) == ((- 1) % opt.log_interval)):
                print(('Epoch %2d, %5d/%5d; acc: %6.2f; %3.0f src tok/s; %3.0f tgt tok/s; %6.0f s elapsed' % (epoch, (i + 1), len(trainData), ((report_num_correct / report_tgt_words) * 100), (report_src_words / (time.time() - start)), (report_tgt_words / (time.time() - start)), (time.time() - start_time))))
                sys.stdout.flush()
                # Reset the rolling report counters for the next window.
                report_loss = report_tgt_words = report_src_words = report_num_correct = 0
                start = time.time()
        return ((total_loss / total_words), (total_num_correct / total_words))
    for epoch in range(opt.start_epoch, (opt.epochs + 1)):
        print('')
        (train_loss, train_acc) = trainEpoch(epoch)
        print(('Train accuracy: %g' % (train_acc * 100)))
        print('Train Loss: ', train_loss)
        (valid_loss, valid_acc) = eval(model, criterion, validData, vocab_size)
        print(('Validation accuracy: %g' % (valid_acc * 100)))
        print('Validation Loss: ', valid_loss)
        sys.stdout.flush()
        # Learning-rate schedule driven by validation loss.
        optim.updateLearningRate(valid_loss, epoch)
        # Unwrap DataParallel when multiple GPUs are used; drop generator weights.
        model_state_dict = (model.module.state_dict() if (len(opt.gpus) > 1) else model.state_dict())
        model_state_dict = {k: v for (k, v) in model_state_dict.items() if ('generator' not in k)}
        checkpoint = {'model': model_state_dict, 'dicts': dataset['dicts'], 'opt': opt, 'epoch': epoch, 'optim': optim}
        torch.save(checkpoint, ('%s_acc_%.2f_loss_%.2f_e%d.pt' % (opt.save_model, (100 * valid_acc), valid_loss, epoch)))
class SPN(nn.Module):
    """Spatial propagation network.

    An encoder/decoder over the RGB guide image produces 4 * 3 * nf guidance
    channels; four directional spn_blocks propagate the masked input using
    three guidance maps each, and the results are fused with elementwise max
    before a final 3x3 projection back to RGB.
    """

    def __init__(self, nf=32, spn=1):
        super(SPN, self).__init__()
        self.mask_conv = nn.Conv2d(3, nf, 3, 1, 1)
        self.encoder = VGG(nf)
        self.decoder = Decoder(nf, spn)
        self.left_right = spn_block(True, False)
        self.right_left = spn_block(True, True)
        self.top_down = spn_block(False, False)
        self.down_top = spn_block(False, True)
        self.post = nn.Conv2d(nf, 3, 3, 1, 1)
        self.nf = nf

    def forward(self, x, rgb):
        features = self.mask_conv(x)
        guide = self.decoder(self.encoder(rgb))
        # nf-channel chunks: three guidance maps per propagation direction.
        g = torch.split(guide, self.nf, 1)
        sweeps = [
            self.left_right(features, g[0], g[1], g[2]),
            self.right_left(features, g[3], g[4], g[5]),
            self.top_down(features, g[6], g[7], g[8]),
            self.down_top(features, g[9], g[10], g[11]),
        ]
        fused = sweeps[0]
        for sweep in sweeps[1:]:
            fused = torch.max(fused, sweep)
        return self.post(fused)
def keep_t_if_possible_handler(info, t):
    """Return `t` unchanged when the copy targets the same graph; otherwise
    fall back to replacing it with a placeholder."""
    if info.graph is not info.graph_:
        return replace_t_with_placeholder_handler(info, t)
    return t
def bias_variable(shape):
    """Create a trainable bias tensor of `shape`, initialized from N(0, 0.01)."""
    return tf.Variable(tf.random_normal(shape, mean=0.0, stddev=0.01))
class BuiltinScope(Scope):
    """Scope modelling the Python builtins for the Cython compiler.

    Pre-populates variable entries for the type objects listed in
    `builtin_entries` and offers declaration helpers used while initialising
    the compiler's symbol tables.
    """
    is_builtin_scope = True
    def __init__(self):
        # When a pre-import module is configured, builtins resolve through a
        # PreImportScope instead of having no outer scope at all.
        if (Options.pre_import is None):
            Scope.__init__(self, '__builtin__', None, None)
        else:
            Scope.__init__(self, '__builtin__', PreImportScope(), None)
        self.type_names = {}
        # Sorted iteration keeps declaration order deterministic across runs.
        for (name, definition) in sorted(self.builtin_entries.items()):
            (cname, type) = definition
            self.declare_var(name, type, None, cname)
    def lookup(self, name, language_level=None, str_is_str=None):
        """Look up `name`, redirecting 'str' to 'unicode' under Python-3
        string semantics (str_is_str False; defaults to True for language
        level 2 or unspecified)."""
        if (name == 'str'):
            if (str_is_str is None):
                str_is_str = (language_level in (None, 2))
            if (not str_is_str):
                name = 'unicode'
        return Scope.lookup(self, name)
    def declare_builtin(self, name, pos):
        """Handle use of `name` as a builtin.

        Unknown names are delegated to the outer scope when present, else
        reported as an error or a level-2 warning depending on Options.
        NOTE(review): returns None when `name` *is* a real builtin -- confirm
        callers expect that.
        """
        if (not hasattr(builtins, name)):
            if (self.outer_scope is not None):
                return self.outer_scope.declare_builtin(name, pos)
            elif Options.error_on_unknown_names:
                error(pos, ('undeclared name not builtin: %s' % name))
            else:
                warning(pos, ('undeclared name not builtin: %s' % name), 2)
    def declare_builtin_cfunction(self, name, type, cname, python_equiv=None, utility_code=None):
        """Declare an extern C function implementing a builtin.

        When `python_equiv` is given ('*' meaning the same name), a matching
        Python-object variable entry is attached as `entry.as_variable`.
        """
        name = EncodedString(name)
        entry = self.declare_cfunction(name, type, None, cname, visibility='extern', utility_code=utility_code)
        if python_equiv:
            if (python_equiv == '*'):
                python_equiv = name
            else:
                python_equiv = EncodedString(python_equiv)
            var_entry = Entry(python_equiv, python_equiv, py_object_type)
            var_entry.qualified_name = self.qualify_name(name)
            var_entry.is_variable = 1
            var_entry.is_builtin = 1
            var_entry.utility_code = utility_code
            var_entry.scope = entry.scope
            entry.as_variable = var_entry
        return entry
    def declare_builtin_type(self, name, cname, utility_code=None, objstruct_cname=None):
        """Declare a builtin extension type plus the read-only global variable
        referring to its type object; returns the new BuiltinObjectType."""
        name = EncodedString(name)
        type = PyrexTypes.BuiltinObjectType(name, cname, objstruct_cname)
        scope = CClassScope(name, outer_scope=None, visibility='extern')
        scope.directives = {}
        if (name == 'bool'):
            # bool cannot be subclassed in Python.
            type.is_final_type = True
        type.set_scope(scope)
        self.type_names[name] = 1
        entry = self.declare_type(name, type, None, visibility='extern')
        entry.utility_code = utility_code
        # Companion variable entry exposing the C type-object pointer.
        var_entry = Entry(name=entry.name, type=self.lookup('type').type, pos=entry.pos, cname=entry.type.typeptr_cname)
        var_entry.qualified_name = self.qualify_name(name)
        var_entry.is_variable = 1
        var_entry.is_cglobal = 1
        var_entry.is_readonly = 1
        var_entry.is_builtin = 1
        var_entry.utility_code = utility_code
        var_entry.scope = self
        if Options.cache_builtins:
            # Cached builtins may be treated as constants by the code generator.
            var_entry.is_const = True
        entry.as_variable = var_entry
        return type
    def builtin_scope(self):
        """The builtin scope of the builtin scope is itself."""
        return self
    # Mapping: builtin name -> [C expression for its type object, entry type].
    builtin_entries = {'type': ['((PyObject*)&PyType_Type)', py_object_type], 'bool': ['((PyObject*)&PyBool_Type)', py_object_type], 'int': ['((PyObject*)&PyInt_Type)', py_object_type], 'long': ['((PyObject*)&PyLong_Type)', py_object_type], 'float': ['((PyObject*)&PyFloat_Type)', py_object_type], 'complex': ['((PyObject*)&PyComplex_Type)', py_object_type], 'bytes': ['((PyObject*)&PyBytes_Type)', py_object_type], 'bytearray': ['((PyObject*)&PyByteArray_Type)', py_object_type], 'str': ['((PyObject*)&PyString_Type)', py_object_type], 'unicode': ['((PyObject*)&PyUnicode_Type)', py_object_type], 'tuple': ['((PyObject*)&PyTuple_Type)', py_object_type], 'list': ['((PyObject*)&PyList_Type)', py_object_type], 'dict': ['((PyObject*)&PyDict_Type)', py_object_type], 'set': ['((PyObject*)&PySet_Type)', py_object_type], 'frozenset': ['((PyObject*)&PyFrozenSet_Type)', py_object_type], 'slice': ['((PyObject*)&PySlice_Type)', py_object_type], 'None': ['Py_None', py_object_type], 'False': ['Py_False', py_object_type], 'True': ['Py_True', py_object_type]}
def make_compute(sdfg, state):
    """Build the compute section of a systolic-array GEMM SDFG.

    An unrolled map over `p` instantiates P processing elements chained via
    the A/B/C pipe streams: each PE buffers one A value per (n0, k)
    iteration, streams B through while accumulating C into a local buffer,
    and finally drains C along the pipe chain.  All dataflow is expressed as
    memlet paths on `state`; node/edge insertion order matters for the
    resulting graph, so the construction sequence below must be preserved.
    """
    # Stream endpoints: each PE reads pipe index p and forwards to p + 1.
    A_pipe_in = state.add_read('A_pipe')
    A_pipe_out = state.add_write('A_pipe')
    B_pipe_in = state.add_read('B_pipe')
    B_pipe_out = state.add_write('B_pipe')
    C_pipe_in = state.add_read('C_pipe')
    C_pipe_out = state.add_write('C_pipe')
    # Loop nest: n0 (row tiles) -> k (reduction) -> {buffer_A over n1, m}.
    (entry_n0, exit_n0) = state.add_map('n0', {'n0': '0:N/P'}, schedule=dace.ScheduleType.FPGA_Device)
    (entry_k, exit_k) = state.add_map('k', {'k': '0:K'}, schedule=dace.ScheduleType.FPGA_Device)
    (entry_a, exit_a) = state.add_map('buffer_A', {'n1': '0:P'}, schedule=dace.ScheduleType.FPGA_Device)
    (entry_m, exit_m) = state.add_map('m', {'m': '0:M'}, schedule=dace.ScheduleType.FPGA_Device)
    (entry_c, exit_c) = state.add_map('write_C', {'n1': '0:P', 'm': '0:M'}, schedule=dace.ScheduleType.FPGA_Device)
    # Per-PE scalar register for the buffered A value.
    sdfg.add_scalar('A_reg', dtype=dace.float32, transient=True, storage=dace.dtypes.StorageType.FPGA_Registers)
    A_reg = state.add_write('A_reg')
    # Per-PE local accumulator for one row of C.
    sdfg.add_array('C_buffer', [M], dtype=dace.float32, transient=True, storage=dace.dtypes.StorageType.FPGA_Local)
    C_buffer_in = state.add_read('C_buffer')
    C_buffer_out = state.add_write('C_buffer')
    # Keep the A value destined for this PE, forward the rest downstream.
    buffer_a_tasklet = state.add_tasklet('buffer_a', {'a_in'}, {'a_reg', 'a_out'}, 'if n1 == P - p - 1:\n    a_reg = a_in\nif p < P - 1:\n    a_out = a_in')
    state.add_memlet_path(A_pipe_in, entry_n0, entry_k, entry_a, buffer_a_tasklet, memlet=dace.Memlet('A_pipe[p]', dynamic=False), dst_conn='a_in')
    state.add_memlet_path(buffer_a_tasklet, exit_a, A_reg, memlet=dace.Memlet('A_reg[0]', dynamic=True), src_conn='a_reg')
    state.add_memlet_path(buffer_a_tasklet, exit_a, exit_k, exit_n0, A_pipe_out, memlet=dace.Memlet('A_pipe[p + 1]', dynamic=True), src_conn='a_out')
    # Multiply-accumulate: reset the accumulator at k == 0, forward B downstream.
    compute_tasklet = state.add_tasklet('multiply_add', {'a_in', 'b_in', 'c_in'}, {'b_out', 'c_out'}, 'c_prev = 0 if k == 0 else c_in\nc_out = c_prev + a_in * b_in\nif p < P - 1:\n    b_out = b_in')
    state.add_memlet_path(A_reg, entry_m, compute_tasklet, dst_conn='a_in', memlet=dace.Memlet('A_reg[0]'))
    state.add_memlet_path(B_pipe_in, entry_n0, entry_k, entry_m, compute_tasklet, memlet=dace.Memlet('B_pipe[p]', dynamic=False), dst_conn='b_in')
    state.add_memlet_path(compute_tasklet, exit_m, exit_k, exit_n0, B_pipe_out, memlet=dace.Memlet('B_pipe[p + 1]', dynamic=True), src_conn='b_out')
    state.add_memlet_path(C_buffer_in, entry_k, entry_m, compute_tasklet, dst_conn='c_in', memlet=dace.Memlet('C_buffer[m]'))
    # Empty memlets tie C_buffer's lifetime to the n0 scope.
    state.add_memlet_path(entry_n0, C_buffer_in, memlet=dace.Memlet())
    state.add_memlet_path(compute_tasklet, exit_m, exit_k, C_buffer_out, memlet=dace.Memlet('C_buffer[m]'), src_conn='c_out')
    state.add_memlet_path(C_buffer_out, exit_n0, memlet=dace.Memlet())
    # Drain phase: emit the local C row or forward results from upstream PEs.
    write_c_tasklet = state.add_tasklet('write_c', {'buffer_in', 'forward_in'}, {'c_out'}, 'if n1 <= p:\n    c_out = forward_in if p > 0 and n1 > 0 else buffer_in')
    state.add_memlet_path(C_buffer_out, entry_c, write_c_tasklet, memlet=dace.Memlet('C_buffer[m]', dynamic=True), dst_conn='buffer_in')
    state.add_memlet_path(C_pipe_in, entry_n0, entry_c, write_c_tasklet, memlet=dace.Memlet('C_pipe[p-1]', dynamic=True), dst_conn='forward_in')
    state.add_memlet_path(write_c_tasklet, exit_c, exit_n0, C_pipe_out, memlet=dace.Memlet('C_pipe[p]', dynamic=True), src_conn='c_out')
    # Unrolled outer map replicates everything above into P processing elements.
    (compute_entry, compute_exit) = state.add_map('unroll_compute', {'p': '0:P'}, schedule=dace.ScheduleType.FPGA_Device, unroll=True)
    state.add_memlet_path(compute_entry, A_pipe_in, memlet=dace.memlet.Memlet())
    state.add_memlet_path(compute_entry, B_pipe_in, memlet=dace.memlet.Memlet())
    state.add_memlet_path(compute_entry, C_pipe_in, memlet=dace.memlet.Memlet())
    state.add_memlet_path(A_pipe_out, compute_exit, memlet=dace.memlet.Memlet())
    state.add_memlet_path(B_pipe_out, compute_exit, memlet=dace.memlet.Memlet())
    state.add_memlet_path(C_pipe_out, compute_exit, memlet=dace.memlet.Memlet())
class DeepSVDDConf(DetectorConfig):
    """Configuration for a Deep SVDD anomaly detector.

    NOTE(review): every named hyperparameter in __init__ is accepted but
    never stored on the instance -- only **kwargs reaches the base class.
    Either the assignments were lost or the defaults are consumed elsewhere;
    confirm against the original implementation before relying on them.
    """
    # Default input transform applied by the detector framework.
    _default_transform = MeanVarNormalize()
    def __init__(self, net_name='merlion', xp_path='./results/deepsvdd', load_model='./results/deepsvdd/deepsvdd.pkl', objective='one-class', nu=0.1, device='cpu', seed=(- 1), optimizer_name='adam', lr=0.001, n_epochs=300, lr_milestone=None, batch_size=32, weight_decay=0.001, pretrain=True, ae_optimizer_name='adam', ae_lr=0.001, ae_n_epochs=300, ae_lr_milestone=None, ae_batch_size=32, ae_weight_decay=1e-06, n_jobs_dataloader=0, normal_class=0, input_channels=None, final_out_channels=None, sequence_length=8, n_layers=3, dropout=0.1, hidden_size=5, kernel_size=3, stride=1, **kwargs):
        super(DeepSVDDConf, self).__init__(**kwargs)
def main(in_directory, out_directory, short_name):
    """Tokenize phrases from `in_directory` and write them to
    `<out_directory>/<short_name>.train.json`."""
    phrases = get_tokenized_phrases(in_directory)
    os.makedirs(out_directory, exist_ok=True)
    out_path = os.path.join(out_directory, '%s.train.json' % short_name)
    process_utils.write_list(out_path, phrases)
class ATSDmat(SpectralMatrix):
    """Spectral matrix between a Chebyshev (T) test basis and a
    Shen-Dirichlet (SD) trial basis.

    Assembly is lazy: only a dict of diagonal offsets mapped to callables is
    built; each callable produces the closed-form entries of its diagonal.
    """
    def assemble(self, method):
        # NOTE(review): `method` is accepted but unused here -- presumably
        # part of the SpectralMatrix.assemble interface; confirm.
        (test, trial) = (self.testfunction, self.trialfunction)
        assert isinstance(test[0], T)
        assert isinstance(trial[0], SD)
        N = test[0].N
        k = np.arange(N, dtype=float)
        # Uniform scale applied to every diagonal entry.
        self._keyscale = 1
        def _getkey(j):
            # Closed-form entries of diagonal offset j (main diagonal for j == 0).
            if (j == 0):
                return (((((- np.pi) / 2) * k[2:]) * ((k[2:] ** 2) - (k[:(- 2)] ** 2))) * self._keyscale)
            return (((((k[j:(- 2)] * ((k[j:(- 2)] ** 2) - (k[:(- (j + 2))] ** 2))) - (k[(j + 2):] * ((k[(j + 2):] ** 2) - (k[:(- (j + 2))] ** 2)))) * np.pi) / 2.0) * self._keyscale)
        # Only the even diagonals 0, 2, ..., N-4 are nonzero.
        d = dict.fromkeys(np.arange(0, (N - 2), 2), _getkey)
        return d
class EvalConfig():
    """Bundle of settings for an evaluation run.

    NOTE(review): fields are declared with attr.ib() but no @attr.s/@attr.attrs
    decorator is visible here -- it appears to have been stripped; without it
    these class attributes stay raw attr descriptors.  Confirm against the
    original file.
    """
    config = attr.ib()        # parsed experiment configuration
    config_args = attr.ib()   # raw config-override arguments
    logdir = attr.ib()        # directory holding checkpoints/logs
    section = attr.ib()       # which config section to evaluate
    inferred = attr.ib()      # path to inferred predictions
    output = attr.ib()        # where to write evaluation results
def random_uniform(dims: Sequence[Dim], *, dtype: Optional[str]=None, device: Optional[str]=None, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None, minval: Union[(int, float, Tensor)]=0, maxval: Union[(int, float, Tensor)]=1, seed: Optional[Union[(int, Sequence[int], numpy.ndarray)]]=None, algorithm: Optional[str]=None, explicit_state: Optional[Tensor]=None, auto_update_state: Optional[bool]=None, static: Optional[bool]=None, out: Optional[Tensor]=None):
    """Draw uniform random values over `dims`; thin wrapper around :func:`random`
    with the distribution fixed to ``'uniform'``."""
    return random(
        dims=dims,
        dtype=dtype,
        device=device,
        sparse_dim=sparse_dim,
        feature_dim=feature_dim,
        distribution='uniform',
        minval=minval,
        maxval=maxval,
        seed=seed,
        algorithm=algorithm,
        explicit_state=explicit_state,
        auto_update_state=auto_update_state,
        static=static,
        out=out,
    )
@pytest.mark.parametrize('name', sorted(PARAMETERS))
@pytest.mark.filterwarnings('ignore:.*method is good for exploring strategies.*')
def test_get_examples(name, swagger_20):
    """A parameter declared with `x-example` yields exactly one strategy whose
    example reproduces the declared value (body parameters go through
    PayloadAlternatives with a JSON media type, others through ParameterSet).

    Fixed: the decorator lines had lost their `@pytest.mark` prefix
    (bare `.parametrize(...)` is a syntax error).
    """
    if name == 'body':
        example = expected = {'name': 'John'}
        media_type = 'application/json'
        cls = PayloadAlternatives
    else:
        example = 'John'
        expected = {'name': example}
        media_type = None
        cls = ParameterSet
    operation = make_operation(swagger_20, **{name: cls([OpenAPI20Parameter({'in': name, 'name': 'name', 'required': True, 'type': 'string', 'x-example': example})])})
    strategies = operation.get_strategies_from_examples()
    assert len(strategies) == 1
    assert strategies[0].example() == Case(operation, data_generation_method=DataGenerationMethod.positive, media_type=media_type, **{name: expected})
def _read_emotion_wav(number, path, name):
    """Read `<prefix>/<path>/<number>/<name>_out.wav`; return
    (samples as float ndarray, sample rate, duration in seconds)."""
    wav_path = os.path.join(prefix, '{1}/{0}/{2}_out.wav'.format(number, path, name))
    # Context manager guarantees the wave handle is closed (the original
    # implementation leaked one file descriptor per recording).
    with wave.open(wav_path) as wav_file:
        sample_rate = wav_file.getframerate()
        n_frames = wav_file.getnframes()
        # Plain `float` replaces the deprecated/removed `np.float` alias.
        samples = np.frombuffer(wav_file.readframes(n_frames), dtype=np.short).astype(float)
    return (samples, sample_rate, n_frames / sample_rate)

def extract_features(number, audio_features, targets, path):
    """Compute VLAD features for the positive/neutral/negative recordings of
    sample `number` under `path`.

    Appends the three feature vectors to `audio_features` and the sample's
    label (read from new_label.txt) to `targets`.  Silently skips samples
    whose positive recording is missing.  Updates the module-level
    `max_len`/`min_len` duration trackers as a side effect.
    """
    global max_len, min_len
    if not os.path.exists(os.path.join(prefix, '{1}/{0}/positive_out.wav'.format(number, path))):
        return
    (wave_data1, sr1, len1) = _read_emotion_wav(number, path, 'positive')
    (wave_data2, sr2, len2) = _read_emotion_wav(number, path, 'neutral')
    (wave_data3, sr3, len3) = _read_emotion_wav(number, path, 'negative')
    for duration in (len1, len2, len3):
        max_len = max(max_len, duration)
        min_len = min(min_len, duration)
    with open(os.path.join(prefix, '{1}/{0}/new_label.txt'.format(number, path))) as label_file:
        target = float(label_file.readline())
    # Replace empty recordings with 5 seconds of near-silence so wav2vlad
    # always receives a non-empty signal.
    if wave_data1.shape[0] < 1:
        wave_data1 = np.array(([0.0001] * sr1) * 5)
    if wave_data2.shape[0] < 1:
        wave_data2 = np.array(([0.0001] * sr2) * 5)
    if wave_data3.shape[0] < 1:
        wave_data3 = np.array(([0.0001] * sr3) * 5)
    audio_features.append([wav2vlad(wave_data1, sr1), wav2vlad(wave_data2, sr2), wav2vlad(wave_data3, sr3)])
    targets.append(target)
def hardness_metric(batch, num_classes):
    """Estimate few-shot task hardness as the mean log-odds against the true
    class, scoring query images with a cosine prototype classifier built on
    frozen ImageNet ResNet-152 features.

    `batch` must contain a 'train'/'support' and a 'test'/'query' pair of
    (inputs, targets); inputs must be RGB.  Returns a tensor with one
    hardness value per task in the batch (higher = harder).
    """
    if (('train' not in batch) and ('support' not in batch)):
        raise ValueError('The tasks do not contain any training/support set. Make sure the tasks contain either the "train" or the "support" key.')
    if (('test' not in batch) and ('query' not in batch)):
        raise ValueError('The tasks do not contain any test/query set. Make sure the tasks contain either the "test" of the "query" key.')
    train = ('train' if ('train' in batch) else 'support')
    test = ('test' if ('test' in batch) else 'query')
    with torch.no_grad():
        # Frozen, pretrained feature extractor; no gradients are needed.
        backbone = torch.hub.load('pytorch/vision:v0.5.0', 'resnet152', pretrained=True, verbose=False)
        backbone.eval()
        (train_inputs, train_targets) = batch[train]
        (test_inputs, test_targets) = batch[test]
        (batch_size, num_images, num_channels) = train_inputs.shape[:3]
        num_test_images = test_inputs.size(1)
        backbone.to(device=train_inputs.device)
        if (num_channels != 3):
            raise ValueError('The images must be RGB images.')
        # Bring images to the 224x224 input size the backbone expects.
        # NOTE(review): `_pad_images` is defined elsewhere -- presumably pads
        # rather than resizes; confirm.
        padded_train_inputs = _pad_images(train_inputs, size=(224, 224), mode='constant', value=0.0)
        padded_test_inputs = _pad_images(test_inputs, size=(224, 224), mode='constant', value=0.0)
        # Class prototypes from support features (ReLU'd backbone outputs),
        # L2-normalized to act as cosine-classifier weights.
        train_logits = backbone(padded_train_inputs.view((- 1), 3, 224, 224))
        train_logits = F.relu(train_logits.view(batch_size, num_images, (- 1)))
        train_features = get_prototypes(train_logits, train_targets, num_classes)
        weights = F.normalize(train_features, p=2, dim=2)
        test_logits = backbone(padded_test_inputs.view((- 1), 3, 224, 224))
        test_logits = test_logits.view(batch_size, num_test_images, (- 1))
        test_logits = F.normalize(test_logits, p=2, dim=2)
        # Cosine similarities: (batch, num_classes, num_test_images).
        test_logits = torch.bmm(weights, test_logits.transpose(1, 2))
        test_log_probas = (- F.cross_entropy(test_logits, test_targets, reduction='none'))
        # log((1 - p) / p) for the correct class, averaged over query images.
        log_odds_ratios = (torch.log1p((- test_log_probas.exp())) - test_log_probas)
    return torch.mean(log_odds_ratios, dim=1)
def main(args):
    """Drive LunarLander with actions chosen by a remote vision-language model.

    Each frame is rendered, written to /tmp/frame.jpg, and posted to
    args.server_endpoint; the model's textual reply is mapped to a discrete
    engine action (anything unrecognized means no-op).
    """
    env = gym.make('LunarLander-v2', render_mode='rgb_array')
    env = gym.wrappers.RecordVideo(env, args.video_folder)
    env.reset()
    # Exact model replies mapped to LunarLander's discrete action ids.
    action_for_output = {'[FIRE LEFT ENGINE]': 1, '[FIRE MAIN ENGINE]': 2, '[FIRE RIGHT ENGINE]': 3}
    for _ in range(MAX_STEPS):
        frame = env.render()
        # NOTE(review): shuffles the module-level option list in place on every step.
        random.shuffle(LUNAR_LANDER_OPTIONS)
        options_str = ', '.join(LUNAR_LANDER_OPTIONS)
        img_fn = os.path.join('/tmp', 'frame.jpg')
        messages = [{'role': ROLE_USER, 'content': f'''<image>
You are playing lunar lander. The goal is to land the craft between the yellow flags. What is the optimal next action? {options_str}'''}]
        Image.fromarray(frame).save(img_fn)
        payload = {'images': [img_fn], 'messages': messages}
        output = requests.post(args.server_endpoint, json=payload).json()['output']
        print('> ' + output)
        action = action_for_output.get(output, 0)
        (observation, reward, terminated, truncated, info) = env.step(action)
        if terminated or truncated:
            break
class TestBuildCommand():
    """Tests for BuildCommand creation, dependency handling and execution.

    NOTE(review): several test methods below are preceded by a bare string
    expression like ('data.build_command.Shell.exec') -- these look like
    stripped @patch('data.build_command.Shell.exec') decorators (the methods
    take a `shell_mock` argument that only a patch decorator would supply).
    Confirm against the original file.
    """
    def setup(self):
        # Replace the subclass discovery hook so only the test impl is found.
        self.test_subclasses = [BuildCommandTestImpl]
        self.build_command_subclasses_orig = BuildCommand.__subclasses__
        BuildCommand._get_implementations = (lambda *_: self.test_subclasses)
        self.logger = logging.getLogger('test')
    def teardown(self):
        # Restore the original discovery hook.
        BuildCommand._get_implementations = self.build_command_subclasses_orig
    def test_get_correct_command(self):
        actual = BuildCommand.create(BuildCommandTestImpl.TEST_COMMAND)
        assert_is_instance(actual, BuildCommandTestImpl)
    def test_generic_command_returns_base_implementation(self):
        uut = BuildCommand.create('-some_command-')
        assert_is_instance(uut, BuildCommand)
        assert_equals('-some_command-', uut._name)
    def test_has_no_dependencies_by_default(self):
        uut = BuildCommand.create('-some_command-')
        assert (not uut._get_dependencies('-output-', '-project_dir-', self.logger))
    def test_raise_on_multiple_matches(self):
        # Two subclasses claiming the same command must be rejected.
        duplicate_command = BuildCommandTestImpl.TEST_COMMAND
        self.test_subclasses = [BuildCommandTestImpl, BuildCommandTestImpl]
        assert_raises(ValueError, BuildCommand.create, duplicate_command)
    # NOTE(review): likely a stripped @patch decorator (see class docstring).
    ('data.build_command.Shell.exec')
    def test_execute_runs_command(self, shell_mock):
        uut = BuildCommand.create('-command-')
        uut.execute('-project_dir-', self.logger)
        shell_mock.assert_called_with('-command-', cwd='-project_dir-', logger=ANY)
    ('data.build_command.Shell.exec')
    def test_execute_runs_command_with_args(self, shell_mock):
        command = '-command- -arg- --arg--'
        uut = BuildCommand.create(command)
        uut.execute('-project_dir-', self.logger)
        shell_mock.assert_called_with(command, cwd='-project_dir-', logger=ANY)
    def test_saves_args(self):
        uut = BuildCommand.create('-some_command- arg1 arg2')
        assert_equals(['arg1', 'arg2'], uut.args)
    def test_quoted_args_are_saved_without_quotes(self):
        uut = BuildCommand.create('-some_command- \'arg 1\' "arg 2"')
        assert_equals(['arg 1', 'arg 2'], uut.args)
    ('data.build_command.Shell.exec')
    def test_quotes_args_for_execution(self, shell_mock):
        uut = BuildCommand.create("-command- 'arg 1'")
        uut.execute('-pdir-', self.logger)
        shell_mock.assert_called_with("-command- 'arg 1'", cwd='-pdir-', logger=ANY)
    ('data.build_command.Shell.exec')
    def test_raises_on_error(self, shell_mock):
        shell_mock.side_effect = CommandFailedError('-command-', '-output-')
        uut = BuildCommand.create('-some_command-')
        assert_raises(CommandFailedError, uut.execute, '-p-', self.logger)
    ('data.build_command.Shell.exec')
    def test_default_does_not_filter_output_on_error(self, shell_mock):
        # The raised error must carry the full, unfiltered command output.
        shell_mock.side_effect = CommandFailedError('-command-', '-output-')
        uut = BuildCommand.create('-some_command-')
        try:
            uut.execute('-project_dir-', self.logger)
        except CommandFailedError as e:
            assert_equals('\n-output-', e.output)
def _is_fromfile_compatible(stream):
if (sys.version_info[0] < 3):
return True
bad_cls = []
try:
import gzip
bad_cls.append(gzip.GzipFile)
except ImportError:
pass
try:
import bz2
bad_cls.append(bz2.BZ2File)
except ImportError:
pass
bad_cls = tuple(bad_cls)
return (not isinstance(stream, bad_cls)) |
def test_clobber():
    """Verify that no image conversion function mutates its input, for every
    ordered pair of converters in `img_funcs`."""
    for make_input in img_funcs:
        for convert in img_funcs:
            source = np.random.rand(5, 5)
            converted_input = make_input(source)
            snapshot = converted_input.copy()
            convert(converted_input)
            assert_equal(converted_input, snapshot)
def check_type(obj, expected_type, logger):
    """Raise TypeError when `obj` is not an instance of `expected_type`.

    The error is logged via `logger.exception` before being raised; returns
    None when the type matches.
    """
    if not isinstance(obj, expected_type):
        # Fixed: the original message had the expected and actual types swapped.
        exception = TypeError(f'Expected type {str(expected_type)}, got type {type(obj)}')
        logger.exception(repr(exception))
        raise exception
def noelse(A: dace.float32[1]):
    # DaCe program: sets A[0] to 5 when it is positive; no else branch.
    # Inside `mytask`, `(o >> A[0])` declares `o` as an output connector
    # writing to A[0] (DaCe tasklet DSL, not ordinary Python semantics).
    # NOTE(review): @dace.program / tasklet decorators appear to have been
    # stripped from this dump -- confirm against the original test file.
    if (A[0] > 0):
        def mytask():
            (o >> A[0])
            o = 5
# NOTE(review): `_module()` looks like the remnant of a stripped registration
# decorator (e.g. @DATASETS.register_module()); confirm against the original.
_module()
class DRIVEDataset(CustomDataset):
    """DRIVE retinal-vessel segmentation dataset.

    Binary segmentation: images end in '.png', annotation maps in
    '_manual1.png'; label 0 is kept as background (reduce_zero_label=False).
    """
    CLASSES = ('background', 'vessel')
    PALETTE = [[120, 120, 120], [6, 230, 230]]
    def __init__(self, **kwargs):
        super(DRIVEDataset, self).__init__(img_suffix='.png', seg_map_suffix='_manual1.png', reduce_zero_label=False, **kwargs)
        # Fail fast when the image directory is missing.
        assert osp.exists(self.img_dir)
class TBVisualizer(object):
    """TensorBoard-backed training visualizer.

    Logs scalars and images through a SummaryWriter under
    `<checkpoints_dir>/<name>` and mirrors human-readable progress messages
    into loss_log2.txt.
    """
    def __init__(self, opt):
        self._opt = opt
        self._save_path = os.path.join(opt.checkpoints_dir, opt.name)
        self._log_path = os.path.join(self._save_path, 'loss_log2.txt')
        self._tb_path = os.path.join(self._save_path, 'summary.json')
        self._writer = SummaryWriter(self._save_path)
        # Append a session header so restarts are visible in the log file.
        with open(self._log_path, 'a') as log_file:
            now = time.strftime('%c')
            log_file.write((' Training Loss (%s) \n' % now))
    def __del__(self):
        # Best-effort close of the event writer at interpreter teardown.
        self._writer.close()
    def display_current_results(self, visuals, it, is_train, save_visuals=False):
        """Log each visual as an image under Train/<label> or Test/<label>;
        optionally also dump it as a PNG under event_imgs/."""
        for (label, image_numpy) in visuals.items():
            sum_name = '{}/{}'.format(('Train' if is_train else 'Test'), label)
            self._writer.add_image(sum_name, image_numpy, it)
            if save_visuals:
                util.save_image(image_numpy, os.path.join(self._opt.checkpoints_dir, self._opt.name, 'event_imgs', sum_name, ('%08d.png' % it)))
        # Mirror all scalars to a JSON file for offline inspection.
        self._writer.export_scalars_to_json(self._tb_path)
    def plot_scalars(self, scalars, it, is_train):
        """Add each named scalar to TensorBoard, namespaced by Train/Test."""
        for (label, scalar) in scalars.items():
            sum_name = '{}/{}'.format(('Train' if is_train else 'Test'), label)
            self._writer.add_scalar(sum_name, scalar, it)
    def print_current_train_errors(self, epoch, i, iters_per_epoch, errors, t, visuals_were_stored):
        """Print and append one training-progress message; 'v' in the header
        marks that visuals were stored for this iteration."""
        log_time = time.strftime('[%d/%m/%Y %H:%M:%S]')
        visuals_info = ('v' if visuals_were_stored else '')
        message = ('%s (T%s, epoch: %d, it: %d/%d, t/smpl: %.3fs)\n' % (log_time, visuals_info, epoch, i, iters_per_epoch, t))
        for (k, v) in errors.items():
            msg = ('\t%s:%.3f\n' % (k, v))
            message += msg
        print(message)
        with open(self._log_path, 'a') as log_file:
            log_file.write(('%s\n' % message))
    def print_current_validate_errors(self, epoch, errors, t):
        """Print and append one validation summary plus per-error values."""
        log_time = time.strftime('[%d/%m/%Y %H:%M:%S]')
        message = ('%s (V, epoch: %d, time_to_val: %ds)\n' % (log_time, epoch, t))
        for (k, v) in errors.items():
            message += ('\t%s:%.3f\n' % (k, v))
        print(message)
        with open(self._log_path, 'a') as log_file:
            log_file.write(('%s\n' % message))
    def save_images(self, visuals):
        """Save each visual as samples/<label>.png under the checkpoint dir."""
        for (label, image_numpy) in visuals.items():
            image_name = ('%s.png' % label)
            save_path = os.path.join(self._save_path, 'samples', image_name)
            util.save_image(image_numpy, save_path)
@pytest.mark.parametrize('n, user_answer, item_answer', [(5, [[], [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3]], [[], [1, 2, 3, 4, 5, 1, 2, 3, 9, 10, 1, 5, 3, 1, 2]])])
@pytest.mark.parametrize('dataset_type', [pytest.param('spark_dataframe_test', marks=pytest.mark.spark), pytest.param('pandas_dataframe_test', marks=pytest.mark.core)])
def test_last_n_interactions_splitter_without_drops(n, user_answer, item_answer, dataset_type, request):
    """LastNSplitter with strategy='interactions' and cold users/items kept
    must produce the expected per-split user/item id lists for both pandas
    and Spark dataframes.

    Fixed: the decorator lines had lost their `@pytest.mark` prefix
    (bare `.parametrize(...)` is a syntax error).
    """
    dataframe = request.getfixturevalue(dataset_type)
    splitter = LastNSplitter(N=n, divide_column='user_id', query_column='user_id', strategy='interactions', drop_cold_users=False, drop_cold_items=False)
    filtered_dataframe = splitter.split(dataframe)
    if dataset_type == 'pandas_dataframe_test':
        item_ids = _get_column_list_pandas(filtered_dataframe, 'item_id')
        user_ids = _get_column_list_pandas(filtered_dataframe, 'user_id')
    else:
        item_ids = _get_column_list(filtered_dataframe, 'item_id')
        user_ids = _get_column_list(filtered_dataframe, 'user_id')
    _check_assert(user_ids, item_ids, user_answer, item_answer)
class MarkupLMTokenizerFast(PreTrainedTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = MarkupLMTokenizer
    def __init__(self, vocab_file, merges_file, tags_dict, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, max_depth=50, max_width=1000, pad_width=1001, pad_token_label=(- 100), only_label_first_subword=True, trim_offsets=False, **kwargs):
        """Build the fast MarkupLM tokenizer.

        `tags_dict` maps HTML tag names to ids used when encoding xpaths;
        `max_depth` bounds the encoded xpath length and `max_width` the node
        subscripts, with dedicated pad ids/values used for padding.
        """
        super().__init__(vocab_file=vocab_file, merges_file=merges_file, tags_dict=tags_dict, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, max_depth=max_depth, max_width=max_width, pad_width=pad_width, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, **kwargs)
        if trim_offsets:
            raise NotImplementedError('`trim_offsets=True` is not implemented for MarkupLMTokenizerFast. Please set it to False.')
        self.tags_dict = tags_dict
        # Keep the backend pre-tokenizer's add_prefix_space in sync with the
        # requested value (the serialized tokenizer file may disagree).
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if (pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space):
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # Same synchronisation for the post-processor component; 'sep'/'cls'
        # are serialized as JSON lists but the processor expects tuples.
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            if ('sep' in state):
                state['sep'] = tuple(state['sep'])
            if ('cls' in state):
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if (state.get('add_prefix_space', add_prefix_space) != add_prefix_space):
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
        # Reserved ids/values used when encoding and padding xpath sequences:
        # unknown tags map just past the known tag ids, padding one further.
        self.max_depth = max_depth
        self.max_width = max_width
        self.pad_width = pad_width
        self.unk_tag_id = len(self.tags_dict)
        self.pad_tag_id = (self.unk_tag_id + 1)
        self.pad_xpath_tags_seq = ([self.pad_tag_id] * self.max_depth)
        self.pad_xpath_subs_seq = ([self.pad_width] * self.max_depth)
        self.pad_token_label = pad_token_label
        self.only_label_first_subword = only_label_first_subword
def get_xpath_seq(self, xpath):
xpath_tags_list = []
xpath_subs_list = []
xpath_units = xpath.split('/')
for unit in xpath_units:
if (not unit.strip()):
continue
name_subs = unit.strip().split('[')
tag_name = name_subs[0]
sub = (0 if (len(name_subs) == 1) else int(name_subs[1][:(- 1)]))
xpath_tags_list.append(self.tags_dict.get(tag_name, self.unk_tag_id))
xpath_subs_list.append(min(self.max_width, sub))
xpath_tags_list = xpath_tags_list[:self.max_depth]
xpath_subs_list = xpath_subs_list[:self.max_depth]
xpath_tags_list += ([self.pad_tag_id] * (self.max_depth - len(xpath_tags_list)))
xpath_subs_list += ([self.pad_width] * (self.max_depth - len(xpath_subs_list)))
return (xpath_tags_list, xpath_subs_list)
# NOTE(review): presumably a flattened `@add_end_docstrings(...)` decorator that
# lost its '@' during extraction — as written this bare call's result is
# discarded; confirm against the original transformers source.
_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[(TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput])], text_pair: Optional[Union[(PreTokenizedInput, List[PreTokenizedInput])]]=None, xpaths: Union[(List[List[int]], List[List[List[int]]])]=None, node_labels: Optional[Union[(List[int], List[List[int]])]]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
    """Tokenize one example or a batch, together with per-node xpaths.

    Without ``text_pair``, ``text`` holds the nodes themselves; with
    ``text_pair``, ``text`` is a question and ``text_pair`` holds the nodes.
    ``xpaths`` must parallel the nodes one-to-one; ``node_labels`` optionally
    parallels them for token classification. Validates the inputs, then
    dispatches to :meth:`batch_encode_plus` or :meth:`encode_plus`.
    """
    def _is_valid_text_input(t):
        # Accepts: str, list/tuple of str, or list/tuple of list/tuple of str
        # (empty containers are considered valid).
        if isinstance(t, str):
            return True
        elif isinstance(t, (list, tuple)):
            if (len(t) == 0):
                return True
            elif isinstance(t[0], str):
                return True
            elif isinstance(t[0], (list, tuple)):
                return ((len(t[0]) == 0) or isinstance(t[0][0], str))
            else:
                return False
        else:
            return False
    if (text_pair is not None):
        # Question-answering style input: `text` is the question, `text_pair`
        # holds the pretokenized nodes.
        if (not _is_valid_text_input(text)):
            raise ValueError('text input must of type `str` (single example) or `List[str]` (batch of examples). ')
        if (not isinstance(text_pair, (list, tuple))):
            raise ValueError('Nodes must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')
    elif (not isinstance(text, (list, tuple))):
        raise ValueError('Nodes must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')
    if (text_pair is not None):
        is_batched = isinstance(text, (list, tuple))
    else:
        # Nodes-only input is batched when it is a list of lists.
        is_batched = (isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)))
    # `nodes` is whichever argument actually carries the node strings.
    nodes = (text if (text_pair is None) else text_pair)
    assert (xpaths is not None), 'You must provide corresponding xpaths'
    if is_batched:
        assert (len(nodes) == len(xpaths)), 'You must provide nodes and xpaths for an equal amount of examples'
        for (nodes_example, xpaths_example) in zip(nodes, xpaths):
            assert (len(nodes_example) == len(xpaths_example)), 'You must provide as many nodes as there are xpaths'
    else:
        assert (len(nodes) == len(xpaths)), 'You must provide as many nodes as there are xpaths'
    if is_batched:
        if ((text_pair is not None) and (len(text) != len(text_pair))):
            raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')
        batch_text_or_text_pairs = (list(zip(text, text_pair)) if (text_pair is not None) else text)
        is_pair = bool((text_pair is not None))
        return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
    else:
        return self.encode_plus(text=text, text_pair=text_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
# NOTE(review): presumably a flattened `@add_end_docstrings(...)` decorator —
# confirm against the original source.
_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[(List[TextInput], List[TextInputPair], List[PreTokenizedInput])], is_pair: bool=None, xpaths: Optional[List[List[List[int]]]]=None, node_labels: Optional[Union[(List[int], List[List[int]])]]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
    """Encode a batch: resolve user-facing padding/truncation arguments into
    backend strategies, then delegate to :meth:`_batch_encode_plus`."""
    (padding_strategy, truncation_strategy, max_length, kwargs) = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
    return self._batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> List[str]:
    """Tokenize ``text`` (optionally with ``pair``) via the backing fast
    tokenizer and return the token strings of the first (only) encoding."""
    if pair:
        batch = [(text, pair)]
    else:
        batch = [text]
    encoded = self._tokenizer.encode_batch(batch, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs)
    return encoded[0].tokens
# NOTE(review): presumably a flattened `@add_end_docstrings(...)` decorator —
# confirm against the original source.
_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[(TextInput, PreTokenizedInput)], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[List[List[int]]]=None, node_labels: Optional[List[int]]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
    """Encode a single example: resolve padding/truncation arguments into
    backend strategies, then delegate to :meth:`_encode_plus`."""
    (padding_strategy, truncation_strategy, max_length, kwargs) = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
    return self._encode_plus(text=text, xpaths=xpaths, text_pair=text_pair, node_labels=node_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[(List[TextInput], List[TextInputPair], List[PreTokenizedInput])], is_pair: bool=None, xpaths: Optional[List[List[List[int]]]]=None, node_labels: Optional[List[List[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
    """Core batch encoding: runs the fast tokenizer, then aligns per-word
    xpath tag/subscript sequences (and optionally node labels) to the
    produced subword tokens. Special/pad tokens receive the pad xpath
    sequences and ``self.pad_token_label``.
    """
    if (not isinstance(batch_text_or_text_pairs, list)):
        raise TypeError(f'batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})')
    # Configure the backend tokenizer once for the whole batch.
    self.set_truncation_and_padding(padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of)
    if is_pair:
        # Wrap the question in a list so the backend treats both sides as
        # pretokenized input.
        batch_text_or_text_pairs = [([text], text_pair) for (text, text_pair) in batch_text_or_text_pairs]
    encodings = self._tokenizer.encode_batch(batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=True)
    # Offsets are force-enabled when node_labels are given: the offset start
    # is used below to detect first subwords.
    tokens_and_encodings = [self._convert_encoding(encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=(True if (node_labels is not None) else return_offsets_mapping), return_length=return_length, verbose=verbose) for encoding in encodings]
    # Flatten the per-example (possibly overflowed) outputs into flat batch lists.
    sanitized_tokens = {}
    for key in tokens_and_encodings[0][0].keys():
        stack = [e for (item, _) in tokens_and_encodings for e in item[key]]
        sanitized_tokens[key] = stack
    sanitized_encodings = [e for (_, item) in tokens_and_encodings for e in item]
    if return_overflowing_tokens:
        # Map each (possibly overflowed) row back to its source example index.
        overflow_to_sample_mapping = []
        for (i, (toks, _)) in enumerate(tokens_and_encodings):
            overflow_to_sample_mapping += ([i] * len(toks['input_ids']))
        sanitized_tokens['overflow_to_sample_mapping'] = overflow_to_sample_mapping
    for input_ids in sanitized_tokens['input_ids']:
        self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
    # Build token-level xpath sequences aligned with input_ids.
    xpath_tags_seq = []
    xpath_subs_seq = []
    for batch_index in range(len(sanitized_tokens['input_ids'])):
        if return_overflowing_tokens:
            original_index = sanitized_tokens['overflow_to_sample_mapping'][batch_index]
        else:
            original_index = batch_index
        xpath_tags_seq_example = []
        xpath_subs_seq_example = []
        for (id, sequence_id, word_id) in zip(sanitized_tokens['input_ids'][batch_index], sanitized_encodings[batch_index].sequence_ids, sanitized_encodings[batch_index].word_ids):
            if (word_id is not None):
                if (is_pair and (sequence_id == 0)):
                    # Question tokens (first sequence of a pair) carry no xpath.
                    xpath_tags_seq_example.append(self.pad_xpath_tags_seq)
                    xpath_subs_seq_example.append(self.pad_xpath_subs_seq)
                else:
                    (xpath_tags_list, xpath_subs_list) = self.get_xpath_seq(xpaths[original_index][word_id])
                    xpath_tags_seq_example.extend([xpath_tags_list])
                    xpath_subs_seq_example.extend([xpath_subs_list])
            elif (id in [self.cls_token_id, self.sep_token_id, self.pad_token_id]):
                xpath_tags_seq_example.append(self.pad_xpath_tags_seq)
                xpath_subs_seq_example.append(self.pad_xpath_subs_seq)
            else:
                raise ValueError('Id not recognized')
        xpath_tags_seq.append(xpath_tags_seq_example)
        xpath_subs_seq.append(xpath_subs_seq_example)
    sanitized_tokens['xpath_tags_seq'] = xpath_tags_seq
    sanitized_tokens['xpath_subs_seq'] = xpath_subs_seq
    if (node_labels is not None):
        # Propagate word-level labels to subword tokens; non-word tokens get
        # pad_token_label, and optionally only the first subword is labeled.
        labels = []
        for batch_index in range(len(sanitized_tokens['input_ids'])):
            if return_overflowing_tokens:
                original_index = sanitized_tokens['overflow_to_sample_mapping'][batch_index]
            else:
                original_index = batch_index
            labels_example = []
            for (id, offset, word_id) in zip(sanitized_tokens['input_ids'][batch_index], sanitized_tokens['offset_mapping'][batch_index], sanitized_encodings[batch_index].word_ids):
                if (word_id is not None):
                    if self.only_label_first_subword:
                        # offset[0] == 0 marks the first subword of a word.
                        if (offset[0] == 0):
                            labels_example.append(node_labels[original_index][word_id])
                        else:
                            labels_example.append(self.pad_token_label)
                    else:
                        labels_example.append(node_labels[original_index][word_id])
                else:
                    labels_example.append(self.pad_token_label)
            labels.append(labels_example)
        sanitized_tokens['labels'] = labels
        # Drop the offsets if the caller did not ask for them (they were only
        # needed for first-subword detection above).
        if (not return_offsets_mapping):
            del sanitized_tokens['offset_mapping']
    return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
def _encode_plus(self, text: Union[(TextInput, PreTokenizedInput)], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[List[List[int]]]=None, node_labels: Optional[List[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
    """Encode a single example by wrapping it into a batch of one and
    delegating to :meth:`_batch_encode_plus`, then unwrapping the result."""
    batched_input = ([(text, text_pair)] if text_pair else [text])
    batched_xpaths = [xpaths]
    batched_node_labels = ([node_labels] if (node_labels is not None) else None)
    batched_output = self._batch_encode_plus(batched_input, is_pair=bool((text_pair is not None)), xpaths=batched_xpaths, node_labels=batched_node_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
    # Without tensors or overflow, strip the outer batch dimension so the
    # caller gets plain per-example lists.
    if ((return_tensors is None) and (not return_overflowing_tokens)):
        batched_output = BatchEncoding({key: (value[0] if ((len(value) > 0) and isinstance(value[0], list)) else value) for (key, value) in batched_output.items()}, batched_output.encodings)
    self._eventual_warn_about_too_long_sequence(batched_output['input_ids'], max_length, verbose)
    return batched_output
def _pad(self, encoded_inputs: Union[(Dict[(str, EncodedInput)], BatchEncoding)], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None) -> dict:
    """Pad a single encoded example to ``max_length`` on ``self.padding_side``.

    Extends the base padding with the model-specific ``xpath_tags_seq``,
    ``xpath_subs_seq`` and ``labels`` keys, which are padded with the pad
    xpath sequences and ``self.pad_token_label`` respectively.
    """
    if (return_attention_mask is None):
        return_attention_mask = ('attention_mask' in self.model_input_names)
    required_input = encoded_inputs[self.model_input_names[0]]
    if (padding_strategy == PaddingStrategy.LONGEST):
        # LONGEST on a single example degenerates to "no padding needed".
        max_length = len(required_input)
    if ((max_length is not None) and (pad_to_multiple_of is not None) and ((max_length % pad_to_multiple_of) != 0)):
        # Round max_length up to the next multiple of pad_to_multiple_of.
        max_length = (((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of)
    needs_to_be_padded = ((padding_strategy != PaddingStrategy.DO_NOT_PAD) and (len(required_input) != max_length))
    if (return_attention_mask and ('attention_mask' not in encoded_inputs)):
        encoded_inputs['attention_mask'] = ([1] * len(required_input))
    if needs_to_be_padded:
        difference = (max_length - len(required_input))
        if (self.padding_side == 'right'):
            if return_attention_mask:
                encoded_inputs['attention_mask'] = (encoded_inputs['attention_mask'] + ([0] * difference))
            if ('token_type_ids' in encoded_inputs):
                encoded_inputs['token_type_ids'] = (encoded_inputs['token_type_ids'] + ([self.pad_token_type_id] * difference))
            if ('xpath_tags_seq' in encoded_inputs):
                encoded_inputs['xpath_tags_seq'] = (encoded_inputs['xpath_tags_seq'] + ([self.pad_xpath_tags_seq] * difference))
            if ('xpath_subs_seq' in encoded_inputs):
                encoded_inputs['xpath_subs_seq'] = (encoded_inputs['xpath_subs_seq'] + ([self.pad_xpath_subs_seq] * difference))
            if ('labels' in encoded_inputs):
                encoded_inputs['labels'] = (encoded_inputs['labels'] + ([self.pad_token_label] * difference))
            if ('special_tokens_mask' in encoded_inputs):
                encoded_inputs['special_tokens_mask'] = (encoded_inputs['special_tokens_mask'] + ([1] * difference))
            encoded_inputs[self.model_input_names[0]] = (required_input + ([self.pad_token_id] * difference))
        elif (self.padding_side == 'left'):
            # Mirror of the right-padding branch: prepend padding instead.
            if return_attention_mask:
                encoded_inputs['attention_mask'] = (([0] * difference) + encoded_inputs['attention_mask'])
            if ('token_type_ids' in encoded_inputs):
                encoded_inputs['token_type_ids'] = (([self.pad_token_type_id] * difference) + encoded_inputs['token_type_ids'])
            if ('xpath_tags_seq' in encoded_inputs):
                encoded_inputs['xpath_tags_seq'] = (([self.pad_xpath_tags_seq] * difference) + encoded_inputs['xpath_tags_seq'])
            if ('xpath_subs_seq' in encoded_inputs):
                encoded_inputs['xpath_subs_seq'] = (([self.pad_xpath_subs_seq] * difference) + encoded_inputs['xpath_subs_seq'])
            if ('labels' in encoded_inputs):
                encoded_inputs['labels'] = (([self.pad_token_label] * difference) + encoded_inputs['labels'])
            if ('special_tokens_mask' in encoded_inputs):
                encoded_inputs['special_tokens_mask'] = (([1] * difference) + encoded_inputs['special_tokens_mask'])
            encoded_inputs[self.model_input_names[0]] = (([self.pad_token_id] * difference) + required_input)
        else:
            raise ValueError(('Invalid padding strategy:' + str(self.padding_side)))
    return encoded_inputs
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Add special tokens around one or two sequences.

    Single sequence: ``[CLS] A [SEP]``. Pair: ``[CLS] A [SEP] B [SEP]``.
    """
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    if token_ids_1 is None:
        return cls + token_ids_0 + sep
    return cls + token_ids_0 + sep + token_ids_1 + sep
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Return an all-zero token-type-id list covering the sequence(s) plus
    their special tokens (this model uses a single segment id)."""
    # [CLS] + seq0 + [SEP]  (+ seq1 + [SEP] for pairs)
    total = len(token_ids_0) + 2
    if token_ids_1 is not None:
        total += len(token_ids_1) + 1
    return [0] * total
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
    """Save the backing tokenizer's model files to ``save_directory``.

    Returns the tuple of file paths written by the backend.
    """
    files = self._tokenizer.model.save(save_directory, name=filename_prefix)
    return tuple(files)
class DistributedSampler(_DistributedSampler):
    """DistributedSampler whose shuffle seed is synchronized across processes,
    so every rank draws the same permutation each epoch before slicing out
    its own interleaved subset."""
    def __init__(self, dataset: Dataset, num_replicas: Optional[int]=None, rank: Optional[int]=None, shuffle: bool=True, seed=0) -> None:
        super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        device = get_device()
        # Broadcast one seed (presumably from rank 0 — see sync_random_seed)
        # so all workers shuffle identically.
        self.seed = sync_random_seed(seed, device)
    def __iter__(self) -> Iterator:
        if self.shuffle:
            # Deterministic permutation: same (epoch, seed) -> same order on
            # every rank.
            g = torch.Generator()
            g.manual_seed((self.epoch + self.seed))
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = torch.arange(len(self.dataset)).tolist()
        # Repeat leading indices so len(indices) == total_size (pads the last
        # incomplete replica group).
        indices += indices[:(self.total_size - len(indices))]
        assert (len(indices) == self.total_size)
        # Interleaved assignment: rank r takes indices r, r+R, r+2R, ...
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert (len(indices) == self.num_samples)
        return iter(indices)
def _call_method(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs) |
class Checkpoint(object):
    """Save/restore training state (model, optimizer, datasets, epoch).

    ``save`` writes two files into the current working directory;
    ``load`` restores them from a given path, handling CPU-only machines
    via ``map_location``.
    """
    SAVE_PATH = 'outputs'
    LOAD_PATH = '../../../outputs'
    TRAINER_STATE_NAME = 'trainer_states.pt'
    MODEL_NAME = 'model.pt'
    def __init__(self, model: nn.Module=None, optimizer: Optimizer=None, trainset_list: list=None, validset: SpectrogramDataset=None, epoch: int=None) -> None:
        self.model = model
        self.optimizer = optimizer
        self.trainset_list = trainset_list
        self.validset = validset
        self.epoch = epoch
    def save(self):
        """Serialize trainer state and model into the current working directory."""
        date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())
        trainer_states = {'optimizer': self.optimizer, 'trainset_list': self.trainset_list, 'validset': self.validset, 'epoch': self.epoch}
        torch.save(trainer_states, os.path.join(os.getcwd(), self.TRAINER_STATE_NAME))
        torch.save(self.model, os.path.join(os.getcwd(), self.MODEL_NAME))
        # NOTE(review): the log message advertises date_time-prefixed filenames,
        # but the files above are saved WITHOUT the prefix — confirm which is
        # intended before relying on the logged paths.
        logger.info(('save checkpoints\n%s\n%s' % (os.path.join(os.getcwd(), f'{date_time}-{self.TRAINER_STATE_NAME}'), os.path.join(os.getcwd(), f'{date_time}-{self.MODEL_NAME}'))))
    def load(self, path):
        """Load trainer state and model from ``path``; returns a new Checkpoint.

        Uses map_location to deserialize onto CPU when CUDA is unavailable.
        NOTE(review): torch.load unpickles arbitrary objects — only load
        trusted checkpoint files.
        """
        logger.info(('load checkpoints\n%s\n%s' % (os.path.join(path, self.TRAINER_STATE_NAME), os.path.join(path, self.MODEL_NAME))))
        if torch.cuda.is_available():
            resume_checkpoint = torch.load(os.path.join(path, self.TRAINER_STATE_NAME))
            model = torch.load(os.path.join(path, self.MODEL_NAME))
        else:
            resume_checkpoint = torch.load(os.path.join(path, self.TRAINER_STATE_NAME), map_location=(lambda storage, loc: storage))
            model = torch.load(os.path.join(path, self.MODEL_NAME), map_location=(lambda storage, loc: storage))
        if isinstance(model, ListenAttendSpell):
            # Re-flatten RNN parameters after deserialization (required for
            # cuDNN RNN modules); unwrap DataParallel if present.
            if isinstance(model, nn.DataParallel):
                model.module.flatten_parameters()
            else:
                model.flatten_parameters()
        return Checkpoint(model=model, optimizer=resume_checkpoint['optimizer'], epoch=resume_checkpoint['epoch'], trainset_list=resume_checkpoint['trainset_list'], validset=resume_checkpoint['validset'])
    def get_latest_checkpoint(self):
        """Return the path of the most recent checkpoint under LOAD_PATH.

        NOTE(review): picks index [1] (the second entry) of the reverse-sorted
        inner listing — presumably to skip some newest/auxiliary entry; confirm
        against the actual directory layout.
        """
        checkpoints_path = sorted(os.listdir(self.LOAD_PATH), reverse=True)[0]
        sorted_listdir = sorted(os.listdir(os.path.join(self.LOAD_PATH, checkpoints_path)), reverse=True)
        return os.path.join(checkpoints_path, sorted_listdir[1])
def model(inputs, is_training=True):
    """LeNet-style MNIST classifier built with TF-Slim.

    Args:
        inputs: flattened image batch; reshaped internally to [-1, 28, 28, 1].
        is_training: toggles batch-norm statistic updates and dropout.

    Returns:
        [batch, 10] unnormalized class logits.
    """
    bn_params = {'is_training': is_training, 'decay': 0.9, 'updates_collections': None}
    # Apply batch norm after every conv / fully-connected layer in this scope.
    with slim.arg_scope([slim.conv2d, slim.fully_connected], normalizer_fn=slim.batch_norm, normalizer_params=bn_params):
        images = tf.reshape(inputs, [(- 1), 28, 28, 1])
        net = slim.conv2d(images, 32, [5, 5], scope='conv1')
        net = slim.max_pool2d(net, [2, 2], scope='pool1')
        net = slim.conv2d(net, 64, [5, 5], scope='conv2')
        net = slim.max_pool2d(net, [2, 2], scope='pool2')
        flat = slim.flatten(net, scope='flatten3')
        fc = slim.fully_connected(flat, 1024, scope='fc3')
        dropped = slim.dropout(fc, is_training=is_training, scope='dropout3', keep_prob=FLAGS.keep_prob)
        # Final layer: raw logits — no activation, no batch norm.
        return slim.fully_connected(dropped, 10, activation_fn=None, normalizer_fn=None, scope='fco')
def get_version():
    """Read ``__version__`` from mmhuman3d/version.py without importing the package.

    Executes the version file in a dedicated namespace dict instead of relying
    on ``exec`` writing into the function's ``locals()`` — the old pattern is
    CPython-specific and breaks under the Python 3.13 locals() semantics
    (PEP 667).

    Returns:
        str: the version string defined in the file.
    """
    version_file = 'mmhuman3d/version.py'
    namespace = {}
    with open(version_file, 'r', encoding='utf-8') as f:
        # compile() with the real filename keeps tracebacks pointing at the file.
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
def rename_keys(s_dict):
    """Rename state-dict keys in place: 'transformer_layers' -> 'layers' and
    'subsample' -> 'conv'. Mutates ``s_dict``; returns None."""
    substitutions = (('transformer_layers', 'layers'), ('subsample', 'conv'))
    # Snapshot the keys: we mutate the dict while iterating.
    for key in list(s_dict.keys()):
        for old, new in substitutions:
            if old in key:
                s_dict[key.replace(old, new)] = s_dict.pop(key)
                break
def test_cases():
    """Check rankdata against every (values, method, expected) triple in _cases."""
    for case in _cases:
        values, method, expected = case
        computed = rankdata(values, method=method)
        assert_array_equal(computed, expected)
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    """Convert a PyTorch state dict into a nested Flax params dict.

    Handles the two prefix-mismatch cases between checkpoints and models:
    loading a with-head checkpoint into a base model (strip the base-model
    prefix) and loading a base checkpoint into a with-head model (add it).
    Raises ValueError on shape mismatches against the randomly initialized
    Flax parameters.
    """
    # Detach to plain numpy so values are framework-neutral.
    pt_state_dict = {k: v.numpy() for (k, v) in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix
    random_flax_state_dict = flatten_dict(flax_model.params)
    flax_state_dict = {}
    # Checkpoint has the prefix but the target model does not -> strip it.
    load_model_with_head_into_base_model = ((model_prefix not in flax_model.params) and (model_prefix in set([k.split('.')[0] for k in pt_state_dict.keys()])))
    # Target model has the prefix but the checkpoint does not -> add it.
    load_base_model_into_model_with_head = ((model_prefix in flax_model.params) and (model_prefix not in set([k.split('.')[0] for k in pt_state_dict.keys()])))
    for (pt_key, pt_tensor) in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split('.'))
        has_base_model_prefix = (pt_tuple_key[0] == model_prefix)
        if (load_model_with_head_into_base_model and has_base_model_prefix):
            pt_tuple_key = pt_tuple_key[1:]
        # Rename PyTorch naming conventions to Flax ones (e.g. weight/kernel)
        # and transpose/reshape where layouts differ.
        (flax_key, flax_tensor) = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix)
        require_base_model_prefix = (((model_prefix,) + flax_key) in random_flax_state_dict)
        if (load_base_model_into_model_with_head and require_base_model_prefix):
            flax_key = ((model_prefix,) + flax_key)
        if (flax_key in random_flax_state_dict):
            if (flax_tensor.shape != random_flax_state_dict[flax_key].shape):
                raise ValueError(f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape {random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.')
        # NOTE: keys absent from the random params are still copied over.
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def find_last_entry(entries, time_point, start_time):
    """Return ``(entry, elapsed)`` for the last entry whose elapsed time
    (``entry['timestamp'] - start_time``) does not exceed ``time_point``.

    If ``time_point`` is None, the final entry is returned with elapsed -1.
    Returns ``(None, None)`` when no entry falls within the window.
    ``entries`` is assumed to be ordered by timestamp.
    """
    if time_point is None:
        return (entries[-1], -1)
    limit = utils.time_to_seconds(time_point)
    best, best_elapsed = None, None
    for entry in entries:
        elapsed = entry['timestamp'] - start_time
        if elapsed > limit:
            break
        best, best_elapsed = entry, elapsed
    return (best, best_elapsed)
def _mk_fp_unary_pred(f, a, ctx):
    """Build a Boolean Z3 AST by applying the unary floating-point predicate
    constructor ``f`` to expression ``a`` in context ``ctx``.

    ``a`` is coerced to a floating-point expression first; in debug builds the
    coerced argument is asserted to be an FP expression.
    """
    ctx = _get_ctx(ctx)
    [a] = _coerce_fp_expr_list([a], ctx)
    if z3_debug():
        _z3_assert(is_fp(a), 'First argument must be a Z3 floating-point expression')
    return BoolRef(f(ctx.ref(), a.as_ast()), ctx)
def _is_long(x):
if hasattr(x, 'data'):
x = x.data
return (isinstance(x, torch.LongTensor) or isinstance(x, torch.cuda.LongTensor)) |
class MyBashProcess(BashProcess):
    """BashProcess variant whose ``_run`` returns ``(output, exit_code)``
    instead of raising on command failure."""
    def _run(self, command: str) -> Tuple[(str, int)]:
        """Run ``command`` through the shell, capturing stdout+stderr together.

        On failure, returns either the captured output or the error text
        (depending on ``self.return_err_output``) with the real return code;
        on success returns the output and 0.
        """
        try:
            completed = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as error:
            if self.return_err_output:
                return (error.stdout.decode().strip(), error.returncode)
            return (str(error).strip(), error.returncode)
        text = completed.stdout.decode().strip()
        if self.strip_newlines:
            text = text.strip()
        return (text, 0)
def _gen_dir_name():
now_str = datetime.now().strftime('%m-%d-%y_%H.%M.%S')
rand_str = ''.join(random.choices(string.ascii_lowercase, k=4))
return f'{now_str}_{rand_str}' |
def test_update():
    """ResourceManager.update: an ENTANGLED memory below the fidelity
    threshold releases its protocol, while one above the threshold triggers
    the rule's action and queues a new waiting protocol."""
    def fake_condition(memo_info, manager, args):
        # Rule fires only for entangled memories with fidelity > 0.8.
        if ((memo_info.state == 'ENTANGLED') and (memo_info.fidelity > 0.8)):
            return [memo_info]
        else:
            return []
    def fake_action(memories, args):
        return (FakeProtocol('protocol'), [None], [None], [{}])
    tl = Timeline()
    node = FakeNode('node', tl)
    assert (len(node.resource_manager.rule_manager) == 0)
    rule = Rule(1, fake_action, fake_condition, None, None)
    node.resource_manager.load(rule)
    assert (len(node.resource_manager.rule_manager) == 1)
    for memo_info in node.resource_manager.memory_manager:
        assert (memo_info.state == 'RAW')
    # Case 1: fidelity 0.5 — condition fails, protocol is removed and the
    # memory just records the new state.
    protocol = FakeProtocol('protocol1')
    node.protocols.append(protocol)
    memo_array = node.resource_manager.memory_manager.memory_array
    memo_array[0].fidelity = 0.5
    memo_array[0].detach(memo_array)
    memo_array[0].attach(protocol)
    node.resource_manager.update(protocol, memo_array[0], 'ENTANGLED')
    assert (len(node.protocols) == len(rule.protocols) == 0)
    assert (len(memo_array[0]._observers) == 1)
    assert (node.resource_manager.memory_manager[0].state == 'ENTANGLED')
    # Case 2: fidelity 0.9 — condition passes, the rule's action runs, a new
    # protocol is queued as waiting and the memory becomes OCCUPIED.
    protocol = FakeProtocol('protocol2')
    node.protocols.append(protocol)
    memo_array[1].fidelity = 0.9
    memo_array[1].detach(memo_array)
    memo_array[1].attach(protocol)
    node.resource_manager.update(protocol, memo_array[1], 'ENTANGLED')
    assert (len(node.resource_manager.waiting_protocols) == len(rule.protocols) == 1)
    assert (len(memo_array[1]._observers) == 1)
    assert (node.resource_manager.memory_manager[1].state == 'OCCUPIED')
def make(env_id: EnvId):
    """Create and return the environment registered under ``env_id``.

    Each environment's module is imported lazily — only when that id is
    requested — so constructing one environment does not pull in the others.
    Raises ValueError (listing ``available_envs()``) for an unknown id.
    """
    # env_id -> (module path, class name, constructor kwargs)
    registry = {
        '2048': ('pgx.play2048', 'Play2048', {}),
        'animal_shogi': ('pgx.animal_shogi', 'AnimalShogi', {}),
        'backgammon': ('pgx.backgammon', 'Backgammon', {}),
        'chess': ('pgx.chess', 'Chess', {}),
        'connect_four': ('pgx.connect_four', 'ConnectFour', {}),
        'gardner_chess': ('pgx.gardner_chess', 'GardnerChess', {}),
        'go_9x9': ('pgx.go', 'Go', {'size': 9, 'komi': 7.5}),
        'go_19x19': ('pgx.go', 'Go', {'size': 19, 'komi': 7.5}),
        'hex': ('pgx.hex', 'Hex', {}),
        'kuhn_poker': ('pgx.kuhn_poker', 'KuhnPoker', {}),
        'leduc_holdem': ('pgx.leduc_holdem', 'LeducHoldem', {}),
        'minatar-asterix': ('pgx.minatar.asterix', 'MinAtarAsterix', {}),
        'minatar-breakout': ('pgx.minatar.breakout', 'MinAtarBreakout', {}),
        'minatar-freeway': ('pgx.minatar.freeway', 'MinAtarFreeway', {}),
        'minatar-seaquest': ('pgx.minatar.seaquest', 'MinAtarSeaquest', {}),
        'minatar-space_invaders': ('pgx.minatar.space_invaders', 'MinAtarSpaceInvaders', {}),
        'othello': ('pgx.othello', 'Othello', {}),
        'shogi': ('pgx.shogi', 'Shogi', {}),
        'sparrow_mahjong': ('pgx.sparrow_mahjong', 'SparrowMahjong', {}),
        'tic_tac_toe': ('pgx.tic_tac_toe', 'TicTacToe', {}),
    }
    entry = registry.get(env_id)
    if entry is None:
        envs = '\n'.join(available_envs())
        raise ValueError(f'''Wrong env_id '{env_id}' is passed. Available ids are:
{envs}''')
    module_path, class_name, init_kwargs = entry
    # Lazy import: fromlist makes __import__ return the leaf module.
    module = __import__(module_path, fromlist=[class_name])
    return getattr(module, class_name)(**init_kwargs)
def NormalFan(polytope, lattice=None):
    """Construct the normal fan of a full-dimensional polytope.

    Accepts either a Sage LatticePolytope or a Polyhedron; in both cases the
    fan's rays are the (inner) facet normals and each maximal cone is indexed
    by the facets incident to a vertex. Raises for non-full-dimensional input
    and (for Polyhedron) for unbounded polyhedra.
    """
    dimension_error = ValueError('the normal fan is only defined for full-dimensional polytopes')
    from sage.geometry.lattice_polytope import is_LatticePolytope
    if is_LatticePolytope(polytope):
        if (polytope.dim() != polytope.lattice_dim()):
            raise dimension_error
        rays = polytope.facet_normals()
        # One cone per vertex, generated by the normals of its incident facets.
        cones = (v.ambient_facet_indices() for v in polytope.faces(dim=0))
    else:
        if (polytope.dim() != polytope.ambient_dim()):
            raise dimension_error
        if (not polytope.is_compact()):
            raise NotImplementedError('the normal fan is only supported for polytopes (compact polyhedra).')
        cones = [[ieq.index() for ieq in vertex.incident()] for vertex in polytope.vertices()]
        rays = [ieq.A() for ieq in polytope.inequalities()]
    # check=False: cone data is constructed consistently above; the normal fan
    # of a full-dimensional polytope is always complete.
    return Fan(cones, rays, lattice=lattice, check=False, is_complete=True)
def test_not_complete_and_not_homogeneous_labeling():
    """A clustering that is neither homogeneous nor complete should score
    strictly between 0 and 1 on all three measures."""
    labels_true = [0, 0, 0, 1, 1, 1]
    labels_pred = [0, 1, 0, 1, 2, 2]
    h, c, v = homogeneity_completeness_v_measure(labels_true, labels_pred)
    for score, expected in ((h, 0.67), (c, 0.42), (v, 0.52)):
        assert_almost_equal(score, expected, 2)
class MaxPoolBlock(nn.Module):
    """1-D residual-style block: two conv+BN layers with an optional shortcut.

    When ``out_filters > in_filters`` the input is first max-pooled
    (kernel 3, stride 2) and the shortcut — if enabled — is a 1x1 conv that
    matches the channel count; otherwise the shortcut is the identity.
    """
    expansion = 1

    def __init__(self, in_filters, out_filters, shortcut=False, bias=False):
        super().__init__()
        self.shortcut = shortcut
        self.increasing = (out_filters > in_filters)
        if self.increasing:
            # Downsample temporally before widening the channel dimension.
            self.max_pool = nn.MaxPool1d(3, stride=2, padding=1)
        self.conv1 = nn.Conv1d(in_filters, out_filters, 3, padding=1, stride=1, bias=bias)
        self.bn1 = nn.BatchNorm1d(out_filters)
        self.conv2 = nn.Conv1d(out_filters, out_filters, 3, padding=1, stride=1, bias=bias)
        self.bn2 = nn.BatchNorm1d(out_filters)
        if self.shortcut:
            if self.increasing:
                self._shortcut = nn.Sequential(nn.Conv1d(in_filters, out_filters, 1, stride=1, bias=bias))
            else:
                self._shortcut = nn.Sequential()

    def forward(self, inputs):
        x = self.max_pool(inputs) if self.increasing else inputs
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.shortcut:
            # Residual add against the (possibly pooled) input.
            out = out + self._shortcut(x)
        return F.relu(out)
class ExperimentContext():
    """Bundle of snapshot settings for an experiment run.

    Attributes:
        snapshot_dir: directory where snapshots are written.
        snapshot_mode: snapshot policy string (values defined by the caller).
        snapshot_gap: interval between snapshots (used with gap-style modes).
    """
    def __init__(self, *, snapshot_dir, snapshot_mode, snapshot_gap):
        # Keyword-only to keep call sites self-documenting.
        self.snapshot_dir = snapshot_dir
        self.snapshot_mode = snapshot_mode
        self.snapshot_gap = snapshot_gap
def test_case47():
    """Integration test: POSTing the same NGSI-LD subscription twice should
    still return 201 on the second attempt (duplicate subscription allowed).

    Requires live broker/discovery services at brokerIp/discoveryIp.
    NOTE(review): the Link header value below looks malformed (unbalanced
    quotes around rel=) — confirm it matches what the broker expects.
    """
    url = (brokerIp + '/ngsi-ld/v1/subscriptions/')
    headers = {'Content-Type': 'application/json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
    r = requests.post(url, data=json.dumps(ld_data.subdata36), headers=headers)
    print(r.content)
    print(r.status_code)
    # Inspect registry state between the two POSTs (output only, not asserted).
    url = (discoveryIp + '/ngsi9/subscription')
    r = requests.get(url)
    print(r.content)
    print(r.status_code)
    # Second, identical subscription POST — this one is asserted.
    url = (brokerIp + '/ngsi-ld/v1/subscriptions/')
    headers = {'Content-Type': 'application/json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
    r = requests.post(url, data=json.dumps(ld_data.subdata36), headers=headers)
    print(r.content)
    print(r.status_code)
    assert (r.status_code == 201)
class ArchGradientFunction(torch.autograd.Function):
    """Custom autograd Function for architecture-parameter (binary-gate)
    gradients: runs ``run_func`` on a detached input in forward, and in
    backward computes both the input gradient and the gate gradients via
    ``backward_func``.

    NOTE(review): ``forward``/``backward`` are not decorated with
    ``@staticmethod`` — modern PyTorch requires that; presumably this targets
    an older torch version. Confirm before upgrading.
    """
    def forward(ctx, x, binary_gates, run_func, backward_func):
        ctx.run_func = run_func
        ctx.backward_func = backward_func
        # Detach so the outer graph stops here; re-enable grad to build a
        # local graph through run_func for use in backward.
        detached_x = detach_variable(x)
        with torch.enable_grad():
            output = run_func(detached_x)
        ctx.save_for_backward(detached_x, output)
        # Return raw data: the local graph is kept only via saved tensors.
        return output.data
    def backward(ctx, grad_output):
        (detached_x, output) = ctx.saved_tensors
        # Differentiate the local graph built in forward for dL/dx.
        grad_x = torch.autograd.grad(output, detached_x, grad_output, only_inputs=True)
        # Gate gradients are computed by the user-supplied backward_func.
        binary_grads = ctx.backward_func(detached_x.data, output.data, grad_output.data)
        # One gradient per forward input: x, binary_gates, run_func, backward_func.
        return (grad_x[0], binary_grads, None, None)
def visualize_kernel(tensor, ind):
    """Gather a 2-D slice of ``tensor`` at indices ``ind`` and return it as a
    [1, 10H, 10W, 1] image tensor, upscaled 10x with nearest-neighbour
    resizing (e.g. for TensorBoard image summaries)."""
    sliced = tf.gather_nd(tensor, ind)
    height, width = sliced.get_shape().as_list()
    # Add batch and channel axes so the slice forms a valid image batch.
    image = tf.expand_dims(tf.expand_dims(sliced, 0), 3)
    return tf.image.resize_nearest_neighbor(image, [(10 * height), (10 * width)])
class XLNetForQuestionAnswering():
    """Placeholder class used when PyTorch is not installed: any attempt to
    construct it or call ``from_pretrained`` raises via ``requires_pytorch``
    with an informative error."""
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def _seg_36():
    # Segment 36 of a generated Unicode mapping table (codepoints 42626-42856).
    # Each entry is (codepoint, status[, mapping]): 'V' = valid as-is,
    # 'M' = mapped (mapping string follows), 'X' = disallowed.
    # NOTE(review): the u'' mapping strings appear empty here, likely lost in
    # extraction — regenerate from the upstream table rather than hand-editing.
    return [(42626, 'M', u''), (42627, 'V'), (42628, 'M', u''), (42629, 'V'), (42630, 'M', u''), (42631, 'V'), (42632, 'M', u''), (42633, 'V'), (42634, 'M', u''), (42635, 'V'), (42636, 'M', u''), (42637, 'V'), (42638, 'M', u''), (42639, 'V'), (42640, 'M', u''), (42641, 'V'), (42642, 'M', u''), (42643, 'V'), (42644, 'M', u''), (42645, 'V'), (42646, 'M', u''), (42647, 'V'), (42648, 'M', u''), (42649, 'V'), (42650, 'M', u''), (42651, 'V'), (42652, 'M', u''), (42653, 'M', u''), (42654, 'V'), (42744, 'X'), (42752, 'V'), (42786, 'M', u''), (42787, 'V'), (42788, 'M', u''), (42789, 'V'), (42790, 'M', u''), (42791, 'V'), (42792, 'M', u''), (42793, 'V'), (42794, 'M', u''), (42795, 'V'), (42796, 'M', u''), (42797, 'V'), (42798, 'M', u''), (42799, 'V'), (42802, 'M', u''), (42803, 'V'), (42804, 'M', u''), (42805, 'V'), (42806, 'M', u''), (42807, 'V'), (42808, 'M', u''), (42809, 'V'), (42810, 'M', u''), (42811, 'V'), (42812, 'M', u''), (42813, 'V'), (42814, 'M', u''), (42815, 'V'), (42816, 'M', u''), (42817, 'V'), (42818, 'M', u''), (42819, 'V'), (42820, 'M', u''), (42821, 'V'), (42822, 'M', u''), (42823, 'V'), (42824, 'M', u''), (42825, 'V'), (42826, 'M', u''), (42827, 'V'), (42828, 'M', u''), (42829, 'V'), (42830, 'M', u''), (42831, 'V'), (42832, 'M', u''), (42833, 'V'), (42834, 'M', u''), (42835, 'V'), (42836, 'M', u''), (42837, 'V'), (42838, 'M', u''), (42839, 'V'), (42840, 'M', u''), (42841, 'V'), (42842, 'M', u''), (42843, 'V'), (42844, 'M', u''), (42845, 'V'), (42846, 'M', u''), (42847, 'V'), (42848, 'M', u''), (42849, 'V'), (42850, 'M', u''), (42851, 'V'), (42852, 'M', u''), (42853, 'V'), (42854, 'M', u''), (42855, 'V'), (42856, 'M', u'')]
class Logger(object):
    """Tee object that duplicates everything printed to stdout into a file.

    Constructing a Logger replaces ``sys.stdout`` with the instance, so all
    subsequent ``print`` output goes both to the original terminal stream
    and to ``outfile``.
    """

    def __init__(self, outfile):
        """Open `outfile` for writing and hijack sys.stdout."""
        self.terminal = sys.stdout
        self.log = open(outfile, 'w')
        sys.stdout = self

    def write(self, message):
        """Write `message` to both the original stdout and the log file."""
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        """Flush both sinks.

        FIX: previously only the terminal was flushed, so log-file content
        could sit in the buffer until interpreter exit (and be lost on a
        crash). Flush the file as well.
        """
        self.terminal.flush()
        self.log.flush()
def install_import_hook(module_to_instrument: str, tracer: ExecutionTracer, coverage_metrics: (set[config.CoverageMetric] | None)=None, dynamic_constant_provider: (DynamicConstantProvider | None)=None) -> ImportHookContextManager:
    """Prepend an instrumentation finder to sys.meta_path.

    Wraps the standard PathFinder so that `module_to_instrument` is
    instrumented with `tracer` on import. Returns a context manager that
    owns the installed hook.

    Raises:
        RuntimeError: if no PathFinder is present on sys.meta_path.
    """
    if dynamic_constant_provider is None:
        # Inert default: probability=0 disables dynamic constant seeding.
        dynamic_constant_provider = DynamicConstantProvider(ConstantPool(), EmptyConstantProvider(), probability=0, max_constant_length=1)
    if coverage_metrics is None:
        coverage_metrics = set(config.configuration.statistics_output.coverage_metrics)
    # Locate the standard PathFinder class to delegate actual spec lookup to.
    path_finder = next(
        (finder for finder in sys.meta_path
         if isclass(finder) and finder.__name__ == 'PathFinder' and hasattr(finder, 'find_spec')),
        None,
    )
    if not path_finder:
        raise RuntimeError('Cannot find a PathFinder in sys.meta_path')
    hook = InstrumentationFinder(path_finder, module_to_instrument, tracer, coverage_metrics=coverage_metrics, dynamic_constant_provider=dynamic_constant_provider)
    # Insert at the front so instrumentation wins over the default finders.
    sys.meta_path.insert(0, hook)
    return ImportHookContextManager(hook)
class Caffe2BenchmarkBase(object):
    """Base class for Caffe2 operator micro-benchmarks.

    Provides blob creation on a chosen device plus naming helpers used when
    reporting results. Class-level counters give every created blob and every
    generated short test name a unique suffix across all instances.
    """

    # Shared counters: unique suffixes for blobs / short test names.
    tensor_index = 0
    test_index = 0

    def __init__(self):
        self.args = {}
        self.user_provided_name = None
        self._num_inputs_require_grads = 0
        self._pass_count = 0

    def _set_backward_test(self, is_backward):
        # Backward-pass benchmarking is not supported for Caffe2; no-op.
        pass

    def _device_option(self, device):
        """Return (and cache on self.dev) the Caffe2 DeviceOption for `device`.

        Raises:
            ValueError: if `device` is neither 'cuda' nor 'cpu'.
        """
        if device not in ['cuda', 'cpu']:
            raise ValueError('Missing attrs in configs')
        if 'cuda' in device:
            self.dev = core.DeviceOption(caffe2_pb2.CUDA, 0)
        else:
            self.dev = core.DeviceOption(caffe2_pb2.CPU)
        return self.dev

    def tensor(self, shapes, dtype='float32', device='cpu'):
        """Feed a random blob of the given shape/dtype and return its name."""
        blob_name = 'blob_' + str(Caffe2BenchmarkBase.tensor_index)
        dev = self._device_option(device)
        with core.DeviceScope(dev):
            workspace.FeedBlob(blob_name, benchmark_utils.numpy_random(dtype, *shapes))
        Caffe2BenchmarkBase.tensor_index += 1
        return blob_name

    def module_name(self):
        """Return the user-set name if any, else the class name."""
        if self.user_provided_name:
            return self.user_provided_name
        return self.__class__.__name__

    def set_module_name(self, name):
        self.user_provided_name = name

    def _value_to_str(self, value):
        """Render a config value for a test name; booleans become 0/1."""
        ret = value
        # FIX: isinstance instead of `type(value) == bool` (idiomatic type check).
        if isinstance(value, bool):
            ret = int(value)
        return str(ret)

    def test_name(self, name_type='long', **kargs):
        """Build a benchmark test name.

        'long' concatenates every kwarg key/value (spaces stripped);
        'short' uses the module name plus a global running index.
        """
        if name_type == 'long':
            test_name_str = []
            for key in kargs:
                value = kargs[key]
                test_name_str.append(key + self._value_to_str(value))
            name = (self.module_name() + '_' + '_'.join(test_name_str)).replace(' ', '')
        elif name_type == 'short':
            name = '_'.join([self.module_name(), 'test', str(Caffe2BenchmarkBase.test_index)])
        # Index advances on every call so short names never collide.
        Caffe2BenchmarkBase.test_index += 1
        return name
class ShowCategory(Enum):
    """Closed set of row/item categories with an explicit numeric level.

    NOTE(review): semantics inferred from names only — these look like
    profiler display categories for TPU engines (BD/GDMA) vs. host/graph
    layers, with higher values presumably shown higher; confirm with callers.
    """
    SUBNET = 5      # sub-network grouping
    HOST_CPU = 4    # host-side CPU activity
    TPU_LAYER = 3   # per-layer TPU view
    NODE_OP = 2     # individual graph node/op
    TPU_GDMA = 1    # TPU GDMA (DMA engine) events
    TPU_BD = 0      # TPU BD (compute engine) events
class Downsampler(nn.Module):
    """Fixed anti-aliased downsampler implemented as a frozen Conv2d.

    Builds a resampling kernel via ``get_kernel`` (lanczos / gauss / box)
    and copies it onto the (i, i) diagonal of a Conv2d's weight so every
    channel is filtered independently, then strides by ``factor``.
    """

    def __init__(self, n_planes, factor, kernel_type, phase=0, kernel_width=None, support=None, sigma=None, preserve_size=False):
        # phase: 0 or 0.5 — passed through to get_kernel (0.5 presumably
        # shifts the sampling grid by half a pixel; confirm in get_kernel).
        super(Downsampler, self).__init__()
        assert (phase in [0, 0.5]), 'phase should be 0 or 0.5'
        # Named presets fix width/support/sigma; bare type names take the
        # caller-supplied kernel_width/support/sigma as-is.
        if (kernel_type == 'lanczos2'):
            support = 2
            kernel_width = ((4 * factor) + 1)
            kernel_type_ = 'lanczos'
        elif (kernel_type == 'lanczos3'):
            support = 3
            kernel_width = ((6 * factor) + 1)
            kernel_type_ = 'lanczos'
        elif (kernel_type == 'gauss12'):
            kernel_width = 7
            sigma = (1 / 2)
            kernel_type_ = 'gauss'
        elif (kernel_type == 'gauss1sq2'):
            kernel_width = 9
            sigma = (1.0 / np.sqrt(2))
            kernel_type_ = 'gauss'
        elif (kernel_type in ['lanczos', 'gauss', 'box']):
            kernel_type_ = kernel_type
        else:
            assert False, 'wrong name kernel'
        # get_kernel is defined elsewhere in this file; assumed to return a
        # 2-D numpy array of shape (kernel_width-ish, kernel_width-ish).
        self.kernel = get_kernel(factor, kernel_type_, phase, kernel_width, support=support, sigma=sigma)
        downsampler = nn.Conv2d(n_planes, n_planes, kernel_size=self.kernel.shape, stride=factor, padding=0)
        # Zero all weights/bias, then place the kernel on the channel
        # diagonal so channels do not mix; the filter is fixed, not learned.
        downsampler.weight.data[:] = 0
        downsampler.bias.data[:] = 0
        kernel_torch = torch.from_numpy(self.kernel)
        for i in range(n_planes):
            downsampler.weight.data[(i, i)] = kernel_torch
        self.downsampler_ = downsampler
        if preserve_size:
            # Replication-pad so the output spatial size is input_size/factor
            # (odd kernels center symmetrically; even ones offset by factor).
            if ((self.kernel.shape[0] % 2) == 1):
                pad = int(((self.kernel.shape[0] - 1) / 2.0))
            else:
                pad = int(((self.kernel.shape[0] - factor) / 2.0))
            self.padding = nn.ReplicationPad2d(pad)
        # Set unconditionally: forward() reads it on every call.
        self.preserve_size = preserve_size

    def forward(self, input):
        # Optionally pad, run the frozen strided conv; keeps the padded
        # input around on self.x (why is not evident from this chunk).
        if self.preserve_size:
            x = self.padding(input)
        else:
            x = input
        self.x = x
        return self.downsampler_(x)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.