code stringlengths 101 5.91M |
|---|
class Pool():
    """Minimal process pool that runs every task in a brand-new child process.

    Unlike ``multiprocessing.Pool``, worker processes are not reused: each
    task is executed by a freshly spawned ``mp.Process``, so any memory or
    state the task leaks is reclaimed when the task ends.
    """

    def __init__(self, processes=1):
        # Number of concurrent worker loops consuming the task queue.
        self.processes = processes

    def _worker_loop(self, func, ip, inputQueue):
        """Consume argument lists from ``inputQueue`` until the -1 sentinel.

        Each task runs as ``func(ip, *args)`` in its own child process.
        Stops early if a task process exits with a non-zero code.
        """
        while True:
            args = inputQueue.get(block=True, timeout=None)
            if args == -1:
                # Sentinel: no more work for this worker.
                break
            p = mp.Process(target=func, args=([ip] + args), daemon=False)
            p.start()
            p.join()
            if p.exitcode != 0:
                # FIX: report the pid of the crashed task process (p.pid);
                # os.getpid() was this worker loop's own pid, not the child's.
                print('ERROR: process %i terminated unexpectedly' % p.pid)
                break

    def starmap(self, func, iterable):
        """Blocking starmap: run ``func(worker_index, *args)`` for each args.

        Returns nothing; results must be communicated via side effects
        (e.g. files or queues) by ``func`` itself.
        """
        inputQueue = mp.Queue()
        for args in iterable:
            inputQueue.put(list(args))
        # One sentinel per worker so every worker loop terminates.
        for _ in range(self.processes):
            inputQueue.put(-1)
        ps = [mp.Process(target=self._worker_loop, args=(func, ip, inputQueue), daemon=False)
              for ip in range(self.processes)]
        for p in ps:
            p.start()
        for p in ps:
            p.join()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # No persistent resources to release: all processes are joined
        # inside starmap().
        pass
class SquadDataTrainingArguments():
    """Stub argument container whose construction requires PyTorch.

    Instantiating it immediately calls ``requires_pytorch`` so users get a
    clear error when the PyTorch backend is not installed; the constructor
    arguments themselves are ignored.
    """

    def __init__(self, *args, **kwargs):
        # Presumably raises when PyTorch is unavailable — verify against
        # the project's requires_pytorch helper.
        requires_pytorch(self)
class TestChoi(ChannelTestCase):
    """Tests for the Choi-matrix representation of quantum channels.

    NOTE(review): two assertions in test_compose used a binary operator that
    was lost in transit ("(chan1 chan2)"); restored as the matmul/compose
    operator ``@`` to match the adjacent ``.compose()`` assertions — confirm
    against the project's operator overloads.
    """

    def test_init(self):
        """Initialization and dimension inference from the Choi matrix."""
        mat4 = (np.eye(4) / 2.0)
        chan = Choi(mat4)
        self.assertAllClose(chan.data, mat4)
        self.assertEqual(chan.dim, (2, 2))
        mat8 = (np.eye(8) / 2.0)
        chan = Choi(mat8, input_dims=4)
        self.assertAllClose(chan.data, mat8)
        self.assertEqual(chan.dim, (4, 2))
        chan = Choi(mat8, input_dims=2)
        self.assertAllClose(chan.data, mat8)
        self.assertEqual(chan.dim, (2, 4))
        mat16 = (np.eye(16) / 4)
        chan = Choi(mat16)
        self.assertAllClose(chan.data, mat16)
        self.assertEqual(chan.dim, (4, 4))
        # Wrong-size matrix for the declared dimensions must raise.
        self.assertRaises(QiskitError, Choi, mat8, input_dims=[4], output_dims=[4])

    def test_circuit_init(self):
        """Initialization from a QuantumCircuit."""
        (circuit, target) = self.simple_circuit_no_measure()
        op = Choi(circuit)
        target = Choi(target)
        self.assertEqual(op, target)

    def test_circuit_init_except(self):
        """A circuit containing measurement cannot define a channel."""
        circuit = self.simple_circuit_with_measure()
        self.assertRaises(QiskitError, Choi, circuit)

    def test_equal(self):
        """Equality of channels built from the same matrix."""
        mat = self.rand_matrix(4, 4)
        self.assertEqual(Choi(mat), Choi(mat))

    def test_copy(self):
        """copy() must be a deep copy: mutating the copy leaves the original."""
        mat = np.eye(4)
        orig = Choi(mat)
        cpy = orig.copy()
        cpy._data[(0, 0)] = 0.0
        self.assertFalse((cpy == orig))

    def test_evolve(self):
        """State evolution for identity, Hadamard and depolarizing channels."""
        input_psi = [0, 1]
        input_rho = [[0, 0], [0, 1]]
        # Identity channel leaves |1><1| unchanged.
        chan = Choi(self.choiI)
        target_rho = np.array([[0, 0], [0, 1]])
        self.assertAllClose(chan._evolve(input_psi), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_psi)), target_rho)
        self.assertAllClose(chan._evolve(input_rho), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_rho)), target_rho)
        # Hadamard maps |1><1| to |-><-|.
        chan = Choi(self.choiH)
        target_rho = (np.array([[1, (- 1)], [(- 1), 1]]) / 2)
        self.assertAllClose(chan._evolve(input_psi), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_psi)), target_rho)
        self.assertAllClose(chan._evolve(input_rho), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_rho)), target_rho)
        # Fully depolarizing channel maps everything to I/2.
        chan = Choi(self.depol_choi(1))
        target_rho = (np.eye(2) / 2)
        self.assertAllClose(chan._evolve(input_psi), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_psi)), target_rho)
        self.assertAllClose(chan._evolve(input_rho), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_rho)), target_rho)

    def test_is_cptp(self):
        """CPTP check: depolarizing is CPTP, a non-positive mix is not."""
        self.assertTrue(Choi(self.depol_choi(0.25)).is_cptp())
        self.assertFalse(Choi(((1.25 * self.choiI) - (0.25 * self.depol_choi(1)))).is_cptp())

    def test_conjugate(self):
        """Conjugation swaps the Y eigenprojectors."""
        (Zp, Zm) = (np.diag([1, 0]), np.diag([0, 1]))
        (Yp, Ym) = ((np.array([[1, (- 1j)], [1j, 1]]) / 2), (np.array([[1, 1j], [(- 1j), 1]]) / 2))
        chan = Choi((np.kron(Zp, Yp) + np.kron(Zm, Ym)))
        targ = Choi((np.kron(Zp, Ym) + np.kron(Zm, Yp)))
        chan_conj = chan.conjugate()
        self.assertEqual(chan_conj, targ)

    def test_transpose(self):
        """Transposition swaps input and output subsystems of the Choi matrix."""
        (Zp, Zm) = (np.diag([1, 0]), np.diag([0, 1]))
        (Yp, Ym) = ((np.array([[1, (- 1j)], [1j, 1]]) / 2), (np.array([[1, 1j], [(- 1j), 1]]) / 2))
        chan = Choi((np.kron(Zp, Yp) + np.kron(Zm, Ym)))
        targ = Choi((np.kron(Yp, Zp) + np.kron(Ym, Zm)))
        chan_t = chan.transpose()
        self.assertEqual(chan_t, targ)

    def test_adjoint(self):
        """Adjoint = conjugate + transpose."""
        (Zp, Zm) = (np.diag([1, 0]), np.diag([0, 1]))
        (Yp, Ym) = ((np.array([[1, (- 1j)], [1j, 1]]) / 2), (np.array([[1, 1j], [(- 1j), 1]]) / 2))
        chan = Choi((np.kron(Zp, Yp) + np.kron(Zm, Ym)))
        targ = Choi((np.kron(Ym, Zp) + np.kron(Yp, Zm)))
        chan_adj = chan.adjoint()
        self.assertEqual(chan_adj, targ)

    def test_compose_except(self):
        """Composition with mismatched dimensions or non-channels raises."""
        self.assertRaises(QiskitError, Choi(np.eye(4)).compose, Choi(np.eye(8)))
        self.assertRaises(QiskitError, Choi(np.eye(4)).compose, 2)

    def test_compose(self):
        """Channel composition, including the ``@`` operator form."""
        # X then Y is Z (up to phase, which channels discard).
        chan1 = Choi(self.choiX)
        chan2 = Choi(self.choiY)
        chan = chan1.compose(chan2)
        targ = Choi(self.choiZ)
        self.assertEqual(chan, targ)
        # Composing 50% depolarizing with itself gives 75% depolarizing.
        chan1 = Choi(self.depol_choi(0.5))
        chan = chan1.compose(chan1)
        targ = Choi(self.depol_choi(0.75))
        self.assertEqual(chan, targ)
        (Zp, Zm) = (np.diag([1, 0]), np.diag([0, 1]))
        (Xp, Xm) = ((np.array([[1, 1], [1, 1]]) / 2), (np.array([[1, (- 1)], [(- 1), 1]]) / 2))
        chan1 = Choi((np.kron(Zp, Xp) + np.kron(Zm, Xm)))
        chan2 = Choi(self.choiX)
        targ = Choi((np.kron(Zp, Xp) + np.kron(Zm, Xm)))
        self.assertEqual(chan1.compose(chan2), targ)
        # FIX: restored the stripped binary operator as ``@`` (compose).
        self.assertEqual((chan1 @ chan2), targ)
        targ = Choi((np.kron(Zm, Xp) + np.kron(Zp, Xm)))
        self.assertEqual(chan2.compose(chan1), targ)
        # FIX: restored the stripped binary operator as ``@`` (compose).
        self.assertEqual((chan2 @ chan1), targ)
        # Composition of non-square channels contracts the inner dimension.
        chan1 = Choi((np.eye(8) / 4), input_dims=2, output_dims=4)
        chan2 = Choi((np.eye(8) / 2), input_dims=4, output_dims=2)
        chan = chan1.compose(chan2)
        self.assertEqual(chan.dim, (2, 2))
        chan = chan2.compose(chan1)
        self.assertEqual(chan.dim, (4, 4))

    def test_compose_front(self):
        """Composition with front=True reverses the application order."""
        chan1 = Choi(self.choiX)
        chan2 = Choi(self.choiY)
        chan = chan1.compose(chan2, front=True)
        targ = Choi(self.choiZ)
        self.assertEqual(chan, targ)
        chan1 = Choi(self.depol_choi(0.5))
        chan = chan1.compose(chan1, front=True)
        targ = Choi(self.depol_choi(0.75))
        self.assertEqual(chan, targ)
        (Zp, Zm) = (np.diag([1, 0]), np.diag([0, 1]))
        (Xp, Xm) = ((np.array([[1, 1], [1, 1]]) / 2), (np.array([[1, (- 1)], [(- 1), 1]]) / 2))
        chan1 = Choi((np.kron(Zp, Xp) + np.kron(Zm, Xm)))
        chan2 = Choi(self.choiX)
        chan = chan2.compose(chan1, front=True)
        targ = Choi((np.kron(Zp, Xp) + np.kron(Zm, Xm)))
        self.assertEqual(chan, targ)
        chan = chan1.compose(chan2, front=True)
        targ = Choi((np.kron(Zm, Xp) + np.kron(Zp, Xm)))
        self.assertEqual(chan, targ)
        chan1 = Choi((np.eye(8) / 4), input_dims=2, output_dims=4)
        chan2 = Choi((np.eye(8) / 2), input_dims=4, output_dims=2)
        chan = chan1.compose(chan2, front=True)
        self.assertEqual(chan.dim, (4, 4))
        chan = chan2.compose(chan1, front=True)
        self.assertEqual(chan.dim, (2, 2))

    def test_expand(self):
        """expand(): other channel acts on the higher-order subsystem."""
        (rho0, rho1) = (np.diag([1, 0]), np.diag([0, 1]))
        rho_init = np.kron(rho0, rho0)
        chan1 = Choi(self.choiI)
        chan2 = Choi(self.choiX)
        chan = chan1.expand(chan2)
        rho_targ = np.kron(rho1, rho0)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)
        chan = chan2.expand(chan1)
        rho_targ = np.kron(rho0, rho1)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)
        chan_dep = Choi(self.depol_choi(1))
        chan = chan_dep.expand(chan_dep)
        rho_targ = (np.diag([1, 1, 1, 1]) / 4)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)

    def test_tensor(self):
        """tensor() and the ``^`` operator form."""
        (rho0, rho1) = (np.diag([1, 0]), np.diag([0, 1]))
        rho_init = np.kron(rho0, rho0)
        chan1 = Choi(self.choiI)
        chan2 = Choi(self.choiX)
        rho_targ = np.kron(rho1, rho0)
        chan = chan2.tensor(chan1)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)
        chan = (chan2 ^ chan1)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)
        rho_targ = np.kron(rho0, rho1)
        chan = chan1.tensor(chan2)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)
        chan = (chan1 ^ chan2)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)
        rho_targ = (np.diag([1, 1, 1, 1]) / 4)
        chan_dep = Choi(self.depol_choi(1))
        chan = chan_dep.tensor(chan_dep)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)
        chan = (chan_dep ^ chan_dep)
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho_init), rho_targ)

    def test_power(self):
        """power(n) composes the channel with itself n times."""
        p_id = 0.9
        depol = Choi(self.depol_choi((1 - p_id)))
        # Identity-preservation probability multiplies under composition.
        p_id3 = (p_id ** 3)
        chan3 = depol.power(3)
        targ3 = Choi(self.depol_choi((1 - p_id3)))
        self.assertEqual(chan3, targ3)

    def test_power_except(self):
        """Non-integer powers are rejected."""
        chan = Choi(self.depol_choi(1))
        self.assertRaises(QiskitError, chan.power, 0.5)

    def test_add(self):
        """Linear addition of Choi matrices."""
        mat1 = (0.5 * self.choiI)
        mat2 = (0.5 * self.depol_choi(1))
        targ = Choi((mat1 + mat2))
        chan1 = Choi(mat1)
        chan2 = Choi(mat2)
        self.assertEqual(chan1.add(chan2), targ)
        self.assertEqual((chan1 + chan2), targ)

    def test_add_except(self):
        """Addition with mismatched dimensions or non-channels raises."""
        chan1 = Choi(self.choiI)
        chan2 = Choi(np.eye(8))
        self.assertRaises(QiskitError, chan1.add, chan2)
        self.assertRaises(QiskitError, chan1.add, 5)

    def test_subtract(self):
        """Linear subtraction of Choi matrices."""
        mat1 = (0.5 * self.choiI)
        mat2 = (0.5 * self.depol_choi(1))
        targ = Choi((mat1 - mat2))
        chan1 = Choi(mat1)
        chan2 = Choi(mat2)
        self.assertEqual(chan1.subtract(chan2), targ)
        self.assertEqual((chan1 - chan2), targ)

    def test_subtract_except(self):
        """Subtraction with mismatched dimensions or non-channels raises."""
        chan1 = Choi(self.choiI)
        chan2 = Choi(np.eye(8))
        self.assertRaises(QiskitError, chan1.subtract, chan2)
        self.assertRaises(QiskitError, chan1.subtract, 5)

    def test_multiply(self):
        """Scalar multiplication, both method and operator forms."""
        chan = Choi(self.choiI)
        val = 0.5
        targ = Choi((val * self.choiI))
        self.assertEqual(chan.multiply(val), targ)
        self.assertEqual((val * chan), targ)
        self.assertEqual((chan * val), targ)

    def test_multiply_except(self):
        """Multiplication by non-scalars raises."""
        chan = Choi(self.choiI)
        self.assertRaises(QiskitError, chan.multiply, 's')
        self.assertRaises(QiskitError, chan.multiply, chan)

    def test_negate(self):
        """Unary negation negates the Choi matrix."""
        chan = Choi(self.choiI)
        targ = Choi(((- 1) * self.choiI))
        self.assertEqual((- chan), targ)
class wrong_loss(nn.Module):
    """KL-divergence loss between a predicted score distribution and a
    (normalized) target score distribution.

    The target scores are normalized per row to sum to 1; predictions go
    through log-softmax. The final KL value is scaled by the number of
    classes, which makes the all-element mean equivalent to a per-sample
    ("batchmean") average.
    """

    def __init__(self):
        super(wrong_loss, self).__init__()

    def forward(self, pred_score, target_score):
        """Return the scaled KL divergence loss (scalar tensor).

        Args:
            pred_score: raw logits, shape (batch, num_classes).
            target_score: non-negative target scores, same shape.
        """
        # Normalize targets row-wise; guard all-zero rows against /0.
        tar_sum = torch.sum(target_score, dim=1, keepdim=True)
        tar_sum_is_0 = torch.eq(tar_sum, 0)
        tar_sum.masked_fill_(tar_sum_is_0, 1e-06)
        tar = (target_score / tar_sum)
        res = F.log_softmax(pred_score, dim=1)
        # FIX: ``size_average=True`` is deprecated; ``reduction='mean'`` is
        # the exact equivalent (mean over all elements).
        loss = F.kl_div(res, tar, reduction='mean')
        loss = loss * target_score.size(1)
        return loss
_tokenizer('bpe')
class SentencePieceBPETokenizer(SentencePieceUnigramTokenizer):
    """SentencePiece tokenizer using the BPE model type."""

    # Model-type identifier — presumably consumed by the parent trainer as
    # SentencePiece's ``model_type``; verify against SentencePieceUnigramTokenizer.
    MODEL_TYPE = 'bpe'
def find_model_using_name(model_name):
    """Import ``models/<model_name>_model.py`` and return its model class.

    The module must define a subclass of ``torch.nn.Module`` whose name,
    lowercased, equals ``<model_name>model`` with underscores removed.

    Args:
        model_name: short model identifier, e.g. ``'cycle_gan'``.

    Returns:
        The matching model class.

    Exits with a non-zero status (after printing a diagnostic) when no
    matching class is found.
    """
    model_filename = 'models.' + model_name + '_model'
    modellib = importlib.import_module(model_filename)
    model = None
    target_model_name = model_name.replace('_', '') + 'model'
    for name, cls in modellib.__dict__.items():
        # Guard with isinstance(cls, type): issubclass raises TypeError on
        # non-class module attributes (functions, constants) whose name
        # happens to match.
        if (name.lower() == target_model_name.lower()
                and isinstance(cls, type)
                and issubclass(cls, torch.nn.Module)):
            model = cls
    if model is None:
        print('In %s.py, there should be a subclass of torch.nn.Module with class name that matches %s in lowercase.' % (model_filename, target_model_name))
        # FIX: exit with a non-zero status — exit(0) signalled success to
        # callers/shells even though the lookup failed.
        exit(1)
    return model
class BaseTrainer():
    """Base class for trainers: owns the epoch loop, checkpointing and
    crash-restart logic. Subclasses must implement ``train_epoch``.
    """

    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None):
        """
        args:
            actor - The actor wrapping the network; computes the loss.
            loaders - List of dataset loaders (e.g. [train_loader, val_loader]).
            optimizer - Optimizer over the actor's parameters.
            settings - Training settings; must expose ``env.workspace_dir``
                and ``use_gpu`` (and optionally ``device``).
            lr_scheduler - Optional learning rate scheduler.
        """
        self.actor = actor
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.loaders = loaders
        self.update_settings(settings)
        self.epoch = 0
        self.stats = {}
        # Prefer an explicit settings.device; otherwise pick cuda:0 when
        # available and enabled.
        self.device = getattr(settings, 'device', None)
        if self.device is None:
            self.device = torch.device('cuda:0' if (torch.cuda.is_available() and settings.use_gpu) else 'cpu')
        self.actor.to(self.device)

    def update_settings(self, settings=None):
        """Update trainer settings and (re)create the checkpoint directory."""
        if settings is not None:
            self.settings = settings
        if self.settings.env.workspace_dir is not None:
            self.settings.env.workspace_dir = os.path.expanduser(self.settings.env.workspace_dir)
            self._checkpoint_dir = os.path.join(self.settings.env.workspace_dir, 'checkpoints')
            # exist_ok avoids the check-then-create race of the original.
            os.makedirs(self._checkpoint_dir, exist_ok=True)
        else:
            self._checkpoint_dir = None

    def train(self, max_epochs, load_latest=False, fail_safe=True):
        """Train for ``max_epochs`` epochs.

        args:
            max_epochs - Last epoch index to train (1-based, inclusive).
            load_latest - Resume from the latest checkpoint before starting.
            fail_safe - On a crash, reload the last checkpoint and retry
                (up to 10 attempts) instead of raising.
        """
        epoch = -1
        num_tries = 10
        for i in range(num_tries):
            try:
                if load_latest:
                    self.load_checkpoint()
                for epoch in range(self.epoch + 1, max_epochs + 1):
                    self.epoch = epoch
                    self.train_epoch()
                    if self.lr_scheduler is not None:
                        self.lr_scheduler.step()
                    if self._checkpoint_dir:
                        self.save_checkpoint()
            # FIX: a bare ``except:`` also swallowed KeyboardInterrupt and
            # SystemExit, making the run impossible to abort; catch
            # Exception instead.
            except Exception:
                print('Training crashed at epoch {}'.format(epoch))
                if fail_safe:
                    # Re-run the crashed epoch from the previous checkpoint.
                    self.epoch -= 1
                    load_latest = True
                    print('Traceback for the error!')
                    print(traceback.format_exc())
                    print('Restarting training from last epoch ...')
                else:
                    raise
            else:
                # FIX: training finished cleanly — stop retrying instead of
                # running (empty) retry iterations.
                break
        print('Finished training!')

    def train_epoch(self):
        """One epoch of training; must be implemented by subclasses."""
        raise NotImplementedError

    def save_checkpoint(self):
        """Save a checkpoint of the network and trainer state.

        Writes to a ``.tmp`` file first, then renames, so a crash mid-write
        never leaves a corrupt ``.pth.tar``.
        """
        net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net
        actor_type = type(self.actor).__name__
        net_type = type(net).__name__
        state = {
            'epoch': self.epoch,
            'actor_type': actor_type,
            'net_type': net_type,
            'net': net.state_dict(),
            'net_info': getattr(net, 'info', None),
            'constructor': getattr(net, 'constructor', None),
            'optimizer': self.optimizer.state_dict(),
            'stats': self.stats,
            'settings': self.settings,
        }
        directory = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path)
        os.makedirs(directory, exist_ok=True)
        # Only every 3rd epoch is persisted to limit disk usage.
        if (self.epoch % 3) == 0:
            tmp_file_path = '{}/{}_ep{:04d}.tmp'.format(directory, net_type, self.epoch)
            torch.save(state, tmp_file_path)
            file_path = '{}/{}_ep{:04d}.pth.tar'.format(directory, net_type, self.epoch)
            os.rename(tmp_file_path, file_path)

    def load_checkpoint(self, checkpoint=None, fields=None, ignore_fields=None, load_constructor=False):
        """Load a checkpoint.

        args:
            checkpoint - None: latest for this net type; int: that epoch;
                str: a checkpoint file path or a directory to search.
            fields - Checkpoint keys to restore (default: all).
            ignore_fields - Keys to skip (always extended with volatile ones).
            load_constructor - Also restore the saved net constructor.

        Returns True on success, None when no checkpoint was found.
        """
        net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net
        actor_type = type(self.actor).__name__
        net_type = type(net).__name__
        if checkpoint is None:
            # Most recent checkpoint for this network type.
            checkpoint_list = sorted(glob.glob('{}/{}/{}_ep*.pth.tar'.format(self._checkpoint_dir, self.settings.project_path, net_type)))
            if checkpoint_list:
                checkpoint_path = checkpoint_list[-1]
            else:
                print('No matching checkpoint file found')
                return
        elif isinstance(checkpoint, int):
            # Checkpoint of a specific epoch.
            checkpoint_path = '{}/{}/{}_ep{:04d}.pth.tar'.format(self._checkpoint_dir, self.settings.project_path, net_type, checkpoint)
        elif isinstance(checkpoint, str):
            if os.path.isdir(checkpoint):
                # Latest checkpoint in the given directory.
                checkpoint_list = sorted(glob.glob('{}/*_ep*.pth.tar'.format(checkpoint)))
                if checkpoint_list:
                    checkpoint_path = checkpoint_list[-1]
                else:
                    raise Exception('No checkpoint found')
            else:
                checkpoint_path = os.path.expanduser(checkpoint)
        else:
            raise TypeError
        checkpoint_dict = loading.torch_load_legacy(checkpoint_path)
        assert net_type == checkpoint_dict['net_type'], 'Network is not of correct type.'
        if fields is None:
            fields = checkpoint_dict.keys()
        if ignore_fields is None:
            ignore_fields = ['settings']
        # These are either volatile or restored explicitly below.
        ignore_fields.extend(['lr_scheduler', 'constructor', 'net_type', 'actor_type', 'net_info'])
        for key in fields:
            if key in ignore_fields:
                continue
            if key == 'net':
                net.load_state_dict(checkpoint_dict[key])
            elif key == 'optimizer':
                self.optimizer.load_state_dict(checkpoint_dict[key])
            else:
                setattr(self, key, checkpoint_dict[key])
        if load_constructor and ('constructor' in checkpoint_dict) and (checkpoint_dict['constructor'] is not None):
            net.constructor = checkpoint_dict['constructor']
        if ('net_info' in checkpoint_dict) and (checkpoint_dict['net_info'] is not None):
            net.info = checkpoint_dict['net_info']
        # FIX: guard against lr_scheduler being None — the original crashed
        # with AttributeError when resuming a trainer without a scheduler.
        if ('epoch' in fields) and (self.lr_scheduler is not None):
            self.lr_scheduler.last_epoch = self.epoch
        return True
def orient_shapes_hwd(data: (list | tuple), slice_axis: int) -> np.ndarray:
if (slice_axis == 0):
return np.array(data)[[2, 1, 0]]
elif (slice_axis == 1):
return np.array(data)[[2, 0, 1]]
elif (slice_axis == 2):
return np.array(data) |
def parse_args():
    """Parse command-line arguments for the music-captioning training script.

    Returns:
        argparse.Namespace with experiment_id, metrics, device_num,
        decoding and beam_size.
    """
    parser = argparse.ArgumentParser(description='Train a music captioning model')
    parser.add_argument('experiment_id', type=str)
    # FIX: ``type=bool`` is a classic argparse pitfall — bool('False') is
    # True, so ANY value passed on the command line enabled metrics.
    # A store_true flag keeps args.metrics a bool (default False).
    parser.add_argument('--metrics', action='store_true', default=False)
    parser.add_argument('--device_num', type=str, default='0')
    parser.add_argument('--decoding', type=str, help='type of decoding to use in inference', default=None)
    parser.add_argument('--beam_size', type=int, help='beam size to use in beam search decoding', default=None)
    args = parser.parse_args()
    return args
def filter_kwargs(func):
    """Decorator that drops every keyword argument except the known ones
    ('backend', 'layers', 'models', 'utils') before calling ``func``.

    FIX: line 2 of the original was a bare ``(func)`` expression — a
    stripped ``@functools.wraps(func)`` decorator; restored so the wrapper
    keeps ``func``'s name and docstring.
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        allowed = ('backend', 'layers', 'models', 'utils')
        new_kwargs = {k: v for k, v in kwargs.items() if k in allowed}
        return func(*args, **new_kwargs)
    return wrapper
def get_morgan_bit_fps(data, bits=2048, radius=2):
    """Compute Morgan bit fingerprints for a collection of molecules.

    Args:
        data: iterable of RDKit molecules.
        bits: fingerprint length (number of bits).
        radius: Morgan fingerprint radius.

    Returns:
        pandas.DataFrame with one row per molecule and one column per bit;
        each cell is a '0'/'1' character from the fingerprint bit string.
    """
    rows = []
    for mol in data:
        bit_string = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=bits).ToBitString()
        rows.append(list(bit_string))
    return pd.DataFrame(rows)
class TestInstructions(QiskitTestCase):
    """Tests for the Instruction class: equality, circuit round-trips,
    opaque gates, mirror and inverse."""

    def test_instructions_equal(self):
        """Equality compares name and parameters, not identity."""
        hop1 = Instruction('h', 1, 0, [])
        hop2 = Instruction('s', 1, 0, [])
        hop3 = Instruction('h', 1, 0, [])
        uop1 = Instruction('u', 1, 0, [0.4, 0.5, 0.5])
        uop2 = Instruction('u', 1, 0, [0.4, 0.6, 0.5])
        uop3 = Instruction('v', 1, 0, [0.4, 0.5, 0.5])
        uop4 = Instruction('u', 1, 0, [0.4, 0.5, 0.5])
        self.assertFalse(hop1 == hop2)
        self.assertTrue(hop1 == hop3)
        self.assertFalse(uop1 == uop2)
        self.assertTrue(uop1 == uop4)
        self.assertFalse(uop1 == uop3)
        self.assertTrue(HGate() == HGate())
        self.assertFalse(HGate() == CnotGate())
        self.assertFalse(hop1 == HGate())

    def test_instructions_equal_with_parameters(self):
        """Equality with symbolic parameters is order- and value-sensitive."""
        theta = Parameter('theta')
        phi = Parameter('phi')
        self.assertEqual(Instruction('u', 1, 0, [theta, phi, 0.4]), Instruction('u', 1, 0, [theta, phi, 0.4]))
        self.assertNotEqual(Instruction('u', 1, 0, [theta, phi, 0]), Instruction('u', 1, 0, [phi, theta, 0]))
        self.assertNotEqual(Instruction('u', 1, 0, [theta, phi, 0.4]), Instruction('u', 1, 0, [theta, phi, 0.5]))
        self.assertNotEqual(Instruction('u', 1, 0, [0.3, phi, 0.4]), Instruction('u', 1, 0, [theta, phi, 0.5]))

    # FIX: the original method lacked the ``test_`` prefix, so the unittest
    # runner never discovered or executed it.
    def test_circuit_instruction_circuit_roundtrip(self):
        """A circuit converted to an instruction and back is unchanged."""
        q = QuantumRegister(4)
        c = ClassicalRegister(4)
        circ1 = QuantumCircuit(q, c, name='circ1')
        circ1.h(q[0])
        circ1.crz(0.1, q[0], q[1])
        circ1.iden(q[1])
        circ1.u3(0.1, 0.2, (- 0.2), q[0])
        circ1.barrier()
        circ1.measure(q, c)
        circ1.rz(0.8, q[0]).c_if(c, 6)
        inst = circ1.to_instruction()
        circ2 = QuantumCircuit(q, c, name='circ2')
        circ2.append(inst, q[:])
        self.assertEqual(circ1, circ2)

    def test_append_opaque_wrong_dimension(self):
        """Appending a gate on the wrong number of qubits must raise."""
        qr = QuantumRegister(2)
        circ = QuantumCircuit(qr)
        opaque_gate = Gate(name='crz_2', num_qubits=2, params=[0.5])
        self.assertRaises(QiskitError, circ.append, opaque_gate, [qr[0]])

    def test_opaque_gate(self):
        """An opaque gate survives append and decompose unchanged."""
        q = QuantumRegister(4)
        c = ClassicalRegister(4)
        circ = QuantumCircuit(q, c, name='circ')
        opaque_gate = Gate(name='crz_2', num_qubits=2, params=[0.5])
        circ.append(opaque_gate, [q[2], q[0]])
        self.assertEqual(circ.data[0][0].name, 'crz_2')
        self.assertEqual(circ.decompose(), circ)

    def test_opaque_instruction(self):
        """An opaque instruction survives append and decompose unchanged."""
        q = QuantumRegister(4)
        c = ClassicalRegister(2)
        circ = QuantumCircuit(q, c)
        opaque_inst = Instruction(name='my_inst', num_qubits=3, num_clbits=1, params=[0.5])
        circ.append(opaque_inst, [q[3], q[1], q[0]], [c[1]])
        self.assertEqual(circ.data[0][0].name, 'my_inst')
        self.assertEqual(circ.decompose(), circ)

    def test_mirror_gate(self):
        """mirror() reverses the order of a composite gate's definition."""
        q = QuantumRegister(4)
        c = ClassicalRegister(4)
        circ = QuantumCircuit(q, c, name='circ')
        circ.h(q[0])
        circ.crz(0.1, q[0], q[1])
        circ.iden(q[1])
        circ.u3(0.1, 0.2, (- 0.2), q[0])
        gate = circ.to_instruction()
        circ = QuantumCircuit(q, c, name='circ')
        circ.u3(0.1, 0.2, (- 0.2), q[0])
        circ.iden(q[1])
        circ.crz(0.1, q[0], q[1])
        circ.h(q[0])
        gate_mirror = circ.to_instruction()
        self.assertEqual(gate.mirror().definition, gate_mirror.definition)

    def test_mirror_instruction(self):
        """mirror() reverses instructions, including measure and c_if."""
        q = QuantumRegister(4)
        c = ClassicalRegister(4)
        circ = QuantumCircuit(q, c, name='circ')
        circ.t(q[1])
        circ.u3(0.1, 0.2, (- 0.2), q[0])
        circ.barrier()
        circ.measure(q[0], c[0])
        circ.rz(0.8, q[0]).c_if(c, 6)
        inst = circ.to_instruction()
        circ = QuantumCircuit(q, c, name='circ')
        circ.rz(0.8, q[0]).c_if(c, 6)
        circ.measure(q[0], c[0])
        circ.barrier()
        circ.u3(0.1, 0.2, (- 0.2), q[0])
        circ.t(q[1])
        inst_mirror = circ.to_instruction()
        self.assertEqual(inst.mirror().definition, inst_mirror.definition)

    def test_mirror_opaque(self):
        """mirror() of an opaque/elementary gate is itself."""
        opaque_gate = Gate(name='crz_2', num_qubits=2, params=[0.5])
        self.assertEqual(opaque_gate.mirror(), opaque_gate)
        hgate = HGate()
        self.assertEqual(hgate.mirror(), hgate)

    def test_inverse_gate(self):
        """inverse() reverses the order and inverts each sub-gate."""
        q = QuantumRegister(4)
        circ = QuantumCircuit(q, name='circ')
        circ.h(q[0])
        circ.crz(0.1, q[0], q[1])
        circ.iden(q[1])
        circ.u3(0.1, 0.2, (- 0.2), q[0])
        gate = circ.to_instruction()
        circ = QuantumCircuit(q, name='circ')
        circ.u3((- 0.1), 0.2, (- 0.2), q[0])
        circ.iden(q[1])
        circ.crz((- 0.1), q[0], q[1])
        circ.h(q[0])
        gate_inverse = circ.to_instruction()
        self.assertEqual(gate.inverse().definition, gate_inverse.definition)

    def test_inverse_recursive(self):
        """Inverting a circuit inverts nested composite gates as well."""
        qr0 = QuantumRegister(2)
        circ0 = QuantumCircuit(qr0, name='circ0')
        circ0.t(qr0[0])
        circ0.rx(0.4, qr0[1])
        circ0.cx(qr0[1], qr0[0])
        little_gate = circ0.to_instruction()
        qr1 = QuantumRegister(4)
        circ1 = QuantumCircuit(qr1, name='circ1')
        circ1.cu1((- 0.1), qr1[0], qr1[2])
        circ1.iden(qr1[1])
        circ1.append(little_gate, [qr1[2], qr1[3]])
        circ_inv = QuantumCircuit(qr1, name='circ1_dg')
        circ_inv.append(little_gate.inverse(), [qr1[2], qr1[3]])
        circ_inv.iden(qr1[1])
        circ_inv.cu1(0.1, qr1[0], qr1[2])
        self.assertEqual(circ1.inverse(), circ_inv)

    def test_inverse_instruction_with_measure(self):
        """Instructions containing measurement cannot be inverted."""
        q = QuantumRegister(4)
        c = ClassicalRegister(4)
        circ = QuantumCircuit(q, c, name='circ')
        circ.t(q[1])
        circ.u3(0.1, 0.2, (- 0.2), q[0])
        circ.barrier()
        circ.measure(q[0], c[0])
        inst = circ.to_instruction()
        self.assertRaises(QiskitError, inst.inverse)

    def test_inverse_instruction_with_conditional(self):
        """Instructions containing conditionals cannot be inverted."""
        q = QuantumRegister(4)
        c = ClassicalRegister(4)
        circ = QuantumCircuit(q, c, name='circ')
        circ.t(q[1])
        circ.u3(0.1, 0.2, (- 0.2), q[0])
        circ.barrier()
        circ.measure(q[0], c[0])
        circ.rz(0.8, q[0]).c_if(c, 6)
        inst = circ.to_instruction()
        self.assertRaises(QiskitError, inst.inverse)

    def test_inverse_opaque(self):
        """Opaque gates have no definition and so no inverse."""
        opaque_gate = Gate(name='crz_2', num_qubits=2, params=[0.5])
        self.assertRaises(QiskitError, opaque_gate.inverse)
_module()
class TPSPreprocessor(BasePreprocessor):
    """Thin-plate-spline rectification preprocessor.

    Predicts fiducial points with a localization network, builds a sampling
    grid from them, and resamples the input image onto the rectified grid.
    """

    def __init__(self, num_fiducial=20, img_size=(32, 100), rectified_img_size=(32, 100), num_img_channel=1, init_cfg=None):
        """
        Args:
            num_fiducial: number of fiducial control points (positive int).
            img_size: (height, width) of the input image.
            rectified_img_size: (height, width) of the rectified output.
            num_img_channel: number of image channels.
            init_cfg: initialization config forwarded to the base class.
        """
        super().__init__(init_cfg=init_cfg)
        # Validate constructor arguments up front.
        assert isinstance(num_fiducial, int)
        assert (num_fiducial > 0)
        assert isinstance(img_size, tuple)
        assert isinstance(rectified_img_size, tuple)
        assert isinstance(num_img_channel, int)
        self.num_fiducial = num_fiducial
        self.img_size = img_size
        self.rectified_img_size = rectified_img_size
        self.num_img_channel = num_img_channel
        self.LocalizationNetwork = LocalizationNetwork(self.num_fiducial, self.num_img_channel)
        self.GridGenerator = GridGenerator(self.num_fiducial, self.rectified_img_size)

    def forward(self, batch_img):
        """Rectify a batch of images.

        Args:
            batch_img: input image tensor — assumed (N, C, H, W); confirm
                against LocalizationNetwork.

        Returns:
            The rectified image batch, same layout as the input.
        """
        fiducial_points = self.LocalizationNetwork(batch_img)
        sampling_grid = self.GridGenerator.build_P_prime(fiducial_points, batch_img.device)
        out_h, out_w = self.rectified_img_size
        grid_nhw2 = sampling_grid.reshape([sampling_grid.size(0), out_h, out_w, 2])
        return F.grid_sample(batch_img, grid_nhw2, padding_mode='border', align_corners=True)
def train(args, train_dataset, model, tokenizer):
    """Run the fine-tuning loop (HuggingFace-style GLUE training).

    Supports gradient accumulation, fp16 via apex, DataParallel and
    DistributedDataParallel, resuming from a checkpoint path whose name
    ends in ``-<global_step>``, periodic evaluation/logging to
    TensorBoard, and periodic checkpointing.

    Returns:
        (global_step, average training loss per step).
    """
    # TensorBoard writer only on the main process.
    if (args.local_rank in [(- 1), 0]):
        tb_writer = SummaryWriter()
    args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
    train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset))
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Derive the total number of optimizer steps; max_steps overrides epochs.
    if (args.max_steps > 0):
        t_total = args.max_steps
        args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
    else:
        t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
    # No weight decay on biases and LayerNorm weights (standard BERT recipe).
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    # Restore optimizer/scheduler state when resuming from a checkpoint dir.
    if (os.path.isfile(os.path.join(args.model_name_or_path, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name_or_path, 'scheduler.pt'))):
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'optimizer.pt')))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'scheduler.pt')))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError('Please install apex from to use fp16 training.')
        (model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # Multi-GPU (single node) and distributed wrappers.
    if (args.n_gpu > 1):
        model = torch.nn.DataParallel(model)
    if (args.local_rank != (- 1)):
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    logger.info('***** Running training *****')
    logger.info('  Num examples = %d', len(train_dataset))
    logger.info('  Num Epochs = %d', args.num_train_epochs)
    logger.info('  Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
    logger.info('  Total train batch size (w. parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1)))
    logger.info('  Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    logger.info('  Total optimization steps = %d', t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Resuming: the global step is parsed from a path like '.../checkpoint-500'.
    if os.path.exists(args.model_name_or_path):
        global_step = int(args.model_name_or_path.split('-')[(- 1)].split('/')[0])
        epochs_trained = (global_step // (len(train_dataloader) // args.gradient_accumulation_steps))
        steps_trained_in_current_epoch = (global_step % (len(train_dataloader) // args.gradient_accumulation_steps))
        logger.info('  Continuing training from checkpoint, will skip to saved global_step')
        logger.info('  Continuing training from epoch %d', epochs_trained)
        logger.info('  Continuing training from global step %d', global_step)
        logger.info('  Will skip the first %d steps in the first epoch', steps_trained_in_current_epoch)
    (tr_loss, logging_loss) = (0.0, 0.0)
    model.zero_grad()
    train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0]))
    set_seed(args)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0]))
        for (step, batch) in enumerate(epoch_iterator):
            # Skip batches already seen before the resume point.
            if (steps_trained_in_current_epoch > 0):
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple((t.to(args.device) for t in batch))
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
            # DistilBERT takes no token_type_ids; XLM/RoBERTa pass None.
            if (args.model_type != 'distilbert'):
                inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet', 'albert']) else None)
            outputs = model(**inputs)
            loss = outputs[0]
            if (args.n_gpu > 1):
                # DataParallel returns one loss per GPU; average them.
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Optimizer step only every gradient_accumulation_steps batches.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                # Periodic logging (and optional eval) on the main process.
                if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
                    logs = {}
                    if ((args.local_rank == (- 1)) and args.evaluate_during_training):
                        results = evaluate(args, model, tokenizer)
                        for (key, value) in results.items():
                            eval_key = 'eval_{}'.format(key)
                            logs[eval_key] = value
                    # Mean loss since the previous logging step.
                    loss_scalar = ((tr_loss - logging_loss) / args.logging_steps)
                    learning_rate_scalar = scheduler.get_lr()[0]
                    logs['learning_rate'] = learning_rate_scalar
                    logs['loss'] = loss_scalar
                    logging_loss = tr_loss
                    for (key, value) in logs.items():
                        tb_writer.add_scalar(key, value, global_step)
                    print(json.dumps({**logs, **{'step': global_step}}))
                # Periodic checkpointing on the main process.
                if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if (not os.path.exists(output_dir)):
                        os.makedirs(output_dir)
                    # Unwrap DataParallel/DDP before saving.
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info('Saving model checkpoint to %s', output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt'))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt'))
                    logger.info('Saving optimizer and scheduler states to %s', output_dir)
            if ((args.max_steps > 0) and (global_step > args.max_steps)):
                epoch_iterator.close()
                break
        if ((args.max_steps > 0) and (global_step > args.max_steps)):
            train_iterator.close()
            break
    if (args.local_rank in [(- 1), 0]):
        tb_writer.close()
    return (global_step, (tr_loss / global_step))
class AdExchangeAgent(ph.Agent):
    """Ad-exchange agent: forwards impression requests from the publisher to
    all advertisers and runs a first- or second-price auction on their bids.
    """

    # NOTE(review): the decorator on this inner class was mangled in the
    # source ("(frozen=True)"); restored as a frozen dataclass — confirm
    # against the phantom framework's view conventions.
    @dataclass(frozen=True)
    class AdExchangeView(ph.AgentView):
        # Per-user targeting attributes exposed to advertisers.
        users_info: dict

    def __init__(self, agent_id: str, publisher_id: str, advertiser_ids: Iterable = tuple(), strategy: str = 'first'):
        """
        Args:
            agent_id: this agent's id.
            publisher_id: id of the publisher to notify with the winning ad.
            advertiser_ids: ids of the advertisers to solicit bids from.
            strategy: 'first' or 'second' price auction.
        """
        super().__init__(agent_id)
        self.publisher_id = publisher_id
        self.advertiser_ids = advertiser_ids
        self.strategy = strategy

    def view(self, neighbour_id=None) -> ph.View:
        """Expose user info only to advertiser neighbours (ids 'ADV…')."""
        if neighbour_id and neighbour_id.startswith('ADV'):
            return self.AdExchangeView(users_info={1: {'age': 18, 'zipcode': 94025}, 2: {'age': 40, 'zipcode': 90250}})
        else:
            return super().view(neighbour_id)

    # NOTE(review): the decorator was mangled (".msg_handler(...)");
    # restored as phantom's message-handler decorator — verify the exact
    # decorator path against the phantom API.
    @ph.agents.msg_handler(ImpressionRequest)
    def handle_impression_request(self, _ctx: ph.Context, msg: ph.Message[ImpressionRequest]):
        """Fan an impression request out to every advertiser."""
        logger.debug('AdExchange impression request %s', msg)
        return [(adv_id, msg.payload) for adv_id in self.advertiser_ids]

    def handle_batch(self, ctx: ph.Context, batch: Sequence[ph.Message]):
        """Split a message batch into bids (auctioned together) and the rest."""
        bids = []
        msgs = []
        for message in batch:
            if isinstance(message.payload, Bid):
                bids.append(message)
            else:
                msgs += self.handle_message(ctx, message)
        if len(bids) > 0:
            msgs += self.auction(bids)
        return msgs

    def auction(self, bids: Sequence[ph.Message[Bid]]):
        """Run the configured auction; notify the publisher and all bidders.

        Losing advertisers receive an AuctionResult with zero cost.
        """
        if self.strategy == 'first':
            (winner, cost) = self._first_price_auction(bids)
        elif self.strategy == 'second':
            (winner, cost) = self._second_price_auction(bids)
        else:
            raise ValueError(f'Unknown auction strategy: {self.strategy}')
        logger.debug('AdExchange auction done winner: %s cost: %s', winner, cost)
        msgs = []
        advertiser_ids = [m.sender_id for m in bids]
        msgs.append((self.publisher_id, Ads(advertiser_id=winner.sender_id, theme=winner.payload.theme, user_id=winner.payload.user_id)))
        for adv_id in advertiser_ids:
            adv_cost = cost if adv_id == winner.sender_id else 0.0
            msgs.append((adv_id, AuctionResult(cost=adv_cost, winning_bid=winner.payload.bid)))
        return msgs

    def _first_price_auction(self, bids: Sequence[ph.Message[Bid]]):
        """Highest bid wins and pays its own bid."""
        sorted_bids = sorted(bids, key=(lambda m: m.payload.bid), reverse=True)
        winner = sorted_bids[0]
        cost = sorted_bids[0].payload.bid
        return (winner, cost)

    def _second_price_auction(self, bids: Sequence[ph.Message[Bid]]):
        """Highest bid wins and pays the second-highest bid (own bid if alone)."""
        sorted_bids = sorted(bids, key=(lambda m: m.payload.bid), reverse=True)
        winner = sorted_bids[0]
        cost = sorted_bids[1].payload.bid if len(bids) > 1 else sorted_bids[0].payload.bid
        return (winner, cost)
def get_prefix_allowed_tokens_fn(model, split_token='|', title_trie: Trie=None):
    """Build a prefix_allowed_tokens_fn for constrained decoding.

    Wraps the model's encode/decode and its decoder dictionary's special
    tokens into the end-to-end prefix-constraint helper.

    Args:
        model: fairseq-style model exposing encode/decode and
            ``model.model.decoder.dictionary``.
        split_token: separator token used by the constraint function.
        title_trie: optional trie of allowed titles.
    """
    dictionary = model.model.decoder.dictionary

    def encode_fn(text):
        return model.encode(text).tolist()

    def decode_fn(token_ids):
        return model.decode(torch.tensor(token_ids))

    return _get_end_to_end_prefix_allowed_tokens_fn(
        encode_fn,
        decode_fn,
        dictionary.bos(),
        dictionary.pad(),
        dictionary.eos(),
        len(dictionary),
        split_token,
        title_trie,
    )
def test_starred_assignment_in_middle():
    """Staleness detection through ``x, *star, y`` unpacking: only the
    unpacked names whose upstream sources changed should be flagged."""
    run_cell('a, b, c, d, e = 1, 2, 3, 4, 5')
    run_cell('x, *star, y = [a, b, c, d, e]')
    # Mutating `a` makes only `x` stale; `star` and `y` stay fresh.
    run_cell('a += 1')
    run_cell('logging.info(x)')
    assert_detected()
    run_cell('logging.info(star)')
    assert_not_detected()
    run_cell('logging.info(y)')
    assert_not_detected()
    # Mutating `e` makes only `y` stale.
    run_cell('e += 1')
    run_cell('logging.info(y)')
    assert_detected()
    run_cell('logging.info(star)')
    assert_not_detected()
    # Mutating `c` hits the starred middle, so `star` becomes stale.
    run_cell('c += 1')
    run_cell('logging.info(star)')
    assert_detected()
class nnUNetTrainerV2(nnUNetTrainer):
    """nnU-Net "V2" trainer: extends nnUNetTrainer with deep supervision,
    a poly learning-rate schedule, and SGD with high Nesterov momentum.
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False):
        """Forward all arguments to nnUNetTrainer and set V2 hyperparameters."""
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16)
        self.max_num_epochs = 1000  # fixed horizon; also drives the poly LR decay
        self.initial_lr = 0.01
        # Both are populated later by setup_DA_params()/initialize().
        self.deep_supervision_scales = None
        self.ds_loss_weights = None
        self.pin_memory = True

    def initialize(self, training=True, force_load_plans=False):
        """Set up plans, the deep-supervision loss, data generators, the
        network and the optimizer.  Idempotent: a second call is a no-op.

        training: if False, only network + optimizer are built (no
            data generators or dataset unpacking).
        force_load_plans: re-read the plans file even if already loaded.
        """
        if (not self.was_initialized):
            maybe_mkdir_p(self.output_folder)
            if (force_load_plans or (self.plans is None)):
                self.load_plans_file()
            self.process_plans(self.plans)
            self.setup_DA_params()
            # Deep-supervision loss weights: weight output i by 1/2^i
            # (full resolution first), zero out the lowest resolution,
            # then normalize the weights to sum to 1.
            net_numpool = len(self.net_num_pool_op_kernel_sizes)
            weights = np.array([(1 / (2 ** i)) for i in range(net_numpool)])
            mask = np.array(([True] + [(True if (i < (net_numpool - 1)) else False) for i in range(1, net_numpool)]))
            weights[(~ mask)] = 0
            weights = (weights / weights.sum())
            self.ds_loss_weights = weights
            # Wrap the base loss so it consumes all supervision outputs.
            self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
            self.folder_with_preprocessed_data = join(self.dataset_directory, (self.plans['data_identifier'] + ('_stage%d' % self.stage)))
            if training:
                (self.dl_tr, self.dl_val) = self.get_basic_generators()
                if self.unpack_data:
                    # Unpack .npz -> .npy once so training reads are fast.
                    print('unpacking dataset')
                    unpack_dataset(self.folder_with_preprocessed_data)
                    print('done')
                else:
                    print('INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you will wait all winter for your model to finish!')
                (self.tr_gen, self.val_gen) = get_moreDA_augmentation(self.dl_tr, self.dl_val, self.data_aug_params['patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, pin_memory=self.pin_memory, use_nondetMultiThreadedAugmenter=False)
                self.print_to_log_file(('TRAINING KEYS:\n %s' % str(self.dataset_tr.keys())), also_print_to_console=False)
                self.print_to_log_file(('VALIDATION KEYS:\n %s' % str(self.dataset_val.keys())), also_print_to_console=False)
            else:
                pass
            self.initialize_network()
            self.initialize_optimizer_and_scheduler()
            assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True

    def initialize_network(self):
        """Build the Generic_UNet with 3D or 2D ops (depending on
        self.threeD), instance norm, leaky ReLU, He init, and deep
        supervision enabled."""
        if self.threeD:
            conv_op = nn.Conv3d
            dropout_op = nn.Dropout3d
            norm_op = nn.InstanceNorm3d
        else:
            conv_op = nn.Conv2d
            dropout_op = nn.Dropout2d
            norm_op = nn.InstanceNorm2d
        norm_op_kwargs = {'eps': 1e-05, 'affine': True}
        dropout_op_kwargs = {'p': 0, 'inplace': True}  # p=0: dropout effectively disabled
        net_nonlin = nn.LeakyReLU
        net_nonlin_kwargs = {'negative_slope': 0.01, 'inplace': True}
        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, (lambda x: x), InitWeights_He(0.01), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper

    def initialize_optimizer_and_scheduler(self):
        """SGD with Nesterov momentum 0.99; the LR is driven manually by
        maybe_update_lr, so no torch scheduler is attached."""
        assert (self.network is not None), 'self.initialize_network must be called first'
        self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, momentum=0.99, nesterov=True)
        self.lr_scheduler = None

    def run_online_evaluation(self, output, target):
        """Evaluate only on the highest-resolution deep-supervision head
        (index 0 of the output/target lists)."""
        target = target[0]
        output = output[0]
        return super().run_online_evaluation(output, target)

    def validate(self, do_mirroring: bool=True, use_sliding_window: bool=True, step_size: float=0.5, save_softmax: bool=True, use_gaussian: bool=True, overwrite: bool=True, validation_folder_name: str='validation_raw', debug: bool=False, all_in_gpu: bool=False, segmentation_export_kwargs: dict=None, run_postprocessing_on_folds: bool=True):
        """Run the parent validation with deep supervision temporarily
        disabled (restored afterwards regardless of outcome order here)."""
        ds = self.network.do_ds
        self.network.do_ds = False
        ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian, overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug, all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs, run_postprocessing_on_folds=run_postprocessing_on_folds)
        self.network.do_ds = ds
        return ret

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool=True, mirror_axes: Tuple[int]=None, use_sliding_window: bool=True, step_size: float=0.5, use_gaussian: bool=True, pad_border_mode: str='constant', pad_kwargs: dict=None, all_in_gpu: bool=False, verbose: bool=True, mixed_precision=True) -> Tuple[(np.ndarray, np.ndarray)]:
        """Predict with deep supervision temporarily disabled, then restore
        the previous do_ds flag."""
        ds = self.network.do_ds
        self.network.do_ds = False
        ret = super().predict_preprocessed_data_return_seg_and_softmax(data, do_mirroring=do_mirroring, mirror_axes=mirror_axes, use_sliding_window=use_sliding_window, step_size=step_size, use_gaussian=use_gaussian, pad_border_mode=pad_border_mode, pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose, mixed_precision=mixed_precision)
        self.network.do_ds = ds
        return ret

    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """One forward (and optionally backward) pass on the next batch.

        Uses autocast + GradScaler when fp16 is enabled; gradients are
        clipped to norm 12 in both paths.  Returns the detached loss as a
        numpy scalar.
        """
        data_dict = next(data_generator)
        data = data_dict['data']
        target = data_dict['target']
        data = maybe_to_torch(data)
        target = maybe_to_torch(target)
        if torch.cuda.is_available():
            data = to_cuda(data)
            target = to_cuda(target)
        self.optimizer.zero_grad()
        if self.fp16:
            with autocast():
                output = self.network(data)
                # Free the input early to reduce peak GPU memory.
                del data
                l = self.loss(output, target)
            if do_backprop:
                self.amp_grad_scaler.scale(l).backward()
                # Unscale before clipping so the clip threshold is in
                # real gradient units.
                self.amp_grad_scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data
            l = self.loss(output, target)
            if do_backprop:
                l.backward()
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.optimizer.step()
        if run_online_evaluation:
            self.run_online_evaluation(output, target)
        del target
        return l.detach().cpu().numpy()

    def do_split(self):
        """Populate self.dataset_tr / self.dataset_val for the current fold.

        fold == 'all' trains and validates on everything.  Otherwise a
        persistent k-fold split file is created/loaded; if the requested
        fold exceeds the stored splits, a seeded random 80:20 split is
        used instead.
        """
        if (self.fold == 'all'):
            tr_keys = val_keys = list(self.dataset.keys())
        else:
            splits_file = join(self.dataset_directory, 'splits_final.pkl')
            if (not isfile(splits_file)):
                # First run: create and persist the split so all folds of
                # this dataset share the same partitioning.
                # NOTE(review): n_splits=3 here — upstream nnU-Net uses 5;
                # confirm this is intentional.
                splits = []
                all_keys_sorted = np.sort(list(self.dataset.keys()))
                kfold = KFold(n_splits=3, shuffle=True, random_state=12345)
                self.print_to_log_file('Creating new {}-fold cross-validation split...'.format(kfold))
                for (i, (train_idx, test_idx)) in enumerate(kfold.split(all_keys_sorted)):
                    train_keys = np.array(all_keys_sorted)[train_idx]
                    test_keys = np.array(all_keys_sorted)[test_idx]
                    splits.append(OrderedDict())
                    splits[(- 1)]['train'] = train_keys
                    splits[(- 1)]['val'] = test_keys
                save_pickle(splits, splits_file)
            else:
                self.print_to_log_file('Using splits from existing split file:', splits_file)
                splits = load_pickle(splits_file)
                self.print_to_log_file(('The split file contains %d splits.' % len(splits)))
            self.print_to_log_file(('Desired fold for training: %d' % self.fold))
            if (self.fold < len(splits)):
                tr_keys = splits[self.fold]['train']
                val_keys = splits[self.fold]['val']
                self.print_to_log_file(('This split has %d training and %d validation cases.' % (len(tr_keys), len(val_keys))))
            else:
                # Requested fold not present in the file: fall back to a
                # reproducible (seeded by fold) random 80:20 split.
                self.print_to_log_file(('INFO: You requested fold %d for training but splits contain only %d folds. I am now creating a random (but seeded) 80:20 split!' % (self.fold, len(splits))))
                rnd = np.random.RandomState(seed=(12345 + self.fold))
                keys = np.sort(list(self.dataset.keys()))
                idx_tr = rnd.choice(len(keys), int((len(keys) * 0.8)), replace=False)
                idx_val = [i for i in range(len(keys)) if (i not in idx_tr)]
                tr_keys = [keys[i] for i in idx_tr]
                val_keys = [keys[i] for i in idx_val]
                self.print_to_log_file(('This random 80:20 split has %d training and %d validation cases.' % (len(tr_keys), len(val_keys))))
        tr_keys.sort()
        val_keys.sort()
        self.dataset_tr = OrderedDict()
        for i in tr_keys:
            self.dataset_tr[i] = self.dataset[i]
        self.dataset_val = OrderedDict()
        for i in val_keys:
            self.dataset_val[i] = self.dataset[i]

    def setup_DA_params(self):
        """Configure data-augmentation parameters and the deep-supervision
        downsampling scales (cumulative products of the pooling kernels,
        excluding the deepest level)."""
        self.deep_supervision_scales = ([[1, 1, 1]] + list((list(i) for i in (1 / np.cumprod(np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))))[:(- 1)])
        if self.threeD:
            self.data_aug_params = default_3D_augmentation_params
            # +/- 30 degrees, expressed in radians.
            self.data_aug_params['rotation_x'] = (((((- 30.0) / 360) * 2.0) * np.pi), (((30.0 / 360) * 2.0) * np.pi))
            self.data_aug_params['rotation_y'] = (((((- 30.0) / 360) * 2.0) * np.pi), (((30.0 / 360) * 2.0) * np.pi))
            self.data_aug_params['rotation_z'] = (((((- 30.0) / 360) * 2.0) * np.pi), (((30.0 / 360) * 2.0) * np.pi))
            if self.do_dummy_2D_aug:
                # Anisotropic data: do 2D (in-plane) spatial augmentation.
                self.data_aug_params['dummy_2D'] = True
                self.print_to_log_file('Using dummy2d data augmentation')
                self.data_aug_params['elastic_deform_alpha'] = default_2D_augmentation_params['elastic_deform_alpha']
                self.data_aug_params['elastic_deform_sigma'] = default_2D_augmentation_params['elastic_deform_sigma']
                self.data_aug_params['rotation_x'] = default_2D_augmentation_params['rotation_x']
        else:
            self.do_dummy_2D_aug = False
            if ((max(self.patch_size) / min(self.patch_size)) > 1.5):
                # Elongated 2D patches: restrict rotation to +/- 15 degrees.
                default_2D_augmentation_params['rotation_x'] = (((((- 15.0) / 360) * 2.0) * np.pi), (((15.0 / 360) * 2.0) * np.pi))
            self.data_aug_params = default_2D_augmentation_params
        self.data_aug_params['mask_was_used_for_normalization'] = self.use_mask_for_norm
        if self.do_dummy_2D_aug:
            # In-plane patch enlargement only; the first axis keeps its size.
            self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range'])
            self.basic_generator_patch_size = np.array(([self.patch_size[0]] + list(self.basic_generator_patch_size)))
        else:
            self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range'])
        self.data_aug_params['scale_range'] = (0.7, 1.4)
        self.data_aug_params['do_elastic'] = False
        self.data_aug_params['selected_seg_channels'] = [0]
        self.data_aug_params['patch_size_for_spatialtransform'] = self.patch_size
        self.data_aug_params['num_cached_per_thread'] = 2

    def maybe_update_lr(self, epoch=None):
        """Set the LR via a poly schedule (exponent 0.9).

        When called without an epoch (from on_epoch_end-style hooks),
        self.epoch + 1 is used because self.epoch is not yet incremented.
        """
        if (epoch is None):
            ep = (self.epoch + 1)
        else:
            ep = epoch
        self.optimizer.param_groups[0]['lr'] = poly_lr(ep, self.max_num_epochs, self.initial_lr, 0.9)
        self.print_to_log_file('lr:', np.round(self.optimizer.param_groups[0]['lr'], decimals=6))

    def on_epoch_end(self):
        """Parent epoch-end bookkeeping plus a momentum rescue: if the mean
        foreground Dice is still 0 at epoch 100, momentum is lowered to
        0.95 and the network is reinitialized.  Returns whether training
        should continue (epoch budget not exhausted)."""
        super().on_epoch_end()
        continue_training = (self.epoch < self.max_num_epochs)
        if (self.epoch == 100):
            if (self.all_val_eval_metrics[(- 1)] == 0):
                self.optimizer.param_groups[0]['momentum'] = 0.95
                self.network.apply(InitWeights_He(0.01))
                self.print_to_log_file('At epoch 100, the mean foreground Dice was 0. This can be caused by a too high momentum. High momentum (0.99) is good for datasets where it works, but sometimes causes issues such as this one. Momentum has now been reduced to 0.95 and network weights have been reinitialized')
        return continue_training

    def run_training(self):
        """Run the parent training loop with deep supervision enabled and
        the LR set for the (possibly resumed) current epoch; restores the
        previous do_ds flag afterwards."""
        self.maybe_update_lr(self.epoch)
        ds = self.network.do_ds
        self.network.do_ds = True
        ret = super().run_training()
        self.network.do_ds = ds
        return ret
class FlaxLogitsWarper(metaclass=DummyObject):
    """Import-guard placeholder used when Flax is not installed; any
    attempt to instantiate it raises a helpful error via
    requires_backends."""
    # Backend(s) whose absence this dummy reports.
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def check_md5_hash(path, md5_hash):
    """Verify that the file at *path* has the MD5 hex digest *md5_hash*.

    The file is read in 4 KiB chunks so arbitrarily large files are
    checked with constant memory.  On mismatch a diagnostic is printed
    and the process exits with status 1; on success the function simply
    returns None.
    """
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter((lambda : f.read(4096)), b''):
            digest.update(chunk)
    computed_md5_hash = digest.hexdigest()
    if (md5_hash != computed_md5_hash):
        # Fix: the message used to print '{} == {}' even though it is
        # emitted precisely when the hashes differ.
        print('MD5 mismatch for {}: {} != {}'.format(path, md5_hash, computed_md5_hash))
        sys.exit(1)
class Superpixels(meta.Augmenter):
    """Augmenter that (partially) replaces images by their superpixel
    representation: SLIC segments whose pixels are set to the segment's
    mean intensity, per channel.

    p_replace: per-segment probability of being replaced by its mean.
    n_segments: (approximate) number of SLIC segments to generate.
    max_size: images larger than this on the longest side are downscaled
        before segmentation (and upscaled back afterwards); None disables.
    interpolation: interpolation used for that resizing.
    """

    def __init__(self, p_replace=0, n_segments=100, max_size=128, interpolation='linear', name=None, deterministic=False, random_state=None):
        super(Superpixels, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        # Both parameters accept scalars, (a, b) tuples (uniform sampling)
        # or lists (random choice).
        self.p_replace = iap.handle_probability_param(p_replace, 'p_replace', tuple_to_uniform=True, list_to_choice=True)
        self.n_segments = iap.handle_discrete_param(n_segments, 'n_segments', value_range=(1, None), tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
        self.max_size = max_size
        self.interpolation = interpolation

    def _augment_images(self, images, random_state, parents, hooks):
        # Float and oversized-int dtypes are rejected up front.
        iadt.gate_dtypes(images, allowed=['bool', 'uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16', 'int32', 'int64'], disallowed=['uint128', 'uint256', 'int128', 'int256', 'float16', 'float32', 'float64', 'float96', 'float128', 'float256'], augmenter=self)
        nb_images = len(images)
        # One RNG for the per-image n_segments draw, plus one RNG per image.
        rss = ia.derive_random_states(random_state, (1 + nb_images))
        n_segments_samples = self.n_segments.draw_samples((nb_images,), random_state=rss[0])
        for (i, (image, rs)) in enumerate(zip(images, rss[1:])):
            # One replace decision per (intended) segment.
            replace_samples = self.p_replace.draw_samples((n_segments_samples[i],), random_state=rs)
            if (np.max(replace_samples) == 0):
                # Nothing would be replaced; skip the (expensive) SLIC call.
                pass
            else:
                image = images[i]
                (min_value, _center_value, max_value) = iadt.get_value_range_of_dtype(image.dtype)
                orig_shape = image.shape
                if (self.max_size is not None):
                    # Downscale first to bound segmentation cost.
                    size = max(image.shape[0], image.shape[1])
                    if (size > self.max_size):
                        resize_factor = (self.max_size / size)
                        (new_height, new_width) = (int((image.shape[0] * resize_factor)), int((image.shape[1] * resize_factor)))
                        image = ia.imresize_single_image(image, (new_height, new_width), interpolation=self.interpolation)
                image_sp = np.copy(image)
                segments = segmentation.slic(image, n_segments=n_segments_samples[i], compactness=10)
                nb_channels = image.shape[2]
                for c in sm.xrange(nb_channels):
                    # regionprops ignores label 0, hence segments + 1.
                    regions = measure.regionprops((segments + 1), intensity_image=image[(..., c)])
                    for (ridx, region) in enumerate(regions):
                        # replace_samples is indexed modulo its length, so
                        # extra segments reuse earlier replace decisions.
                        if (replace_samples[(ridx % len(replace_samples))] >= 0.5):
                            mean_intensity = region.mean_intensity
                            image_sp_c = image_sp[(..., c)]
                            if (image_sp_c.dtype.kind in ['i', 'u', 'b']):
                                # Integer-like dtype: round and clip into range.
                                value = int(np.round(mean_intensity))
                                value = min(max(value, min_value), max_value)
                                image_sp_c[(segments == ridx)] = value
                            else:
                                image_sp_c[(segments == ridx)] = mean_intensity
                if (orig_shape != image.shape):
                    # Restore the original resolution if we downscaled.
                    image_sp = ia.imresize_single_image(image_sp, orig_shape[0:2], interpolation=self.interpolation)
                images[i] = image_sp
        return images

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # Superpixel replacement changes intensities, not geometry.
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # Geometry is unchanged, so keypoints pass through untouched.
        return keypoints_on_images

    def get_parameters(self):
        return [self.p_replace, self.n_segments, self.max_size, self.interpolation]
def load_clip(device):
    """Load the openai/clip-vit-base-patch32 checkpoint.

    Returns a (model, processor, tokenizer) triple with the model moved
    to *device*.
    """
    checkpoint = 'openai/clip-vit-base-patch32'
    model = CLIPModel.from_pretrained(checkpoint).to(device)
    processor = CLIPProcessor.from_pretrained(checkpoint)
    tokenizer = CLIPTokenizer.from_pretrained(checkpoint)
    return (model, processor, tokenizer)
def ndc_projection(x=0.1, n=1.0, f=50.0):
    """Return a 4x4 OpenGL-style perspective projection matrix (float32).

    x: extent of the near plane (presumably its half-width — scales the
    focal terms), n/f: near and far clip distances.
    """
    depth_a = -(f + n) / (f - n)
    depth_b = -(2 * f * n) / (f - n)
    return np.array(
        [[n / x, 0.0, 0.0, 0.0],
         [0.0, -(n / x), 0.0, 0.0],
         [0.0, 0.0, depth_a, depth_b],
         [0.0, 0.0, -1.0, 0.0]],
        dtype=np.float32)
# Fix: the bare `_arg_scope` expression was a truncated `@slim.add_arg_scope`
# decorator (as in upstream TF-slim mobilenet_v2); restored so the module no
# longer raises NameError at import time and the function participates in
# arg_scope again.
@slim.add_arg_scope
def mobilenet(input_tensor, num_classes=1001, depth_multiplier=1.0, scope='MobilenetV2', conv_defs=None, finegrain_classification_mode=False, min_depth=None, divisible_by=None, activation_fn=None, **kwargs):
    """MobilenetV2 classification network.

    input_tensor: input image tensor.
    num_classes: number of output classes (1001 = ImageNet + background).
    depth_multiplier: width multiplier applied to each layer's channels.
    scope: variable scope name.
    conv_defs: architecture spec; defaults to V2_DEF.
    finegrain_classification_mode: when True and depth_multiplier < 1,
        keep the final conv layer at full width (its declared width is
        divided by the multiplier so the global multiply restores it).
    min_depth / divisible_by: optional per-layer depth constraints,
        forwarded to the depth_multiplier op only when provided.
    activation_fn: optional activation override in the conv defaults.
    **kwargs: forwarded to lib.mobilenet.

    Raises ValueError if the legacy 'multiplier' kwarg is used.
    """
    if (conv_defs is None):
        conv_defs = V2_DEF
    if ('multiplier' in kwargs):
        raise ValueError('mobilenetv2 doesn\'t support generic multiplier parameter use "depth_multiplier" instead.')
    if finegrain_classification_mode:
        # Deep-copy before mutating so the shared V2_DEF is not modified.
        conv_defs = copy.deepcopy(conv_defs)
        if (depth_multiplier < 1):
            conv_defs['spec'][(- 1)].params['num_outputs'] /= depth_multiplier
    if activation_fn:
        conv_defs = copy.deepcopy(conv_defs)
        defaults = conv_defs['defaults']
        conv_defaults = defaults[(slim.conv2d, slim.fully_connected, slim.separable_conv2d)]
        conv_defaults['activation_fn'] = activation_fn
    depth_args = {}
    # Only pass constraints that were explicitly given, so the
    # depth_multiplier op keeps its own defaults otherwise.
    if (min_depth is not None):
        depth_args['min_depth'] = min_depth
    if (divisible_by is not None):
        depth_args['divisible_by'] = divisible_by
    with slim.arg_scope((lib.depth_multiplier,), **depth_args):
        return lib.mobilenet(input_tensor, num_classes=num_classes, conv_defs=conv_defs, scope=scope, multiplier=depth_multiplier, **kwargs)
class ptb_rum_single_config(object):
    """Hyperparameter bundle for a single-layer RUM cell on the Penn
    Treebank dataset; consumed as plain class attributes by the
    training script."""
    cell = 'rum'  # recurrent cell type
    num_steps = 150  # unrolled sequence length (BPTT window)
    learning_rate = 0.002
    T_norm = 1.0  # RUM norm parameter — presumably the hidden-state target norm; confirm against the cell implementation
    num_layers = 1
    init_scale = 0.01  # scale of the initial weight distribution
    max_grad_norm = 1.0  # gradient clipping threshold
    cell_size = 2000  # hidden state size
    embed_size = 128
    max_epoch = 100  # epochs at the initial learning rate
    max_max_epoch = max_epoch  # total epochs (no decay phase: equal to max_epoch)
    keep_prob = 0.65  # dropout keep probability
    zoneout_h = 0.9  # zoneout rate on the hidden state
    lr_decay = 0.1
    batch_size = 128
    vocab_size = 50  # NOTE(review): 50 suggests character-level PTB (word-level is ~10k) — confirm
    use_layer_norm = True
    use_zoneout = True
    dataset = 'ptb'
class TestTranslation(unittest.TestCase):
    """End-to-end smoke tests: preprocess dummy data, train a tiny model of
    each architecture for a few steps, then run generation.

    Fix: four GPU-only tests had their ``@unittest.skipIf`` decorators
    mangled into no-op bare tuples, so they would run (and fail) on
    CPU-only machines; the decorators are restored below.
    """

    def setUp(self):
        # Silence fairseq's verbose logging during the tests.
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_fconv(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en')
                generate_main(data_dir)

    def test_raw(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv_raw') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--dataset-impl', 'raw'])
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--dataset-impl', 'raw'])
                generate_main(data_dir, ['--dataset-impl', 'raw'])

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fp16') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--fp16'])
                generate_main(data_dir)

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_memory_efficient_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_memory_efficient_fp16') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--memory-efficient-fp16'])
                generate_main(data_dir)

    def test_update_freq(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_update_freq') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--update-freq', '3'])
                generate_main(data_dir)

    def test_max_positions(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_max_positions') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                # Too-short max positions must fail with a helpful message...
                with self.assertRaises(Exception) as context:
                    train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5'])
                self.assertTrue(('skip this example with --skip-invalid-size-inputs-valid-test' in str(context.exception)))
                # ...and succeed once the skip flag is given.
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5', '--skip-invalid-size-inputs-valid-test'])
                with self.assertRaises(Exception) as context:
                    generate_main(data_dir)
                generate_main(data_dir, ['--skip-invalid-size-inputs-valid-test'])

    def test_generation(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_sampling') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en')
                generate_main(data_dir, ['--sampling', '--temperature', '2', '--beam', '2', '--nbest', '2'])
                generate_main(data_dir, ['--sampling', '--sampling-topk', '3', '--beam', '2', '--nbest', '2'])
                generate_main(data_dir, ['--sampling', '--sampling-topp', '0.2', '--beam', '2', '--nbest', '2'])
                generate_main(data_dir, ['--diversity-rate', '0.5', '--beam', '6'])
                # Incompatible flag combination must be rejected.
                with self.assertRaises(ValueError):
                    generate_main(data_dir, ['--diverse-beam-groups', '4', '--match-source-len'])
                generate_main(data_dir, ['--prefix-size', '2'])

    def test_eval_bleu(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_eval_bleu') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--eval-bleu', '--eval-bleu-print-samples', '--eval-bleu-remove-bpe', '--eval-bleu-detok', 'space', '--eval-bleu-args', '{"beam": 4, "min_len": 10}'])

    def test_lstm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lstm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lstm_wiseman_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8'])
                generate_main(data_dir)

    def test_lstm_bidirectional(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lstm_bidirectional') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lstm', ['--encoder-layers', '2', '--encoder-bidirectional', '--encoder-hidden-size', '16', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8', '--decoder-layers', '2'])
                generate_main(data_dir)

    def test_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'], run_validation=True)
                generate_main(data_dir)

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_transformer_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--fp16'], run_validation=True)
                generate_main(data_dir)

    def test_multilingual_transformer(self):
        # Exercise every combination of encoder/decoder language-token flags.
        encoder_langtok_flags = [[], ['--encoder-langtok', 'src'], ['--encoder-langtok', 'tgt']]
        decoder_langtok_flags = [[], ['--decoder-langtok']]
        with contextlib.redirect_stdout(StringIO()):
            for i in range(len(encoder_langtok_flags)):
                for j in range(len(decoder_langtok_flags)):
                    enc_ltok_flag = encoder_langtok_flags[i]
                    dec_ltok_flag = decoder_langtok_flags[j]
                    with tempfile.TemporaryDirectory(f'test_multilingual_transformer_{i}_{j}') as data_dir:
                        create_dummy_data(data_dir)
                        preprocess_translation_data(data_dir)
                        train_translation_model(data_dir, arch='multilingual_transformer', task='multilingual_translation', extra_flags=((['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'] + enc_ltok_flag) + dec_ltok_flag), lang_flags=['--lang-pairs', 'in-out,out-in'], run_validation=True, extra_valid_flags=(enc_ltok_flag + dec_ltok_flag))
                        generate_main(data_dir, extra_flags=((['--task', 'multilingual_translation', '--lang-pairs', 'in-out,out-in', '--source-lang', 'in', '--target-lang', 'out'] + enc_ltok_flag) + dec_ltok_flag))

    def test_transformer_cross_self_attention(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer_cross_self_attention') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-embed-dim', '8', '--no-cross-attention', '--cross-self-attention'], run_validation=True)
                generate_main(data_dir, extra_flags=[])

    def test_lightconv(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lightconv') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lightconv_iwslt_de_en', ['--encoder-conv-type', 'lightweight', '--decoder-conv-type', 'lightweight', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'])
                generate_main(data_dir)

    def test_dynamicconv(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_dynamicconv') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lightconv_iwslt_de_en', ['--encoder-conv-type', 'dynamic', '--decoder-conv-type', 'dynamic', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'])
                generate_main(data_dir)

    def test_cmlm_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_cmlm_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'cmlm_transformer', ['--apply-bert-init', '--criterion', 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', '--length-loss-factor', '0.1'], task='translation_lev')
                generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_levenshtein_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_levenshtein_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'levenshtein_transformer', ['--apply-bert-init', '--early-exit', '6,6,6', '--criterion', 'nat_loss'], task='translation_lev')
                generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])

    def test_nonautoregressive_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_nonautoregressive_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'nonautoregressive_transformer', ['--apply-bert-init', '--src-embedding-copy', '--criterion', 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', '--length-loss-factor', '0.1'], task='translation_lev')
                generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '0', '--iter-decode-eos-penalty', '0', '--print-step'])

    def test_iterative_nonautoregressive_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_iterative_nonautoregressive_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'iterative_nonautoregressive_transformer', ['--apply-bert-init', '--src-embedding-copy', '--criterion', 'nat_loss', '--noise', 'full_mask', '--stochastic-approx', '--dae-ratio', '0.5', '--train-step', '3'], task='translation_lev')
                generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])

    def test_insertion_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_insertion_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'insertion_transformer', ['--apply-bert-init', '--criterion', 'nat_loss', '--noise', 'random_mask'], task='translation_lev')
                generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])

    def test_mixture_of_experts(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_moe') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--task', 'translation_moe', '--user-dir', 'examples/translation_moe/src', '--method', 'hMoElp', '--mean-pool-gating-network', '--num-experts', '3', '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'])
                generate_main(data_dir, ['--task', 'translation_moe', '--user-dir', 'examples/translation_moe/src', '--method', 'hMoElp', '--mean-pool-gating-network', '--num-experts', '3', '--gen-expert', '0'])

    def test_alignment(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_alignment') as data_dir:
                create_dummy_data(data_dir, alignment=True)
                preprocess_translation_data(data_dir, ['--align-suffix', 'align'])
                train_translation_model(data_dir, 'transformer_align', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--load-alignments', '--alignment-layer', '1', '--criterion', 'label_smoothed_cross_entropy_with_alignment'], run_validation=True)
                generate_main(data_dir)
class _Calibrator(object):
    """Scores centerline-accuracy reconstruction on frames with known
    (labelled) skeletons and writes a per-video calibration file.

    dataset: provider of frames and labelled features.
    results_dir: directory for the per-video '<name>_calibration.h5' files.
    image_shape: frame shape passed to CenterlineAccuracyCheck.
    num_samples: maximum number of labelled frames sampled per video.
    theta_dims: dimensionality of the skeleton angle representation.
    """

    def __init__(self, dataset: Dataset, results_dir: str, image_shape: Tuple[(int, int)], num_samples: int, theta_dims: int):
        self.dataset = dataset
        self.results_dir = results_dir
        self.image_shape = image_shape
        self.num_samples = num_samples
        self.theta_dims = theta_dims

    def __call__(self, video_name: str, writer: _ScoresWriter):
        """Score a random sample of labelled frames of *video_name* and
        write the collected results; returns the results file path."""
        features = self.dataset.features_dataset[video_name]
        labelled_thetas = skeletons_to_angles(features.skeletons, theta_dims=self.theta_dims)
        labelled_indexes = features.labelled_indexes
        centerline_accuracy = CenterlineAccuracyCheck(frame_preprocessing=self.dataset.frame_preprocessing, image_shape=self.image_shape)
        with self.dataset.frames_dataset.open(video_name) as frames:
            # Sample labelled frames without replacement, capped by how
            # many are available.
            frames_amount = min(self.num_samples, len(labelled_indexes))
            random_label_index = np.random.choice(labelled_indexes, frames_amount, replace=False)
            thetas = labelled_thetas[random_label_index]
            for (theta, index) in zip(thetas, random_label_index):
                cur_frame = frames[index]
                # Template and "real" frame are deliberately the same frame:
                # this measures pure reconstruction quality of known skeletons.
                (score, _) = centerline_accuracy(theta=theta, template_skeleton=features.skeletons[index], template_measurements=features.measurements, template_frame=cur_frame, real_frame_orig=cur_frame)
                # The writer extracts what it needs from the local scope.
                writer.add(locals())
        results_file = os.path.join(self.results_dir, (video_name + '_calibration.h5'))
        if os.path.exists(results_file):
            # Overwrite any stale results from a previous run.
            os.remove(results_file)
        writer.write(results_file=results_file)
        logger.info(f'Evaluated known skeletons reconstruction for {video_name}, average score {np.mean(writer.all_scores):.4f}')
        return results_file
class TFElectraModel():
    """Import-guard placeholder for TFElectraModel when TensorFlow is not
    installed: instantiation and ``from_pretrained`` both raise a helpful
    error via requires_tf."""

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Fix: must be a classmethod — callers invoke
        # ``TFElectraModel.from_pretrained(...)`` on the class, and the
        # dummy can never be instantiated (``__init__`` raises), so an
        # instance method here was uncallable by design.
        requires_tf(cls)
class RetinaPolicy(object):
    """Augmentation policy for retinal vessel segmentation: random crop,
    rotation, translation, scaling and optional brightness jitter, with
    the geometric transforms applied identically to image and mask.

    scale_ranges: min/max factor for the random-scale affine.
    img_size: image size handed to RandomAffine.get_params.
    translate: maximum relative translation (x, y).
    rotation: rotation range in degrees.
    crop_dims: (height, width) of the random crop.
    brightness: ColorJitter brightness factor, or None to disable.
    """

    def __init__(self, scale_ranges=[1, 1.1], img_size=[512, 512], translate=[0.1, 0.1], rotation=[(- 20), 20], crop_dims=[480, 480], brightness=None):
        # NOTE(review): mutable default arguments are shared across all
        # instances; they are only read here, but beware if ever mutated.
        self.scale_ranges = scale_ranges
        self.img_size = img_size
        self.translate = translate
        self.rotation = rotation
        self.brightness = brightness
        self.crop_dims = crop_dims

    def __call__(self, image, mask=None):
        """Apply the pipeline to *image* (and *mask*, if given).

        Each geometric transform samples its parameters once and reuses
        them for the mask, keeping image and mask aligned.  Returns the
        transformed image, or an (image, mask) tuple when a mask was
        supplied.
        """
        tf_mask = None
        # Random crop: same window for image and mask.
        (i, j, h, w) = transforms.RandomCrop.get_params(image, self.crop_dims)
        tf_image = transforms.functional.crop(image, i, j, h, w)
        if (mask is not None):
            tf_mask = transforms.functional.crop(mask, i, j, h, w)
        # Random rotation with a shared angle.
        angle = transforms.RandomRotation.get_params(self.rotation)
        tf_image = transforms.functional.rotate(tf_image, angle)
        if (mask is not None):
            tf_mask = transforms.functional.rotate(tf_mask, angle)
        # Random translation (scale pinned to 1).
        params = transforms.RandomAffine.get_params(degrees=[0, 0], translate=self.translate, scale_ranges=[1, 1], img_size=self.img_size, shears=[0, 0])
        tf_image = transforms.functional.affine(tf_image, params[0], params[1], params[2], params[3])
        if (mask is not None):
            tf_mask = transforms.functional.affine(tf_mask, params[0], params[1], params[2], params[3])
        # Random scaling (translation pinned to 0).
        params = transforms.RandomAffine.get_params(degrees=[0, 0], translate=[0, 0], scale_ranges=self.scale_ranges, img_size=self.img_size, shears=[0, 0])
        tf_image = transforms.functional.affine(tf_image, params[0], params[1], params[2], params[3])
        if (mask is not None):
            tf_mask = transforms.functional.affine(tf_mask, params[0], params[1], params[2], params[3])
        # Photometric jitter applies to the image only, never the mask.
        if (self.brightness is not None):
            tf = transforms.ColorJitter(brightness=self.brightness)
            tf_image = tf(tf_image)
        if (mask is not None):
            return (tf_image, tf_mask)
        else:
            return tf_image

    def __repr__(self):
        return 'Retinal Vessel Segmentation Augmentation Policy'
def reduce_df(dataframe, num_per_class, num_classes=10, class_size=5000):
    """Return the first `num_per_class` rows of each contiguous class block.

    Assumes `dataframe` is sorted by class into `num_classes` contiguous
    blocks of `class_size` rows each (e.g. CIFAR-10 style: 10 x 5000).

    Generalization: the original hard-coded 10 classes of 5000 rows; these
    are now parameters with the same defaults, so existing callers are
    unaffected.

    Returns a new DataFrame (original index values are preserved).
    """
    chunks = [dataframe.iloc[(i * class_size):((i * class_size) + num_per_class)]
              for i in range(num_classes)]
    return pd.concat(chunks)
def get_model_fn(n_token, cutoffs, train_bin_sizes, eval_bin_sizes):
    """Build a TF1 TPUEstimator `model_fn` for a Transformer-XL language model.

    Args:
        n_token: vocabulary size.
        cutoffs: adaptive-softmax cutoffs.
        train_bin_sizes / eval_bin_sizes: bucket sizes for the permutation
            inputs; a falsy value disables the permutation path.

    Returns:
        A `model_fn(features, labels, mode, params)` suitable for
        tf.contrib.tpu.TPUEstimator; configuration comes from module-level
        FLAGS.
    """
    def model_fn(features, labels, mode, params):
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        batch_size = params['batch_size']
        mems = params['cache']
        # The model expects time-major tensors: [len, batch].
        inp = tf.transpose(features['inputs'], [1, 0])
        tgt = tf.transpose(features['labels'], [1, 0])
        bin_sizes = (train_bin_sizes if is_training else eval_bin_sizes)
        if bin_sizes:
            inp_perms = [tf.transpose(features['inp_mask'], [1, 0])]
            tgt_perms = [tf.transpose(features['tgt_mask'], [1, 0])]
            head_tgt = tf.transpose(features['head_labels'], [1, 0])
            for b in range(len(bin_sizes)):
                inp_perm = tf.transpose(features['inp_perm_{}'.format(b)], [1, 0, 2])
                tgt_perm = tf.transpose(features['tgt_perm_{}'.format(b)], [1, 0, 2])
                inp_perms.append(inp_perm)
                tgt_perms.append(tgt_perm)
        else:
            (inp_perms, tgt_perms, head_tgt) = (None, None, None)
        # NOTE(review): `initializer` is undefined if FLAGS.init is neither
        # 'uniform' nor 'normal'; the flag is assumed validated upstream.
        if (FLAGS.init == 'uniform'):
            initializer = tf.initializers.random_uniform(minval=(- FLAGS.init_range), maxval=FLAGS.init_range, seed=None)
        elif (FLAGS.init == 'normal'):
            initializer = tf.initializers.random_normal(stddev=FLAGS.init_std, seed=None)
        # NOTE(review): proj_initializer is never passed to model.transformer
        # below -- confirm whether it should be forwarded.
        proj_initializer = tf.initializers.random_normal(stddev=FLAGS.proj_init_std, seed=None)
        # Optionally tie all projection matrices except the head's.
        tie_projs = [False for _ in range((len(cutoffs) + 1))]
        if FLAGS.proj_share_all_but_first:
            for i in range(1, len(tie_projs)):
                tie_projs[i] = True
        tf.logging.info('Vocab size : {}'.format(n_token))
        tf.logging.info('Batch size : {}'.format(batch_size))
        (loss, new_mems) = model.transformer(dec_inp=inp, target=tgt, mems=mems, n_token=n_token, n_layer=FLAGS.n_layer, d_model=FLAGS.d_model, d_embed=FLAGS.d_embed, n_head=FLAGS.n_head, d_head=FLAGS.d_head, d_inner=FLAGS.d_inner, dropout=FLAGS.dropout, dropatt=FLAGS.dropatt, initializer=initializer, is_training=is_training, mem_len=FLAGS.mem_len, cutoffs=cutoffs, div_val=FLAGS.div_val, tie_projs=tie_projs, input_perms=inp_perms, target_perms=tgt_perms, head_target=head_tgt, same_length=FLAGS.same_length, clamp_len=FLAGS.clamp_len, use_tpu=FLAGS.use_tpu, untie_r=FLAGS.untie_r, proj_same_dim=FLAGS.proj_same_dim)
        total_loss = tf.reduce_mean(loss)
        if (mode == tf.estimator.ModeKeys.EVAL):
            if FLAGS.use_tpu:
                # Average the loss across all replicas.
                with tf.colocate_with(total_loss):
                    total_loss = ((tf.contrib.tpu.cross_replica_sum(total_loss) / FLAGS.num_hosts) / FLAGS.num_core_per_host)
            metric_loss = tf.tile(tf.reshape(total_loss, [1, 1]), [batch_size, 1])
            eval_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=(metric_fn, [metric_loss]))
            eval_spec.cache = new_mems
            return eval_spec
        # Linear warmup followed by cosine decay down to min_lr_ratio.
        global_step = tf.train.get_global_step()
        if (FLAGS.warmup_steps > 0):
            warmup_lr = ((tf.to_float(global_step) / tf.to_float(FLAGS.warmup_steps)) * FLAGS.learning_rate)
        else:
            warmup_lr = 0.0
        num_params = np.sum([np.prod(v.shape) for v in tf.trainable_variables()])
        tf.logging.info('#params: {}'.format(num_params))
        decay_lr = tf.train.cosine_decay(FLAGS.learning_rate, global_step=(global_step - FLAGS.warmup_steps), decay_steps=(FLAGS.train_steps - FLAGS.warmup_steps), alpha=FLAGS.min_lr_ratio)
        learning_rate = tf.where((global_step < FLAGS.warmup_steps), warmup_lr, decay_lr)
        if FLAGS.use_tpu:
            optimizer = tf.contrib.tpu.CrossShardOptimizer(tf.train.AdamOptimizer(learning_rate=learning_rate))
        else:
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        grads_and_vars = optimizer.compute_gradients(total_loss)
        (gradients, variables) = zip(*grads_and_vars)
        (clipped, _) = tf.clip_by_global_norm(gradients, FLAGS.clip)
        train_op = optimizer.apply_gradients(zip(clipped, variables), global_step=tf.train.get_global_step())
        train_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op)
        if (FLAGS.mem_len < FLAGS.tgt_len):
            # BUG FIX: the original sliced the *list* of per-layer memories
            # (`new_mems[:FLAGS.mem_len]`, leaving loop variable `mem_t`
            # unused), producing a list of lists. Slice each layer's memory
            # tensor instead.
            new_mems = [mem_t[:FLAGS.mem_len] for mem_t in new_mems]
        train_spec.cache = new_mems
        return train_spec
    return model_fn
def fix_parentheses(string):
    """Return `string` with parentheses balanced.

    Unmatched closing parentheses are dropped where they occur; one ')' is
    appended at the end for every unmatched opening parenthesis. All other
    characters pass through unchanged.
    """
    open_count = 0
    kept = []
    for ch in string:
        if ch == '(':
            open_count += 1
            kept.append(ch)
        elif ch == ')':
            # Keep a ')' only when it closes a still-open '('.
            if open_count > 0:
                open_count -= 1
                kept.append(ch)
        else:
            kept.append(ch)
    # Close every '(' that never found its partner.
    return ''.join(kept) + (')' * open_count)
def test_isotropic_eddington_dehnencore_in_nfw_sigmar():
    """Check that Eddington-inversion samples of a cored Dehnen sphere embedded
    in an NFW potential reproduce the radial velocity dispersion predicted by
    the spherical Jeans equation."""
    pot = [potential.NFWPotential(amp=2.3, a=1.3)]
    denspot = potential.DehnenCoreSphericalPotential(amp=2.5, a=1.15)
    dfp = eddingtondf(pot=pot, denspot=denspot)
    # Fixed seed keeps the Monte-Carlo comparison deterministic.
    numpy.random.seed(10)
    samp = dfp.sample(n=1000000)
    # Relative tolerance on sigma_r for the sampled vs. Jeans comparison.
    tol = 0.08
    # Compare over one decade in radius around the potential's scale radius.
    check_sigmar_against_jeans(samp, pot, tol, dens=(lambda r: denspot.dens(r, 0, use_physical=False)), rmin=(pot[0]._scale / 10.0), rmax=(pot[0]._scale * 10.0), bins=31)
    return None
def print_cfg(blocks):
    """Print parsed config blocks back out in Darknet-style .cfg format.

    Each block becomes a `[type]` header followed by `key=value` lines and a
    trailing blank line.
    """
    for block in blocks:
        print(f"[{block['type']}]")
        for key, value in block.items():
            if key == 'type':
                continue  # 'type' is the section header, not a key/value entry
            print(f'{key}={value}')
        print('')
def test_lambda_metric():
    """LambdaMetric should sum extracted values in train mode and double that
    sum in evaluate mode (5 steps of test_property -> 15 and 30)."""
    env = MockEnv()
    metric = ph.metrics.LambdaMetric(
        extract_fn=(lambda e: e.test_property),
        train_reduce_fn=(lambda vals: np.sum(vals)),
        eval_reduce_fn=(lambda vals: (np.sum(vals) * 2)),
    )
    extracted = []
    for _ in range(5):
        env.step()
        extracted.append(metric.extract(env))
    assert (metric.reduce(extracted, mode='train') == 15.0)
    assert (metric.reduce(extracted, mode='evaluate') == 30.0)
class ResUNetBN2F(ResUNet2):
    """ResUNet2 variant using batch normalization with the "F" channel widths."""
    NORM_TYPE = 'BN'  # selects batch normalization in the ResUNet2 base class
    # Index 0 is None so the entries line up with 1-based network levels;
    # presumably CHANNELS are the encoder widths and TR_CHANNELS the
    # transposed-conv (decoder) widths -- confirm against ResUNet2.
    CHANNELS = [None, 16, 32, 64, 128]
    TR_CHANNELS = [None, 16, 32, 64, 128]
class Encoder(nn.Module):
    """Convolutional encoder mapping a `size` x `size` image with `cin`
    channels to a flat `cout`-dimensional code.

    Four stride-2 4x4 convolutions downsample to 1/16 resolution; one extra
    stride-2 stage is added per factor of two of `size` above 64; a 4x4
    valid convolution collapses the spatial dims and a 1x1 convolution
    produces `cout` channels, optionally followed by `activation`.
    """
    def __init__(self, cin, cout, size=64, nf=64, activation=nn.Tanh):
        super(Encoder, self).__init__()
        # Number of additional downsampling stages beyond the base four.
        num_extra = int((np.log2(size) - 6))
        layers = []
        layers += [nn.Conv2d(cin, nf, kernel_size=4, stride=2, padding=1, bias=False),
                   nn.ReLU(inplace=True)]
        layers += [nn.Conv2d(nf, (nf * 2), kernel_size=4, stride=2, padding=1, bias=False),
                   nn.ReLU(inplace=True)]
        layers += [nn.Conv2d((nf * 2), (nf * 4), kernel_size=4, stride=2, padding=1, bias=False),
                   nn.ReLU(inplace=True)]
        layers += [nn.Conv2d((nf * 4), (nf * 8), kernel_size=4, stride=2, padding=1, bias=False),
                   nn.ReLU(inplace=True)]
        for _ in range(num_extra):
            # Doubling nf first makes each extra stage's input width equal to
            # the previous stage's output width (old nf*8 == new nf*4).
            nf *= 2
            layers += [nn.Conv2d((nf * 4), (nf * 8), kernel_size=4, stride=2, padding=1, bias=False),
                       nn.ReLU(inplace=True)]
        layers += [nn.Conv2d((nf * 8), (nf * 8), kernel_size=4, stride=1, padding=0, bias=False),
                   nn.ReLU(inplace=True)]
        layers += [nn.Conv2d((nf * 8), cout, kernel_size=1, stride=1, padding=0, bias=False)]
        if activation is not None:
            layers.append(activation())
        self.network = nn.Sequential(*layers)
    def forward(self, input):
        """Encode a (batch, cin, size, size) tensor to (batch, cout)."""
        return self.network(input).reshape(input.size(0), (- 1))
# NOTE(review): this looks like a registration decorator that lost its '@'
# during extraction (it should register the scheduler under the name
# 'cosine') -- confirm against the original fairseq source.
_lr_scheduler('cosine', dataclass=CosineLRScheduleConfig)
class CosineLRSchedule(FairseqLRScheduler):
    """Cosine learning-rate schedule with linear warmup and optional warm
    restarts (SGDR-style: each period grows by `t_mult`, and the peak/min LRs
    shrink by `lr_shrink` per restart)."""
    def __init__(self, cfg: CosineLRScheduleConfig, fairseq_optimizer):
        super().__init__(cfg, fairseq_optimizer)
        # Multiple fixed LRs are incompatible with a computed schedule.
        if (isinstance(cfg.lr, Collection) and (len(cfg.lr) > 1)):
            raise ValueError(f'Cannot use a fixed learning rate schedule with cosine. Consider --lr-scheduler=fixed instead. ({cfg.lr})')
        self.max_lr = (cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr)
        assert (self.max_lr > cfg.min_lr), f'max_lr (={cfg.lr}) must be more than min_lr (={cfg.min_lr})'
        warmup_end_lr = self.max_lr
        # A negative warmup_init_lr means "start the warmup from min_lr".
        if (cfg.warmup_init_lr < 0):
            cfg.warmup_init_lr = cfg.min_lr
        self.t_mult = cfg.t_mult  # period growth factor between restarts
        self.period = cfg.lr_period_updates  # length (in updates) of one cosine period
        if (self.period <= 0):
            # Fall back to a single period spanning the whole post-warmup run.
            assert (cfg.max_update > 0), 'Either --max_update or --lr-period-updates must be set'
            self.period = (cfg.max_update - cfg.warmup_updates)
        if (cfg.warmup_updates > 0):
            # Per-update LR increment during the linear warmup.
            self.lr_step = ((warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates)
        else:
            self.lr_step = 1
        self.warmup_updates = cfg.warmup_updates
        self.lr_shrink = cfg.lr_shrink
        # Begin at the warmup initial LR.
        self.lr = cfg.warmup_init_lr
        self.optimizer.set_lr(self.lr)
    def step(self, epoch, val_loss=None):
        """End-of-epoch hook; the actual schedule is driven by step_update."""
        super().step(epoch, val_loss)
        return self.optimizer.get_lr()
    def step_update(self, num_updates):
        """Update the learning rate after each optimizer update."""
        if (num_updates < self.cfg.warmup_updates):
            # Linear warmup phase.
            self.lr = (self.cfg.warmup_init_lr + (num_updates * self.lr_step))
        else:
            curr_updates = (num_updates - self.cfg.warmup_updates)
            if (self.t_mult != 1):
                # Geometric periods: solve for the restart index i such that
                # curr_updates falls inside period i, whose length is
                # t_mult**i * period.
                i = math.floor(math.log((1 - ((curr_updates / self.period) * (1 - self.t_mult))), self.t_mult))
                t_i = ((self.t_mult ** i) * self.period)
                t_curr = (curr_updates - (((1 - (self.t_mult ** i)) / (1 - self.t_mult)) * self.period))
            else:
                # Equal-length periods.
                i = math.floor((curr_updates / self.period))
                t_i = self.period
                t_curr = (curr_updates - (self.period * i))
            # Shrink both ends of the LR range once per completed restart.
            lr_shrink = (self.lr_shrink ** i)
            min_lr = (self.cfg.min_lr * lr_shrink)
            max_lr = (self.max_lr * lr_shrink)
            # Standard cosine annealing within the current period.
            self.lr = (min_lr + ((0.5 * (max_lr - min_lr)) * (1 + math.cos(((math.pi * t_curr) / t_i)))))
        self.optimizer.set_lr(self.lr)
        return self.lr
def encode_affinity(n_cpu_core=1, n_gpu=0, cpu_reserved=0, contexts_per_gpu=1, gpu_per_run=1, cpu_per_run=1, cpu_per_worker=1, async_sample=False, sample_gpu_per_run=0, optim_sample_share_gpu=False, hyperthread_offset=None, n_socket=None, run_slot=None, alternating=False, set_affinity=True):
    """Encode hardware/affinity settings into a compact string code.

    Each setting is rendered as "<value><TAG>" and joined with underscores;
    optional settings are emitted only when they differ from their defaults.
    `hyperthread_offset` and `n_socket` are auto-detected when None. When
    `run_slot` is given it is validated against available GPU slots and
    prefixed to the code.
    """
    if hyperthread_offset is None:
        hyperthread_offset = get_hyperthread_offset()
    if n_socket is None:
        n_socket = get_n_socket()
    # Mandatory components first, then optional ones in canonical order.
    parts = [f'{n_cpu_core}{N_CPU_CORE}', f'{n_gpu}{N_GPU}']
    if contexts_per_gpu > 1:
        parts.append(f'{contexts_per_gpu}{CONTEXTS_PER_GPU}')
    if gpu_per_run > 1:
        parts.append(f'{gpu_per_run}{GPU_PER_RUN}')
    if n_gpu == 0:
        # CPU-only runs encode the per-run CPU count instead of GPU layout.
        parts.append(f'{cpu_per_run}{CPU_PER_RUN}')
    if cpu_per_worker > 1:
        parts.append(f'{cpu_per_worker}{CPU_PER_WORKER}')
    if hyperthread_offset != n_cpu_core:
        parts.append(f'{hyperthread_offset}{HYPERTHREAD_OFFSET}')
    if n_socket > 1:
        parts.append(f'{n_socket}{N_SOCKET}')
    if cpu_reserved > 0:
        parts.append(f'{cpu_reserved}{CPU_RESERVED}')
    if async_sample:
        parts.append(f'1{ASYNC_SAMPLE}')
    if sample_gpu_per_run > 0:
        parts.append(f'{sample_gpu_per_run}{SAMPLE_GPU_PER_RUN}')
    if optim_sample_share_gpu:
        parts.append(f'1{OPTIM_SAMPLE_SHARE_GPU}')
    if alternating:
        parts.append(f'1{ALTERNATING}')
    if not set_affinity:
        parts.append(f'0{SET_AFFINITY}')
    affinity_code = '_'.join(parts)
    if run_slot is not None:
        # A slot index must fit within the available GPU run slots.
        assert run_slot <= ((n_gpu * contexts_per_gpu) // gpu_per_run)
        affinity_code = (f'{run_slot}{RUN_SLOT}_' + affinity_code)
    return affinity_code
# NOTE(review): this looks like a registration decorator that lost its '@'
# during extraction (it should register the task under this name) -- confirm
# against the original fairseq source.
_task('speech_to_text_wav2vec_triple_dataset')
class SpeechToTextTaskWav2VecTripleDataset(LegacyFairseqTask):
    """Fairseq speech-to-text task using the wav2vec triple-dataset pipeline."""
    # NOTE(review): takes no `self`/`cls`; in fairseq this is normally a
    # @staticmethod -- the decorator may have been stripped during extraction.
    def add_args(parser):
        """Register task-specific command-line arguments."""
        parser.add_argument('data', help='manifest root path')
        parser.add_argument('--config-yaml', type=str, default='config.yaml', help='Configuration YAML filename (under manifest root)')
        parser.add_argument('--max-source-positions', default=6000, type=int, metavar='N', help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence')
        parser.add_argument('--raw-wav', default=True, type=bool, help='whether to input raw wav file')
    def __init__(self, args, tgt_dict):
        super().__init__(args)
        check_import(import_successful)
        self.tgt_dict = tgt_dict
        # Dataset-level config lives next to the manifests.
        self.data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml))
    # NOTE(review): first parameter is `cls` but there is no @classmethod
    # decorator visible -- presumably stripped during extraction; confirm.
    def setup_task(cls, args, **kwargs):
        """Load the target dictionary and construct the task."""
        data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml))
        dict_path = op.join(args.data, data_cfg.vocab_filename)
        if (not op.isfile(dict_path)):
            raise FileNotFoundError(f'Dict not found: {dict_path}')
        tgt_dict = Dictionary.load(dict_path)
        logger.info(f'dictionary size ({data_cfg.vocab_filename}): {len(tgt_dict):,}')
        if (getattr(args, 'train_subset', None) is not None):
            if (not all((s.startswith('train') for s in args.train_subset.split(',')))):
                raise ValueError('Train splits should be named like "train*".')
        return cls(args, tgt_dict)
    def build_criterion(self, args):
        """Build the training criterion; enforces prefix handling when a
        target language tag is prepended as BOS."""
        from fairseq import criterions
        if (self.data_cfg.prepend_tgt_lang_tag and (args.ignore_prefix_size != 1)):
            raise ValueError('Please set "--ignore-prefix-size 1" since target language ID token is prepended as BOS.')
        return criterions.build_criterion(args, self)
    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load one data split from the TSV manifests into self.datasets."""
        is_train_split = split.startswith('train')
        pre_tokenizer = self.build_tokenizer(self.args)
        bpe_tokenizer = self.build_bpe(self.args)
        self.datasets[split] = SpeechToTextDatasetCreator.from_tsv(self.args.data, self.data_cfg, split, self.tgt_dict, pre_tokenizer, bpe_tokenizer, is_train_split=is_train_split, epoch=epoch, seed=self.args.seed)
    # NOTE(review): in fairseq tasks target_dictionary/source_dictionary are
    # normally @property -- the decorators may have been stripped; confirm.
    def target_dictionary(self):
        return self.tgt_dict
    def source_dictionary(self):
        # Speech input has no source-side token dictionary.
        return None
    def max_positions(self):
        return (self.args.max_source_positions, self.args.max_target_positions)
    def build_model(self, args):
        # Propagate input feature geometry from the data config to the model.
        args.input_feat_per_channel = self.data_cfg.input_feat_per_channel
        args.input_channels = self.data_cfg.input_channels
        return super(SpeechToTextTaskWav2VecTripleDataset, self).build_model(args)
    def build_generator(self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None):
        """Build a sequence generator that strips language-tag symbols."""
        if (self.data_cfg.prepend_tgt_lang_tag and (args.prefix_size != 1)):
            raise ValueError('Please set "--prefix-size 1" since target language ID token is prepended as BOS.')
        lang_token_ids = {i for (s, i) in self.tgt_dict.indices.items() if SpeechToTextDataset.is_lang_tag(s)}
        # NOTE(review): this overwrites any caller-supplied extra_gen_cls_kwargs
        # and the seq_gen_cls argument is not forwarded below -- confirm intended.
        extra_gen_cls_kwargs = {'symbols_to_strip_from_output': lang_token_ids}
        return super().build_generator(models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs)
    def build_tokenizer(self, args):
        """Build the pre-tokenizer declared in the data config."""
        logger.info(f'pre-tokenizer: {self.data_cfg.pre_tokenizer}')
        return encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer))
    def build_bpe(self, args):
        """Build the BPE tokenizer declared in the data config."""
        logger.info(f'tokenizer: {self.data_cfg.bpe_tokenizer}')
        return encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer))
    def get_interactive_tokens_and_lengths(self, lines, encode_fn):
        # For audio, "length" is the number of frames in each input file.
        n_frames = [get_features_or_waveform(p).shape[0] for p in lines]
        return (lines, n_frames)
    def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
        return SpeechToTextDataset('interactive', False, self.data_cfg, src_tokens, src_lengths)
class TrainSetTransform():
    """Optional augmentation pipeline for training samples.

    aug_mode 0 disables augmentation entirely; aug_mode 1 applies a small
    random z-axis rotation followed by a random flip. Any other mode is
    rejected.
    """
    def __init__(self, aug_mode):
        self.aug_mode = aug_mode
        if aug_mode == 0:
            steps = None  # no augmentation
        elif aug_mode == 1:
            steps = [RandomRotation(max_theta=5, max_theta2=0, axis=np.array([0, 0, 1])), RandomFlip([0.25, 0.25, 0.0])]
        else:
            raise NotImplementedError('Unknown aug_mode: {}'.format(aug_mode))
        self.transform = transforms.Compose(steps) if steps is not None else None
    def __call__(self, e):
        """Apply the configured augmentation (identity when disabled)."""
        return self.transform(e) if self.transform is not None else e
def call_output(cmd):
    """Run `cmd` in a shell, echoing the command and its captured output.

    Returns the raw bytes from check_output. Raises CalledProcessError on a
    non-zero exit status.

    NOTE(review): shell=True executes the string through the shell -- never
    pass untrusted input to this function.
    """
    print(f'Executing: {cmd}')
    output = check_output(cmd, shell=True)
    print(output)
    return output
class GatherViewer(MjViewer):
    """MuJoCo viewer for the gather task.

    Renders the environment plus the collectible objects: green balls
    (apples) and red balls (bombs), with highlighted variants for objects
    currently in the agent's view, and overlays a sensor-readings HUD using
    raw OpenGL.
    """
    def __init__(self, env):
        self.env = env
        super(GatherViewer, self).__init__()
        # One embedded sub-renderer per ball appearance, each backed by its
        # own single-body MuJoCo model loaded from XML.
        green_ball_model = MjModel(osp.abspath(osp.join(MODEL_DIR, 'green_ball.xml')))
        self.green_ball_renderer = EmbeddedViewer()
        self.green_ball_model = green_ball_model
        self.green_ball_renderer.set_model(green_ball_model)
        red_ball_model = MjModel(osp.abspath(osp.join(MODEL_DIR, 'red_ball.xml')))
        self.red_ball_renderer = EmbeddedViewer()
        self.red_ball_model = red_ball_model
        self.red_ball_renderer.set_model(red_ball_model)
        green_ball_highlighted_model = MjModel(osp.abspath(osp.join(MODEL_DIR, 'green_ball_highlighted.xml')))
        self.green_ball_highlighted_renderer = EmbeddedViewer()
        self.green_ball_highlighted_model = green_ball_highlighted_model
        self.green_ball_highlighted_renderer.set_model(green_ball_highlighted_model)
        red_ball_highlighted_model = MjModel(osp.abspath(osp.join(MODEL_DIR, 'red_ball_highlighted.xml')))
        self.red_ball_highlighted_renderer = EmbeddedViewer()
        self.red_ball_highlighted_model = red_ball_highlighted_model
        self.red_ball_highlighted_renderer.set_model(red_ball_highlighted_model)
    def start(self):
        """Start the main viewer and all embedded ball renderers on one window."""
        super(GatherViewer, self).start()
        self.green_ball_renderer.start(self.window)
        self.red_ball_renderer.start(self.window)
        self.green_ball_highlighted_renderer.start(self.window)
        self.red_ball_highlighted_renderer.start(self.window)
    def handle_mouse_move(self, window, xpos, ypos):
        # Forward input events to every sub-renderer so their cameras stay in sync.
        super(GatherViewer, self).handle_mouse_move(window, xpos, ypos)
        self.green_ball_renderer.handle_mouse_move(window, xpos, ypos)
        self.red_ball_renderer.handle_mouse_move(window, xpos, ypos)
        self.green_ball_highlighted_renderer.handle_mouse_move(window, xpos, ypos)
        self.red_ball_highlighted_renderer.handle_mouse_move(window, xpos, ypos)
    def handle_scroll(self, window, x_offset, y_offset):
        # Forward scroll (zoom) events to every sub-renderer as well.
        super(GatherViewer, self).handle_scroll(window, x_offset, y_offset)
        self.green_ball_renderer.handle_scroll(window, x_offset, y_offset)
        self.red_ball_renderer.handle_scroll(window, x_offset, y_offset)
        self.green_ball_highlighted_renderer.handle_scroll(window, x_offset, y_offset)
        self.red_ball_highlighted_renderer.handle_scroll(window, x_offset, y_offset)
    def render(self):
        """Render the scene, all gather objects, and the sensor-readings HUD."""
        super(GatherViewer, self).render()
        # Accumulate visualization geoms from each ball renderer into one
        # object list, then draw them with the main camera.
        tmpobjects = mjcore.MJVOBJECTS()
        mjlib.mjlib.mjv_makeObjects(byref(tmpobjects), 1000)
        # Objects currently in the agent's view get the highlighted models.
        for obj in self.env.objects_in_view:
            (x, y, typ) = obj
            # Position the single-ball model at the object's (x, y).
            qpos = np.zeros_like(self.green_ball_highlighted_model.data.qpos)
            qpos[(0, 0)] = x
            qpos[(1, 0)] = y
            if (typ == APPLE):
                self.green_ball_highlighted_model.data.qpos = qpos
                self.green_ball_highlighted_model.forward()
                self.green_ball_highlighted_renderer.render()
                mjextra.append_objects(tmpobjects, self.green_ball_highlighted_renderer.objects)
            else:
                self.red_ball_highlighted_model.data.qpos = qpos
                self.red_ball_highlighted_model.forward()
                self.red_ball_highlighted_renderer.render()
                mjextra.append_objects(tmpobjects, self.red_ball_highlighted_renderer.objects)
        # Remaining objects are drawn with the plain (non-highlighted) models.
        for obj in self.env.objects:
            if (not (obj in self.env.objects_in_view)):
                (x, y, typ) = obj
                qpos = np.zeros_like(self.green_ball_model.data.qpos)
                qpos[(0, 0)] = x
                qpos[(1, 0)] = y
                if (typ == APPLE):
                    self.green_ball_model.data.qpos = qpos
                    self.green_ball_model.forward()
                    self.green_ball_renderer.render()
                    mjextra.append_objects(tmpobjects, self.green_ball_renderer.objects)
                else:
                    self.red_ball_model.data.qpos = qpos
                    self.red_ball_model.forward()
                    self.red_ball_renderer.render()
                    mjextra.append_objects(tmpobjects, self.red_ball_renderer.objects)
        # Merge in the main scene's own objects and draw everything.
        mjextra.append_objects(tmpobjects, self.objects)
        mjlib.mjlib.mjv_makeLights(self.model.ptr, self.data.ptr, byref(tmpobjects))
        mjlib.mjlib.mjr_render(0, self.get_rect(), byref(tmpobjects), byref(self.ropt), byref(self.cam.pose), byref(self.con))
        # HUD overlay is best-effort: skip silently if PyOpenGL is unavailable.
        try:
            import OpenGL.GL as GL
        except:
            return
        def draw_rect(x, y, width, height):
            # Draw an axis-aligned filled rectangle in screen coordinates.
            GL.glBegin(GL.GL_QUADS)
            GL.glVertex2f(x, y)
            GL.glVertex2f((x + width), y)
            GL.glVertex2f((x + width), (y + height))
            GL.glVertex2f(x, (y + height))
            GL.glEnd()
        def refresh2d(width, height):
            # Switch to a 2D orthographic projection for the overlay.
            GL.glViewport(0, 0, width, height)
            GL.glMatrixMode(GL.GL_PROJECTION)
            GL.glLoadIdentity()
            GL.glOrtho(0.0, width, 0.0, height, 0.0, 1.0)
            GL.glMatrixMode(GL.GL_MODELVIEW)
            GL.glLoadIdentity()
        GL.glLoadIdentity()
        (width, height) = glfw.get_framebuffer_size(self.window)
        refresh2d(width, height)
        GL.glDisable(GL.GL_LIGHTING)
        GL.glEnable(GL.GL_BLEND)
        # Semi-transparent black background panel for the readings bars.
        GL.glColor4f(0.0, 0.0, 0.0, 0.8)
        draw_rect(10, 10, 300, 100)
        (apple_readings, bomb_readings) = self.env.get_readings()
        # One bar per sensor; alpha encodes the reading strength.
        for (idx, reading) in enumerate(apple_readings):
            if (reading > 0):
                GL.glColor4f(0.0, 1.0, 0.0, reading)
                draw_rect((20 * (idx + 1)), 10, 5, 50)
        for (idx, reading) in enumerate(bomb_readings):
            if (reading > 0):
                GL.glColor4f(1.0, 0.0, 0.0, reading)
                draw_rect((20 * (idx + 1)), 60, 5, 50)
class FunctionGetFetches(object):
    """Keras-style TF1 backend function that also returns extra fetches.

    Calling an instance runs `outputs`, a grouped update op, and any
    additional `fetches` in one session.run, feeding `inputs` (with
    sparse-tensor support). The session.run result list is returned as-is,
    so it contains outputs, then the update-op result, then the extra
    fetches.
    """
    def __init__(self, inputs, outputs, updates=None, name=None, **session_kwargs):
        updates = (updates or [])
        if (not isinstance(inputs, (list, tuple))):
            raise TypeError('`inputs` to a TensorFlow backend function should be a list or tuple.')
        if (not isinstance(outputs, (list, tuple))):
            raise TypeError('`outputs` of a TensorFlow backend function should be a list or tuple.')
        if (not isinstance(updates, (list, tuple))):
            raise TypeError('`updates` in a TensorFlow backend function should be a list or tuple.')
        self.inputs = list(inputs)
        self.outputs = list(outputs)
        # Control dependency guarantees outputs are computed before updates run.
        with tf.control_dependencies(self.outputs):
            updates_ops = []
            for update in updates:
                if isinstance(update, tuple):
                    # (variable, new_value) pairs become assign ops.
                    (p, new_p) = update
                    updates_ops.append(tf.assign(p, new_p))
                else:
                    # Already an op/tensor; run it as-is.
                    updates_ops.append(update)
            self.updates_op = tf.group(*updates_ops)
        self.name = name
        # Static feed/fetch entries merged into every call.
        self.feed_dict = session_kwargs.pop('feed_dict', {})
        self.fetches = session_kwargs.pop('fetches', [])
        if (not isinstance(self.fetches, list)):
            self.fetches = [self.fetches]
        self.session_kwargs = session_kwargs
    def __call__(self, inputs):
        """Run the function on `inputs` (one value per input placeholder)."""
        if (not isinstance(inputs, (list, tuple))):
            raise TypeError('`inputs` should be a list or tuple.')
        feed_dict = self.feed_dict.copy()
        for (tensor, value) in zip(self.inputs, inputs):
            if is_sparse(tensor):
                # Convert scipy sparse input to the (indices, values, shape)
                # triplet that a tf.sparse_placeholder expects.
                sparse_coo = value.tocoo()
                indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(sparse_coo.col, 1)), 1)
                value = (indices, sparse_coo.data, sparse_coo.shape)
            feed_dict[tensor] = value
        fetches = ((self.outputs + [self.updates_op]) + self.fetches)
        session = get_session()
        updated = session.run(fetches=fetches, feed_dict=feed_dict, **self.session_kwargs)
        return updated
def check_balancing_schedule(balancing_schedule):
    """Validate and resolve a balancing schedule.

    A callable is probed once with dummy arguments and must return a dict;
    it is then returned unchanged. A known schedule name is mapped through
    BALANCING_KIND_MAPPING. Anything else raises TypeError.
    """
    if callable(balancing_schedule):
        try:
            # Probe with empty placeholder arguments to validate the signature
            # and the return type.
            probe_result = balancing_schedule({}, {}, 0, 0)
        except Exception as e:
            # Append usage info to the original error message and re-raise.
            e_args = list(e.args)
            e_args[0] += BALANCING_SCHEDULE_INFO
            e.args = tuple(e_args)
            raise e
        if not isinstance(probe_result, dict):
            raise TypeError(f' The self-defined `balancing_schedule` must return a `dict`, got {type(probe_result)}' + BALANCING_SCHEDULE_INFO)
        return balancing_schedule
    if balancing_schedule in BALANCING_KIND:
        return BALANCING_KIND_MAPPING[balancing_schedule]
    shown = balancing_schedule if isinstance(balancing_schedule, str) else type(balancing_schedule)
    raise TypeError(f"'balancing_schedule' should be one of {BALANCING_KIND} or `callable`, got {shown}.")
def gen_data_step(decl_file: str, dest: str, rec_limit: int, depth_limit: int, weight_limit: int):
    """Run the lean_step data-generation script for one declaration file.

    Invokes `lean --run ./src/lean_step.lean` with the given arguments,
    redirecting stdout and stderr to `<dest stem>.out` next to `dest`.
    Returns `decl_file` so callers can track completed inputs.
    """
    command = ['lean', '--run', './src/lean_step.lean']
    command.extend(str(arg) for arg in (decl_file, dest, rec_limit, depth_limit, weight_limit))
    dest_path = Path(dest)
    # Log file lives beside the destination, sharing its stem.
    log_file = os.path.join(str(dest_path.parent), (str(dest_path.stem) + '.out'))
    with open(log_file, 'w') as log:
        subprocess.run(command, stdout=log, stderr=log)
    return decl_file
def w2v_pad(protein, maxlen_, victor_size):
    """Tokenize protein 3-gram sequences, pad them, and build an embedding
    matrix from pre-trained ProtVec vectors.

    Words found in ProtVec get their pre-trained vector; the rest get a
    zero-mean random vector. Returns (padded_sequences, word_index,
    embedding_matrix).
    """
    # '\u3000' (ideographic space) is the only character filtered out.
    tokenizer = text.Tokenizer(num_words=10000, lower=False, filters='\u3000')
    tokenizer.fit_on_texts(protein)
    protein_ = sequence.pad_sequences(tokenizer.texts_to_sequences(protein), maxlen=maxlen_)
    word_index = tokenizer.word_index
    nb_words = len(word_index)
    print(nb_words)
    protVec_model = {}
    with open('../dataset/embed/protVec_100d_3grams.csv', encoding='utf8') as f:
        for line in f:
            # NOTE(review): eval() on file content is unsafe and presumably
            # just unquotes each line -- confirm the file format and consider
            # ast.literal_eval or plain string parsing instead.
            values = eval(line).rsplit('\t')
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            protVec_model[word] = coefs
    print('add protVec finished....')
    # count tracks how many words were covered by the pre-trained vectors.
    count = 0
    # Row 0 stays zero: Keras word indexes are 1-based.
    embedding_matrix = np.zeros(((nb_words + 1), victor_size))
    for (word, i) in word_index.items():
        embedding_glove_vector = (protVec_model[word] if (word in protVec_model) else None)
        if (embedding_glove_vector is not None):
            count += 1
            embedding_matrix[i] = embedding_glove_vector
        else:
            # Out-of-vocabulary words get a small zero-mean random vector.
            unk_vec = (np.random.random(victor_size) * 0.5)
            unk_vec = (unk_vec - unk_vec.mean())
            embedding_matrix[i] = unk_vec
    del protVec_model
    print(embedding_matrix.shape)
    return (protein_, word_index, embedding_matrix)
class Transmission(xmlr.Object):
    """URDF <transmission> element linking a joint to its actuator.

    NOTE(review): semantics inferred from field names -- confirm against the
    XML reflection schema registered for this class elsewhere in the package.
    """
    def __init__(self, name=None, joint=None, actuator=None):
        self.name = name
        self.joint = joint
        self.actuator = actuator
def collate_train_baseline(batch):
    """Collate a training batch of (index, image, label) samples.

    Samples with a non-None last element carry labels and are delegated to
    collate_eval; otherwise returns (list of indices, stacked image tensor).
    """
    if batch[0][-1] is not None:
        return collate_eval(batch)
    indices = [sample[0] for sample in batch]
    images = torch.stack([sample[1] for sample in batch])
    return (indices, images)
def precision(tp, fp) -> float:
    """Precision = tp / (tp + fp); returns 0 when there are no positive
    predictions to avoid a zero division."""
    denominator = tp + fp
    return tp / denominator if denominator > 0 else 0
def train_classifier(classifier: nn.Module, save_dir: str, train: List[Annotation], val: List[Annotation], documents: Dict[(str, List[List[int]])], model_pars: dict, class_interner: Dict[(str, int)], attention_optimizer=None, classifier_optimizer=None) -> Tuple[(nn.Module, dict)]:
    """Train an evidence-attention classifier in two phases.

    Phase 1 supervises the document attention against labelled evidence spans
    (BCE loss); phase 2 trains the class prediction head (cross-entropy).
    Checkpoints and per-epoch state are written to `save_dir`/classifier so
    interrupted runs resume where they left off. Early stopping uses
    `patience` epochs without validation improvement.

    BUG FIX: the classifier phase compared `epoch_val_loss` against
    `best_classifier_loss` but assigned the result to `best_val_loss`, so the
    best loss was never updated -- the "new best" branch fired (and the
    patience counter reset) on every epoch whose loss was below the initial
    value. The tracked variable is now updated correctly.

    Returns (classifier, results-dict of per-epoch losses/metrics).
    """
    logging.info(f'Beginning training classifier with {len(train)} annotations, {len(val)} for validation')
    classifier_output_dir = os.path.join(save_dir, 'classifier')
    os.makedirs(save_dir, exist_ok=True)
    os.makedirs(classifier_output_dir, exist_ok=True)
    model_save_file = os.path.join(classifier_output_dir, 'classifier.pt')
    epoch_save_file = os.path.join(classifier_output_dir, 'classifier_epoch_data.pt')
    (train_ids, train_classes, train_queries, train_docs, train_evidence_spans) = convert_for_training(train, documents, class_interner)
    (val_ids, val_classes, val_queries, val_docs, val_evidence_spans) = convert_for_training(val, documents, class_interner)
    if (not bool(model_pars['classifier']['has_query'])):
        train_queries = None
        val_queries = None
    device = next(classifier.parameters()).device
    if (attention_optimizer is None):
        attention_optimizer = torch.optim.Adam(classifier.parameters(), lr=model_pars['classifier']['lr'])
    if (classifier_optimizer is None):
        classifier_optimizer = torch.optim.Adam(classifier.parameters(), lr=model_pars['classifier']['lr'])
    # Sum reductions: losses are accumulated and normalized per epoch by hand.
    attention_criterion = nn.BCELoss(reduction='sum')
    criterion = nn.CrossEntropyLoss(reduction='sum')
    batch_size = model_pars['classifier']['batch_size']
    epochs = model_pars['classifier']['epochs']
    attention_epochs = model_pars['classifier']['attention_epochs']
    patience = model_pars['classifier']['patience']
    max_grad_norm = model_pars['classifier'].get('max_grad_norm', None)
    class_labels = [k for (k, v) in sorted(class_interner.items())]
    results = {'attention_train_losses': [], 'attention_val_losses': [], 'train_loss': [], 'train_f1': [], 'train_acc': [], 'val_loss': [], 'val_f1': [], 'val_acc': []}
    best_attention_epoch = (- 1)
    best_classifier_epoch = (- 1)
    best_attention_loss = float('inf')
    best_classifier_loss = float('inf')
    best_model_state_dict = None
    start_attention_epoch = 0
    start_classifier_epoch = 0
    epoch_data = {}
    # Resume from the last checkpoint if one exists.
    if os.path.exists(epoch_save_file):
        logging.info(f'Restoring model from {model_save_file}')
        classifier.load_state_dict(torch.load(model_save_file))
        epoch_data = torch.load(epoch_save_file)
        start_attention_epoch = (epoch_data.get('attention_epoch', (- 1)) + 1)
        start_classifier_epoch = (epoch_data.get('classifier_epoch', (- 1)) + 1)
        best_attention_loss = epoch_data.get('best_attention_loss', float('inf'))
        best_classifier_loss = epoch_data.get('best_classifier_loss', float('inf'))
        if bool(epoch_data.get('done_attention', 0)):
            start_attention_epoch = epochs
        if bool(epoch_data.get('done_classifier', 0)):
            start_classifier_epoch = epochs
        results = epoch_data['results']
        best_attention_epoch = start_attention_epoch
        best_classifier_epoch = start_classifier_epoch
        best_model_state_dict = OrderedDict({k: v.cpu() for (k, v) in classifier.state_dict().items()})
        logging.info(f'Restoring training from attention epoch {start_attention_epoch} / {start_classifier_epoch}')
    # ---- Phase 1: supervise document attention against evidence spans ----
    logging.info(f'Training classifier attention from epoch {start_attention_epoch} until epoch {attention_epochs}')
    for attention_epoch in range(start_attention_epoch, attention_epochs):
        epoch_train_loss = 0
        epoch_train_tokens = 0
        epoch_val_loss = 0
        epoch_val_tokens = 0
        for batch_start in range(0, len(train_ids), batch_size):
            classifier.train()
            attention_optimizer.zero_grad()
            if (train_queries is None):
                queries = None
            else:
                queries = train_queries[batch_start:(batch_start + batch_size)]
            docs = train_docs[batch_start:(batch_start + batch_size)]
            train_spans = train_evidence_spans[batch_start:(batch_start + batch_size)]
            (_, _, _, unnormalized_document_attention, _) = classifier(queries, None, docs, return_attentions=True)
            # BCE needs probabilities; sigmoid converts the raw attention scores.
            partially_normalized_document_attention = torch.sigmoid(unnormalized_document_attention.data.squeeze())
            train_spans = PaddedSequence.autopad(train_spans, batch_first=True, device=unnormalized_document_attention.data.device)
            batch_loss = attention_criterion(partially_normalized_document_attention, train_spans.data.float())
            epoch_train_loss += batch_loss.item()
            train_size = torch.sum(train_spans.batch_sizes).item()
            epoch_train_tokens += train_size
            # Normalize by token count before backprop; keep the sum for logging.
            batch_loss = (batch_loss / train_size)
            batch_loss.backward()
            attention_optimizer.step()
        results['attention_train_losses'].append((epoch_train_loss / epoch_train_tokens))
        logging.info(f'Epoch {attention_epoch} attention train loss {(epoch_train_loss / epoch_train_tokens)}')
        with torch.no_grad():
            classifier.eval()
            for batch_start in range(0, len(val_ids), batch_size):
                if (val_queries is None):
                    queries = None
                else:
                    queries = val_queries[batch_start:(batch_start + batch_size)]
                docs = val_docs[batch_start:(batch_start + batch_size)]
                val_spans = val_evidence_spans[batch_start:(batch_start + batch_size)]
                (_, _, _, unnormalized_document_attention, _) = classifier(queries, None, docs, return_attentions=True)
                unnormalized_document_attention = torch.sigmoid(unnormalized_document_attention.data)
                val_spans = PaddedSequence.autopad(val_spans, batch_first=True, device=device)
                batch_loss = attention_criterion(unnormalized_document_attention.squeeze(), val_spans.data.float())
                epoch_val_loss += batch_loss.item()
                epoch_val_tokens += torch.sum(val_spans.batch_sizes).item()
            epoch_val_loss = (epoch_val_loss / epoch_val_tokens)
            results['attention_val_losses'].append(epoch_val_loss)
            logging.info(f'Epoch {attention_epoch} attention val loss {epoch_val_loss}')
        if (epoch_val_loss < best_attention_loss):
            best_model_state_dict = OrderedDict({k: v.cpu() for (k, v) in classifier.state_dict().items()})
            best_attention_epoch = attention_epoch
            best_attention_loss = epoch_val_loss
            epoch_data['attention_epoch'] = attention_epoch
            epoch_data['results'] = results
            epoch_data['best_attention_loss'] = best_attention_loss
            epoch_data['best_classifier_loss'] = float('inf')
            epoch_data['done_attention'] = 0
            epoch_data['done_classifier'] = 0
            torch.save(classifier.state_dict(), model_save_file)
            torch.save(epoch_data, epoch_save_file)
            logging.info(f'Epoch {attention_epoch} new best model with val loss {epoch_val_loss}')
        if ((attention_epoch - best_attention_epoch) > patience):
            logging.info(f'Exiting after epoch {attention_epoch} due to no improvement')
            epoch_data['done_attention'] = 1
            torch.save(epoch_data, epoch_save_file)
            break
    # ---- Phase 2: train the class prediction head ----
    logging.info(f'Training classifier from epoch {start_classifier_epoch} until epoch {epochs}')
    for classifier_epoch in range(start_classifier_epoch, epochs):
        epoch_train_loss = 0
        epoch_val_loss = 0
        train_preds = []
        train_truth = []
        classifier.train()
        for batch_start in range(0, len(train_ids), batch_size):
            classifier.train()
            classifier_optimizer.zero_grad()
            targets = train_classes[batch_start:(batch_start + batch_size)]
            train_truth.extend(targets)
            targets = torch.tensor(targets, device=device)
            if (train_queries is not None):
                queries = train_queries[batch_start:(batch_start + batch_size)]
            else:
                queries = None
            docs = train_docs[batch_start:(batch_start + batch_size)]
            classes = classifier(queries, None, docs, return_attentions=False)
            train_preds.extend((x.item() for x in torch.argmax(classes, dim=1)))
            batch_loss = criterion(classes.squeeze(), targets)
            epoch_train_loss += batch_loss.item()
            # Mean over the batch for backprop; the sum is kept for logging.
            batch_loss /= len(docs)
            batch_loss.backward()
            classifier_optimizer.step()
        train_accuracy = accuracy_score(train_truth, train_preds)
        train_f1 = classification_report(train_truth, train_preds, output_dict=True)
        results['train_loss'].append((epoch_train_loss / len(train_ids)))
        results['train_acc'].append(train_accuracy)
        results['train_f1'].append(train_f1)
        logging.info(f'Epoch {classifier_epoch} train loss {(epoch_train_loss / len(train_ids))}, accuracy: {train_accuracy}, f1: {train_f1}')
        with torch.no_grad():
            classifier.eval()
            val_preds = []
            val_truth = []
            for batch_start in range(0, len(val_ids), batch_size):
                targets = val_classes[batch_start:(batch_start + batch_size)]
                val_truth.extend(targets)
                if (val_queries is not None):
                    queries = val_queries[batch_start:(batch_start + batch_size)]
                else:
                    queries = None
                docs = val_docs[batch_start:(batch_start + batch_size)]
                classes = classifier(queries, None, docs, return_attentions=False)
                targets = torch.tensor(targets, device=classes.device)
                val_preds.extend((x.item() for x in torch.argmax(classes, dim=1)))
                batch_loss = criterion(classes, targets)
                # Leftover debugging hook: drops into pdb if the loss is NaN.
                if (not torch.all((batch_loss == batch_loss))):
                    import pdb
                    pdb.set_trace()
                epoch_val_loss += batch_loss.item()
            epoch_val_loss /= len(val_ids)
            val_accuracy = accuracy_score(val_truth, val_preds)
            val_f1 = classification_report(val_truth, val_preds, output_dict=True)
            results['val_loss'].append(epoch_val_loss)
            results['val_acc'].append(val_accuracy)
            results['val_f1'].append(val_f1)
            logging.info(f'Epoch {classifier_epoch} val loss {epoch_val_loss}, accuracy: {val_accuracy}, f1: {val_f1}')
        if (epoch_val_loss < best_classifier_loss):
            best_model_state_dict = OrderedDict({k: v.cpu() for (k, v) in classifier.state_dict().items()})
            best_classifier_epoch = classifier_epoch
            # FIX: update the variable that is actually compared above
            # (was `best_val_loss`, leaving best_classifier_loss stale).
            best_classifier_loss = epoch_val_loss
            epoch_data['classifier_epoch'] = classifier_epoch
            epoch_data['attention_epoch'] = best_attention_epoch
            epoch_data['best_attention_loss'] = best_attention_loss
            epoch_data['results'] = results
            epoch_data['best_classifier_loss'] = best_classifier_loss
            epoch_data['done_classifier'] = 0
            epoch_data['done_attention'] = 1
            torch.save(classifier.state_dict(), model_save_file)
            torch.save(epoch_data, epoch_save_file)
            logging.info(f'Epoch {classifier_epoch} new best model with val loss {epoch_val_loss}')
        if ((classifier_epoch - best_classifier_epoch) > patience):
            logging.info(f'Exiting after epoch {classifier_epoch} due to no improvement')
            epoch_data['done_classifier'] = 1
            torch.save(epoch_data, epoch_save_file)
            break
    return (classifier, results)
# NOTE(review): the tuples below look like the remains of click decorators
# (@click.command() / @click.option(...)) whose '@click...' prefixes were
# lost in extraction — restore the decorators before using this entry point.
()
('-r', '--results', type=click.Path(exists=True), help='Path of results.')
('-t', '--targets', type=click.Path(exists=True), help='Path of targets.')
('--train-labels', type=click.Path(exists=True), default=None, help='Path of labels for training set.')
('-a', type=click.FLOAT, default=0.55, help='Parameter A for propensity score.')
('-b', type=click.FLOAT, default=1.5, help='Parameter B for propensity score.')
def main(results, targets, train_labels, a, b):
    """Print precision@k and nDCG@k (k in 1,3,5) for saved predictions.

    If train labels are supplied, also prints the propensity-scored variants
    using inverse propensity weights parameterized by a and b.
    """
    (res, targets) = (np.load(results, allow_pickle=True), np.load(targets, allow_pickle=True))
    mlb = MultiLabelBinarizer(sparse_output=True)
    targets = mlb.fit_transform(targets)
    # Plain precision@{1,3,5} and nDCG@{1,3,5}.
    print(',3,5:', get_p_1(res, targets, mlb), get_p_3(res, targets, mlb), get_p_5(res, targets, mlb))
    print(',3,5:', get_n_1(res, targets, mlb), get_n_3(res, targets, mlb), get_n_5(res, targets, mlb))
    if (train_labels is not None):
        # Propensity weights need the training label frequency distribution.
        train_labels = np.load(train_labels, allow_pickle=True)
        inv_w = get_inv_propensity(mlb.transform(train_labels), a, b)
        print(',3,5:', get_psp_1(res, targets, inv_w, mlb), get_psp_3(res, targets, inv_w, mlb), get_psp_5(res, targets, inv_w, mlb))
        print(',3,5:', get_psndcg_1(res, targets, inv_w, mlb), get_psndcg_3(res, targets, inv_w, mlb), get_psndcg_5(res, targets, inv_w, mlb))
def parse_args():
    """Build and parse the command line for segmentor training.

    Returns:
        argparse.Namespace with ``config``, ``fvcore`` and ``shape``.
    """
    parser = argparse.ArgumentParser(description='Train a segmentor')
    # Mandatory positional: path to the training config file.
    parser.add_argument('config', help='train config file path')
    # Optional switch for fvcore-based statistics.
    parser.add_argument('--fvcore', action='store_true', default=False)
    # Input resolution as one or more ints (defaults to 1024x1024).
    parser.add_argument('--shape', type=int, nargs='+', default=[1024, 1024], help='input image size')
    return parser.parse_args()
def script_preset_(model: torch.nn.Module):
    # Compile selected submodule types of the model in place via the project
    # helper: dropout plus the attention/Evoformer blocks. Tracing is disabled
    # and batch_dims is left unset, so plain scripting is used throughout.
    # NOTE(review): semantics of attempt_trace/batch_dims taken from this call
    # site only — confirm against `script_submodules_`'s definition.
    script_submodules_(model, [nn.Dropout, Attention, GlobalAttention, EvoformerBlock], attempt_trace=False, batch_dims=None)
def build_model(config):
    """Instantiate a model from a config dict.

    Args:
        config: dict with a 'name' key naming the model class (must be one of
            the supported names below); the remaining keys are passed to the
            constructor as keyword arguments. NOTE: 'name' is popped, so the
            caller's dict is mutated.

    Returns:
        The constructed model instance.
    """
    module_name = config.pop('name')
    support_dict = ['DBNet', 'CRNN']
    assert (module_name in support_dict), f'unsupported model name: {module_name}'
    # Fix: resolve the class via module globals instead of eval() — identical
    # lookup semantics for the whitelisted names, without the code-execution
    # hazard eval() carries.
    model_cls = globals()[module_name]
    return model_cls(**config)
def _init_dist_pytorch(backend, **kwargs):
    """Initialise torch.distributed for the current process.

    The rank is read from the RANK environment variable; the local GPU is
    chosen as rank modulo the number of visible devices.
    """
    rank = int(os.environ['RANK'])
    gpu_count = torch.cuda.device_count()
    local_gpu = rank % gpu_count
    torch.cuda.set_device(local_gpu)
    dist.init_process_group(backend=backend, **kwargs)
    print(f'init distributed in rank {torch.distributed.get_rank()}')
def test_he_uniform():
    """HeUniform samples must lie within +/-0.1 and approach both bounds."""
    from lasagne.init import HeUniform
    values = HeUniform().sample((300, 200))
    lo, hi = values.min(), values.max()
    # Lower edge inside [-0.1, -0.09); upper edge inside (0.09, 0.1].
    assert (- 0.1) <= lo < (- 0.09)
    assert 0.09 < hi <= 0.1
def shufflenet():
    """Smoke-build every ShuffleNet test config on CPU and print each model."""
    device = torch.device('cpu')
    # One loop over the config files replaces three copy-pasted blocks.
    cfg_files = [
        'tests/configs/shufflenet/shufflenet_v1_3g1x.yaml',
        'tests/configs/shufflenet/shufflenet_v2_torchvision.yaml',
        'tests/configs/shufflenet/shufflenet_v2_x2_0.yaml',
    ]
    for cfg_file in cfg_files:
        cfg.merge_from_file(cfg_file)
        model = build_recognizer(cfg, device)
        print(model)
class INSnipClient(SnipClient):
    """SNIP client variant using SGD with stepped exponential LR decay."""

    def init_optimizer(self):
        # Gamma halves the learning rate every LR_HALF_LIFE optimizer steps,
        # expressed per STEP_SIZE scheduler ticks.
        decay_gamma = 0.5 ** (STEP_SIZE / LR_HALF_LIFE)
        self.optimizer = SGD(self.model.parameters(), lr=INIT_LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
        self.optimizer_scheduler = lr_scheduler.StepLR(self.optimizer, step_size=STEP_SIZE, gamma=decay_gamma)
        self.optimizer_wrapper = OptimizerWrapper(self.model, self.optimizer, self.optimizer_scheduler)

    def init_train_loader(self, tl):
        # Keep a reference to the training data loader.
        self.train_loader = tl
def build_compressed_embedding_pkl(name):
    """Read a whitespace-separated embedding text file and pickle it as CLM.pkl.

    Each line is '<token> <v1> <v2> ...'; tokens are dropped and the float
    vectors are stacked into a numpy array dumped next to the source file.

    NOTE(review): relies on module-level `experiment_path`/`experiment_id`.
    """
    weights_dir = os.path.join(experiment_path, str(experiment_id), 'weights')
    embeddings = []
    with open(os.path.join(weights_dir, name), 'r') as f:
        # Stream line by line instead of materialising readlines().
        for line in f:
            tokens = line.strip().split()
            # tokens[0] is the word itself; the rest are vector components.
            embeddings.append([float(x) for x in tokens[1:]])
    LM = np.array(embeddings)
    # Fix: use a context manager so the output handle is closed reliably
    # (the original `pickle.dump(LM, open(...))` leaked the file object).
    with open(os.path.join(weights_dir, 'CLM.pkl'), 'wb') as out:
        pickle.dump(LM, out)
    print('LM size {} dumped'.format(LM.shape))
class ResNet(nn.Module):
    """Dilated ResNet backbone (deep-stem variant) usable for dense prediction.

    `stride` selects the total output stride: 16 keeps a strided layer3,
    8 converts both layer3 and layer4 to dilated convolutions instead.

    NOTE(review): `norm_layer` and `_BatchNorm` come from module scope, not
    parameters — confirm they are configured before instantiating this class.
    """

    def __init__(self, block, layers, num_classes=1000, stride=8):
        # The deep stem ends at 128 channels, so the first stage sees 128.
        self.inplanes = 128
        super().__init__()
        # Deep stem: three 3x3 convs replacing the classic single 7x7 conv.
        self.conv1 = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False), norm_layer(64), nn.ReLU(inplace=True), nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False), norm_layer(64), nn.ReLU(inplace=True), nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False))
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        if (stride == 16):
            self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
            # Multi-grid dilations in the last stage preserve resolution.
            self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=2, grids=[1, 2, 4])
        elif (stride == 8):
            self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, grids=[1, 2, 4])
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # He-style init for convs; batch norms scale to 1 with zero bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, _BatchNorm):
                m.weight.data.fill_(1)
                if (m.bias is not None):
                    m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, grids=None):
        """Build one residual stage of `blocks` blocks with optional dilation."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # Projection shortcut whenever the tensor shape changes.
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), norm_layer((planes * block.expansion)))
        layers = []
        if (grids is None):
            grids = ([1] * blocks)
        # The first block of the stage uses half the requested dilation
        # (2 -> 1, 4 -> 2), as in the dilated-ResNet scheme.
        if ((dilation == 1) or (dilation == 2)):
            layers.append(block(self.inplanes, planes, stride, dilation=1, downsample=downsample, previous_dilation=dilation))
        elif (dilation == 4):
            layers.append(block(self.inplanes, planes, stride, dilation=2, downsample=downsample, previous_dilation=dilation))
        else:
            raise RuntimeError('=> unknown dilation size: {}'.format(dilation))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=(dilation * grids[i]), previous_dilation=dilation))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Standard classification forward pass (stem, 4 stages, pooled FC)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        return x
def eebls_gpu_custom(t, y, dy, freqs, q_values, phi_values, ignore_negative_delta_sols=False, freq_batch_size=None, nstreams=5, max_memory=None, functions=None, **kwargs):
    """Box least squares on the GPU over an explicit (q, phi) grid.

    For every trial frequency, the light curve (t, y, dy) is phase-folded and
    binned for each (q, phi) combination; the best BLS solution per frequency
    is reduced on-device. Work is split into frequency batches distributed
    round-robin over `nstreams` CUDA streams.

    Returns:
        (bls, qphi_sols): BLS power per frequency (normalised by the weighted
        variance YY) and the best-fitting (q, phi) pair per frequency.

    NOTE(review): memory-size estimates below assume float32 (4 bytes);
    behavior documented from this call site only.
    """
    functions = (functions if (functions is not None) else compile_bls(**kwargs))
    block_size = kwargs.get('block_size', _default_block_size)
    ndata = len(t)
    if (max_memory is None):
        # Default budget: 90% of currently free device memory.
        (free, total) = cuda.mem_get_info()
        max_memory = int((0.9 * free))
    if (freq_batch_size is None):
        # Estimate how many frequencies fit per batch within max_memory.
        real_type_size = 4
        mem0 = ((ndata * 3) * real_type_size)
        nq = len(q_values)
        nphi = len(phi_values)
        mem0 += (nq + nphi)
        mem0 += ((len(freqs) * 5) * real_type_size)
        mem_per_f = ((((4 * nstreams) * nq) * nphi) * real_type_size)
        freq_batch_size = int((float((max_memory - mem0)) / mem_per_f))
        if (freq_batch_size == 0):
            raise Exception('Not enough memory (freq_batch_size = 0)')
    nbtot = ((len(q_values) * len(phi_values)) * freq_batch_size)
    grid_size = int(np.ceil((float(nbtot) / block_size)))
    # Inverse-variance weights, weighted mean, and weighted variance YY.
    w = np.power(dy, (- 2))
    w /= sum(w)
    ybar = np.dot(w, y)
    YY = np.dot(w, np.power((np.array(y) - ybar), 2))
    yw = ((np.array(y) - ybar) * np.array(w))
    # Host -> device transfers of the (float32) light curve and frequencies.
    t_g = gpuarray.to_gpu(np.array(t).astype(np.float32))
    yw_g = gpuarray.to_gpu(yw.astype(np.float32))
    w_g = gpuarray.to_gpu(np.array(w).astype(np.float32))
    freqs_g = gpuarray.to_gpu(np.array(freqs).astype(np.float32))
    # Per-stream scratch buffers (bins and temporary BLS values/solutions).
    (yw_g_bins, w_g_bins, bls_tmp_gs, bls_tmp_sol_gs, streams) = ([], [], [], [], [])
    for i in range(nstreams):
        streams.append(cuda.Stream())
        yw_g_bins.append(gpuarray.zeros(nbtot, dtype=np.float32))
        w_g_bins.append(gpuarray.zeros(nbtot, dtype=np.float32))
        bls_tmp_gs.append(gpuarray.zeros(nbtot, dtype=np.float32))
        bls_tmp_sol_gs.append(gpuarray.zeros(nbtot, dtype=np.uint32))
    # Full-length result arrays (one entry per frequency).
    bls_g = gpuarray.zeros(len(freqs), dtype=np.float32)
    bls_sol_g = gpuarray.zeros(len(freqs), dtype=np.uint32)
    bls_best_phi = gpuarray.zeros(len(freqs), dtype=np.float32)
    bls_best_q = gpuarray.zeros(len(freqs), dtype=np.float32)
    q_values_g = gpuarray.to_gpu(np.asarray(q_values).astype(np.float32))
    phi_values_g = gpuarray.to_gpu(np.asarray(phi_values).astype(np.float32))
    block = (block_size, 1, 1)
    grid = (grid_size, 1)
    nbatches = int(np.ceil((float(len(freqs)) / freq_batch_size)))
    bls = np.zeros(len(freqs))
    bin_func = functions['bin_and_phase_fold_custom']
    bls_func = functions['binned_bls_bst']
    max_func = functions['reduction_max']
    store_func = functions['store_best_sols_custom']
    for batch in range(nbatches):
        # Frequency slice for this batch; streams are used round-robin.
        imin = (freq_batch_size * batch)
        imax = min([len(freqs), (freq_batch_size * (batch + 1))])
        nf = (imax - imin)
        j = (batch % nstreams)
        yw_g_bin = yw_g_bins[j]
        w_g_bin = w_g_bins[j]
        bls_tmp_g = bls_tmp_gs[j]
        bls_tmp_sol_g = bls_tmp_sol_gs[j]
        stream = streams[j]
        # Clear this stream's scratch buffers asynchronously.
        yw_g_bin.fill(np.float32(0), stream=stream)
        w_g_bin.fill(np.float32(0), stream=stream)
        bls_tmp_g.fill(np.float32(0), stream=stream)
        bls_tmp_sol_g.fill(np.int32(0), stream=stream)
        # Kernel 1: phase-fold and bin the weighted data for each (f, q, phi).
        bin_grid = (int(np.ceil((float((len(t) * nf)) / block_size))), 1)
        args = (bin_grid, block, stream)
        args += (t_g.ptr, yw_g.ptr, w_g.ptr)
        args += (yw_g_bin.ptr, w_g_bin.ptr, freqs_g.ptr)
        args += (q_values_g.ptr, phi_values_g.ptr)
        args += (np.uint32(len(q_values)), np.uint32(len(phi_values)))
        args += (np.uint32(len(t)), np.uint32(nf))
        args += (np.uint32((freq_batch_size * batch)),)
        bin_func.prepared_async_call(*args)
        # Kernel 2: BLS statistic for every binned (q, phi) combination.
        nb = (len(q_values) * len(phi_values))
        bls_grid = (int(np.ceil((float((nf * nb)) / block_size))), 1)
        args = (bls_grid, block, stream)
        args += (yw_g_bin.ptr, w_g_bin.ptr)
        args += (bls_tmp_g.ptr, np.uint32((nf * nb)))
        args += (np.uint32(ignore_negative_delta_sols),)
        bls_func.prepared_async_call(*args)
        # Reduction: best BLS value + solution index per frequency.
        args = (max_func, bls_tmp_g, bls_tmp_sol_g)
        args += (nf, nb, stream, bls_g, bls_sol_g)
        args += ((batch * freq_batch_size), block_size)
        _reduction_max(*args)
        # Kernel 3: decode the winning solution index into (q, phi).
        store_grid = (int(np.ceil((float(nf) / block_size))), 1)
        args = (store_grid, block, stream)
        args += (bls_sol_g.ptr, bls_best_phi.ptr, bls_best_q.ptr)
        args += (q_values_g.ptr, phi_values_g.ptr)
        args += (np.uint32(len(q_values)), np.uint32(len(phi_values)))
        args += (np.uint32(nf), np.uint32((batch * freq_batch_size)))
        store_func.prepared_async_call(*args)
    # Device -> host: best (q, phi) per frequency; normalise power by YY.
    best_q = bls_best_q.get()
    best_phi = bls_best_phi.get()
    qphi_sols = list(zip(best_q, best_phi))
    return ((bls_g.get() / YY), qphi_sols)
class __FakeLocalTFRunner():
    """Placeholder that fails loudly when TensorFlow is not installed."""

    def __init__(self, *args, **kwargs):
        # Fix: make `self` explicit instead of folding it into *args; any
        # construction attempt raises regardless of arguments.
        raise ImportError('LocalTFRunner requires TensorFlow. To use it, please install TensorFlow.')
# Module-level initialization: presumably registers/loads the 'core' operator
# set — TODO(review): confirm against the definition of `_materialize`.
_materialize('core')
class GELU(ElementWiseUnaryOp):
    """Element-wise GELU op over the generated float dtypes."""
    # Each dtype is wrapped in a 1-tuple: one input, one same-family output.
    in_dtypes = [(dtype,) for dtype in DTYPE_GEN_FLOATS]
    out_dtypes = [(dtype,) for dtype in DTYPE_GEN_FLOATS]
def averagees_average_info(model):
    """Average the AveragEES stopping point over ten fixed file suffixes.

    Returns:
        (mean stopping epoch, mean stopping accuracy).
    """
    suffixes = [0, 1, 2, 3, 4, 75, 76, 77, 78, 79]
    stop_epoch_sum = 0.0
    stop_acc_sum = 0.0
    for suffix in suffixes:
        (stop_epoch, stop_acc) = analysis.get_averagees_stopping_point(model=model, file_suffix=suffix)
        stop_epoch_sum += stop_epoch
        stop_acc_sum += stop_acc
    # Fix: keep the averages in dedicated variables — the original overwrote
    # the `stop_acc_sum` accumulator with the mean, which misled readers.
    stop_epoch_avg = stop_epoch_sum / len(suffixes)
    stop_acc_avg = stop_acc_sum / len(suffixes)
    return (stop_epoch_avg, stop_acc_avg)
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Baseline_Exp1():
    """Baseline VHRED configuration for the Ubuntu dialogue corpus (Exp1).

    Starts from the generic prototype state and overrides corpus paths,
    model sizes and training hyperparameters; latent variables are disabled.
    """
    state = prototype_state()
    state.update({
        # Special symbols; unused speaker/meta tokens are set to -1.
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': (- 1),
        'first_speaker_sym': (- 1),
        'second_speaker_sym': (- 1),
        'third_speaker_sym': (- 1),
        'minor_speaker_sym': (- 1),
        'voice_over_sym': (- 1),
        'off_screen_sym': (- 1),
        'pause_sym': (- 1),
        # Corpus locations and output directory.
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule.
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Architecture choices.
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent Gaussian / piecewise variables (disabled for this baseline).
        'add_latent_gaussian_per_utterance': False,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': (1.0 / 75000.0),
        # Decoder input dropout.
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': False,
        'patience': 20,
    })
    return state
def ReadFileGS(x_axis, tthread, batchInterval, NUM_ITEMS, NUM_ACCESS, key_skewness, overlap_ratio, abort_ratio, txn_length, isCyclic, complexity):
    """Collect throughput series for the OP and OG schedulers over x_axis.

    NOTE(review): the `batchInterval` parameter is immediately shadowed by the
    loop variable, so its incoming value is ignored; kept for interface
    compatibility — confirm with callers whether this is intended.

    Returns:
        y: [OP throughputs, OG throughputs], one float per batch interval.
    """
    y = []
    # One pass per scheduler scheme replaces the two copy-pasted loops.
    for scheme in ('OP_NS_A', 'OG_NS_A'):
        series = []
        for batchInterval in x_axis:
            inputEvents = tthread * batchInterval
            path = getPathGS(scheme, inputEvents, tthread, NUM_ITEMS, NUM_ACCESS, key_skewness, overlap_ratio, abort_ratio, txn_length, isCyclic, complexity)
            # Fix: close the handle instead of leaking it via open().readlines();
            # only the first line ("...: <throughput>") is needed.
            with open(path) as f:
                first_line = f.readline()
            series.append(float(first_line.split(': ')[1]))
        y.append(series)
    print(y)
    return y
class SequentialPostProcessor(object):
    """Apply a sequence of post-processing callables to a DataFrame or dict.

    NOTE(review): the field annotation plus __post_init__ imply a @dataclass
    decorator that may have been lost in extraction — confirm at the
    definition site, since __post_init__ only runs under @dataclass.
    """
    # Callables applied in order by __call__; each receives the previous output.
    operations: Sequence[Callable]

    def __post_init__(self):
        # Aggregate the special tokens declared by any operation exposing them.
        special_tokens = []
        for operation in self.operations:
            if hasattr(operation, 'special_tokens'):
                special_tokens.extend(operation.special_tokens)
        self.special_tokens = special_tokens

    def __call__(self, df: Union[(pd.DataFrame, dict)]) -> Union[(pd.DataFrame, dict)]:
        """Run every operation in order, threading the result through."""
        for operation in self.operations:
            df = operation(df)
        return df
class Experiment():
    """A named experiment persisted in a Store; generates and runs trials.

    NOTE(review): store/id/name/description/trials read like @property
    accessors whose decorators may have been lost in extraction — `run` uses
    `self.id`, `task.id`, `method.id` without calls, which only yields values
    (rather than bound methods) if these are properties. Confirm upstream.
    """

    def __init__(self, store_or_uri: Union[(str, Store)], name: str=None, description: str=None, _id: int=None):
        # Default to a random short name and a description derived from it.
        if (name is None):
            name = f'#{random.randint(0, 9999)}'
        if (description is None):
            description = f'Experiment: {name}'
        # Accept either an open Store or a URI string to open one.
        if isinstance(store_or_uri, str):
            self._store = Store(store_or_uri)
        else:
            self._store = store_or_uri
        self._name = name
        self._description = description
        self._id = _id
        self._trials = []

    def store(self) -> Store:
        return self._store

    def id(self) -> int:
        return self._id

    def name(self) -> str:
        return self._name

    def description(self) -> str:
        return self._description

    def trials(self) -> List:
        return self._trials

    def run(self, trial_run_fn: FunctionType, trial_gen_fn: FunctionType, timeout: int=None, n_replicates: int=1, executor: Executor=None) -> Executor:
        """Generate trials for every stored task and submit them for execution.

        trial_gen_fn(task) yields methods — either Method instances, method
        names, or (method, meta) tuples. Each method is registered in the
        store and replicated `n_replicates` times. Returns the executor used
        (a LocalMachine by default).
        """
        # Lazily register the experiment itself on first run.
        if (self._id is None):
            (self._id, _) = self._store.get_or_create_experiment(self._name, self._description)
        trial_params = []
        for task in self._store.iter_tasks():
            generated_trials = trial_gen_fn(task)
            for generated_trial in generated_trials:
                # A generated trial is either a method or a (method, meta) pair.
                if isinstance(generated_trial, tuple):
                    method = generated_trial[0]
                    meta = generated_trial[1]
                else:
                    method = generated_trial
                    meta = {}
                if isinstance(method, str):
                    method = Method.from_name(method)
                elif isinstance(method, Method):
                    pass
                else:
                    raise TypeError(f'Cannot handle method type: {type(method)}')
                # Re-create the Method carrying its store-assigned id.
                (method_id, _) = self._store.get_or_create_method(method.name, method.description, method.version, method.params, method.env)
                method = Method(method_id, method.name, method.description, method.version, method.params, method.env)
                for replicate_num in range(n_replicates):
                    trial_param = {'experiment_id': self.id, 'task_id': task.id, 'method_id': method.id, 'replicate_num': replicate_num, 'meta': meta}
                    trial_params.append(trial_param)
                    self._trials.append(Trial(None, self, task, method, replicate_num, meta, []))
        trial_ids = self._store.create_trials(trial_params)
        # Attach the freshly persisted ids back onto the in-memory trials.
        for (_id, trial) in zip(trial_ids, self._trials):
            trial._id = _id
        if (executor is None):
            executor = LocalMachine(self._store)
        executor.submit(trial_run_fn, self._trials, timeout=timeout)
        return executor
def test_call_deps():
    """Each tuple-unpacked target should depend only on the called function."""
    def _check(func_name, symbols):
        # Every unpacked symbol depends on the callee and has no users yet.
        for sym in symbols:
            run_cell(f'assert deps({sym}) == [lift({func_name})]')
            run_cell(f'assert users({sym}) == []')
    run_cell('def f(): return 0, 1, 2, 3')
    run_cell('a, b, c, d = f()')
    _check('f', ('a', 'b', 'c', 'd'))
    run_cell('g = lambda: (0, 1, 2, 3)')
    run_cell('w, x, y, z = g()')
    _check('g', ('w', 'x', 'y', 'z'))
class ResnetTester(nn.Module):
    """Wrap a backbone with a 200-way linear head over flattened 2048x7x7 features."""

    def __init__(self, model):
        super(ResnetTester, self).__init__()
        self.layers = model
        # The head expects the backbone to emit 2048 channels at 7x7 spatial size.
        self.classifier = nn.Linear(2048 * 7 * 7, 200)

    def forward(self, input):
        feats = self.layers(input)
        flat = feats.view(feats.shape[0], -1)
        return self.classifier(flat)
def model2state_dict(file_path):
    """Extract and save the state_dict from a checkpoint with a 'model' entry.

    Checkpoints whose 'model' is None are reported and skipped.
    NOTE(review): '.pth' -> 'state_dict.pth' concatenates onto the stem
    (foo.pth -> foostate_dict.pth) — confirm the naming is intentional.
    """
    checkpoint = torch.load(file_path)
    if checkpoint['model'] is None:
        # Nothing to convert — show what was loaded and move on.
        print(type(checkpoint))
        print(checkpoint)
        print('skip')
        return
    state = checkpoint['model'].state_dict()
    torch.save(state, file_path.replace('.pth', 'state_dict.pth'))
def generate_df_pop_by_ags(df):
    """Sum population per AGS code, dropping zero-population entries.

    Expects 'ags' and 'population_total' columns; returns a frame indexed by
    integer AGS with a single 'population' column, sorted by index.
    """
    log.info('aggregate (sum) population by AGS')
    keep = ['ags', 'population_total']
    trimmed = df.drop(columns=[c for c in df.columns if c not in keep])
    trimmed = trimmed.rename(columns={'population_total': 'population'})
    by_ags = trimmed.groupby('ags').sum()
    by_ags.index = by_ags.index.astype(int)
    by_ags.sort_index(inplace=True)
    # Zero-population rows are logged before being removed.
    zero_rows = by_ags[by_ags.population == 0]
    log.info('delete these rows: %s', zero_rows)
    by_ags = by_ags[by_ags.population != 0]
    log.info('total population checksum: %s', by_ags['population'].sum())
    return by_ags
class ApplySameTransformInputKeyOnList(ApplySameTransformToKeyOnList):
    """Convenience subclass that fixes the target key to 'input'."""

    def __init__(self, transform: Module, dim: int=1):
        # Delegate with the key pre-bound; transform/dim pass straight through.
        super().__init__('input', transform=transform, dim=dim)

    def __repr__(self):
        cls_name = self.__class__.__name__
        return f'{cls_name}(transform={self._transform}, dim={self._dim})'
def get_version():
    """Query the version of the linked GLFW library.

    Returns:
        (major, minor, rev) as plain Python ints.
    """
    # Allocate one C int per component; GLFW fills them through pointers.
    major = ctypes.c_int(0)
    minor = ctypes.c_int(0)
    rev = ctypes.c_int(0)
    _glfw.glfwGetVersion(ctypes.pointer(major), ctypes.pointer(minor), ctypes.pointer(rev))
    return (major.value, minor.value, rev.value)
class Generator(object):
    """MLP generator for MNIST: z (100-d) -> x (784-d) with sigmoid output."""

    def __init__(self):
        self.z_dim = 100
        self.x_dim = 784
        self.name = 'mnist/mlp/g_net'

    def __call__(self, z):
        with tf.variable_scope(self.name) as vs:
            hidden = z
            # Three identical 512-unit batch-normed layers followed by leaky
            # ReLU; a loop issues the exact same calls as the unrolled version.
            for _ in range(3):
                hidden = tcl.fully_connected(hidden, 512, weights_initializer=tf.random_normal_initializer(stddev=0.02), weights_regularizer=tc.layers.l2_regularizer(2.5e-05), activation_fn=tcl.batch_norm)
                hidden = leaky_relu(hidden)
            # Output layer maps to pixel space with a sigmoid.
            return tc.layers.fully_connected(hidden, 784, weights_initializer=tf.random_normal_initializer(stddev=0.02), weights_regularizer=tc.layers.l2_regularizer(2.5e-05), activation_fn=tf.sigmoid)

    def vars(self):
        # All graph variables created under this generator's name scope.
        return [var for var in tf.all_variables() if (self.name in var.name)]
def sentence_tokenizer(sentences):
    """Tokenize raw text into a list of word tokens using NLTK."""
    return nltk.word_tokenize(sentences)
# NOTE(review): the line below is the residue of a parametrize decorator
# (likely '@pytest.mark.parametrize') whose prefix was lost in extraction —
# restore the '@pytest.mark' prefix before running this test.
.parametrize('dataset_class,model_class,create_submission_f,apply_model', [(T4c22Dataset, DummyArangeNN_eta, create_submission_eta_plain_torch, apply_model_plain), (T4c22GeometricDataset, DummyArangeNN_eta, create_submission_eta_torch_geometric, apply_model_geometric)])
def test_create_submission_eta_city_plain_torch(caplog, dataset_class, model_class, create_submission_f, apply_model):
    """End-to-end ETA submission test: build dummy data, score it, and check
    that the scorer's reported loss matches a locally computed L1 loss."""
    cities = ['london', 'melbourne', 'madrid']
    date = '1970-01-01'
    num_test_slots = 37
    expected_score_files = ['gogogo.score']
    loss_before_pandas = {}
    with tempfile.TemporaryDirectory() as basedir:
        basedir = Path(basedir)
        submission_names = ['gogogo']
        # Dummy competition layout plus an arange-based submission per city.
        config = _create_dummy_competition_setup_with_arange_submissions(basedir, cities, date, num_test_slots, submission_names, seed=666, dataset_class=dataset_class, model_class=model_class, create_submission_f=create_submission_f, apply_model=apply_model)
        torch.manual_seed(666)
        # Compute the reference L1 loss per city directly from the datasets.
        for (city, (test_dataset, _)) in config.items():
            (_, y_hat) = _inference_torch(ds=test_dataset, apply_model=apply_model, model=DummyArangeNN_eta(num_supersegments=len(test_dataset.torch_road_graph_mapping.supersegments)))
            df_y = load_eta_labels(basedir, city=city, split='test', df_filter=None)
            y = torch.cat([test_dataset.torch_road_graph_mapping._df_eta_to_torch(df_y[(df_y['test_idx'] == test_idx)]) for test_idx in range(num_test_slots)])
            loss_f = torch.nn.L1Loss()
            loss_before_pandas[city] = loss_f(y_hat, y)
        golden_zip = (((basedir / 'withheld') / 'golden') / 'eta_golden.zip')
        prediction_zip = ((basedir / 'submission') / 'gogogo.zip')
        caplog.set_level(logging.INFO, logger='participants-prediction')
        caplog.set_level(logging.INFO, logger='full-prediction')
        for city in cities:
            EXPECTED_NUM_ITEMS[T4c22Competitions.EXTENDED.value][city] = (num_test_slots * NUM_SUPERSEGMENTS)
        # Run the scoring entry point over the dummy submission.
        main(['-g', str(golden_zip), '-i', str(prediction_zip), '-c', 'eta'])
        print(list(basedir.rglob('**/*')))
        log_file = (prediction_zip.parent / 'gogogo-full.log')
        with open(log_file, 'r') as f:
            content = f.read()
            logging.info(content)
            print(content)
        log_file = (prediction_zip.parent / 'gogogo.log')
        with open(log_file, 'r') as f:
            content = f.read()
            logging.info(content)
            # The scorer writes this phrase only on internal errors.
            assert ('contact us for details' not in content), content
        # Per-city losses in the JSON score must match the local reference.
        with (prediction_zip.parent / 'gogogo.score.json').open() as f:
            content = json.load(f)
            print(content)
            for city in cities:
                assert np.isclose(loss_before_pandas[city], content[city]['all']['loss'])
        # The aggregate score file must hold the mean of the per-city losses.
        for file_name in expected_score_files:
            score_file = (prediction_zip.parent / file_name)
            assert os.path.exists(score_file), str(score_file)
            with open(score_file, 'r') as f:
                content = f.read()
                logging.info(content)
                assert np.isclose(float(content), np.mean([loss_before_pandas[city] for city in cities])), content
class MetricGraphPrinter(AbstractBaseLogger):
    """Logger that mirrors one metric from log() kwargs into a summary writer."""

    def __init__(self, writer, key='train_loss', graph_name='Train Loss', group_name='metric'):
        self.key = key
        self.graph_label = graph_name
        self.group_name = group_name
        self.writer = writer

    def log(self, *args, **kwargs):
        # A missing metric is charted as 0 so the series stays continuous.
        value = kwargs.get(self.key, 0)
        tag = self.group_name + '/' + self.graph_label
        self.writer.add_scalar(tag, value, kwargs['accum_iter'])

    def complete(self, *args, **kwargs):
        self.writer.close()
def iou_boxes_polygons(boxes, polygons, w=0, h=0, xywh=True, ioubp=False):
    """IoU between a set of boxes and a set of polygons via rasterized masks.

    If w/h are not both given, a canvas is derived from the joint bounding
    region. With ioubp=True, also returns intersection-over-box and
    intersection-over-polygon as a 3-tuple.
    """
    if w * h == 0:
        # Size the canvas to cover both the boxes and the polygons.
        p_boxes = [polygon_to_box(p) for p in polygons]
        region = boxes_region(p_boxes + list(boxes))
        w = int(region[2] + 1)
        h = int(region[3] + 1)
    p_mask = polygons_to_mask(polygons, w, h)
    b_mask = boxes_to_mask(boxes, w, h, xywh)
    inter = np.sum(np.logical_and(p_mask, b_mask) > 0, axis=None)
    union = np.sum((p_mask + b_mask) > 0, axis=None)
    if not ioubp:
        return 0 if inter == 0 else (inter * 1.0) / union
    if inter == 0:
        return (0, 0, 0)
    b_area = np.sum(b_mask > 0, axis=None)
    p_area = np.sum(p_mask > 0, axis=None)
    return ((inter * 1.0) / union, (inter * 1.0) / b_area, (inter * 1.0) / p_area)
def _find_rocsolver_config(rocm_install_path):
    """Locate the rocSOLVER version header under a ROCm install.

    Returns:
        dict with 'rocsolver_version_number' composed from major/minor/patch.

    Raises:
        ConfigError: if no known version header exists under the install path.
    """
    # Header locations differ between ROCm layouts; try both.
    possible_version_files = ['include/rocsolver/rocsolver-version.h', 'rocsolver/include/rocsolver-version.h']
    version_file = None
    for rel_path in possible_version_files:
        candidate = os.path.join(rocm_install_path, rel_path)
        if os.path.exists(candidate):
            version_file = candidate
            break
    if not version_file:
        raise ConfigError('rocsolver version file not found in {}'.format(possible_version_files))
    major = _get_header_version(version_file, 'ROCSOLVER_VERSION_MAJOR')
    minor = _get_header_version(version_file, 'ROCSOLVER_VERSION_MINOR')
    patch = _get_header_version(version_file, 'ROCSOLVER_VERSION_PATCH')
    return {'rocsolver_version_number': _get_composite_version_number(major, minor, patch)}
class P1203Pv(object):
_COEFFS = {'u1': 72.61, 'u2': 0.32, 't1': 30.98, 't2': 1.29, 't3': 64.65, 'q1': 4.66, 'q2': (- 0.07), 'q3': 4.06, 'mode0': {'a1': 11.9983519, 'a2': (- 2.), 'a3': 41., 'a4': 0.}, 'mode1': {'a1': 5., 'a2': (- 1.), 'a3': 41.3585049, 'a4': 0, 'c0': (- 0.), 'c1': 0, 'c2': (- 3.), 'c3': 20.4098663}, 'htv_1': (- 0.60293), 'htv_2': 2.12382, 'htv_3': (- 0.36936), 'htv_4': 0.03409}
def degradation_due_to_upscaling(self, coding_res, display_res):
    """Quality degradation (0..100) caused by upscaling to the display size."""
    # Downscaling (factor < 1) causes no degradation; clamp at 1.
    ratio = max(display_res / coding_res, 1)
    u1 = self.coeffs['u1']
    u2 = self.coeffs['u2']
    penalty = u1 * np.log10(u2 * (ratio - 1.0) + 1.0)
    return utils.constrain(penalty, 0.0, 100.0)
def degradation_due_to_frame_rate_reduction(self, deg_cod_v, deg_scal_v, framerate):
    """Extra degradation (0..100) applied for frame rates below 24 fps."""
    t1 = self.coeffs['t1']
    t2 = self.coeffs['t2']
    t3 = self.coeffs['t3']
    penalty = 0
    if framerate < 24:
        # Remaining quality headroom scaled by a rational function of fps.
        headroom = (100 - deg_cod_v) - deg_scal_v
        penalty = headroom * (t1 - (t2 * framerate)) / (t3 + framerate)
        penalty = utils.constrain(penalty, 0.0, 100.0)
    return penalty
def degradation_integration(self, mos_cod_v, deg_cod_v, deg_scal_v, deg_frame_rate_v):
    """Combine degradation components and map the result back to a MOS.

    Note: mos_cod_v is accepted for interface compatibility but unused here.
    """
    total_deg = utils.constrain(deg_cod_v + deg_scal_v + deg_frame_rate_v, 0.0, 100.0)
    return utils.mos_from_r(100 - total_deg)
_cache()
def video_model_function_mode0(self, coding_res, display_res, bitrate_kbps_segment_size, framerate):
    """Mode-0 video quality: estimate the score from bitrate metadata only.

    A quantization proxy is derived from bitrate, coding resolution and
    framerate, mapped to a coding MOS, then upscaling and framerate
    degradations are applied. Returns the final per-segment score.
    """
    a1 = self.coeffs['mode0']['a1']
    a2 = self.coeffs['mode0']['a2']
    a3 = self.coeffs['mode0']['a3']
    a4 = self.coeffs['mode0']['a4']
    q1 = self.coeffs['q1']
    q2 = self.coeffs['q2']
    q3 = self.coeffs['q3']
    # Quantization proxy from bits-per-pixel-per-frame statistics.
    quant = (a1 + (a2 * np.log(((a3 + np.log(bitrate_kbps_segment_size)) + np.log((((bitrate_kbps_segment_size * bitrate_kbps_segment_size) / (coding_res * framerate)) + a4))))))
    mos_cod_v = (q1 + (q2 * np.exp((q3 * quant))))
    mos_cod_v = utils.constrain(mos_cod_v, 1.0, 5.0)
    # Convert the coding MOS to an R-scale degradation, clamped to [0, 100].
    deg_cod_v = (100.0 - utils.r_from_mos(mos_cod_v))
    deg_cod_v = utils.constrain(deg_cod_v, 0.0, 100.0)
    deg_scal_v = self.degradation_due_to_upscaling(coding_res, display_res)
    deg_frame_rate_v = self.degradation_due_to_frame_rate_reduction(deg_cod_v, deg_scal_v, framerate)
    score = self.degradation_integration(mos_cod_v, deg_cod_v, deg_scal_v, deg_frame_rate_v)
    logger.debug(json.dumps({'coding_res': round(coding_res, 2), 'display_res': round(display_res, 2), 'bitrate_kbps_segment_size': round(bitrate_kbps_segment_size, 2), 'framerate': round(framerate, 2), 'mos_cod_v': round(mos_cod_v, 2), 'deg_cod_v': round(deg_cod_v, 2), 'deg_scal_v': round(deg_scal_v, 2), 'deg_frame_rate_v': round(deg_frame_rate_v, 2), 'score': round(score, 2)}, indent=True))
    return score
def video_model_function_mode1(self, coding_res, display_res, bitrate_kbps_segment_size, framerate, frames, iframe_ratio=None):
    """Mode-1 video quality: mode-0 estimate plus a frame-size complexity term.

    The I-frame to non-I-frame mean-size ratio (computed from `frames` unless
    supplied) feeds a sigmoid that adjusts the coding MOS before the usual
    upscaling/framerate degradations are applied.
    """
    a1 = self.coeffs['mode1']['a1']
    a2 = self.coeffs['mode1']['a2']
    a3 = self.coeffs['mode1']['a3']
    a4 = self.coeffs['mode1']['a4']
    q1 = self.coeffs['q1']
    q2 = self.coeffs['q2']
    q3 = self.coeffs['q3']
    # Same bitrate-based quantization proxy as mode 0, with mode-1 coefficients.
    quant = (a1 + (a2 * np.log(((a3 + np.log(bitrate_kbps_segment_size)) + np.log((((bitrate_kbps_segment_size * bitrate_kbps_segment_size) / (coding_res * framerate)) + a4))))))
    mos_cod_v = (q1 + (q2 * np.exp((q3 * quant))))
    mos_cod_v = utils.constrain(mos_cod_v, 1.0, 5.0)
    c0 = self.coeffs['mode1']['c0']
    c1 = self.coeffs['mode1']['c1']
    c2 = self.coeffs['mode1']['c2']
    c3 = self.coeffs['mode1']['c3']
    if (not iframe_ratio):
        # Derive the I/non-I mean frame-size ratio from per-frame metadata.
        i_sizes = []
        noni_sizes = []
        for frame in frames:
            frame_size = utils.calculate_compensated_size(frame['type'], frame['size'], frame['dts'])
            if (frame['type'] == 'I'):
                i_sizes.append(int(frame_size))
            else:
                noni_sizes.append(int(frame_size))
        if (i_sizes and noni_sizes):
            iframe_ratio = (np.mean(i_sizes) / np.mean(noni_sizes))
        else:
            # Either frame class missing: no ratio can be formed.
            iframe_ratio = 0
    # Sigmoid-shaped complexity adjustment added to the coding MOS.
    complexity = utils.sigmoid(c0, c1, c2, c3, iframe_ratio)
    mos_cod_v += complexity
    deg_cod_v = (100.0 - utils.r_from_mos(mos_cod_v))
    deg_cod_v = utils.constrain(deg_cod_v, 0.0, 100.0)
    deg_scal_v = self.degradation_due_to_upscaling(coding_res, display_res)
    deg_frame_rate_v = self.degradation_due_to_frame_rate_reduction(deg_cod_v, deg_scal_v, framerate)
    score = self.degradation_integration(mos_cod_v, deg_cod_v, deg_scal_v, deg_frame_rate_v)
    logger.debug(json.dumps({'coding_res': round(coding_res, 2), 'display_res': round(display_res, 2), 'bitrate_kbps_segment_size': round(bitrate_kbps_segment_size, 2), 'framerate': round(framerate, 2), 'mos_cod_v': round(mos_cod_v, 2), 'deg_cod_v': round(deg_cod_v, 2), 'iframe_ratio': round(iframe_ratio, 2), 'complexity': round(complexity, 2), 'deg_scal_v': round(deg_scal_v, 2), 'deg_frame_rate_v': round(deg_frame_rate_v, 2), 'score': round(score, 2)}, indent=True))
    return score
def video_model_function_mode2(self, coding_res, display_res, framerate, frames, quant=None, avg_qp_per_noni_frame=[]):
    """Mode-2 video quality: score from actual QP values of non-I frames.

    `quant` may be given directly; otherwise it is the mean QP over P/B/Non-I
    frames (from `frames` or `avg_qp_per_noni_frame`) normalised by 51.
    NOTE(review): the mutable default [] is never mutated here, but sharing a
    default list across calls is fragile — consider None.
    """
    if (not quant):
        if (not avg_qp_per_noni_frame):
            # Collect QP lists and frame types from per-frame metadata.
            types = []
            qp_values = []
            for frame in frames:
                qp_values.append(frame['qpValues'])
                frame_type = frame['type']
                if (frame_type not in ['I', 'P', 'B', 'Non-I']):
                    raise P1203StandaloneError((('frame type ' + str(frame_type)) + ' not valid; must be I/P/B or I/Non-I'))
                types.append(frame_type)
            # Average QP over non-I frames only.
            qppb = []
            for (index, frame_type) in enumerate(types):
                if (frame_type in ['P', 'B', 'Non-I']):
                    qppb.extend(qp_values[index])
            avg_qp = np.mean(qppb)
        else:
            avg_qp = np.mean(avg_qp_per_noni_frame)
        # Normalise by the maximum H.264/H.265 QP of 51.
        quant = (avg_qp / 51.0)
    q1 = self.coeffs['q1']
    q2 = self.coeffs['q2']
    q3 = self.coeffs['q3']
    mos_cod_v = (q1 + (q2 * math.exp((q3 * quant))))
    mos_cod_v = max(min(mos_cod_v, 5), 1)
    deg_cod_v = (100 - utils.r_from_mos(mos_cod_v))
    deg_cod_v = max(min(deg_cod_v, 100), 0)
    deg_scal_v = self.degradation_due_to_upscaling(coding_res, display_res)
    deg_frame_rate_v = self.degradation_due_to_frame_rate_reduction(deg_cod_v, deg_scal_v, framerate)
    score = self.degradation_integration(mos_cod_v, deg_cod_v, deg_scal_v, deg_frame_rate_v)
    logger.debug(json.dumps({'coding_res': round(coding_res, 2), 'display_res': round(display_res, 2), 'framerate': round(framerate, 2), 'quant': round(quant, 2), 'mos_cod_v': round(mos_cod_v, 2), 'deg_cod_v': round(deg_cod_v, 2), 'deg_scal_v': round(deg_scal_v, 2), 'deg_frame_rate_v': round(deg_frame_rate_v, 2), 'score': round(score, 2)}, indent=True))
    return score
def video_model_function_mode3(self, coding_res, display_res, framerate, frames, quant=None, avg_qp_per_noni_frame=[None]):
    """Mode 3 video quality: like mode 2, but with full bitstream access,
    so QPs of the frames directly preceding an I frame are corrected.

    Arguments:
        coding_res:  coded resolution in pixels (width * height)
        display_res: display resolution in pixels
        framerate:   frames per second of the chunk
        frames:      frame dicts with 'type' and 'qpValues'; only used when
                     neither quant nor avg_qp_per_noni_frame is supplied
        quant:       precomputed normalized quantization value; when truthy
                     it overrides the QP-based derivation
        avg_qp_per_noni_frame: average QPs of the non-I frames, if already
                     computed by the caller

    Returns the integrated video quality score.

    Raises P1203StandaloneError for an unknown frame type.
    """
    # BUGFIX: the default used to be the mutable literal [], which is shared
    # across every call of the method; use a fresh sentinel default and
    # normalize it to an empty list (falsy handling below is unchanged).
    if avg_qp_per_noni_frame == [None] or avg_qp_per_noni_frame is None:
        avg_qp_per_noni_frame = []
    # NOTE(review): 'not quant' also treats quant == 0.0 as "not given" —
    # confirm a zero quantizer can never be a legitimate input here.
    if (not quant):
        if (not avg_qp_per_noni_frame):
            types = []
            qp_values = []
            for frame in frames:
                qp_values.append(frame['qpValues'])
                frame_type = frame['type']
                if (frame_type not in ['I', 'P', 'B', 'Non-I']):
                    raise P1203StandaloneError((('frame type ' + str(frame_type)) + ' not valid; must be I/P/B or I/Non-I'))
                types.append(frame_type)
            qppb = []
            for (index, frame_type) in enumerate(types):
                if (frame_type in ['P', 'B', 'Non-I']):
                    qppb.extend(qp_values[index])
                elif ((frame_type == 'I') and (len(qppb) > 0)):
                    # An I frame resets the GOP: replace the QP just before it
                    # with its predecessor (or drop it if it stood alone).
                    if (len(qppb) > 1):
                        qppb[(- 1)] = qppb[(- 2)]
                    else:
                        qppb = []
            avg_qp = np.mean(qppb)
        else:
            avg_qp = np.mean(avg_qp_per_noni_frame)
        # Normalize against the H.264 QP maximum (51).
        quant = (avg_qp / 51.0)
    q1 = self.coeffs['q1']
    q2 = self.coeffs['q2']
    q3 = self.coeffs['q3']
    # Core coding quality, clamped to the MOS range [1, 5].
    mos_cod_v = (q1 + (q2 * math.exp((q3 * quant))))
    mos_cod_v = max(min(mos_cod_v, 5), 1)
    deg_cod_v = (100 - utils.r_from_mos(mos_cod_v))
    deg_cod_v = max(min(deg_cod_v, 100), 0)
    # Additional degradations from upscaling and reduced frame rate.
    deg_scal_v = self.degradation_due_to_upscaling(coding_res, display_res)
    deg_frame_rate_v = self.degradation_due_to_frame_rate_reduction(deg_cod_v, deg_scal_v, framerate)
    score = self.degradation_integration(mos_cod_v, deg_cod_v, deg_scal_v, deg_frame_rate_v)
    logger.debug(json.dumps({'coding_res': round(coding_res, 2), 'display_res': round(display_res, 2), 'framerate': round(framerate, 2), 'quant': round(quant, 2), 'mos_cod_v': round(mos_cod_v, 2), 'deg_cod_v': round(deg_cod_v, 2), 'deg_scal_v': round(deg_scal_v, 2), 'deg_frame_rate_v': round(deg_frame_rate_v, 2), 'score': round(score, 2)}, indent=True))
    return score
def handheld_adjustment(self, score):
    """Apply the cubic handheld/mobile viewing correction, clamped to [1, 5]."""
    c1, c2, c3, c4 = (self.coeffs[k] for k in ('htv_1', 'htv_2', 'htv_3', 'htv_4'))
    adjusted = c1 + (c2 * score) + (c3 * (score ** 2)) + (c4 * (score ** 3))
    return max(min(adjusted, 5), 1)
def model_callback(self, output_sample_timestamp, frames):
    """Compute one O22 score for the output second at *output_sample_timestamp*
    and append it to self.o22.

    Invoked by the MeasurementWindow; dispatches to the mode-specific
    video model function and applies the handheld correction if needed.
    """
    logger.debug(('Output score at timestamp ' + str(output_sample_timestamp)))
    # Index of the last frame whose decode timestamp precedes the output time.
    output_sample_index = [i for (i, f) in enumerate(frames) if (f['dts'] < output_sample_timestamp)][(- 1)]
    if (self.mode == 0):
        # With 'representation' metadata the bitrate is averaged over the
        # chunk; otherwise only the single frame at the index is scored.
        if any((('representation' in f) for f in frames)):
            frames = utils.get_chunk(frames, output_sample_index, type='video')
            first_frame = frames[0]
            bitrate = np.mean([f['bitrate'] for f in frames])
            display_res = (first_frame.get('displaySize') or self.display_res)
            score = self.video_model_function_mode0(utils.resolution_to_number(first_frame['resolution']), utils.resolution_to_number(display_res), bitrate, first_frame['fps'])
        else:
            score = self.video_model_function_mode0(utils.resolution_to_number(frames[output_sample_index]['resolution']), utils.resolution_to_number((frames[output_sample_index].get('displaySize') or self.display_res)), frames[output_sample_index]['bitrate'], frames[output_sample_index]['fps'])
    else:
        frames = utils.get_chunk(frames, output_sample_index, type='video')
        first_frame = frames[0]
        display_res = (first_frame.get('displaySize') or self.display_res)
        if (self.mode == 1):
            # Mode 1 reconstructs the chunk bitrate in kbit/s from per-frame
            # sizes, compensated by frame type.
            compensated_sizes = [utils.calculate_compensated_size(f['type'], f['size'], f['dts']) for f in frames]
            duration = np.sum([f['duration'] for f in frames])
            bitrate = (((np.sum(compensated_sizes) * 8) / duration) / 1000)
            score = self.video_model_function_mode1(utils.resolution_to_number(first_frame['resolution']), utils.resolution_to_number(display_res), bitrate, first_frame['fps'], frames)
        elif (self.mode == 2):
            score = self.video_model_function_mode2(utils.resolution_to_number(first_frame['resolution']), utils.resolution_to_number(display_res), first_frame['fps'], frames)
        elif (self.mode == 3):
            score = self.video_model_function_mode3(utils.resolution_to_number(first_frame['resolution']), utils.resolution_to_number(display_res), first_frame['fps'], frames)
        else:
            raise P1203StandaloneError('Unsupported mode: {}'.format(self.mode))
    if (self.device in ['mobile', 'handheld']):
        score = self.handheld_adjustment(score)
    self.o22.append(score)
def check_codec(self):
    """Verify all segments use h264; raise P1203StandaloneError otherwise."""
    for codec in {segment['codec'] for segment in self.segments}:
        if codec != 'h264':
            raise P1203StandaloneError('Unsupported codec: {}'.format(codec))
def _calculate_with_measurementwindow(self):
    """Frame-accurate scoring path.

    Detects the model mode from the input, expands every segment into
    per-frame records, and streams them through a MeasurementWindow so
    model_callback() emits one O22 score per output second.

    Mode detection: a segment without 'frames' forces mode 0 and stops the
    scan; otherwise the first frame of each segment sets mode 3 (has
    'qpValues') or mode 1.
    """
    measurementwindow = MeasurementWindow()
    measurementwindow.set_score_callback(self.model_callback)
    self.mode = 0
    for segment in self.segments:
        if ('frames' not in segment.keys()):
            self.mode = 0
            break
        if ('frames' in segment):
            for frame in segment['frames']:
                if (('frameType' not in frame.keys()) or ('frameSize' not in frame.keys())):
                    raise P1203StandaloneError("Frame definition must have at least 'frameType' and 'frameSize'")
                if ('qpValues' in frame.keys()):
                    self.mode = 3
                else:
                    self.mode = 1
                # Only the first frame of each segment is inspected.
                break
    logger.debug(('Evaluating stream in mode ' + str(self.mode)))
    self.check_codec()
    if (self.mode == 0):
        dts = 0
        for segment in self.segments:
            # Cap FPS at 120 to keep the model in its validated range.
            segment_fps = min(segment['fps'], 120)
            if (segment_fps != segment['fps']):
                logger.warning('FPS of segment is higher than 120, capping to prevent incorrect results')
            num_frames = int((segment['duration'] * segment_fps))
            frame_duration = (1.0 / segment_fps)
            # Synthesize uniform frames from segment-level metadata.
            for i in range(int(num_frames)):
                frame = {'duration': frame_duration, 'dts': dts, 'bitrate': segment['bitrate'], 'codec': segment['codec'], 'fps': segment_fps, 'resolution': segment['resolution']}
                if ('displaySize' in segment.keys()):
                    frame['displaySize'] = segment['displaySize']
                if ('representation' in segment.keys()):
                    frame.update({'representation': segment['representation']})
                measurementwindow.add_frame(frame)
                dts += frame_duration
        measurementwindow.stream_finished()
    else:
        dts = 0
        for (segment_index, segment) in enumerate(self.segments):
            # Sanity check: declared frame count vs duration * fps.
            num_frames_assumed = int((segment['duration'] * segment['fps']))
            num_frames = len(segment['frames'])
            if (num_frames != num_frames_assumed):
                logger.warning(((('Segment specifies ' + str(num_frames)) + ' frames but based on calculations, there should be ') + str(num_frames_assumed)))
            frame_duration = (1.0 / segment['fps'])
            for i in range(int(num_frames)):
                frame = {'duration': frame_duration, 'dts': dts, 'bitrate': segment['bitrate'], 'codec': segment['codec'], 'fps': segment['fps'], 'resolution': segment['resolution'], 'size': segment['frames'][i]['frameSize'], 'type': segment['frames'][i]['frameType']}
                if ('displaySize' in segment.keys()):
                    frame['displaySize'] = segment['displaySize']
                if ('representation' in segment.keys()):
                    frame.update({'representation': segment['representation']})
                if (self.mode == 3):
                    # Mode 3 additionally requires per-frame QP values.
                    qp_values = segment['frames'][i]['qpValues']
                    if (not qp_values):
                        raise P1203StandaloneError('No QP values for frame {i} of segment {segment_index}'.format(**locals()))
                    frame['qpValues'] = qp_values
                measurementwindow.add_frame(frame)
                dts += frame_duration
        measurementwindow.stream_finished()
def _calculate_fast_mode(self):
    """Produce one O22 value per whole second of each segment directly from
    segment-level metadata (works for mode 0 only)."""
    if ((self.mode is not None) and (self.mode != 0)):
        raise P1203StandaloneError(f'Fast mode only works with mode 0, but it is set to {self.mode}')
    self.mode = 0
    for segment in self.segments:
        segment_score = self.video_model_function_mode0(
            utils.resolution_to_number(segment['resolution']),
            utils.resolution_to_number(segment.get('displaySize', self.display_res)),
            segment['bitrate'],
            segment['fps'],
        )
        # Replicate the score once per full second of segment duration.
        self.o22 += [segment_score] * math.floor(segment['duration'])
def calculate(self, fast_mode=False):
    """Run the video model over all segments and return the O22 result dict."""
    utils.check_segment_continuity(self.segments, 'video')
    if not fast_mode:
        self._calculate_with_measurementwindow()
    else:
        logger.warning('Using fast mode of the model, results may not be accurate to the second')
        self._calculate_fast_mode()
    return {'video': {'streamId': self.stream_id, 'mode': self.mode, 'O22': self.o22}}
def __init__(self, segments, display_res='1920x1080', device='pc', stream_id=None, coeffs=None):
    """Initialize the video model.

    Arguments:
        segments:    list of video segment dicts to score
        display_res: display resolution string 'WxH'
        device:      playback device ('pc' or 'mobile'/'handheld')
        stream_id:   opaque identifier echoed in the result dict
        coeffs:      optional coefficient overrides merged over _COEFFS
    """
    # BUGFIX: the default used to be the mutable literal {} — shared across
    # every instantiation; None with an explicit fallback is equivalent and safe.
    if coeffs is None:
        coeffs = {}
    self.segments = segments
    self.display_res = display_res
    self.device = device
    self.stream_id = stream_id
    self.o22 = []       # per-second O22 scores, filled by calculate()
    self.mode = None    # model mode (0/1/2/3), detected from the input
    self.coeffs = {**self._COEFFS, **coeffs}
def available_classes(cls: type[Registry]) -> list[str]:
    """Return the names of every class registered on *cls*."""
    registered = cls.available_classes()
    return [name for name in registered.keys()]
def postprocess_args(args):
    """Derive dataset/feature/annotation paths from args.root_dir, create the
    output directories, and drop arguments that the selected VLN-BERT
    variant does not consume.  Returns the mutated args namespace."""
    root = args.root_dir
    if args.dataset == 'touchdown':
        ft_file_map = {'resnet18': 'resnet18_view_fts.hdf5', 'vit_clip': 'vit_clip_view_fts.hdf5'}
        args.img_ft_file = os.path.join(root, 'Touchdown', 'features', ft_file_map[args.features])
        args.anno_dir = os.path.join(root, 'Touchdown', 'annotations', 'touchdown', 'data')
        args.graph_dir = os.path.join(root, 'Touchdown', 'annotations', 'touchdown', 'graph')
    else:
        ft_file_map = {'imagenet': 'pth_resnet152_imagenet.hdf5', 'imagenet_caffe': 'caffe_resnet152_imagenet.hdf5', 'places365': 'caffe_resnet152_places365.hdf5', 'vitbase': 'pth_vit_base_patch16_224_imagenet.hdf5', 'vitbase_r2rfte2e': 'pth_vit_base_patch16_224_imagenet_r2r.e2e.ft.22k.hdf5', 'vitbase_r2rfte2e.aug': 'pth_vit_base_patch16_224_imagenet_r2r.e2e.dataaug.20k.hdf5', 'vitbase_in21k': 'pth_vit_base_patch16_224_in21k_imagenet.hdf5', 'vitbase_clip': 'pth_vit_base_patch32_224_clip.hdf5', 'vitbase_r2r.e2e.singlestage': 'pth_vit_base_patch16_224_imagenet_r2r.e2e.pt.25k.hdf5', 'vitbase_r2r.e2e.noobs': 'pth_vit_base_patch16_224_imagenet_r2r.e2e.noobs.20k.hdf5', 'vitbase_r2r.e2e.nosprel': 'pth_vit_base_patch16_224_imagenet_r2r.e2e.nosprel.19k.hdf5'}
        args.img_ft_file = os.path.join(root, 'R2R', 'features', ft_file_map[args.features])
        args.connectivity_dir = os.path.join(root, 'R2R', 'connectivity')
        args.scan_data_dir = os.path.join(root, 'Matterport3D', 'v1_unzip_scans')
        anno_root = ('RxR' if args.dataset == 'rxr' else 'R2R')
        args.anno_dir = os.path.join(root, anno_root, 'annotations')
    args.ckpt_dir = os.path.join(args.output_dir, 'ckpts')
    args.log_dir = os.path.join(args.output_dir, 'logs')
    args.pred_dir = os.path.join(args.output_dir, 'preds')
    for directory in (args.output_dir, args.ckpt_dir, args.log_dir, args.pred_dir):
        os.makedirs(directory, exist_ok=True)
    # Remove options that the chosen VLN-BERT variant never reads.
    unused_options = {'cmt': ('prefix_causal_attn',), 'mmt': ('no_lang_ca', 'act_pred_token'), 'causal.cmt': ('prefix_causal_attn', 'no_lang_ca', 'act_pred_token')}
    for option in unused_options.get(args.vlnbert, ()):
        delattr(args, option)
    return args
def get_current_user_path(path_in):
    """Rebase an absolute path onto the current user's home directory.

    The first three '/'-separated components of *path_in* (e.g.
    '/home/<user>') are dropped and replaced with the running user's home
    directory.  The empty string is returned unchanged.
    """
    if path_in == '':
        return ''
    from os.path import expanduser
    components = path_in.split('/')
    suffix = '/'.join(components[3:])
    return str(expanduser('~') + '/' + suffix)
def nano(num_processes):
    """Decorator factory: wrap a training function in a
    _Nano_Customized_Training runner using *num_processes* workers."""
    def _decorate(train_func):
        return _Nano_Customized_Training(train_func, num_processes)
    return _decorate
class NezhaForQuestionAnswering(metaclass=DummyObject):
    """Placeholder emitted when torch is not installed: any attempt to
    instantiate it goes through requires_backends, which raises an
    informative import error instead of a bare NameError."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def dbscan_with_masked_image(image, eps=0.5, min_samples=3):
    """Cluster the nonzero pixels of a grayscale image with DBSCAN.

    Arguments:
        image: 2-D array; pixels > 0 are treated as mask points
        eps, min_samples: DBSCAN parameters forwarded to dbscan_with_points

    Returns the class dict produced by dbscan_with_points, or an empty dict
    when the mask contains no points.

    Raises ValueError when *image* is not 2-D.
    """
    if (len(image.shape) != 2):
        raise ValueError('Input image must be grayscale!')
    rows, cols = np.where(image > 0)
    # BUGFIX: the old guard was `len(np.where(...)) > 0`, which is always
    # true (np.where returns a 2-tuple for 2-D input); test the actual
    # number of mask points instead.
    if rows.size == 0:
        return dict()
    # Points are (x, y) = (col, row) pairs.
    X = np.column_stack((cols, rows))
    return dbscan_with_points(X, eps, min_samples)
def main(opt):
    """Evaluate a saved few-shot model on the test split.

    Loads the model from opt['model.model_path'], restores the training
    options stored in opt.json next to the checkpoint, merges in any
    episode overrides from *opt*, and prints mean +/- 95% CI per field.
    """
    model = torch.load(opt['model.model_path'])
    model.eval()
    # Training-time options live in opt.json beside the checkpoint.
    model_opt_file = os.path.join(os.path.dirname(opt['model.model_path']), 'opt.json')
    with open(model_opt_file, 'r') as f:
        model_opt = json.load(f)
    # NOTE(review): on Python 3 this stores a lazy map object that can only
    # be consumed once — confirm downstream consumers expect that (a list
    # may have been intended).
    model_opt['model.x_dim'] = map(int, model_opt['model.x_dim'].split(','))
    model_opt['log.fields'] = model_opt['log.fields'].split(',')
    data_opt = {('data.' + k): v for (k, v) in filter_opt(model_opt, 'data').items()}
    episode_fields = {'data.test_way': 'data.way', 'data.test_shot': 'data.shot', 'data.test_query': 'data.query', 'data.test_episodes': 'data.train_episodes'}
    # Precedence per field: explicit override > saved test value > saved train value.
    for (k, v) in episode_fields.items():
        if (opt[k] != 0):
            data_opt[k] = opt[k]
        elif (model_opt[k] != 0):
            data_opt[k] = model_opt[k]
        else:
            data_opt[k] = model_opt[v]
    print('Evaluating {:d}-way, {:d}-shot with {:d} query examples/class over {:d} episodes'.format(data_opt['data.test_way'], data_opt['data.test_shot'], data_opt['data.test_query'], data_opt['data.test_episodes']))
    # Fixed seed for reproducible episode sampling.
    torch.manual_seed(1234)
    if data_opt['data.cuda']:
        torch.cuda.manual_seed(1234)
    data = data_utils.load(data_opt, ['test'])
    if data_opt['data.cuda']:
        model.cuda()
    meters = {field: tnt.meter.AverageValueMeter() for field in model_opt['log.fields']}
    model_utils.evaluate(model, data['test'], meters, desc='test')
    for (field, meter) in meters.items():
        (mean, std) = meter.value()
        # 1.96 * std / sqrt(n): half-width of a 95% confidence interval.
        print('test {:s}: {:0.6f} +/- {:0.6f}'.format(field, mean, ((1.96 * std) / math.sqrt(data_opt['data.test_episodes']))))
def test_run_with_ignore_embedded_text():
    """End-to-end QA over a document loaded without its embedded text layer."""
    example = EXAMPLES[2]
    document = load_document(example.path, use_embedded_text=False)
    pipe = pipeline('document-question-answering', model=CHECKPOINTS['LayoutLMv1'])
    answer_key = 'LayoutLMv1__use_embedded_text=False'
    for pair in example.qa_pairs:
        prediction = pipe(question=pair.question, **document.context, top_k=1)
        assert (nested_simplify(prediction, decimals=4) == pair.answers[answer_key])
def test_env_instantiation():
    """Constructing an ArgumentEnv stores the argument and counts one call."""
    env = ArgumentEnv('arg')
    assert env.arg == 'arg', 'constructor argument was not stored'
    assert env.calls == 1, 'constructor should have been invoked exactly once'
# NOTE(review): this bare tuple is a no-op — the version comparison is
# evaluated and immediately discarded.  It looks like a mangled `assert`
# statement (or a stripped `unittest.skipIf(...)` decorator); restore the
# original construct so the PyTorch >= 2.1.0 requirement is actually
# enforced.  (The message also misspells "PyTorch".)
((PT_VERSION.release < Version('2.1.0').release), 'Please use PyTroch 2.1.0 or higher version for executor backend')
class TestLLMQuantization(unittest.TestCase):
    """SmoothQuant smoke tests: quantizing each chat model must produce a
    TorchScript backbone (torch.jit.ScriptModule)."""
    def _check_smoothquant_jit(self, model_name, **sq_kwargs):
        # Shared helper — the three tests previously duplicated this body
        # and only differed in model name and extra SmoothQuantConfig args.
        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        sq_config = SmoothQuantConfig(calib_iters=3, calib_len=5, tokenizer=tokenizer, **sq_kwargs)
        model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=sq_config, trust_remote_code=True, use_llm_runtime=False)
        self.assertTrue(isinstance(model.model, torch.jit.ScriptModule))
    def test_qwen(self):
        self._check_smoothquant_jit('Qwen/Qwen-7B-Chat', excluded_precisions=['bf16'])
    def test_chatglm2(self):
        self._check_smoothquant_jit('THUDM/chatglm2-6b')
    def test_chatglm3(self):
        self._check_smoothquant_jit('THUDM/chatglm3-6b')
def linear_rampup(current, rampup_length=16):
    """Linearly ramp from 0.0 to 1.0 over *rampup_length* steps.

    A rampup_length of 0 disables the ramp (always 1.0); the result is
    clamped to [0.0, 1.0] outside the ramp interval.
    """
    if rampup_length == 0:
        return 1.0
    fraction = current / rampup_length
    return float(min(max(fraction, 0.0), 1.0))
def create_meter_display(group_dict, ignore_start_with='_'):
    """Render a (possibly nested) meter dict as a display string, after
    removing every key that starts with *ignore_start_with*.

    Mutates *group_dict* in place via prune_dict.
    """
    def prune_dict(dictionary: dict, ignore='_'):
        # Recursively delete keys starting with *ignore*; iterate over a
        # copy because entries are deleted while walking.
        for (k, v) in dictionary.copy().items():
            if isinstance(v, dict):
                prune_dict(v, ignore)
            elif k.startswith(ignore):
                del dictionary[k]
    def prune_nan(dictionary: dict, father_dictionary: dict=None):
        # NOTE(review): dead code — never called below.  Also the trailing
        # 'del father_dictionary' only unbinds the local name; it does not
        # remove anything from the parent dict as seems intended.
        for (k, v) in dictionary.copy().items():
            if isinstance(v, dict):
                prune_nan(v, dictionary)
            elif math.isnan(v):
                del dictionary[k]
                if (father_dictionary is not None):
                    if (len(father_dictionary) == 1):
                        del father_dictionary
    prune_dict(group_dict, ignore_start_with)
    display = str(item2str(group_dict))
    return display
class TestTensorParallelOptimization(unittest.TestCase):
    """Smoke test for the tensor-parallel optimization path."""
    def tearDown(self):
        # Always destroy the parallel process group so that group state
        # cannot leak into subsequent tests.
        destroy_parallel_group()
        return super().tearDown()
    def test_tensor_parallel_optimization(self):
        # Delegates entirely to the module-level runner.
        _run_tensor_parallel_optimization()
class Trainer():
    """Configuration holder and launcher for MAE long-tailed training runs.

    Attributes mirror the CLI flags of main_pretrain.py / main_finetune.py;
    pretrain(), finetune() and evaluate() shell out to
    torch.distributed.launch with these values.
    """
    def __init__(self) -> None:
        # --- run identification ---
        self.task = ''
        self.note = ''
        self.ckpt = ''
        # --- data ---
        self.dataset = 'ImageNet-LT'
        self.nb_classes = 1000
        # --- schedule / batching ---
        self.epochs = 800
        self.batch = 256
        self.accum_iter = 4
        self.device = '0,1,2,3'
        # --- model & optimization ---
        self.model = 'mae_vit_base_patch16'
        self.resume = ''
        self.input_size = 224
        self.drop_path = 0.1
        self.clip_grad = None
        self.weight_decay = 0.05
        self.adamW2 = 0.95
        self.lr = None
        self.blr = 0.00015
        self.layer_decay = 0.75
        self.min_lr = 1e-06
        self.warmup_epochs = 40
        # --- augmentation ---
        self.color_jitter = None
        self.aa = 'rand-m9-mstd0.5-inc1'
        self.smoothing = 0.1
        self.reprob = 0.25
        self.remode = 'pixel'
        self.recount = 1
        self.resplit = False
        self.mixup = 0.8
        self.cutmix = 1.0
        self.cutmix_minmax = None
        self.mixup_prob = 1.0
        self.mixup_switch_prob = 0.5
        self.mixup_mode = 'batch'
        # --- loss / MAE specifics ---
        self.loss = 'ce'
        self.bal_tau = 1.0
        self.mask_ratio = 0.75
        self.global_pool = True
        self.attn_only = False
        # --- misc ---
        self.seed = 0
        self.prit = 20
        self.imbf = 100
        self.num_workers = 16
        self.master_port = 29500
    def get_data_path(self):
        """Return the data root for self.dataset (None when unknown); also
        sets self.nb_classes as a side effect."""
        if (self.dataset == 'ImageNet-LT'):
            self.nb_classes = 1000
            return IMAGENET_LT_PATH
        if (self.dataset == 'iNat18'):
            self.nb_classes = 8142
            return INAT18_PATH
        if (self.dataset == 'ImageNet-BAL'):
            self.nb_classes = 1000
            return IMAGENET_BAL_PATH
        if (self.dataset == 'cifar10-LT'):
            self.nb_classes = 10
            return CIFAR10_PATH
        if (self.dataset == 'cifar100-LT'):
            self.nb_classes = 100
            return CIFAR100_PATH
        if (self.dataset == 'Place'):
            self.nb_classes = 365
            return PLCAE_PATH
        return None
    def pretrain(self):
        """Launch distributed MAE pre-training via main_pretrain.py."""
        assert (not ((self.task == '') or (self.note == ''))), 'Need basic setting ...'
        os.environ['CUDA_VISIBLE_DEVICES'] = self.device
        task = self.task
        note = self.note
        nodes = len(self.device.split(','))
        data_path = self.get_data_path()
        log_dir = os.path.join(WORK_PATH, f'exp/{task}/{self.dataset}/{note}')
        ckpt_dir = os.path.join(WORK_PATH, f'ckpt/{task}/{self.dataset}/{note}')
        exe_file = os.path.join(WORK_PATH, 'main_pretrain.py')
        os.system(f'''python -m torch.distributed.launch --nproc_per_node={nodes} --master_port {self.master_port} {exe_file} --ckpt_dir '{ckpt_dir}' --log_dir '{log_dir}' --batch_size {self.batch} --input_size {self.input_size} --world_size {nodes} --accum_iter {self.accum_iter} --model {self.model} --resume '{self.resume}' --norm_pix_loss --mask_ratio {self.mask_ratio} --epochs {self.epochs} --warmup_epochs {self.warmup_epochs} --blr {self.blr} --weight_decay {self.weight_decay} --data_path '{data_path}' --dataset '{self.dataset}' --num_workers {self.num_workers}
''')
    def finetune(self):
        """Launch distributed fine-tuning from self.ckpt via main_finetune.py."""
        assert (not ((self.task == '') or (self.note == ''))), 'Need basic setting ...'
        os.environ['CUDA_VISIBLE_DEVICES'] = self.device
        nodes = len(self.device.split(','))
        data_path = self.get_data_path()
        log_dir = os.path.join(WORK_PATH, f'exp/{self.task}/{self.dataset}/{self.model}/{self.note}')
        ckpt_dir = os.path.join(WORK_PATH, f'ckpt/{self.task}/{self.dataset}/{self.model}/{self.note}')
        exe_file = os.path.join(WORK_PATH, 'main_finetune.py')
        attn_only = ('--attn_only' if self.attn_only else '')
        cls_type = ('--global_pool' if self.global_pool else '--cls_token')
        os.system(f'''python -m torch.distributed.launch --nproc_per_node={nodes} --master_port {self.master_port} {exe_file} --ckpt_dir '{ckpt_dir}' --log_dir '{log_dir}' --finetune '{self.ckpt}' --resume '{self.resume}' --batch_size {self.batch} --input_size {self.input_size} --world_size {nodes} --model {self.model} --loss {self.loss} --bal_tau {self.bal_tau} --accum_iter {self.accum_iter} --epochs {self.epochs} --warmup_epochs {self.warmup_epochs} --blr {self.blr} --layer_decay {self.layer_decay} --weight_decay {self.weight_decay} --adamW2 {self.adamW2} --drop_path {self.drop_path} --reprob {self.reprob} --mixup {self.mixup} --cutmix {self.cutmix} --data_path {data_path} --dataset {self.dataset} --imbf {self.imbf} --nb_classes {self.nb_classes} --num_workers {self.num_workers} --prit {self.prit} {attn_only} {cls_type} --dist_eval
''')
    def evaluate(self):
        """Launch evaluation (--eval) of the checkpoint in self.ckpt."""
        if (self.device != 'cpu'):
            os.environ['CUDA_VISIBLE_DEVICES'] = self.device
        task = self.task
        note = self.note
        nodes = len(self.device.split(','))
        data_path = self.get_data_path()
        log_dir = os.path.join(WORK_PATH, f'exp/{task}/{self.dataset}/{note}')
        exe_file = os.path.join(WORK_PATH, 'main_finetune.py')
        attn_only = ('--attn_only' if self.attn_only else '')
        cls_type = ('--global_pool' if self.global_pool else '--cls_token')
        # BUGFIX: the command used to interpolate {self.finetune}, i.e. the
        # *bound method* finetune, producing '<bound method ...>' in the
        # shell command; the checkpoint path self.ckpt was intended (this is
        # what finetune() itself passes to --finetune).
        os.system(f'''python -m torch.distributed.launch --nproc_per_node={nodes} --master_port {self.master_port} {exe_file} --log_dir '{log_dir}' --resume '{self.resume}' --finetune '{self.ckpt}' --batch_size {self.batch} --input_size {self.input_size} --world_size {nodes} --model {self.model} --drop_path {self.drop_path} --data_path {data_path} --dataset {self.dataset} --nb_classes {self.nb_classes} --num_workers {self.num_workers} --prit {self.prit} {attn_only} {cls_type} --eval
''')
def create_effects_augmentation_chain(effects, ir_dir_path=None, sample_rate=44100, shuffle=False, parallel=False, parallel_weight_factor=None):
    """Build an AugmentationChain from a list of effect specifiers.

    Arguments:
        effects: iterable of effect names (str), Processor/AugmentationChain
            instances, or (effect, probability) tuples
        ir_dir_path: directory with impulse responses; when None, 'reverb'
            falls back to the algorithmic reverb
        sample_rate: sample rate handed to every effect processor
        shuffle, parallel, parallel_weight_factor: forwarded to AugmentationChain

    Returns the assembled AugmentationChain.

    Raises ValueError for an unrecognized effect name.
    """
    fx_list = []
    apply_prob = []
    for cur_fx in effects:
        # A (fx, probability) tuple carries its own apply probability;
        # plain entries default to probability 1.
        if isinstance(cur_fx, tuple):
            apply_prob.append(cur_fx[1])
            cur_fx = cur_fx[0]
        else:
            apply_prob.append(1)
        if isinstance(cur_fx, (AugmentationChain, Processor)):
            fx_list.append(cur_fx)
        elif (cur_fx.lower() == 'gain'):
            fx_list.append(Gain())
        elif ('eq' in cur_fx.lower()):
            fx_list.append(Equaliser(n_channels=2, sample_rate=sample_rate))
        elif ('comp' in cur_fx.lower()):
            fx_list.append(Compressor(sample_rate=sample_rate))
        elif ('expand' in cur_fx.lower()):
            fx_list.append(Expander(sample_rate=sample_rate))
        elif ('pan' in cur_fx.lower()):
            fx_list.append(Panner())
        elif ('image' in cur_fx.lower()):
            fx_list.append(MidSideImager())
        elif ('algorithmic' in cur_fx.lower()):
            fx_list.append(AlgorithmicReverb(sample_rate=sample_rate))
        elif ('reverb' in cur_fx.lower()):
            # FIX: identity comparison with None (was '== None').
            if (ir_dir_path is None):
                fx_list.append(AlgorithmicReverb(sample_rate=sample_rate))
            else:
                # Group measured IRs by their RT60 bucket; buckets >= 3000 ms
                # are merged into one "long" list.
                IR_paths = glob(f'{ir_dir_path}*/RT60_avg/[!0-]*')
                IR_list = []
                IR_dict = {}
                for IR_path in IR_paths:
                    cur_rt = IR_path.split('/')[(- 1)]
                    if (cur_rt not in IR_dict):
                        IR_dict[cur_rt] = []
                    IR_dict[cur_rt].extend(create_dataset(path=IR_path, accepted_sampling_rates=[sample_rate], sources=['impulse_response'], mapped_sources={}, load_to_memory=True, debug=False)[0])
                long_ir_list = []
                for cur_rt in IR_dict:
                    cur_rt_len = int(cur_rt.split('-')[0])
                    if (cur_rt_len < 3000):
                        IR_list.append(IR_dict[cur_rt])
                    else:
                        long_ir_list.extend(IR_dict[cur_rt])
                IR_list.append(long_ir_list)
                fx_list.append(ConvolutionalReverb(IR_list, sample_rate))
        else:
            raise ValueError(f'make sure the target effects are in the Augment FX chain : received fx called {cur_fx}')
    # Every processor except gain (and nested chains) gets normalization.
    aug_chain_in = []
    for (cur_i, cur_fx) in enumerate(fx_list):
        normalize = (False if (isinstance(cur_fx, AugmentationChain) or (cur_fx.name == 'Gain')) else True)
        aug_chain_in.append((cur_fx, apply_prob[cur_i], normalize))
    return AugmentationChain(fxs=aug_chain_in, shuffle=shuffle, parallel=parallel, parallel_weight_factor=parallel_weight_factor)
class RecurrentTransformerEncoderLayer(Module):
    """One recurrent (stepwise) transformer encoder layer.

    Applies self-attention followed by a two-layer feed-forward network,
    each with residual connection, dropout and post-LayerNorm, while
    threading an attention *state* through successive calls.
    """
    def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation='relu', event_dispatcher=''):
        """
        Arguments:
            attention: recurrent attention module, called as
                attention(x, x, x, state)
            d_model: model (embedding) dimension
            d_ff: hidden size of the feed-forward network
                (defaults to 4 * d_model)
            dropout: dropout probability applied after each sub-layer
            activation: 'relu' or 'gelu' feed-forward non-linearity
            event_dispatcher: name of the EventDispatcher instance to use
        """
        super(RecurrentTransformerEncoderLayer, self).__init__()
        d_ff = (d_ff or (4 * d_model))
        self.attention = attention
        self.linear1 = Linear(d_model, d_ff)
        self.linear2 = Linear(d_ff, d_model)
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.dropout = Dropout(dropout)
        self.activation = (F.relu if (activation == 'relu') else F.gelu)
        self.event_dispatcher = EventDispatcher.get(event_dispatcher)
    def forward(self, x, state=None, memory=None):
        """Run one recurrence step; returns (output, new_state).

        *memory* is the deprecated alias for *state* and is normalized
        by check_state.
        """
        state = check_state(state, memory)
        # Self-attention sub-layer with residual connection.
        (x2, state) = self.attention(x, x, x, state)
        x = (x + self.dropout(x2))
        # Feed-forward sub-layer; 'y' carries the transformed path.
        y = x = self.norm1(x)
        y = self.dropout(self.activation(self.linear1(y)))
        y = self.dropout(self.linear2(y))
        return (self.norm2((x + y)), state)
def execute(code, stack, pos, storage, mmemory, data, trace, calldepth, debug, read_from_blockchain):
    """Symbolically execute the single EVM instruction at *pos*.

    Machine values are dicts {'type', 'step', 'z3'} wrapping z3 bitvector
    expressions; *stack*, *storage*, *mmemory* and *data* are mutated in
    place.

    Arguments:
        code: disassembled instruction list ({'o': opcode, 'id': step, ...})
        calldepth: current call depth, used to name symbolic inputs
        debug: print diagnostics when an operand cannot be resolved
        read_from_blockchain: resolve unknown SLOADs via web3 when available
        trace: kept for interface compatibility (unused here)

    Returns (next_pos, halt): index of the next instruction and whether this
    execution path must stop.
    """
    op = code[pos]['o']
    halt = False
    executed = True
    step = code[pos]['id']
    if (op not in allops):
        print(('Unknown operation %s at pos %x' % (op, pos)))
        return (pos, True)
    if (allops[op][1] > len(stack)):
        if debug:
            print(('Not enough entries in the stack to execute the operation %8s at step %x: required %d, provided %d' % (op, code[pos]['id'], allops[op][1], len(stack))))
        return (pos, True)
    # Expected stack size afterwards: pop allops[op][1], push allops[op][2].
    final_stack_size = ((len(stack) - allops[op][1]) + allops[op][2])
    args = []
    # SWAP/DUP/JUMPI manipulate the stack in place and keep their operands.
    if ((op.find('SWAP') < 0) and (op.find('DUP') < 0) and (op not in ['JUMPI'])):
        for i in range(allops[op][1]):
            args.append(stack.pop())
    if (op in ['ISZERO', 'NOT']):
        stack.append(unary(args[0], step, op))
    elif (op in ['ADD', 'MUL', 'SUB', 'DIV', 'SDIV', 'MOD', 'SMOD', 'EXP', 'AND', 'OR', 'XOR', 'LT', 'GT', 'SLT', 'SGT', 'EQ']):
        stack.append(binary(args[0], args[1], step, op))
    elif (op in ['ADDMOD', 'MULMOD']):
        stack.append(ternary(args[0], args[1], args[2], step, op))
    elif (op == 'SIGNEXTEND'):
        if ((not is_fixed(args[0])) or (not is_fixed(args[1]))):
            stack.append({'type': 'undefined', 'step': step})
        else:
            # Extend the sign bit of the (args[0]+1)-byte value args[1].
            o = get_value(args[1])
            t = (256 - (8 * (get_value(args[0]) + 1)))
            tbit = ((o >> t) & 1)
            n = 0
            for i in range(256):
                n ^= ((tbit if (i <= t) else ((o >> i) & 1)) << i)
            stack.append({'type': 'undefined', 'step': step, 'z3': BitVecVal(n, 256)})
    elif (op == 'SHA3'):
        addr = simplify(args[0]['z3'])
        offset = simplify(args[1]['z3'])
        exact_address = (addr.as_long() if is_bv_value(addr) else (- 1))
        exact_offset = (offset.as_long() if is_bv_value(offset) else (- 1))
        res = {'type': 'undefined', 'step': step}
        if ((exact_address >= 0) and (exact_offset >= 0)):
            if ((exact_offset % 32) == 0):
                val = ''
                all_good = True
                # BUGFIX: integer division — on Python 3, '/' yields a float
                # that range() rejects.
                for i in range((exact_offset // 32)):
                    if (((exact_address + (i * 32)) not in mmemory) or (not is_fixed(mmemory[(exact_address + (i * 32))]))):
                        all_good = False
                        break
                    val += ('%064x' % get_value(mmemory[(exact_address + (i * 32))]))
                if all_good:
                    # All words concrete: compute the real keccak digest.
                    k = keccak_256()
                    k.update(val.encode('utf-8'))
                    digest = k.hexdigest()
                    res = {'type': 'constant', 'step': step, 'z3': BitVecVal(int(digest, 16), 256)}
        if (MyGlobals.symbolic_sha and is_undefined(res)):
            # Fall back to a fresh symbolic variable for the hash.
            res = {'type': 'constant', 'step': step, 'z3': BitVec(((('sha-' + str(step)) + '-') + str(calldepth)), 256)}
        stack.append(res)
    elif (op.find('PUSH') >= 0):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(int(code[pos]['input'], 16), 256)})
    elif (op.find('DUP') >= 0):
        stack.append(copy.deepcopy(stack[(- int(op[3:]))]))
    elif (op.find('SWAP') >= 0):
        tmp1 = stack[(- 1)]
        tmp2 = stack[((- int(op[4:])) - 1)]
        stack[(- 1)] = tmp2
        stack[((- int(op[4:])) - 1)] = tmp1
    elif (op in MyGlobals.symbolic_vars):
        # Environment opcode configured to be treated symbolically.
        stack.append({'type': 'constant', 'step': step, 'z3': BitVec(((op + '-') + str(calldepth)), 256)})
    elif (op == 'NUMBER'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(int(get_params('block_number', ''), 16), 256)})
    elif (op == 'GASLIMIT'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(int(get_params('gas_limit', ''), 16), 256)})
    elif (op == 'TIMESTAMP'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(int(get_params('time_stamp', ''), 16), 256)})
    elif (op == 'CALLVALUE'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(int(get_params('call_value', ''), 16), 256)})
    elif (op == 'ADDRESS'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(int(get_params('contract_address', ''), 16), 256)})
    elif (op == 'ORIGIN'):
        # NOTE(review): ORIGIN reuses contract_address rather than a
        # transaction-origin parameter — confirm this is intentional.
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(int(get_params('contract_address', ''), 16), 256)})
    elif (op == 'GASPRICE'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(int(get_params('gas_price', ''), 16), 256)})
    elif (op == 'COINBASE'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(0, 256)})
    elif (op == 'DIFFICULTY'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(0, 256)})
    elif (op == 'CALLER'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(int(get_params('my_address', ''), 16), 256)})
    elif (op == 'GAS'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(int(get_params('gas', ''), 16), 256)})
    elif (op == 'MSIZE'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(len(mmemory), 256)})
    elif (op == 'BLOCKHASH'):
        # Arbitrary constant stand-in for the block hash.
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(291, 256)})
    elif (op == 'BALANCE'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(int(get_params('contract_balance', ''), 10), 256)})
    elif (op == 'POP'):
        pass
    elif (op.find('LOG') >= 0):
        pass
    elif (op == 'CODECOPY'):
        pass
    elif (op == 'JUMPDEST'):
        if (not is_good_jump(code, pos, debug)):
            return (pos, True)
    elif (op in ['STOP', 'RETURN', 'REVERT', 'INVALID', 'SUICIDE']):
        halt = True
    elif (op in ['CALLDATALOAD']):
        addr = args[0]
        if is_fixed(addr):
            # Materialize a named symbolic word for this calldata offset.
            addr = get_value(addr)
            if (((('data-' + str(calldepth)) + '-') + str(addr)) not in data):
                data[((('data-' + str(calldepth)) + '-') + str(addr))] = BitVec((((('input' + str(calldepth)) + '[') + str(addr)) + ']'), 256)
            stack.append({'type': 'constant', 'step': step, 'z3': data[((('data-' + str(calldepth)) + '-') + str(addr))]})
        elif is_undefined(addr):
            if debug:
                print(('\x1b[95m[-] In CALLDATALOAD the input address cannot be determined at step %x: \x1b[0m' % code[pos]['id']))
                print(addr)
            return (pos, True)
        else:
            stack.append(args[0])
        # Handled further by the caller (position is not advanced here).
        return (pos, False)
    elif (op in ['CALLDATASIZE']):
        return (pos, False)
    elif (op == 'CALL'):
        if (is_fixed(args[5]) and is_fixed(args[6])):
            addr = get_value(args[5])
            value = get_value(args[6])
            if (value < 10000):
                # BUGFIX: integer division for the word count (Python 3).
                for i in range((value // 32)):
                    mmemory[(addr + (32 * i))] = {'type': 'undefined', 'step': step}
        # Result of the call is a fresh symbolic success bit.
        stack.append({'type': 'constant', 'step': step, 'z3': (BitVec(('call_at_step_' + str(step)), 256) & 1)})
    elif (op == 'CALLDATACOPY'):
        memaddr = args[0]
        datapos = args[1]
        length = args[2]
        if ((not is_fixed(memaddr)) or (not is_fixed(datapos)) or (not is_fixed(length))):
            if debug:
                print('\x1b[95m[-] In CALLDATACOPY the memory address or datapos or length cannot be determined \x1b[0m')
                print(memaddr)
                print(datapos)
                print(length)
            return (pos, True)
        memaddr = get_value(memaddr)
        datapos = get_value(datapos)
        length = get_value(length)
        if ((length % 32) != 0):
            if debug:
                print(('\x1b[95m[-] In CALLDATACOPY the length of array (%d) is not multiple of 32 \x1b[0m' % length))
            return (pos, True)
        # BUGFIX: integer division for the word count (Python 3).
        for i in range((length // 32)):
            data[(datapos + (32 * i))] = BitVec((((('input' + str(calldepth)) + '[') + str((datapos + (32 * i)))) + ']'), 256)
            store_in_memory(mmemory, (memaddr + (32 * i)), {'type': 'constant', 'step': step, 'z3': data[(datapos + (32 * i))]})
    elif (op == 'CALLCODE'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(0, 256)})
    elif (op == 'DELEGATECALL'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(0, 256)})
    elif (op == 'EXTCODESIZE'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(0, 256)})
    elif (op == 'CREATE'):
        stack.append({'type': 'constant', 'step': step, 'z3': BitVecVal(0, 256)})
    elif (op == 'MLOAD'):
        addr = args[0]
        if is_undefined(addr):
            if debug:
                print(('\x1b[95m[-] The MLOAD address on %x cannot be determined\x1b[0m' % code[pos]['id']))
            return (pos, True)
        addr = simplify(addr['z3'])
        if is_bv_value(addr):
            exact_address = addr.as_long()
            if (exact_address in mmemory):
                res = copy.deepcopy(mmemory[exact_address])
            else:
                # Unwritten memory reads as zero.
                res = {'type': 'constant', 'step': step, 'z3': BitVecVal(0, 256)}
            stack.append(res)
        else:
            if debug:
                print(('\x1b[95m[-] The MLOAD address on %x cannot be determined\x1b[0m' % code[pos]['id']))
            return (pos, True)
    elif (op == 'MSTORE'):
        addr = args[0]
        if (is_undefined(addr) or (not is_bv_value(simplify(addr['z3'])))):
            if debug:
                print(('\x1b[95m[-] The MSTORE the write address on %x cannot be determined\x1b[0m' % code[pos]['id']))
            return (pos, True)
        t = copy.deepcopy(args[1])
        addr = get_value(addr)
        store_in_memory(mmemory, addr, t)
    elif (op in ['MSTORE8']):
        addr = args[0]
        value = args[1]
        if (not is_fixed(addr)):
            if debug:
                print(('\x1b[95m[-] The MSTORE8 the write address on %x cannot be determined\x1b[0m' % code[pos]['id']))
            return (pos, True)
        if (not is_fixed(value)):
            if debug:
                # BUGFIX: the original message contained no %-placeholder,
                # so applying '%' raised a TypeError instead of printing.
                print(('\x1b[95m[-] The MSTORE8 value on %x is undefined \x1b[0m' % code[pos]['id']))
            return (pos, True)
        ea = get_value(addr)
        ev = (get_value(value) % 256)
        # BUGFIX: word-align with integer division; '/' makes the memory
        # keys floats on Python 3 and breaks lookups.
        if (((ea // 32) * 32) not in mmemory):
            mmemory[((ea // 32) * 32)] = {'type': 'constant', 'step': step, 'z3': BitVecVal((ev << (31 - (ea % 32))), 256)}
        elif is_fixed(mmemory[((ea // 32) * 32)]['z3']):
            v = get_value(mmemory[((ea // 32) * 32)]['z3'])
            # NOTE(review): the shift amount (31 - ea % 32) is not scaled by
            # 8 bits per byte — confirm it matches the byte layout that
            # MLOAD/MSTORE assume.
            v = ((v & ((~ BitVecVal(255, 256)) << (31 - (ea % 32)))) ^ (ev << (31 - (ea % 32))))
            mmemory[((ea // 32) * 32)]['z3'] = v
    elif (op == 'SLOAD'):
        addr = args[0]
        if is_undefined(addr):
            if debug:
                print(('\x1b[95m[-] The SLOAD address on %x cannot be determined\x1b[0m' % code[pos]['id']))
            return (pos, True)
        addr = simplify(addr['z3'])
        if is_bv_value(addr):
            exact_address = addr.as_long()
            if (exact_address in storage):
                total_values = len(storage[exact_address])
                if (total_values == 0):
                    print(('In SLOAD the list at address %x has no elements ' % exact_address))
                    exit(0)
                    return (pos, True)
                else:
                    res = copy.deepcopy(storage[exact_address][0])
            else:
                # Unknown slot: optionally read the concrete value on-chain.
                if ((MyGlobals.web3 is not None) and read_from_blockchain):
                    value = MyGlobals.web3.eth.getStorageAt(get_params('contract_address', ''), exact_address)
                else:
                    value = '0'
                t = {'type': 'constant', 'step': step, 'z3': BitVecVal(int(value, 16), 256)}
                storage[exact_address] = [t]
                res = copy.deepcopy(t)
            stack.append(res)
        elif MyGlobals.symbolic_load:
            stack.append({'type': 'constant', 'step': step, 'z3': BitVec(((('sload-' + str(step)) + '-') + str(calldepth)), 256)})
        else:
            if debug:
                print(('\x1b[95m[-] The SLOAD address on %x cannot be determined\x1b[0m' % code[pos]['id']))
            return (pos, True)
    elif (op == 'SSTORE'):
        addr = args[0]
        if is_undefined(addr):
            if debug:
                print(('\x1b[95m[-] The SSTORE address on %x cannot be determined\x1b[0m' % code[pos]['id']))
            return (pos, True)
        t = copy.deepcopy(args[1])
        if is_bv_value(simplify(addr['z3'])):
            va = get_value(addr)
            storage[va] = [t]
        elif MyGlobals.symbolic_load:
            # Symbolic store address is tolerated (write is dropped).
            pass
        else:
            if debug:
                print(('\x1b[95m[-] In SSTORE the write address cannot be determined at step %x: \x1b[0m' % code[pos]['id']))
                print(addr)
            return (pos, True)
    elif (op == 'JUMP'):
        addr = args[0]
        if (not is_fixed(addr)):
            if debug:
                print('\x1b[95m[-] In JUMP the address cannot be determined \x1b[0m')
            return (pos, True)
        jump_dest = get_value(addr)
        if (jump_dest <= 0):
            if debug:
                print(('\x1b[95m[-] The JUMP destination is not a valid address : %x\x1b[0m' % jump_dest))
            return (pos, True)
        new_position = find_pos(code, jump_dest)
        if (new_position < 0):
            if debug:
                print(('\x1b[95m[-] The code has no such JUMP destination: %s at line %x\x1b[0m' % (hex(jump_dest), code[pos]['id'])))
            return (pos, True)
        if (not is_good_jump(code, new_position, debug)):
            return (pos, True)
        return (new_position, False)
    elif (op == 'JUMPI'):
        # Conditional jumps are resolved by the caller (path forking).
        return (pos, False)
    elif (op == 'BYTE'):
        byte_no = args[0]
        word = args[1]
        if (is_undefined(word) or is_undefined(byte_no)):
            res = {'type': 'undefined', 'step': step}
        else:
            res = {'type': 'constant', 'step': step, 'z3': ((word['z3'] >> (8 * (31 - byte_no['z3']))) & 255)}
        stack.append(res)
    else:
        executed = False
    # Internal consistency check of the stack effect of the executed op.
    if (executed and (final_stack_size != len(stack))):
        print(('Incorrect final stack size after executing %s at step %x' % (op, step)))
        print(len(stack))
        print(final_stack_size)
        exit(2)
    return ((pos + 1), halt)
def put_in_middle(str1, str2):
    """Overlay *str2* onto the center of *str1*.

    When str2 is at least as long as str1, str2 is returned unchanged;
    otherwise the middle len(str2) characters of str1 are replaced.
    """
    outer, inner = len(str1), len(str2)
    if outer <= inner:
        return str2
    left = (outer - inner) // 2
    return str1[:left] + str2 + str1[left + inner:]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.