code stringlengths 101 5.91M |
|---|
def transfer_gradient_from_player_to_shared(player, shared_model, gpu_id):
    """Copy the worker's gradients onto the shared model's parameters.

    Parameters with no gradient on the worker get a zero gradient of the
    shared parameter's shape; gradients coming from a GPU worker
    (gpu_id >= 0) are moved to CPU before assignment.
    """
    worker_params = player.model.parameters()
    for param, shared_param in zip(worker_params, shared_model.parameters()):
        if not shared_param.requires_grad:
            continue
        if param.grad is None:
            # No gradient was computed for this parameter on the worker.
            shared_param._grad = torch.zeros(shared_param.shape)
        else:
            # Shared model is assumed to live on CPU; pull GPU grads over.
            shared_param._grad = param.grad if gpu_id < 0 else param.grad.cpu()
def parse_args(args):
    """Parse HSP evaluation options from *args*, ignoring unknown flags."""
    parser = argparse.ArgumentParser(
        description='hsp',
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('--layout', type=str, required=True, help='layout name')
    parser.add_argument('--k', type=int, default=18, help='number of selected policies')
    parser.add_argument('--eval_result_dir', type=str, default='hsp/biased_eval')
    parser.add_argument('--policy_pool_path', type=str, default='../policy_pool')
    known, _unknown = parser.parse_known_args(args)
    return known
def test_efficientnet_backbone():
    """EfficientNet-b0 with all stage indices exposes seven feature maps."""
    # Unknown architecture tags must be rejected up front.
    with pytest.raises(AssertionError):
        EfficientNet(arch='c3')
    model = EfficientNet(arch='b0', out_indices=(0, 1, 2, 3, 4, 5, 6))
    model.train()
    feats = model(torch.randn(2, 3, 32, 32))
    expected_shapes = [
        (2, 32, 16, 16),
        (2, 16, 16, 16),
        (2, 24, 8, 8),
        (2, 40, 4, 4),
        (2, 112, 2, 2),
        (2, 320, 1, 1),
        (2, 1280, 1, 1),
    ]
    assert len(feats) == len(expected_shapes)
    for feat, shape in zip(feats, expected_shapes):
        assert feat.shape == torch.Size(shape)
@pytest.mark.parametrize('n_ensembles', [2])
@pytest.mark.parametrize('batch_size', [32])
@pytest.mark.parametrize('reduction', ['min', 'max', 'mean', 'none'])
def test_reduce_ensemble(n_ensembles: int, batch_size: int, reduction: str) -> None:
    """_reduce_ensemble collapses the ensemble axis per the chosen reduction.

    Fix: the three decorator lines had lost their `@pytest.mark` prefix
    (bare `.parametrize(...)` is invalid syntax); restored.
    """
    y = torch.rand(n_ensembles, batch_size, 1)
    ret = _reduce_ensemble(y, reduction)
    if reduction == 'min':
        assert ret.shape == (batch_size, 1)
        assert torch.allclose(ret, y.min(dim=0).values)
    elif reduction == 'max':
        assert ret.shape == (batch_size, 1)
        assert torch.allclose(ret, y.max(dim=0).values)
    elif reduction == 'mean':
        assert ret.shape == (batch_size, 1)
        assert torch.allclose(ret, y.mean(dim=0))
    elif reduction == 'none':
        # 'none' is a pass-through: the ensemble axis is kept.
        assert ret.shape == (n_ensembles, batch_size, 1)
        assert (ret == y).all()
def test_point_f1_score_nan():
    """With zero overlap between expected and observed timestamps the
    point-wise F1 score is undefined and must come back as NaN."""
    expected = pd.DataFrame({'timestamp': [2, 3]})
    observed = pd.DataFrame({'timestamp': [4, 5]})
    score = point_f1_score(expected, observed)
    assert np.isnan(score)
class Buf(object):
    """A singly linked buffer with O(1) prepend, append, and concatenation.

    Cells are two-element Python lists ``[value, next_cell]``; the empty
    list acting as the tail cell terminates the chain.
    """

    def __init__(self):
        # `head` is the first cell; `tail` is the (still empty) last cell.
        self.head = []
        self.tail = self.head

    def append_left(self, item):
        """Prepend *item* in O(1) by wrapping the old head in a new cell."""
        self.head = [item, self.head]

    def append(self, item):
        """Append *item* in O(1) by filling the tail cell and growing a new one."""
        cell = self.tail
        self.tail = []
        cell.append(item)
        cell.append(self.tail)

    def extend(self, other):
        """Splice *other* onto the end in O(1); cells are shared, not copied."""
        cell = self.tail
        cell.extend(other.head)
        self.tail = other.tail

    def __iter__(self):
        """Yield buffered values front to back."""
        node = self.head
        while node:
            yield node[0]
            node = node[1]
class MyReLU(torch.autograd.Function):
    """Custom autograd function: forward is ReLU; backward uses gradient 1
    for non-negative inputs and exp(x) for negative inputs (an ELU-like
    surrogate gradient).
    """

    @staticmethod
    def forward(ctx, input):
        # Save the ORIGINAL input for backward. Bug fix: the original used
        # the in-place clamp_min_, which mutated the tensor already saved
        # here (so `input < 0` could never be true in backward) and raises
        # on leaf tensors that require grad.
        ctx.save_for_backward(input)
        return input.clamp_min(0)

    @staticmethod
    def backward(ctx, grad_output):
        (input,) = ctx.saved_tensors
        grad_input = torch.ones_like(input, dtype=input.dtype, device=input.device)
        negative = input < 0
        # Surrogate gradient exp(x) on the negative side.
        grad_input[negative] = torch.exp(input[negative])
        return grad_input * grad_output
@array_function_dispatch(_count_dispatcher)
def rfind(a, sub, start=0, end=None):
    """For each element of *a*, return the highest index in [start, end]
    where substring *sub* is found, or -1 when absent (vectorised
    str.rfind, numpy char-routine style).

    Fix: the decorator line had been truncated to the bare call
    `_function_dispatch(_count_dispatcher)`; restored to the
    `@array_function_dispatch` form used by numpy's char routines.
    """
    # NOTE(review): `integer` is assumed to be a module-level numpy integer
    # type alias used as the result dtype — confirm against the module header.
    return _vec_string(a, integer, 'rfind', [sub, start] + _clean_args(end))
def test_set_schema_path_context(monkeypatch):
    """Entering pyhf.schema as a context manager swaps the schema path."""
    # Register the current schemas so monkeypatch restores them on teardown.
    monkeypatch.setattr(pyhf.schema.variables, 'schemas', pyhf.schema.variables.schemas, raising=True)
    new_path = pathlib.Path('a/new/path')
    with pyhf.schema(new_path):
        assert pyhf.schema.path == new_path
def test_allowable_amino_acid_locations_do_not_contain_amino_acids_we_cant_create(msa_sampler):
    """The sampler's allowed tokens exclude ambiguous / non-standard
    amino-acid codes and the gap character."""
    allowed = map_aa_idx_to_tok_set(msa_sampler)
    forbidden = set('XBUXZO.')
    assert allowed.isdisjoint(forbidden)
def test_case_3():
    """Queue(2423) starts empty over a preallocated array backing store;
    constructing a Queue from a bool must trip its size assertion."""
    capacity = 2423
    queue_0 = module_0.Queue(capacity)
    qualname = f'{type(queue_0).__module__}.{type(queue_0).__qualname__}'
    assert qualname == 'queue_example.Queue'
    assert queue_0.max == 2423
    assert queue_0.head == 0
    assert queue_0.tail == 0
    assert queue_0.size == 0
    data_qualname = f'{type(queue_0.data).__module__}.{type(queue_0.data).__qualname__}'
    assert data_qualname == 'array.array'
    assert len(queue_0.data) == 2423
    # Dequeue on an empty queue yields nothing and the queue stays non-full.
    none_type_0 = queue_0.dequeue()
    bool_0 = queue_0.full()
    assert bool_0 is False
    with pytest.raises(AssertionError):
        module_0.Queue(bool_0)
def mtg_jamendo_read_file(tsv_file):
    """Parse an MTG-Jamendo TSV dump into track/tag/id structures.

    Each data row is expected as:
        track_id, artist_id, album_id, path, duration, tag1, tag2, ...
    where each tag string is '<category><TAG_HYPHEN><tag>'.

    Returns a 3-tuple:
        tracks: {track_id: metadata dict plus one token-set per category}
        tags:   {category: {tag: set of track_ids carrying it}}
        extra:  id-length metadata computed by get_length() per id collection.
    """
    tracks = {}
    tags = defaultdict(dict)
    artist_ids = set()
    albums_ids = set()
    with open(tsv_file) as fp:
        reader = csv.reader(fp, delimiter='\t')
        next(reader, None)  # skip the header row
        for row in reader:
            track_id = get_id(row[0])
            tracks[track_id] = {'artist_id': get_id(row[1]), 'album_id': get_id(row[2]), 'path': row[3], 'duration': float(row[4]), 'tags': row[5:]}
            # Pre-create an empty token set for every known tag category.
            tracks[track_id].update({category: set() for category in CATEGORIES})
            artist_ids.add(get_id(row[1]))
            albums_ids.add(get_id(row[2]))
            for tag_str in row[5:]:
                (category, tag) = tag_str.split(TAG_HYPHEN)
                if (tag not in tags[category]):
                    tags[category][tag] = set()
                tags[category][tag].add(track_id)
                # Guard for categories outside CATEGORIES (not pre-created above).
                if (category not in tracks[track_id]):
                    tracks[track_id][category] = set()
                # NOTE(review): splitting on ',' assumes multi-token tags are
                # comma-separated — confirm against the dump format.
                tracks[track_id][category].update(set(tag.split(',')))
    print('Reading: {} tracks, {} albums, {} artists'.format(len(tracks), len(albums_ids), len(artist_ids)))
    extra = {'track_id_length': get_length(tracks.keys()), 'artist_id_length': get_length(artist_ids), 'album_id_length': get_length(albums_ids)}
    return (tracks, tags, extra)
class TestSimulator(unittest.TestCase):
    """Exercises the ns-3 Python bindings: event scheduling, time
    arithmetic, attribute get/set, sockets, TypeId lookup, CommandLine
    parsing, and subclassing of wrapped C++ types."""
    def testScheduleNow(self):
        # An event scheduled "now" fires at t=0 with its argument intact.
        def callback(args):
            self._args_received = args
            self._cb_time = Simulator.Now()
        Simulator.Destroy()
        self._args_received = None
        self._cb_time = None
        Simulator.ScheduleNow(callback, 'args')
        Simulator.Run()
        self.assertEqual(self._args_received, 'args')
        self.assertEqual(self._cb_time.GetSeconds(), 0.0)
    def testSchedule(self):
        # A delayed event fires at the requested simulation time.
        def callback(args):
            self._args_received = args
            self._cb_time = Simulator.Now()
        Simulator.Destroy()
        self._args_received = None
        self._cb_time = None
        Simulator.Schedule(Seconds(123), callback, 'args')
        Simulator.Run()
        self.assertEqual(self._args_received, 'args')
        self.assertEqual(self._cb_time.GetSeconds(), 123.0)
    def testScheduleDestroy(self):
        # Destroy-time events run when Simulator.Destroy() is called, at the
        # clock value reached by the last ordinary event (123s here).
        def callback(args):
            self._args_received = args
            self._cb_time = Simulator.Now()
        Simulator.Destroy()
        self._args_received = None
        self._cb_time = None
        def null():
            pass
        Simulator.Schedule(Seconds(123), null)
        Simulator.ScheduleDestroy(callback, 'args')
        Simulator.Run()
        Simulator.Destroy()
        self.assertEqual(self._args_received, 'args')
        self.assertEqual(self._cb_time.GetSeconds(), 123.0)
    def testScheduleWithContext(self):
        # The scheduling context (node id) is delivered to the callback.
        def callback(context, args):
            self._context_received = context
            self._args_received = args
            self._cb_time = Simulator.Now()
        Simulator.Destroy()
        self._args_received = None
        self._cb_time = None
        self._context_received = None
        Simulator.ScheduleWithContext(54321, Seconds(123), callback, 'args')
        Simulator.Run()
        self.assertEqual(self._context_received, 54321)
        self.assertEqual(self._args_received, 'args')
        self.assertEqual(self._cb_time.GetSeconds(), 123.0)
    def testTimeComparison(self):
        # Time values support the full set of comparison operators.
        self.assert_((Seconds(123) == Seconds(123)))
        self.assert_((Seconds(123) >= Seconds(123)))
        self.assert_((Seconds(123) <= Seconds(123)))
        self.assert_((Seconds(124) > Seconds(123)))
        self.assert_((Seconds(123) < Seconds(124)))
    def testTimeNumericOperations(self):
        # Time values support +/-; int64x64_t supports multiplication.
        self.assertEqual((Seconds(10) + Seconds(5)), Seconds(15))
        self.assertEqual((Seconds(10) - Seconds(5)), Seconds(5))
        v1 = (int64x64_t(5.0) * int64x64_t(10))
        self.assertEqual(v1, int64x64_t(50))
    def testConfig(self):
        # Setting a default attribute value must not raise.
        Config.SetDefault('ns3::OnOffApplication::PacketSize', ns.core.UintegerValue(123))
    def testSocket(self):
        # A UDP packet sent to loopback:80 is delivered to the bound sink.
        node = ns.network.Node()
        internet = ns.internet.InternetStackHelper()
        internet.Install(node)
        self._received_packet = None
        def rx_callback(socket):
            assert (self._received_packet is None)
            self._received_packet = socket.Recv()
        sink = ns.network.Socket.CreateSocket(node, ns.core.TypeId.LookupByName('ns3::UdpSocketFactory'))
        sink.Bind(ns.network.InetSocketAddress(ns.network.Ipv4Address.GetAny(), 80))
        sink.SetRecvCallback(rx_callback)
        source = ns.network.Socket.CreateSocket(node, ns.core.TypeId.LookupByName('ns3::UdpSocketFactory'))
        source.SendTo(ns.network.Packet(19), 0, ns.network.InetSocketAddress(ns.network.Ipv4Address('127.0.0.1'), 80))
        Simulator.Run()
        self.assert_((self._received_packet is not None))
        self.assertEqual(self._received_packet.GetSize(), 19)
    def testAttributes(self):
        # Plain and pointer-valued attributes round-trip through Set/Get.
        queue = ns.network.DropTailQueue()
        queue.SetAttribute('MaxPackets', ns.core.UintegerValue(123456))
        limit = ns.core.UintegerValue()
        queue.GetAttribute('MaxPackets', limit)
        self.assertEqual(limit.Get(), 123456)
        mobility = ns.mobility.RandomWaypointMobilityModel()
        ptr = ns.core.PointerValue()
        mobility.GetAttribute('PositionAllocator', ptr)
        self.assertEqual(ptr.GetObject(), None)
        pos = ns.mobility.ListPositionAllocator()
        mobility.SetAttribute('PositionAllocator', ns.core.PointerValue(pos))
        ptr = ns.core.PointerValue()
        mobility.GetAttribute('PositionAllocator', ptr)
        self.assert_((ptr.GetObject() is not None))
    def testIdentity(self):
        # The bindings must return the same Python wrapper for the same
        # underlying C++ object.
        csma = ns.csma.CsmaNetDevice()
        channel = ns.csma.CsmaChannel()
        csma.Attach(channel)
        c1 = csma.GetChannel()
        c2 = csma.GetChannel()
        self.assert_((c1 is c2))
    def testTypeId(self):
        # LookupByNameFailSafe resolves known names and raises KeyError
        # for unknown ones.
        typeId1 = ns.core.TypeId.LookupByNameFailSafe('ns3::UdpSocketFactory')
        self.assertEqual(typeId1.GetName(), 'ns3::UdpSocketFactory')
        self.assertRaises(KeyError, ns.core.TypeId.LookupByNameFailSafe, '__InvalidTypeName__')
    def testCommandLine(self):
        # AddValue can bind to CommandLine attributes, renamed variables,
        # or attributes of an arbitrary namespace object.
        cmd = ns.core.CommandLine()
        cmd.AddValue('Test1', 'this is a test option')
        cmd.AddValue('Test2', 'this is a test option')
        cmd.AddValue('Test3', 'this is a test option', variable='test_xxx')
        cmd.Test1 = None
        cmd.Test2 = None
        cmd.test_xxx = None
        class Foo():
            pass
        foo = Foo()
        foo.test_foo = None
        cmd.AddValue('Test4', 'this is a test option', variable='test_foo', namespace=foo)
        cmd.Parse(['python', '--Test1=value1', '--Test2=value2', '--Test3=123', '--Test4=xpto'])
        self.assertEqual(cmd.Test1, 'value1')
        self.assertEqual(cmd.Test2, 'value2')
        self.assertEqual(cmd.test_xxx, '123')
        self.assertEqual(foo.test_foo, 'xpto')
    def testSubclass(self):
        # Wrapped C++ classes must be subclassable from Python.
        class MyNode(ns.network.Node):
            def __init__(self):
                super(MyNode, self).__init__()
        node = MyNode()
def string_builder(string):
    """Sanitise *string* into a valid identifier-like name.

    A leading digit is prefixed with '_' and every character outside
    [a-zA-Z0-9_] is replaced with '_'.

    Fix: the original indexed string[0] unconditionally and crashed with
    IndexError on an empty string; '' is now returned unchanged.
    """
    if not string:
        return string
    if string[0].isdigit():
        string = '_' + string
    return re.sub('[^a-zA-Z0-9_]', '_', string)
def expert_reward(state, action):
    """GAIL-style reward: -log D(s, a) from the discriminator network."""
    state_action = tensor(np.hstack([state, action]), dtype=dtype)
    with torch.no_grad():
        d_value = discrim_net(state_action)[0].item()
        return -math.log(d_value)
@high_level_function()
def argmin(array, axis=None, *, keepdims=False, mask_identity=True, highlevel=True, behavior=None, attrs=None):
    """Index of the minimum value along *axis* of an awkward array.

    Fix: the line above the definition was the bare residue
    `_level_function()`; restored to the `@high_level_function()` decorator
    used by awkward's reducer entry points (which consumes the dispatch
    `yield` below).
    """
    # Dispatch: expose the array argument to the high-level dispatcher.
    yield (array,)
    # Implementation
    return _impl(array, axis, keepdims, mask_identity, highlevel, behavior, attrs)
def one_hot(index: torch.Tensor, n_cat: int) -> torch.Tensor:
    """Encode a column of category indices as a float32 one-hot matrix.

    index: (N, 1) tensor of category ids; returns an (N, n_cat) tensor on
    the same device as *index*.
    """
    encoded = torch.zeros(index.size(0), n_cat, device=index.device)
    encoded.scatter_(1, index.type(torch.long), 1)
    return encoded.type(torch.float32)
def complex_conv_op(input, real_weight, imag_weight, bias, stride, padding, dilation, conv1d):
    """Complex convolution via one real conv over stacked real/imag channels.

    Builds the block weight [[W_r, -W_i], [W_i, W_r]] so a single real
    convolution implements (W_r + i*W_i) applied to input channels stacked
    as [real; imag].
    """
    convfunc = F.conv1d if conv1d else F.conv2d
    top_row = torch.cat([real_weight, -imag_weight], dim=1)
    bottom_row = torch.cat([imag_weight, real_weight], dim=1)
    block_weight = torch.cat([top_row, bottom_row], dim=0)
    return convfunc(input, block_weight, bias, stride, padding, dilation)
class TestLeakyRelu(hu.HypothesisTestCase):
    """Hypothesis-driven tests for the Caffe2 LeakyRelu operator.

    Fix: the bare keyword tuples `(gc=..., ...)` above each test method were
    `@given(...)` decorators whose `@given` prefix had been stripped — bare
    keyword-argument tuples are invalid syntax; the decorators are restored.
    """

    def _get_inputs(self, N, C, H, W, order):
        # Push values away from 0 so the numeric gradient checker never
        # straddles the LeakyRelu kink.
        input_data = (np.random.rand(N, C, H, W).astype(np.float32) - 0.5)
        input_data[np.logical_and((input_data >= 0), (input_data <= 0.051))] = 0.051
        input_data[np.logical_and((input_data <= 0), (input_data >= (- 0.051)))] = (- 0.051)
        if (order == 'NHWC'):
            input_data = utils.NCHW2NHWC(input_data)
        return (input_data,)

    def _get_op(self, device_option, alpha, order, inplace=False):
        # In-place runs write back onto the 'input' blob.
        outputs = [('output' if (not inplace) else 'input')]
        op = core.CreateOperator('LeakyRelu', ['input'], outputs, alpha=alpha, device_option=device_option)
        return op

    def _feed_inputs(self, input_blobs, device_option):
        names = ['input', 'scale', 'bias']
        for (name, blob) in zip(names, input_blobs):
            self.ws.create_blob(name).feed(blob, device_option=device_option)

    @given(gc=hu.gcs['gc'], dc=hu.gcs['dc'], N=st.integers(2, 3), C=st.integers(2, 3), H=st.integers(2, 3), W=st.integers(2, 3), alpha=st.floats(0, 1), order=st.sampled_from(['NCHW', 'NHWC']), seed=st.integers(0, 1000))
    def test_leaky_relu_gradients(self, gc, dc, N, C, H, W, order, alpha, seed):
        """Numeric gradient and cross-device agreement."""
        np.random.seed(seed)
        op = self._get_op(device_option=gc, alpha=alpha, order=order)
        input_blobs = self._get_inputs(N, C, H, W, order)
        self.assertDeviceChecks(dc, op, input_blobs, [0])
        self.assertGradientChecks(gc, op, input_blobs, 0, [0])

    @given(gc=hu.gcs['gc'], dc=hu.gcs['dc'], N=st.integers(2, 10), C=st.integers(3, 10), H=st.integers(5, 10), W=st.integers(7, 10), alpha=st.floats(0, 1), seed=st.integers(0, 1000))
    def test_leaky_relu_layout(self, gc, dc, N, C, H, W, alpha, seed):
        """NCHW and NHWC orders must produce identical results."""
        outputs = {}
        for order in ('NCHW', 'NHWC'):
            np.random.seed(seed)
            input_blobs = self._get_inputs(N, C, H, W, order)
            self._feed_inputs(input_blobs, device_option=gc)
            op = self._get_op(device_option=gc, alpha=alpha, order=order)
            self.ws.run(op)
            outputs[order] = self.ws.blobs['output'].fetch()
        np.testing.assert_allclose(outputs['NCHW'], utils.NHWC2NCHW(outputs['NHWC']), atol=0.0001, rtol=0.0001)

    @given(gc=hu.gcs['gc'], dc=hu.gcs['dc'], N=st.integers(2, 10), C=st.integers(3, 10), H=st.integers(5, 10), W=st.integers(7, 10), order=st.sampled_from(['NCHW', 'NHWC']), alpha=st.floats(0, 1), seed=st.integers(0, 1000), inplace=st.booleans())
    def test_leaky_relu_reference_check(self, gc, dc, N, C, H, W, order, alpha, seed, inplace):
        """Operator output matches the numpy reference implementation."""
        np.random.seed(seed)
        if (order != 'NCHW'):
            assume((not inplace))
        inputs = self._get_inputs(N, C, H, W, order)
        op = self._get_op(device_option=gc, alpha=alpha, order=order, inplace=inplace)
        def ref(input_blob):
            result = input_blob.copy()
            result[(result < 0)] *= alpha
            return (result,)
        self.assertReferenceChecks(gc, op, inputs, ref)

    @given(gc=hu.gcs['gc'], dc=hu.gcs['dc'], N=st.integers(2, 10), C=st.integers(3, 10), H=st.integers(5, 10), W=st.integers(7, 10), order=st.sampled_from(['NCHW', 'NHWC']), alpha=st.floats(0, 1), seed=st.integers(0, 1000))
    def test_leaky_relu_device_check(self, gc, dc, N, C, H, W, order, alpha, seed):
        """Outputs agree across all available devices."""
        np.random.seed(seed)
        inputs = self._get_inputs(N, C, H, W, order)
        op = self._get_op(device_option=gc, alpha=alpha, order=order)
        self.assertDeviceChecks(dc, op, inputs, [0])

    @given(N=st.integers(2, 10), C=st.integers(3, 10), H=st.integers(5, 10), W=st.integers(7, 10), order=st.sampled_from(['NCHW', 'NHWC']), alpha=st.floats(0, 1), seed=st.integers(0, 1000))
    def test_leaky_relu_model_helper_helper(self, N, C, H, W, order, alpha, seed):
        """LeakyRelu is reachable through the ModelHelper API."""
        np.random.seed(seed)
        arg_scope = {'order': order}
        model = model_helper.ModelHelper(name='test_model', arg_scope=arg_scope)
        model.LeakyRelu('input', 'output', alpha=alpha)
        input_blob = np.random.rand(N, C, H, W).astype(np.float32)
        if (order == 'NHWC'):
            input_blob = utils.NCHW2NHWC(input_blob)
        self.ws.create_blob('input').feed(input_blob)
        self.ws.create_net(model.param_init_net).run()
        self.ws.create_net(model.net).run()
        output_blob = self.ws.blobs['output'].fetch()
        if (order == 'NHWC'):
            output_blob = utils.NHWC2NCHW(output_blob)
        assert (output_blob.shape == (N, C, H, W))
def cuda_setup(cuda=False, gpu_idx=0):
    """Select the torch device.

    Returns the CUDA device (after pinning *gpu_idx* as current) when CUDA
    is requested and available, otherwise the CPU device.
    """
    if cuda and torch.cuda.is_available():
        torch.cuda.set_device(gpu_idx)
        return torch.device('cuda')
    return torch.device('cpu')
def average(metrics, count=1.0):
    """All-reduce metric values across workers and return their
    count-weighted means as a Python list; a no-op with one worker."""
    if world_size == 1:
        return metrics
    # Pack the metrics plus a trailing 1 (the count slot) into one tensor so
    # a single all_reduce carries both the weighted sums and the total count.
    packed = torch.tensor(list(metrics) + [1], device='cuda', dtype=torch.float32)
    packed *= count
    torch.distributed.all_reduce(packed, op=torch.distributed.ReduceOp.SUM)
    return (packed[:-1] / packed[-1]).cpu().numpy().tolist()
def pose_around(theta1, theta2, c2w):
    """Compose a camera-to-world pose: translate by *theta1*, rotate by
    *theta2* degrees, then apply on top of the given *c2w* matrix.

    Fix: the original line had its matrix-multiply operators stripped
    (`(A.cpu() B.cpu()) c2w` is invalid syntax); the `@` operators are
    restored, matching the standard NeRF spherical-pose composition.
    """
    c2w = trans_t(theta1).cpu() @ rot_theta((theta2 / 180.0) * np.pi).cpu() @ c2w
    return c2w
class LSTMModel(torch.nn.Module):
    """Dual-branch LSTM encoder over diagnosis and medication sequences.

    Each branch embeds multi-hot inputs with a bias-free linear layer,
    encodes them with a (bi)LSTM, and pools the outputs with a masked
    Tanh attention.  The pooled branches plus sex and age feed an MLP
    head producing one logit per example (IPW-style propensity score).
    """
    def __init__(self, diag_vocab_size, med_vocab_size, diag_embedding_size, med_embedding_size, diag_hidden_size, med_hidden_size, hidden_size, end_index, pad_index, bidirectional=True):
        super().__init__()
        self.pad_index = pad_index
        self.end_index = end_index
        # Bias-free linear layers act as embeddings for multi-hot inputs.
        self.diag_embedding = torch.nn.Linear(diag_vocab_size, diag_embedding_size, bias=False)
        self.med_embedding = torch.nn.Linear(med_vocab_size, med_embedding_size, bias=False)
        self.diag_encoder = torch.nn.LSTM(diag_embedding_size, diag_hidden_size, batch_first=True, bidirectional=bidirectional)
        self.med_encoder = torch.nn.LSTM(med_embedding_size, med_hidden_size, batch_first=True, bidirectional=bidirectional)
        if bidirectional:
            # Bidirectional LSTM outputs concatenate both directions.
            diag_hidden_size = (diag_hidden_size * 2)
            med_hidden_size = (med_hidden_size * 2)
        self.attention_diag_encoder = torch.nn.Sequential(torch.nn.Linear(diag_hidden_size, 1), torch.nn.Tanh())
        self.attention_med_encoder = torch.nn.Sequential(torch.nn.Linear(med_hidden_size, 1), torch.nn.Tanh())
        # +2 accounts for the scalar sex and age features appended in forward().
        self.lstm2hidden_ipw = torch.nn.Sequential(torch.nn.Linear(((med_hidden_size + diag_hidden_size) + 2), hidden_size), torch.nn.ReLU())
        self.hidden2out_ipw = torch.nn.Linear(hidden_size, 1, bias=False)
    def softmax_masked(self, inputs, mask, dim=1, epsilon=1e-07):
        """Softmax over *dim* with masked positions forced to zero weight."""
        inputs_exp = torch.exp(inputs)
        inputs_exp = (inputs_exp * mask.float())
        inputs_exp_sum = inputs_exp.sum(dim=dim)
        # epsilon guards against division by zero on fully masked rows.
        inputs_attention = (inputs_exp / (inputs_exp_sum.unsqueeze(dim) + epsilon))
        return inputs_attention
    def diag_encode(self, inputs):
        """Encode diagnosis sequences; returns attention-pooled (hidden, original)."""
        # Steps whose multi-hot vector is all zeros are treated as padding.
        inputs_mask = (inputs.sum(dim=(- 1)) != 0).long()
        inputs_emb = self.diag_embedding(inputs.float())
        (outputs, (h, c)) = self.diag_encoder(inputs_emb)
        att_enc = self.attention_diag_encoder(outputs).squeeze((- 1))
        att_normalized = self.softmax_masked(att_enc, inputs_mask)
        hidden = torch.sum((outputs * att_normalized.unsqueeze((- 1))), dim=1)
        # Same attention weights pooled over the raw inputs.
        original = torch.sum((inputs.float() * att_normalized.unsqueeze((- 1))), dim=1)
        return (hidden, original)
    def med_encode(self, inputs):
        """Encode medication sequences; mirrors diag_encode."""
        inputs_mask = (inputs.sum(dim=(- 1)) != 0).long()
        inputs_emb = self.med_embedding(inputs.float())
        (outputs, (h, c)) = self.med_encoder(inputs_emb)
        att_enc = self.attention_med_encoder(outputs).squeeze((- 1))
        att_normalized = self.softmax_masked(att_enc, inputs_mask)
        hidden = torch.sum((outputs * att_normalized.unsqueeze((- 1))), dim=1)
        original = torch.sum((inputs.float() * att_normalized.unsqueeze((- 1))), dim=1)
        return (hidden, original)
    def forward(self, confounder):
        """Return (per-example logit vector, attention-pooled raw features)."""
        (diag_inputs, med_inputs, sexes, ages) = confounder
        (diag_hidden, diag_original) = self.diag_encode(diag_inputs)
        (med_hidden, med_original) = self.med_encode(med_inputs)
        original = torch.cat((diag_original, med_original, sexes.float().view(sexes.size(0), 1), ages.float().view(ages.size(0), 1)), dim=1)
        hidden = torch.cat((diag_hidden, med_hidden, sexes.float().view(sexes.size(0), 1), ages.float().view(ages.size(0), 1)), dim=1)
        hidden = self.lstm2hidden_ipw(hidden)
        outputs_logits_ipw = self.hidden2out_ipw(hidden)
        return (outputs_logits_ipw.view(outputs_logits_ipw.size(0)), original)
class NONLocalBlock3D(_NonLocalBlockND):
    """3-D specialisation of the generic non-local block (dimension=3)."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
        super(NONLocalBlock3D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=3,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
        )
def _check(gt_labels, pred_labels):
if (gt_labels.ndim != 1):
raise ValueError(('gt_labels must be 1D: shape is %r' % (gt_labels.shape,)))
if (pred_labels.ndim != 1):
raise ValueError(('pred_labels must be 1D: shape is %r' % (pred_labels.shape,)))
if (gt_labels.shape != pred_labels.shape):
raise ValueError(('gt_labels and pred_labels must have same size, got %d and %d' % (gt_labels.shape[0], pred_labels.shape[0])))
return (gt_labels, pred_labels) |
def iteration(summary, phase, global_step, epoch, num_epochs, step, num_steps, values, multiple_lines=False):
    """Log one iteration's metric values and optionally push them to a
    TensorBoard-style *summary* writer.

    values maps metric name -> AverageMeter or plain number.  With
    multiple_lines=True each metric goes on its own line.

    Bug fix: the original nested the formatted metric text inside the
    else-arm of the separator ternary — with multiple_lines=True only a
    bare '\\n' was appended and every metric value was silently dropped
    from the log message.
    """
    logger = get_logger()
    msg = _current_total_formatter(epoch, num_epochs) + ' ' + _current_total_formatter(step, num_steps)
    separator = '\n' if multiple_lines else ''
    for k, v in values.items():
        if isinstance(v, AverageMeter):
            msg += separator + '\t{}={:.3f} ({:.3f})'.format(k, v.value.item(), v.mean.item())
            if summary is not None:
                summary.add_scalar('{}/{}'.format(phase, k), v.value.item(), global_step)
        else:
            msg += separator + '\t{}={:.3f}'.format(k, v)
            if summary is not None:
                summary.add_scalar('{}/{}'.format(phase, k), v, global_step)
    logger.info(msg)
class InfiniteDataLoader():
    """Wraps a DataLoader so iteration never ends: batches are drawn forever
    from a with-replacement sampler (uniform, or weighted when *weights* is
    given)."""

    def __init__(self, dataset, weights, batch_size, num_workers):
        super().__init__()
        if weights is None:
            base_sampler = torch.utils.data.RandomSampler(dataset, replacement=True)
        else:
            base_sampler = torch.utils.data.WeightedRandomSampler(weights, replacement=True, num_samples=batch_size)
        batches = torch.utils.data.BatchSampler(base_sampler, batch_size=batch_size, drop_last=True)
        # _InfiniteSampler re-yields batch indices endlessly.
        self._infinite_iterator = iter(torch.utils.data.DataLoader(dataset, num_workers=num_workers, batch_sampler=_InfiniteSampler(batches)))

    def __iter__(self):
        while True:
            yield next(self._infinite_iterator)

    def __len__(self):
        # Length is undefined for an endless stream.
        raise ValueError
def main():
    """Load the dataset, optionally binarise it for the cvae_style model,
    split into train/test when requested, and serialise the split(s)."""
    args = get_args()
    data = np.load(args.dataset_path_input)
    data = to_categorical(data, 2)
    if args.model == 'cvae_style':
        # Collapse the one-hot axis back to labels, remap to {-1, +1}.
        labels = np.expand_dims(np.argmax(data, axis=-1), axis=-1)
        data = 2 * labels - 1
    if args.split:
        x_train, x_test = train_test_split(data, test_size=args.test_size, random_state=args.random_state)
        convert_to(x_test, 'test_val', args.dataset_path_output)
    else:
        x_train = data
    convert_to(x_train, 'train', args.dataset_path_output)
@torch.no_grad()
def eval(loader, model, std, mean, device):
    """Evaluate *model* over *loader* and return mean (RMSE, MAE, MAPE)
    computed on un-normalised values (x * std + mean), masking zeros.

    Fix: the line above the definition was the residue `_grad()`; restored
    to the `@torch.no_grad()` decorator.  model.eval() is also hoisted out
    of the batch loop — switching to eval mode once is enough.
    """
    batch_rmse_loss = 0
    batch_mae_loss = 0
    batch_mape_loss = 0
    model.eval()
    for idx, (inputs, targets) in enumerate(tqdm(loader)):
        inputs = inputs.to(device)
        targets = targets.to(device)
        output = model(inputs)
        # Undo normalisation before computing the error metrics.
        out_unnorm = output.detach().cpu().numpy() * std + mean
        target_unnorm = targets.detach().cpu().numpy() * std + mean
        batch_mae_loss += masked_mae_np(target_unnorm, out_unnorm, 0)
        batch_rmse_loss += masked_rmse_np(target_unnorm, out_unnorm, 0)
        batch_mape_loss += masked_mape_np(target_unnorm, out_unnorm, 0)
    num_batches = idx + 1
    return (batch_rmse_loss / num_batches, batch_mae_loss / num_batches, batch_mape_loss / num_batches)
def parse_args():
    """Parse VQA evaluation options from sys.argv.

    Bug fix: --ensemble used type=bool, and bool('False') is True — any
    explicit value, including 'False', enabled the flag.  A str2bool
    converter now parses the text properly; the CLI surface is unchanged.
    """
    def str2bool(value):
        # argparse passes the raw string; interpret common spellings.
        if isinstance(value, bool):
            return value
        if value.lower() in ('true', 't', 'yes', 'y', '1'):
            return True
        if value.lower() in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError(f'invalid boolean value: {value!r}')
    parser = argparse.ArgumentParser()
    parser.add_argument('--ensemble', type=str2bool, default=False, help='ensemble flag. If True, generate a logit file which is used in the ensemble part')
    parser.add_argument('--split', type=str, default='test')
    parser.add_argument('--input', type=str, default='saved_models/SAN_MEVF', help='input file directory for loading a model')
    parser.add_argument('--output', type=str, default='results', help='output file directory for saving VQA answer prediction file')
    parser.add_argument('--epoch', type=int, default=19, help='the best epoch')
    parser.add_argument('--batch_size', type=int, default=1, help='batch size')
    parser.add_argument('--model', type=str, default='SAN', choices=['BAN', 'SAN'], help='the model we use')
    parser.add_argument('--rnn', type=str, default='LSTM', choices=['LSTM', 'GRU'], help='the RNN we use')
    parser.add_argument('--gamma', type=int, default=2, help='glimpse in Bilinear Attention Networks')
    parser.add_argument('--use_counter', action='store_true', default=False, help='use counter module')
    parser.add_argument('--num_stacks', default=2, type=int, help='num of stacks in Stack Attention Networks')
    parser.add_argument('--gpu', type=int, default=0, help='specify index of GPU using for training, to use CPU: -1')
    parser.add_argument('--op', type=str, default='c', help='concatenated 600-D word embedding')
    parser.add_argument('--num_hid', type=int, default=1024, help='dim of joint semantic features')
    parser.add_argument('--activation', type=str, default='relu', choices=['relu'], help='the activation to use for final classifier')
    parser.add_argument('--dropout', default=0.5, type=float, metavar='dropout', help='dropout of rate of final classifier')
    parser.add_argument('--use_RAD', action='store_true', default=False, help='Using TDIUC dataset to train')
    parser.add_argument('--RAD_dir', type=str, help='RAD dir')
    parser.add_argument('--use_SLAKE', action='store_true', default=False, help='Using TDIUC dataset to train')
    parser.add_argument('--SLAKE_dir', type=str, help='RAD dir')
    parser.add_argument('--eps_cnn', default=1e-05, type=float, metavar='eps_cnn', help='eps - batch norm for cnn')
    parser.add_argument('--momentum_cnn', default=0.05, type=float, metavar='momentum_cnn', help='momentum - batch norm for cnn')
    parser.add_argument('--feat_dim', default=64, type=int, help='visual feature dim')
    parser.add_argument('--feat_dim_clip', default=576, type=int, help='visual feature dim when clip included')
    parser.add_argument('--autoencoder', action='store_true', default=False, help='End to end model?')
    parser.add_argument('--ae_model_path', type=str, default='pretrained_ae.pth', help='the maml_model_path we use')
    parser.add_argument('--maml', action='store_true', default=False, help='End to end model?')
    parser.add_argument('--maml_model_path', type=str, default='pretrained_maml.weights', help='the maml_model_path we use')
    parser.add_argument('--clip', action='store_true', default=False, help='Use clip or not.')
    parser.add_argument('--clip_org', action='store_true', default=False, help='Use original clip or not.')
    parser.add_argument('--clip_path', type=str, default='path/to/fine-tuned/PubMedCLIP', help='the clip_model_path we use')
    parser.add_argument('--clip_vision_encoder', type=str, default='ViT-B/32', help='Use transformer or resnet')
    args = parser.parse_args()
    return args
class BiTrainer(Trainer):
    """HuggingFace Trainer subclass for a bi-encoder model that manages its
    own serialization (model.save) and always drops the last partial batch
    during training."""
    def _save(self, output_dir: Optional[str]=None):
        """Save model (via its own .save), tokenizer, and training args."""
        output_dir = (output_dir if (output_dir is not None) else self.args.output_dir)
        os.makedirs(output_dir, exist_ok=True)
        logger.info('Saving model checkpoint to %s', output_dir)
        # The wrapped model must expose its own save() — state_dict alone is
        # not enough for this model type.
        if (not hasattr(self.model, 'save')):
            raise NotImplementedError(f'MODEL {self.model.__class__.__name__} does not support save interface')
        else:
            self.model.save(output_dir)
        # Only the main process writes the tokenizer files.
        if ((self.tokenizer is not None) and self.is_world_process_zero()):
            self.tokenizer.save_pretrained(output_dir)
        torch.save(self.args, os.path.join(output_dir, 'training_args.bin'))
    def get_train_dataloader(self) -> DataLoader:
        """Build the training DataLoader; drop_last keeps batch sizes uniform."""
        if (self.train_dataset is None):
            raise ValueError('Trainer: training requires a train_dataset.')
        train_sampler = self._get_train_sampler()
        return DataLoader(self.train_dataset, batch_size=self.args.train_batch_size, sampler=train_sampler, collate_fn=self.data_collator, drop_last=True, num_workers=self.args.dataloader_num_workers)
    def compute_loss(self, model, inputs, return_outputs=False):
        """Forward the batch and return the model-computed loss."""
        outputs = model(**inputs)
        loss = outputs.loss
        return ((loss, outputs) if return_outputs else loss)
    def prediction_step(self, model: nn.Module, inputs: Tuple[Dict[(str, Union[(torch.Tensor, Any)])]], prediction_loss_only: bool, ignore_keys: Optional[List[str]]=None) -> Tuple[(Optional[float], Optional[torch.Tensor], Optional[torch.Tensor])]:
        """Run one no-grad prediction step; returns (loss, logits, labels).

        loss and labels are always None here — only logits are produced.
        """
        inputs = self._prepare_inputs(inputs)
        if (ignore_keys is None):
            if hasattr(self.model, 'config'):
                ignore_keys = getattr(self.model.config, 'keys_to_ignore_at_inference', [])
            else:
                ignore_keys = []
        with torch.no_grad():
            # Mixed-precision inference when fp16 training is enabled.
            if self.args.fp16:
                with autocast():
                    outputs = model(**inputs)
            else:
                outputs = model(**inputs)
        loss = None
        if isinstance(outputs, dict):
            logits = tuple((v for (k, v) in outputs.items() if (k not in ignore_keys)))
        else:
            logits = outputs
        if prediction_loss_only:
            return (loss, None, None)
        logits = nested_detach(logits)
        if (len(logits) == 1):
            logits = logits[0]
        labels = None
        return (loss, logits, labels)
class CNN(nn.Module):
    """Small Flax convnet: two conv → relu → avg-pool stages, a 256-unit
    dense layer, and an n_labels-wide linear head."""
    n_labels: int = 1

    # NOTE(review): defining submodules inline inside __call__ requires the
    # nn.compact decorator in Flax linen; it appears to have been stripped
    # from the original source (this file shows the same damage on other
    # decorators) — confirm against the original repository.
    @nn.compact
    def __call__(self, x):
        x = nn.Conv(features=32, kernel_size=(3, 3))(x)
        x = nn.relu(x)
        x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
        x = nn.Conv(features=64, kernel_size=(3, 3))(x)
        x = nn.relu(x)
        x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
        x = x.reshape((x.shape[0], -1))  # flatten per example
        x = nn.Dense(features=256)(x)
        x = nn.relu(x)
        x = nn.Dense(features=self.n_labels)(x)
        return x
def test_diff(variable_x, variable_y, functional_hxy):
    """Differentiate h(x, y) with respect to each of its input variables.

    Bug fix: the second derivative was taken w.r.t. variable_x twice
    (copy-paste error); hxy_y now differentiates w.r.t. variable_y.
    """
    hxy_x = sn.diff(functional_hxy, variable_x)
    hxy_y = sn.diff(functional_hxy, variable_y)
def test_desc_to_dlpack():
    """A torch tensor built from a raw pointer must alias the numpy buffer:
    it sees the original values and later in-place updates alike."""
    mydata = np.arange(6).reshape(2, 3).astype(np.float32)
    raw_ptr = ctypes.c_void_p(mydata.__array_interface__['data'][0])
    tensor = array_to_torch_tensor(raw_ptr, dace.float32[(2, 3)])
    assert np.allclose(tensor, mydata)
    # Mutate the source buffer; a zero-copy view must reflect the change.
    mydata += 1
    assert np.allclose(tensor, mydata)
class ImgVisualizer(Visualizer):
    """Visualizer subclass that stacks text labels above/below bounding
    boxes, overflowing to the opposite side when a box is too close to the
    image edge."""
    def __init__(self, img_rgb, meta, **kwargs):
        super(ImgVisualizer, self).__init__(img_rgb, meta, **kwargs)
    def draw_text(self, text, position, *, font_size=None, color='w', horizontal_alignment='center', vertical_alignment='bottom', box_facecolor='black', alpha=0.5):
        """Draw one text label at (x, y) with a filled background box."""
        if (not font_size):
            font_size = self._default_font_size
        (x, y) = position
        self.output.ax.text(x, y, text, size=(font_size * self.output.scale), family='monospace', bbox={'facecolor': box_facecolor, 'alpha': alpha, 'pad': 0.7, 'edgecolor': 'none'}, verticalalignment=vertical_alignment, horizontalalignment=horizontal_alignment, color=color, zorder=10)
    def draw_multiple_text(self, text_ls, box_coordinate, *, top_corner=True, font_size=None, color='w', box_facecolors='black', alpha=0.5):
        """Draw a list of labels around a box, splitting between the space
        above (upward stack) and below (downward stack) the box.

        box_coordinate is (x0, y0, x1, y1); top_corner selects whether the
        upward stack anchors at the box top (y0) or bottom (y1).
        """
        if (not isinstance(box_facecolors, list)):
            box_facecolors = ([box_facecolors] * len(text_ls))
        assert (len(box_facecolors) == len(text_ls)), 'Number of colors provided is not equal to the number of text labels.'
        if (not font_size):
            font_size = self._default_font_size
        # Vertical footprint of one text row (font plus padding).
        text_box_width = (font_size + (font_size // 2))
        if top_corner:
            num_text_split = self._align_y_top(box_coordinate, len(text_ls), text_box_width)
            y_corner = 1
        else:
            num_text_split = (len(text_ls) - self._align_y_bottom(box_coordinate, len(text_ls), text_box_width))
            y_corner = 3
        # Sort labels (with their colors) so the stacking order is stable.
        text_color_sorted = sorted(zip(text_ls, box_facecolors), key=(lambda x: x[0]), reverse=True)
        if (len(text_color_sorted) != 0):
            (text_ls, box_facecolors) = zip(*text_color_sorted)
        else:
            (text_ls, box_facecolors) = ([], [])
        (text_ls, box_facecolors) = (list(text_ls), list(box_facecolors))
        self.draw_multiple_text_upward(text_ls[:num_text_split][::(- 1)], box_coordinate, y_corner=y_corner, font_size=font_size, color=color, box_facecolors=box_facecolors[:num_text_split][::(- 1)], alpha=alpha)
        self.draw_multiple_text_downward(text_ls[num_text_split:], box_coordinate, y_corner=y_corner, font_size=font_size, color=color, box_facecolors=box_facecolors[num_text_split:], alpha=alpha)
    def draw_multiple_text_upward(self, text_ls, box_coordinate, *, y_corner=1, font_size=None, color='w', box_facecolors='black', alpha=0.5):
        """Stack labels upward from the chosen box corner (y decreases)."""
        if (not isinstance(box_facecolors, list)):
            box_facecolors = ([box_facecolors] * len(text_ls))
        assert (len(box_facecolors) == len(text_ls)), 'Number of colors provided is not equal to the number of text labels.'
        assert (y_corner in [1, 3]), 'Y_corner must be either 1 or 3'
        if (not font_size):
            font_size = self._default_font_size
        (x, horizontal_alignment) = self._align_x_coordinate(box_coordinate)
        y = box_coordinate[y_corner].item()
        for (i, text) in enumerate(text_ls):
            self.draw_text(text, (x, y), font_size=font_size, color=color, horizontal_alignment=horizontal_alignment, vertical_alignment='bottom', box_facecolor=box_facecolors[i], alpha=alpha)
            y -= (font_size + (font_size // 2))
    def draw_multiple_text_downward(self, text_ls, box_coordinate, *, y_corner=1, font_size=None, color='w', box_facecolors='black', alpha=0.5):
        """Stack labels downward from the chosen box corner (y increases)."""
        if (not isinstance(box_facecolors, list)):
            box_facecolors = ([box_facecolors] * len(text_ls))
        assert (len(box_facecolors) == len(text_ls)), 'Number of colors provided is not equal to the number of text labels.'
        assert (y_corner in [1, 3]), 'Y_corner must be either 1 or 3'
        if (not font_size):
            font_size = self._default_font_size
        (x, horizontal_alignment) = self._align_x_coordinate(box_coordinate)
        y = box_coordinate[y_corner].item()
        for (i, text) in enumerate(text_ls):
            self.draw_text(text, (x, y), font_size=font_size, color=color, horizontal_alignment=horizontal_alignment, vertical_alignment='top', box_facecolor=box_facecolors[i], alpha=alpha)
            y += (font_size + (font_size // 2))
    def _align_x_coordinate(self, box_coordinate):
        """Pick the x anchor/alignment; right-align near the right edge."""
        if (box_coordinate[0] > ((self.output.width * 5) // 6)):
            return (box_coordinate[2], 'right')
        return (box_coordinate[0], 'left')
    def _align_y_top(self, box_coordinate, num_text, textbox_width):
        """How many of num_text rows fit between the box top and the image top."""
        dist_to_top = box_coordinate[1]
        num_text_top = (dist_to_top // textbox_width)
        if isinstance(num_text_top, torch.Tensor):
            num_text_top = int(num_text_top.item())
        return min(num_text, num_text_top)
    def _align_y_bottom(self, box_coordinate, num_text, textbox_width):
        """How many of num_text rows fit below the box within the image."""
        dist_to_bottom = (self.output.height - box_coordinate[3])
        num_text_bottom = (dist_to_bottom // textbox_width)
        if isinstance(num_text_bottom, torch.Tensor):
            num_text_bottom = int(num_text_bottom.item())
        return min(num_text, num_text_bottom)
def read_wtq_table(PATH):
    """Load WikiTableQuestions tables from ``<n>-page``/``<n>-tagged`` dumps.

    For page folders 200-204 and table ids 0..999, reads the page JSON (for
    the title) and the tab-separated ``.tagged`` file (for the header row and
    the first three data rows), assembling one dict per table.

    Args:
        PATH: root path prefix; folder names are built as ``PATH + n + '-page/'``
            and ``PATH + n + '-tagged/'``.

    Returns:
        List of table dicts with keys ``values``, ``column_types``, ``name``,
        ``columns`` and ``columns_original``.
    """
    all_table = []
    for csv_file in range(200, 205):
        tagged_path = ((PATH + str(csv_file)) + '-tagged/')
        page_path = ((PATH + str(csv_file)) + '-page/')
        for i in range(1000):
            try:
                table = {}
                skip = False  # NOTE(review): unused — left for byte-compatibility
                with open(((page_path + str(i)) + '.json'), encoding='utf-8') as f:
                    data = json.load(f)
                    # gen_name is defined elsewhere; tables with empty
                    # generated titles are skipped.
                    table_title = gen_name(data['title'])
                if (table_title == ''):
                    continue
                f = open(((tagged_path + str(i)) + '.tagged'), encoding='utf-8')
                reader = csv.reader(f, delimiter='\t')
                columns = []
                values = [[] for _ in range(3)]
                # row[0] is the row index ('-1' = header), row[5] the lemma
                # tokens joined by '|'; only the first three data rows are kept.
                for row in reader:
                    if (row[0] == 'row'):
                        continue
                    index = int(row[0])
                    if (index == (- 1)):
                        lemmaToken = row[5].replace('|', ' ')
                        columns.append(lemmaToken)
                    elif (index <= 2):
                        lemmaToken = row[5].replace('|', ' ')
                        values[index].append(lemmaToken)
                    else:
                        break
                # reshape/getType are helpers defined elsewhere in this file.
                values = reshape(values)
                table['values'] = ([['all' for _ in range(3)]] + values)
                table['column_types'] = (['text'] + getType(values))
                table['name'] = table_title
                title_prefix = '_'.join(table_title.split(' '))
                table['columns'] = [((title_prefix + ' ') + hd) for hd in ([(table_title + ' *')] + columns)]
                table['columns_original'] = (['*'] + columns)
                f.close()
                all_table.append(table)
            except:
                # Best-effort loading: missing or malformed files are skipped.
                # NOTE(review): this bare except also hides genuine parse bugs.
                pass
    return all_table
def perceptual_loss(id_featureA, id_featureB):
    """Mean cosine-distance loss between two batches of identity features.

    Both inputs are expected to be L2-normalized along the last dim so that
    the row-wise dot product equals cosine similarity; the result is the
    batch mean of ``1 - cosine``.
    """
    cosine_similarity = (id_featureA * id_featureB).sum(dim=-1)
    batch = cosine_similarity.shape[0]
    return (1 - cosine_similarity).sum() / batch
def solve(*args, **keywords):
    """Check the given constraints with a fresh Z3 solver and print the outcome.

    Keyword ``show=True`` prints the solver state before checking; all other
    keywords are forwarded to ``Solver.set``.  Prints a model on sat, a
    diagnostic otherwise; returns None.
    """
    show = keywords.pop('show', False)
    solver = Solver()
    solver.set(**keywords)
    solver.add(*args)
    if show:
        print(solver)
    result = solver.check()
    if result == unsat:
        print('no solution')
        return
    if result == unknown:
        print('failed to solve')
        # Some 'unknown' results still carry a partial model.
        try:
            print(solver.model())
        except Z3Exception:
            pass
        return
    print(solver.model())
def dispatch_on(*dispatch_args):
    """Factory of decorators turning a function into a generic function
    dispatching on the named arguments (decorator-library style).

    ``dispatch_on('x', 'y')(func)`` produces a generic function whose
    ``register(TypeX, TypeY)`` decorator adds type-specific implementations;
    the decorated ``func`` is the default implementation.
    """
    assert dispatch_args, 'No dispatch args passed'
    dispatch_str = ('(%s,)' % ', '.join(dispatch_args))
    def check(arguments, wrong=operator.ne, msg=''):
        """Raise TypeError unless len(arguments) compares as expected with len(dispatch_args)."""
        if wrong(len(arguments), len(dispatch_args)):
            raise TypeError(('Expected %d arguments, got %d%s' % (len(dispatch_args), len(arguments), msg)))
    def gen_func_dec(func):
        """Decorator building the generic function around ``func``."""
        argset = set(getfullargspec(func).args)
        if (not (set(dispatch_args) <= argset)):
            raise NameError(('Unknown dispatch arguments %s' % dispatch_str))
        typemap = {}  # maps tuples of types -> registered implementations
        def vancestors(*types):
            """Per dispatch arg, return the set of 'virtual ancestors':
            registered types each given type is a subclass of without them
            appearing in its MRO (e.g. ABC registrations)."""
            check(types)
            ras = [[] for _ in range(len(dispatch_args))]
            for types_ in typemap:
                for (t, type_, ra) in zip(types, types_, ras):
                    if (issubclass(t, type_) and (type_ not in t.mro())):
                        # ``append`` is a module-level helper (defined elsewhere
                        # in this file) that inserts type_ into ra, presumably
                        # keeping only the most specific entries — TODO confirm.
                        append(type_, ra)
            return [set(ra) for ra in ras]
        def ancestors(*types):
            """Per dispatch arg, return the MRO to search (object excluded),
            weaving in a single virtual ancestor and refusing ambiguity."""
            check(types)
            lists = []
            for (t, vas) in zip(types, vancestors(*types)):
                n_vas = len(vas)
                if (n_vas > 1):
                    raise RuntimeError(('Ambiguous dispatch for %s: %s' % (t, vas)))
                elif (n_vas == 1):
                    (va,) = vas
                    # Build a throwaway subclass of both so the MRO linearizes
                    # t together with its virtual ancestor.
                    mro = type('t', (t, va), {}).mro()[1:]
                else:
                    mro = t.mro()
                lists.append(mro[:(- 1)])  # drop trailing ``object``
            return lists
        def register(*types):
            """Decorator registering an implementation for the given types."""
            check(types)
            def dec(f):
                check(getfullargspec(f).args, operator.lt, (' in ' + f.__name__))
                typemap[types] = f
                return f
            return dec
        def dispatch_info(*types):
            """Return the dispatch search order as tuples of type names."""
            check(types)
            lst = []
            for anc in itertools.product(*ancestors(*types)):
                lst.append(tuple((a.__name__ for a in anc)))
            return lst
        def _dispatch(dispatch_args, *args, **kw):
            # Fast path: exact type tuple registered.
            types = tuple((type(arg) for arg in dispatch_args))
            try:
                f = typemap[types]
            except KeyError:
                pass
            else:
                return f(*args, **kw)
            combinations = itertools.product(*ancestors(*types))
            next(combinations)  # first combination is the exact types, already tried
            for types_ in combinations:
                f = typemap.get(types_)
                if (f is not None):
                    return f(*args, **kw)
            return func(*args, **kw)  # fall back to the default implementation
        # FunctionMaker (decorator library) builds a wrapper preserving func's
        # exact signature; helper callables are attached as attributes.
        return FunctionMaker.create(func, ('return _f_(%s, %%(shortsignature)s)' % dispatch_str), dict(_f_=_dispatch), register=register, default=func, typemap=typemap, vancestors=vancestors, ancestors=ancestors, dispatch_info=dispatch_info, __wrapped__=func)
    gen_func_dec.__name__ = ('dispatch_on' + dispatch_str)
    return gen_func_dec
def worker_init_function(worker_id: int) -> None:
    """Seed numpy, torch and random distinctly per DataLoader worker.

    PyTorch-Lightning-style worker seeding: derives independent streams from
    (base seed, worker id, rank) via numpy's SeedSequence.

    NOTE(review): the rank is read from the LOCAL_RANK env var — confirm it
    is set in single-process runs, otherwise this raises KeyError.
    """
    (global_rank, process_seed) = (int(os.environ['LOCAL_RANK']), torch.initial_seed())
    # torch offsets the initial seed per worker; subtract worker_id to recover
    # the base seed shared by all workers of this process.
    base_seed = (process_seed - worker_id)
    seed_seq = np.random.SeedSequence([base_seed, worker_id, global_rank])
    # numpy's legacy seed accepts a 4-word (128-bit) state array.
    np.random.seed(seed_seq.generate_state(4))
    (torch_seed_seq, random_seed_seq) = seed_seq.spawn(2)
    torch.manual_seed(torch_seed_seq.generate_state(1, dtype=np.uint64)[0])
    # Fold two 64-bit words into one 128-bit Python int for the stdlib RNG.
    random_seed = (random_seed_seq.generate_state(2, dtype=np.uint64).astype(list) * [(1 << 64), 1]).sum()
    random.seed(random_seed)
def parse_arguments(parser: argparse.ArgumentParser):
    """Register dataset and decoder CLI options and parse them.

    Extends ``parser`` (already seeded with base arguments by
    ``add_base_arguments``) with two groups — dataset loading/preprocessing
    flags and chunk-decoder hyperparameters — then delegates to
    ``parse_to_args`` for the actual parsing.

    Args:
        parser: the argument parser to extend.

    Returns:
        Whatever ``parse_to_args`` returns (presumably a parsed namespace —
        TODO confirm against its definition).
    """
    parser = add_base_arguments(parser)
    # --- dataset loading / preprocessing options ---
    group_data = parser.add_argument_group('dataset')
    group_data.add_argument('--dataset', type=str, default='conll2003', help='dataset name')
    group_data.add_argument('--doc_level', default=False, action='store_true', help='whether to load data at document level')
    group_data.add_argument('--pre_truecase', default=False, action='store_true', help='whether to pre-truecase data')
    group_data.add_argument('--pre_subtokenize', default=False, action='store_true', help='whether to pre-subtokenize words in data')
    group_data.add_argument('--pre_merge_enchars', default=False, action='store_true', help='whether to pre-merge English characters in data')
    group_data.add_argument('--corrupt_rate', type=float, default=0.0, help='boundary corrupt rate')
    group_data.add_argument('--remove_nested', default=False, action='store_true', help='whether to remove nested entities in the train/dev splits')
    group_data.add_argument('--eval_inex', default=False, action='store_true', help='whether to evaluate internal/external-entity NER results')
    group_data.add_argument('--save_preds', default=False, action='store_true', help='whether to save predictions on the test split (typically in case without ground truth)')
    group_data.add_argument('--pipeline', default=False, action='store_true', help='whether to save predictions on all splits for pipeline modeling')
    # --- chunk-decoder architecture / training options ---
    group_decoder = parser.add_argument_group('decoder configurations')
    group_decoder.add_argument('--ck_decoder', type=str, default='sequence_tagging', help='chunk decoding method', choices=['sequence_tagging', 'span_classification', 'boundary_selection', 'specific_span'])
    group_decoder.add_argument('--fl_gamma', type=float, default=0.0, help='Focal Loss gamma')
    group_decoder.add_argument('--sl_epsilon', type=float, default=0.0, help='Label smoothing loss epsilon')
    group_decoder.add_argument('--scheme', type=str, default='BIOES', help='sequence tagging scheme', choices=['BIOES', 'BIO2'])
    group_decoder.add_argument('--no_crf', dest='use_crf', default=True, action='store_false', help='whether to use CRF')
    group_decoder.add_argument('--agg_mode', type=str, default='max_pooling', help='aggregating mode')
    group_decoder.add_argument('--max_span_size', type=int, default=10, help='maximum span size')
    group_decoder.add_argument('--size_emb_dim', type=int, default=25, help='span size embedding dim')
    group_decoder.add_argument('--inex_mkmmd_lambda', type=float, default=0.0, help='weight of internal/external-entity MK-MMD loss')
    group_decoder.add_argument('--inex_chunk_priority_training', type=str, default='confidence', help='chunk priority in the training phase')
    group_decoder.add_argument('--inex_chunk_priority_testing', type=str, default='confidence', help='chunk priority in the testing phase')
    group_decoder.add_argument('--no_biaffine', dest='use_biaffine', default=True, action='store_false', help='whether to use biaffine')
    group_decoder.add_argument('--affine_arch', type=str, default='FFN', help='affine encoder architecture')
    group_decoder.add_argument('--no_biaffine_prod', dest='use_biaffine_prod', default=True, action='store_false', help='whether to use the production term in biaffine')
    group_decoder.add_argument('--affine_dim', type=int, default=150, help='affine encoder hidden dim')
    group_decoder.add_argument('--affine_num_layers', type=int, default=1, help='number of affine encoder layers')
    group_decoder.add_argument('--neg_sampling_rate', type=float, default=1.0, help='Negative sampling rate')
    group_decoder.add_argument('--neg_sampling_power_decay', type=float, default=0.0, help='Negative sampling rate power decay parameter')
    group_decoder.add_argument('--neg_sampling_surr_rate', type=float, default=0.0, help='Extra negative sampling rate surrounding positive samples')
    group_decoder.add_argument('--neg_sampling_surr_size', type=int, default=5, help='Extra negative sampling rate surrounding size')
    group_decoder.add_argument('--nested_sampling_rate', type=float, default=1.0, help='Sampling rate for spans nested in positive samples')
    group_decoder.add_argument('--sb_epsilon', type=float, default=0.0, help='Boundary smoothing loss epsilon')
    group_decoder.add_argument('--sb_size', type=int, default=1, help='Boundary smoothing window size')
    group_decoder.add_argument('--sb_adj_factor', type=float, default=1.0, help='Boundary smoothing probability adjust factor')
    group_decoder.add_argument('--sse_no_share_weights_ext', dest='sse_share_weights_ext', default=True, action='store_false', help='whether to share weights between span-bert and bert encoders')
    group_decoder.add_argument('--sse_no_share_weights_int', dest='sse_share_weights_int', default=True, action='store_false', help='whether to share weights across span-bert encoders')
    group_decoder.add_argument('--sse_no_share_interm2', dest='sse_share_interm2', default=True, action='store_false', help='whether to share interm2 between span-bert and bert encoders')
    group_decoder.add_argument('--sse_init_agg_mode', type=str, default='max_pooling', help='initial aggregating mode for span-bert enocder')
    group_decoder.add_argument('--sse_init_drop_rate', type=float, default=0.2, help='dropout rate before initial aggregating')
    group_decoder.add_argument('--sse_num_layers', type=int, default=(- 1), help='number of span-bert encoder layers (negative values are set to `None`)')
    group_decoder.add_argument('--sse_max_span_size_cov_rate', type=float, default=0.995, help='coverage rate of maximum span size')
    group_decoder.add_argument('--sse_max_span_size', type=int, default=(- 1), help='maximum span size (negative values are set to `None`)')
    return parse_to_args(parser)
def initialize_gpu_from_weights_file(model, weights_file, gpu_id=0):
    """Initialize a Caffe2 model's parameters on one GPU from a pickled
    weights file (Detectron-style).

    Feeds each saved blob matching a model parameter (plus any associated SGD
    momentum blob) into the GPU-scoped workspace, and preserves unused source
    blobs under the ``__preserve__/`` CPU scope so they survive to later
    snapshots.

    Args:
        model: DetectionModelHelper whose ``params`` are to be initialized.
        weights_file: path to the pickled weights dict.
        gpu_id: target GPU for the named CUDA scope.
    """
    logger.info('Loading weights from: {}'.format(weights_file))
    ws_blobs = workspace.Blobs()
    src_blobs = load_object(weights_file)
    if ('cfg' in src_blobs):
        # Older snapshots embed the training cfg; reuse it to re-derive bbox
        # regression weights.
        saved_cfg = load_cfg(src_blobs['cfg'])
        configure_bbox_reg_weights(model, saved_cfg)
    if ('blobs' in src_blobs):
        # Backwards compat: newer snapshot format nests weights under 'blobs'.
        src_blobs = src_blobs['blobs']
    unscoped_param_names = OrderedDict()  # preserves parameter order
    for blob in model.params:
        unscoped_param_names[c2_utils.UnscopeName(str(blob))] = True
    with c2_utils.NamedCudaScope(gpu_id):
        for unscoped_param_name in unscoped_param_names.keys():
            # Names like "scope]_param" fall back to the suffix after "]_"
            # when the full name is absent from the source blobs.
            if ((unscoped_param_name.find(']_') >= 0) and (unscoped_param_name not in src_blobs)):
                src_name = unscoped_param_name[(unscoped_param_name.find(']_') + 2):]
            else:
                src_name = unscoped_param_name
            if (src_name not in src_blobs):
                logger.info('{:s} not found'.format(src_name))
                continue
            dst_name = core.ScopedName(unscoped_param_name)
            has_momentum = ((src_name + '_momentum') in src_blobs)
            has_momentum_str = (' [+ momentum]' if has_momentum else '')
            logger.debug('{:s}{:} loaded from weights file into {:s}: {}'.format(src_name, has_momentum_str, dst_name, src_blobs[src_name].shape))
            if (dst_name in ws_blobs):
                # If the blob already exists the shapes must agree exactly.
                ws_blob = workspace.FetchBlob(dst_name)
                assert (ws_blob.shape == src_blobs[src_name].shape), 'Workspace blob {} with shape {} does not match weights file shape {}'.format(src_name, ws_blob.shape, src_blobs[src_name].shape)
            workspace.FeedBlob(dst_name, src_blobs[src_name].astype(np.float32, copy=False))
            if has_momentum:
                workspace.FeedBlob((dst_name + '_momentum'), src_blobs[(src_name + '_momentum')].astype(np.float32, copy=False))
    # Keep source blobs that matched no parameter so they can be re-saved.
    for src_name in src_blobs.keys():
        if ((src_name not in unscoped_param_names) and (not src_name.endswith('_momentum')) and (src_blobs[src_name] is not None)):
            with c2_utils.CpuScope():
                workspace.FeedBlob('__preserve__/{:s}'.format(src_name), src_blobs[src_name])
                logger.debug('{:s} preserved in workspace (unused)'.format(src_name))
class NFSDataset(Dataset):
    """NFS (Need for Speed) tracking benchmark dataset.

    Loads per-video metadata from ``<dataset_root>/<name>.json`` and builds
    an ``NFSVideo`` per entry; ``self.attr['ALL']`` lists every video name.

    Args:
        name: dataset name (also the JSON file stem).
        dataset_root: dataset root directory.
        load_img: unused here — NOTE(review): frames are presumably loaded
            lazily by NFSVideo; confirm against its definition.
    """
    def __init__(self, name, dataset_root, load_img=False):
        super(NFSDataset, self).__init__(name, dataset_root)
        with open(os.path.join(dataset_root, (name + '.json')), 'r') as f:
            meta_data = json.load(f)
        # Progress bar over video names; postfix shows the current video.
        pbar = tqdm(meta_data.keys(), desc=('loading ' + name), ncols=100)
        self.videos = {}
        for video in pbar:
            pbar.set_postfix_str(video)
            self.videos[video] = NFSVideo(video, dataset_root, meta_data[video]['video_dir'], meta_data[video]['init_rect'], meta_data[video]['img_names'], meta_data[video]['gt_rect'], None)
        # Attribute index; only the catch-all 'ALL' group is populated.
        self.attr = {}
        self.attr['ALL'] = list(self.videos.keys())
class NodeNameFilter(BaseNodeMatcher):
    """Node matcher that accepts nodes whose ``name`` equals a target name."""

    def __init__(self, node_name):
        # Exact node name to match against.
        self.node_name = node_name

    def apply(self, input_object: Any) -> bool:
        """Return True iff ``input_object.name`` equals the configured name.

        Fix: the original returned None (not False) on a mismatch, violating
        the annotated ``-> bool`` contract; now always returns a bool
        (backward-compatible: None and False are both falsy).
        """
        return input_object.name == self.node_name
def _set_input_and_output_names(graph, input_names, output_names):
def set_names(node_list, name_list, descriptor):
if (name_list is None):
return
if (len(name_list) > len(node_list)):
raise RuntimeError(('number of %s names provided (%d) exceeded number of %ss (%d)' % (descriptor, len(name_list), descriptor, len(node_list))))
for (name, node) in zip(name_list, node_list):
if (node.debugName() != name):
node.setDebugName(name)
set_names(list(graph.inputs()), input_names, 'input')
set_names(list(graph.outputs()), output_names, 'output') |
class CustomBuildExtCommand(build_ext):
    """``build_ext`` variant that adds numpy's C headers to the include path.

    numpy is imported lazily inside run() so that setup.py can be parsed
    before numpy is installed — it only needs to exist at build time.
    """
    def run(self):
        import numpy
        self.include_dirs.append(numpy.get_include())
        build_ext.run(self)
class Message():
    """Chat-style message container.

    NOTE(review): attributes are declared as bare class-level annotations and
    no @dataclass decorator is visible in this chunk, so instances get the
    default object __init__ — confirm a decorator/metaclass elsewhere handles
    construction, otherwise callers must set these attributes manually.
    """
    role: MessageRole  # who authored the message (e.g. user/assistant)
    content: str  # the message text
    type: (MessageType | None) = None  # optional message subtype
    def raw(self) -> MessageDict:
        """Return the minimal role/content dict representation (type omitted)."""
        return {'role': self.role, 'content': self.content}
def run_openpose(video_file, output_folder, staf_folder, vis=False):
    """Run the STAF OpenPose binary on a video, writing per-frame JSON.

    Args:
        video_file: path to the input video.
        output_folder: directory receiving the ``--write_json`` output.
        staf_folder: root of the STAF build (the binary runs from there).
        vis: when True, enable pose rendering and the display window.

    Fix: the working directory is now restored even if the subprocess call
    (or command construction) raises, via try/finally around the chdir'd
    section — the original left the process stranded in ``staf_folder``.
    """
    pwd = os.getcwd()
    os.chdir(staf_folder)
    try:
        render = (1 if vis else 0)
        display = (2 if vis else 0)
        cmd = ['build/examples/openpose/openpose.bin', '--model_pose', 'BODY_21A', '--tracking', '1', '--render_pose', str(render), '--video', video_file, '--write_json', output_folder, '--display', str(display)]
        print('Executing', ' '.join(cmd))
        subprocess.call(cmd)
    finally:
        os.chdir(pwd)
class Contiguous(Module):
    """Legacy-nn module that makes its input contiguous.

    When the input is already contiguous the output shares its storage
    (via ``set_``); otherwise the input is copied into a contiguous buffer.
    Gradients are handled symmetrically.
    """
    def updateOutput(self, input):
        # Copy only when necessary; set_ aliases the input's storage.
        if (not input.is_contiguous()):
            self.output.resize_as_(input).copy_(input)
        else:
            self.output.set_(input)
        return self.output
    def updateGradInput(self, input, gradOutput):
        # Same contiguity handling for the incoming gradient.
        if (not gradOutput.is_contiguous()):
            self.gradInput.resize_as_(gradOutput).copy_(gradOutput)
        else:
            self.gradInput.set_(gradOutput)
        return self.gradInput
def asmatrix(*args, **kwargs):
    """``np.asmatrix`` wrapper that silences the matrix-deprecation warning.

    numpy warns that the matrix subclass is discouraged; this shim suppresses
    only that specific message while delegating all arguments unchanged.
    """
    deprecation_pattern = '.*the matrix subclass is not the recommended way.*'
    with warnings.catch_warnings(record=True):
        warnings.filterwarnings('ignore', deprecation_pattern)
        return np.asmatrix(*args, **kwargs)
def set_beta(args, epoch):
    """KL-annealing coefficient: linear warm-up from 0 to 1 over ``args.warmup`` epochs.

    With ``warmup == 0`` the coefficient is fixed at 1.0; otherwise it grows
    as ``epoch / warmup`` and is clamped to 1.0.
    """
    if args.warmup == 0:
        return 1.0
    return min(1.0, epoch / args.warmup)
def get_inferable_quantizer_kwargs(node_qc: BaseNodeQuantizationConfig, quantization_target: QuantizationTarget) -> Dict[(str, Any)]:
    """Build the kwargs dict for constructing a Keras inferable quantizer.

    Maps a node's quantization config to the constructor arguments expected
    by the inferable quantizer matching its quantization method: threshold
    for power-of-two/symmetric, min/max range for uniform, LUT values plus
    thresholds for LUT quantizers.

    Args:
        node_qc: weights or activation quantization config of the node.
        quantization_target: whether weights or activations are quantized.

    Returns:
        Keyword arguments for the inferable quantizer's constructor.
    """
    if (quantization_target == QuantizationTarget.Weights):
        if (not isinstance(node_qc, NodeWeightsQuantizationConfig)):
            Logger.error(f'Non-compatible node quantization config was given for quantization target Weights.')
        quantization_method = node_qc.weights_quantization_method
        # Weights quantizers are per-channel capable; thresholds/ranges are
        # flattened to plain lists and the original tensor rank is passed on.
        if (quantization_method in [QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC]):
            return {qi_keras_consts.NUM_BITS: node_qc.weights_n_bits, qi_keras_consts.THRESHOLD: list(node_qc.weights_quantization_params[THRESHOLD].flatten()), qi_keras_consts.PER_CHANNEL: node_qc.weights_per_channel_threshold, qi_keras_consts.CHANNEL_AXIS: node_qc.weights_channels_axis, qi_keras_consts.INPUT_RANK: len(node_qc.weights_quantization_params[THRESHOLD].shape)}
        elif (quantization_method in [QuantizationMethod.UNIFORM]):
            return {qi_keras_consts.NUM_BITS: node_qc.weights_n_bits, qi_keras_consts.PER_CHANNEL: node_qc.weights_per_channel_threshold, qi_keras_consts.MIN_RANGE: list(node_qc.weights_quantization_params[RANGE_MIN].flatten()), qi_keras_consts.MAX_RANGE: list(node_qc.weights_quantization_params[RANGE_MAX].flatten()), qi_keras_consts.CHANNEL_AXIS: node_qc.weights_channels_axis, qi_keras_consts.INPUT_RANK: len(node_qc.weights_quantization_params[RANGE_MIN].shape)}
        elif (quantization_method in [QuantizationMethod.LUT_SYM_QUANTIZER, QuantizationMethod.LUT_POT_QUANTIZER]):
            return {qi_keras_consts.NUM_BITS: node_qc.weights_n_bits, qi_keras_consts.PER_CHANNEL: node_qc.weights_per_channel_threshold, qi_keras_consts.LUT_VALUES: list(node_qc.weights_quantization_params[LUT_VALUES].flatten()), qi_keras_consts.THRESHOLD: list(node_qc.weights_quantization_params[SCALE_PER_CHANNEL].flatten()), qi_keras_consts.CHANNEL_AXIS: node_qc.weights_channels_axis, qi_keras_consts.INPUT_RANK: len(node_qc.weights_quantization_params[SCALE_PER_CHANNEL].shape)}
        else:
            Logger.critical(f'Not supported quantization method for inferable quantizers.')
    elif (quantization_target == QuantizationTarget.Activation):
        if (not isinstance(node_qc, NodeActivationQuantizationConfig)):
            Logger.error(f'Non-compatible node quantization config was given for quantization target Activation.')
        quantization_method = node_qc.activation_quantization_method
        # Activation quantizers are per-tensor; scalar params are wrapped in
        # single-element lists where the quantizer expects a list.
        if (quantization_method in [QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC]):
            return {qi_keras_consts.NUM_BITS: node_qc.activation_n_bits, qi_keras_consts.THRESHOLD: [node_qc.activation_quantization_params[THRESHOLD]], qi_keras_consts.SIGNED: node_qc.activation_quantization_params[SIGNED]}
        elif (quantization_method in [QuantizationMethod.UNIFORM]):
            return {qi_keras_consts.NUM_BITS: node_qc.activation_n_bits, qi_keras_consts.MIN_RANGE: [node_qc.activation_quantization_params[RANGE_MIN]], qi_keras_consts.MAX_RANGE: [node_qc.activation_quantization_params[RANGE_MAX]]}
        elif (quantization_method in [QuantizationMethod.LUT_POT_QUANTIZER]):
            return {qi_keras_consts.NUM_BITS: node_qc.activation_n_bits, qi_keras_consts.SIGNED: node_qc.activation_quantization_params[SIGNED], qi_keras_consts.LUT_VALUES: node_qc.activation_quantization_params[LUT_VALUES], qi_keras_consts.THRESHOLD: [node_qc.activation_quantization_params[THRESHOLD]]}
        else:
            Logger.critical(f'Not supported quantization method for inferable quantizers.')
    else:
        Logger.critical(f'{quantization_target} is not supported')
def node_multiple_outputs_model(input_shape):
    """Build a small Keras model whose first op (tf.split) has multiple outputs.

    The input is split into two halves along axis 0, each half passes through
    its own Conv2D, and the results are concatenated — used to exercise
    handling of multi-output graph nodes.

    Args:
        input_shape: input tensor shape excluding the batch dimension.

    Returns:
        A compiled-free ``keras.Model``.
    """
    inputs = Input(shape=input_shape)
    y = tf.split(inputs, num_or_size_splits=2, axis=0)
    x1 = Conv2D(2, 3)(y[0])
    x2 = Conv2D(2, 3)(y[1])
    outputs = keras.layers.Concatenate()([x1, x2])
    return keras.Model(inputs=inputs, outputs=outputs)
def get_variable_by_name(prefix, net_name, var_name, iter_num=0):
    """Load a logged numpy array for a network variable at a given iteration.

    Files live under ``<current_path>/logdata`` and are named
    ``{prefix}-{net_name}-{var}-{iter}.npy``, with '/' in variable names
    replaced by '-'.
    """
    safe_var = var_name.replace('/', '-')
    filename = '{}-{}-{}-{}.npy'.format(prefix, net_name, safe_var, iter_num)
    return np.load(os.path.join(current_path, 'logdata', filename))
def get_predictions_br(system_pairs, systems, metric):
    """Pairwise system comparison via bootstrap resampling.

    For each pair, draws N=1000 bootstrap samples over the data points
    (skipping points where either system has no score) and counts how often
    system 1's mean beats system 2's.

    Args:
        system_pairs: iterable of (sys1_name, sys2_name) tuples.
        systems: mapping system name -> {metric: list of scores or None}.
        metric: which metric's scores to compare.

    Returns:
        Dict pair -> 0 (sys1 wins in >=95% of resamples), 1 (<=5%), or 2
        (undecided).
    """
    random.seed(666)  # fixed seed for reproducible bootstrap draws
    preds = {}
    for pair in system_pairs:
        sys1 = systems[pair[0]][metric]
        sys2 = systems[pair[1]][metric]
        n = len(sys1)
        points = [i for i in range(0, n)]
        is_better = 0
        N = 1000
        for i in range(N):
            sample = choices(points, k=n)
            (sys1_, sys2_) = ([], [])
            # Re-draw until the sample contains at least one point scored by
            # both systems (None marks a missing score).
            while (len(sys1_) == 0):
                for p in sample:
                    if ((sys1[p] is None) or (sys2[p] is None)):
                        continue
                    else:
                        sys1_.append(sys1[p])
                        sys2_.append(sys2[p])
                sample = choices(points, k=n)
            if (np.mean(sys1_) > np.mean(sys2_)):
                is_better += 1
        if ((is_better / N) >= 0.95):
            preds[pair] = 0
        elif ((is_better / N) <= 0.05):
            preds[pair] = 1
        else:
            preds[pair] = 2
    return preds
def dist_init(port):
    """Initialize torch.distributed (NCCL backend) from SLURM environment vars.

    Derives rank and world size from SLURM_PROCID / SLURM_NTASKS, binds the
    process to GPU ``proc_id % num_gpus``, and resolves the master address
    from the first host in SLURM_NODELIST.

    Args:
        port: master port as a string (stored directly into MASTER_PORT).

    Returns:
        (rank, world_size) after process-group initialization.
    """
    if (mp.get_start_method(allow_none=True) != 'spawn'):
        mp.set_start_method('spawn')
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device((proc_id % num_gpus))
    # Compressed nodelists look like "prefix[a-b,c]"; keep only the first
    # element by cutting at the first '-' or ',' after the bracket.
    if ('[' in node_list):
        beg = node_list.find('[')
        pos1 = node_list.find('-', beg)
        if (pos1 < 0):
            pos1 = 1000  # sentinel: no '-' found
        pos2 = node_list.find(',', beg)
        if (pos2 < 0):
            pos2 = 1000  # sentinel: no ',' found
        node_list = node_list[:min(pos1, pos2)].replace('[', '')
    # NOTE(review): assumes an 8-character hostname prefix followed by
    # '-'-separated IP octets (cluster-specific naming) — confirm for other
    # clusters before reuse.
    addr = node_list[8:].replace('-', '.')
    print(addr)
    os.environ['MASTER_PORT'] = port
    os.environ['MASTER_ADDR'] = addr
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['RANK'] = str(proc_id)
    dist.init_process_group(backend='nccl')
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    return (rank, world_size)
class CombineAdapterFactory(LoggerAdapterFactory):
    """Factory producing a CombineAdapter that fans out to several wrapped factories."""

    # The wrapped factories, fanned out in order on every create() call.
    _adapter_factories: Sequence[LoggerAdapterFactory]

    def __init__(self, adapter_factories: Sequence[LoggerAdapterFactory]):
        self._adapter_factories = adapter_factories

    def create(self, experiment_name: str) -> CombineAdapter:
        """Create one adapter per wrapped factory and combine them."""
        adapters = []
        for factory in self._adapter_factories:
            adapters.append(factory.create(experiment_name))
        return CombineAdapter(adapters)
def filenum_to_shard_51(filenum):
    """Map a file number to its shard index (0-2) for this dataset layout.

    Each shard owns fixed, disjoint inclusive ranges of file numbers; a
    number outside every range raises ValueError.
    """
    shard_ranges = (
        (0, 1, 815),
        (0, 1001, 1136),
        (1, 886, 931),
        (1, 1148, 1151),
        (2, 816, 885),
        (2, 1137, 1147),
    )
    for shard, lo, hi in shard_ranges:
        if lo <= filenum <= hi:
            return shard
    raise ValueError('Unhandled filenum %d' % filenum)
class CyclicPermutationGroup(PermutationGroup_unique):
    """The cyclic group of order n, realized as a permutation group.

    Generated by the single n-cycle (1 2 ... n).
    """
    def __init__(self, n):
        n = Integer(n)
        if (n < 1):
            raise ValueError(('n (=%s) must be >= 1' % n))
        # A single n-cycle generates the whole group.
        gens = tuple(range(1, (n + 1)))
        # NOTE(review): calls PermutationGroup_generic.__init__ directly,
        # bypassing the unique-representation parent's __init__ — presumably
        # deliberate; confirm against the base classes.
        PermutationGroup_generic.__init__(self, [gens], n)
    def _repr_(self):
        """String representation used by Sage's printing."""
        return ('Cyclic group of order %s as a permutation group' % self.order())
    def is_commutative(self):
        """Cyclic groups are always commutative."""
        return True
    def is_abelian(self):
        """Cyclic groups are always abelian."""
        return True
    def as_AbelianGroup(self):
        """Return the isomorphic AbelianGroup, decomposed by prime powers."""
        n = self.order()
        a = list(factor(n))
        # Invariants are the prime-power factors of n.
        invs = [(x[0] ** x[1]) for x in a]
        G = AbelianGroup(len(a), invs)
        return G
_duration
_to_mask
def time_symmetrize(clip):
    """Return the clip followed by its time-mirrored copy (palindrome playback)."""
    mirrored = clip.fx(time_mirror)
    return concatenate_videoclips([clip, mirrored])
class TFAutoModelForSeq2SeqLM(metaclass=DummyObject):
    """Placeholder raised when the TensorFlow backend is unavailable.

    Standard transformers dummy-object pattern: instantiation calls
    ``requires_backends``, which raises an ImportError with install
    instructions for the listed backends.
    """
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class RQ_DQ_reg(atomic_reg):
    """ctypes bit-field layout of the RQ&DQ (requantize/dequantize) atomic command.

    ``_fields_`` packs the hardware command word into c_uint64 bit-fields in
    register order; each field is mirrored below as a plain ``int``
    annotation so static tooling sees the attribute names.  ``length`` is the
    total command size in bits.
    """
    OP_NAME = 'RQ&DQ'
    _fields_ = [('cmd_short', ctypes.c_uint64, 1), ('op_code', ctypes.c_uint64, 16), ('cmd_id_dep', ctypes.c_uint64, 24), ('tsk_typ', ctypes.c_uint64, 4), ('tsk_eu_typ', ctypes.c_uint64, 5), ('opt_rq', ctypes.c_uint64, 1), ('tsk_opd_num', ctypes.c_uint64, 2), ('pad_mode', ctypes.c_uint64, 2), ('opt_res0_sign', ctypes.c_uint64, 1), ('rsvd0', ctypes.c_uint64, 3), ('pwr_step', ctypes.c_uint64, 4), ('intr_en', ctypes.c_uint64, 1), ('opt_res_add', ctypes.c_uint64, 1), ('opt_relu', ctypes.c_uint64, 1), ('opt_left_tran', ctypes.c_uint64, 1), ('opt_opd4_const', ctypes.c_uint64, 1), ('opt_kernel_rotate', ctypes.c_uint64, 1), ('opt_opd0_sign', ctypes.c_uint64, 1), ('opt_opd1_sign', ctypes.c_uint64, 1), ('opt_opd2_sign', ctypes.c_uint64, 1), ('opt_res0_prec', ctypes.c_uint64, 3), ('opt_opd0_prec', ctypes.c_uint64, 3), ('opt_opd1_prec', ctypes.c_uint64, 3), ('opt_opd2_prec', ctypes.c_uint64, 3), ('opt_opd0_const', ctypes.c_uint64, 1), ('opt_opd1_const', ctypes.c_uint64, 1), ('opt_opd2_const', ctypes.c_uint64, 1), ('short_res0_str', ctypes.c_uint64, 3), ('short_opd0_str', ctypes.c_uint64, 3), ('short_opd1_str', ctypes.c_uint64, 3), ('short_opd2_str', ctypes.c_uint64, 3), ('opt_res_add_sign', ctypes.c_uint64, 1), ('rsvd2', ctypes.c_uint64, 25), ('sym_range', ctypes.c_uint64, 1), ('opt_opd3_const', ctypes.c_uint64, 1), ('opt_opd5_const', ctypes.c_uint64, 1), ('opd0_x_ins0', ctypes.c_uint64, 4), ('opd0_y_ins0', ctypes.c_uint64, 4), ('opd1_x_ins0', ctypes.c_uint64, 4), ('opd1_y_ins0', ctypes.c_uint64, 4), ('opd0_up_pad', ctypes.c_uint64, 4), ('opd0_dn_pad', ctypes.c_uint64, 4), ('opd0_lf_pad', ctypes.c_uint64, 4), ('opd0_rt_pad', ctypes.c_uint64, 4), ('res_op_x_str', ctypes.c_uint64, 4), ('res_op_y_str', ctypes.c_uint64, 4), ('res0_h_shift', ctypes.c_uint64, 4), ('res0_w_shift', ctypes.c_uint64, 4), ('opd0_h_shift', ctypes.c_uint64, 4), ('opd0_w_shift', ctypes.c_uint64, 4), ('opd1_h_shift', ctypes.c_uint64, 4), ('opd1_w_shift', ctypes.c_uint64, 4), ('tsk_lane_num', ctypes.c_uint64, 
    64), ('res0_n', ctypes.c_uint64, 16), ('res0_c', ctypes.c_uint64, 16), ('res0_h', ctypes.c_uint64, 16), ('res0_w', ctypes.c_uint64, 16), ('opd0_n', ctypes.c_uint64, 16), ('opd0_c', ctypes.c_uint64, 16), ('opd0_h', ctypes.c_uint64, 16), ('opd0_w', ctypes.c_uint64, 16), ('opd1_n', ctypes.c_uint64, 16), ('opd1_c', ctypes.c_uint64, 16), ('opd1_h', ctypes.c_uint64, 16), ('opd1_w', ctypes.c_uint64, 16), ('res0_n_str', ctypes.c_uint64, 16), ('res0_c_str', ctypes.c_uint64, 16), ('opd0_n_str', ctypes.c_uint64, 16), ('opd0_c_str', ctypes.c_uint64, 16), ('opd1_n_str', ctypes.c_uint64, 16), ('opd1_c_str', ctypes.c_uint64, 16), ('opd2_n_str', ctypes.c_uint64, 16), ('opd2_c_str', ctypes.c_uint64, 16), ('res0_addr', ctypes.c_uint64, 32), ('opd0_addr', ctypes.c_uint64, 32), ('opd1_addr', ctypes.c_uint64, 32), ('opd2_addr', ctypes.c_uint64, 32), ('res0_h_str', ctypes.c_uint64, 32), ('res0_w_str', ctypes.c_uint64, 32), ('opd0_h_str', ctypes.c_uint64, 32), ('opd0_w_str', ctypes.c_uint64, 32), ('opd1_h_str', ctypes.c_uint64, 32), ('opd1_w_str', ctypes.c_uint64, 32), ('opd2_h_str', ctypes.c_uint64, 32), ('opd2_w_str', ctypes.c_uint64, 32), ('res1_addr', ctypes.c_uint64, 32), ('opd3_addr', ctypes.c_uint64, 32)]
    # Annotations mirror _fields_ one-to-one; values are populated by ctypes.
    cmd_short: int
    op_code: int
    cmd_id_dep: int
    tsk_typ: int
    tsk_eu_typ: int
    opt_rq: int
    tsk_opd_num: int
    pad_mode: int
    opt_res0_sign: int
    rsvd0: int
    pwr_step: int
    intr_en: int
    opt_res_add: int
    opt_relu: int
    opt_left_tran: int
    opt_opd4_const: int
    opt_kernel_rotate: int
    opt_opd0_sign: int
    opt_opd1_sign: int
    opt_opd2_sign: int
    opt_res0_prec: int
    opt_opd0_prec: int
    opt_opd1_prec: int
    opt_opd2_prec: int
    opt_opd0_const: int
    opt_opd1_const: int
    opt_opd2_const: int
    short_res0_str: int
    short_opd0_str: int
    short_opd1_str: int
    short_opd2_str: int
    opt_res_add_sign: int
    rsvd2: int
    sym_range: int
    opt_opd3_const: int
    opt_opd5_const: int
    opd0_x_ins0: int
    opd0_y_ins0: int
    opd1_x_ins0: int
    opd1_y_ins0: int
    opd0_up_pad: int
    opd0_dn_pad: int
    opd0_lf_pad: int
    opd0_rt_pad: int
    res_op_x_str: int
    res_op_y_str: int
    res0_h_shift: int
    res0_w_shift: int
    opd0_h_shift: int
    opd0_w_shift: int
    opd1_h_shift: int
    opd1_w_shift: int
    tsk_lane_num: int
    res0_n: int
    res0_c: int
    res0_h: int
    res0_w: int
    opd0_n: int
    opd0_c: int
    opd0_h: int
    opd0_w: int
    opd1_n: int
    opd1_c: int
    opd1_h: int
    opd1_w: int
    res0_n_str: int
    res0_c_str: int
    opd0_n_str: int
    opd0_c_str: int
    opd1_n_str: int
    opd1_c_str: int
    opd2_n_str: int
    opd2_c_str: int
    res0_addr: int
    opd0_addr: int
    opd1_addr: int
    opd2_addr: int
    res0_h_str: int
    res0_w_str: int
    opd0_h_str: int
    opd0_w_str: int
    opd1_h_str: int
    opd1_w_str: int
    opd2_h_str: int
    opd2_w_str: int
    res1_addr: int
    opd3_addr: int
    # Total command length in bits.
    length: int = 1024
def parse_xml(filename, lines):
    """Strip document-level XML markup, returning only the text lines.

    The first 7 lines (document header) are skipped.  Sentence/time tags and
    known structural tags are dropped; any other tag is a format error.

    Args:
        filename: source path, used only in error messages.
        lines: raw file lines, including the 7-line header.

    Returns:
        The stripped, non-markup text lines.

    Raises:
        ValueError: if an unrecognized XML tag is encountered.

    Fix: a blank (or all-whitespace) line previously crashed with IndexError
    on ``line[0]`` after ``.strip()``; blank lines are now skipped.
    """
    skip_prefixes = ('<S ID', '<ENDTIME>', '<END_TIME>')
    skip_exact = {'</S>', '<HEADLINE>', '</HEADLINE>', '<TEXT>', '</TEXT>', '</BODY>', '<P>', '</P>', '</DOC>', '<TURN>', '</TURN>'}
    new_lines = []
    for (i, line) in enumerate(lines[7:]):
        line = line.strip()
        if not line:
            continue
        if line.startswith(skip_prefixes):
            continue
        if line in skip_exact:
            continue
        if line[0] == '<':
            # i is relative to the sliced list; add the header offset back.
            raise ValueError('Unexpected XML tag in %s line %d: %s' % (filename, (i + 7), line))
        new_lines.append(line)
    return new_lines
class BCHUnderlyingGRSDecoder(Decoder):
    """Decoder for BCH codes delegating to a decoder of the underlying GRS code.

    Words are mapped into the GRS code's field via the BCH code's field
    embedding, decoded there, and mapped back through the embedding's
    section.
    """
    def __init__(self, code, grs_decoder='KeyEquationSyndrome', **kwargs):
        self._grs_code = code.bch_to_grs()
        self._grs_decoder = self._grs_code.decoder(grs_decoder, **kwargs)
        # Inherit the GRS decoder's type (copied so later mutation is safe).
        self._decoder_type = copy(self._grs_decoder.decoder_type())
        super().__init__(code, code.ambient_space(), 'Vector')
    def _repr_(self):
        """String representation."""
        return ('Decoder through the underlying GRS code of %s' % self.code())
    def _latex_(self):
        """LaTeX representation."""
        return ('\\textnormal{Decoder through the underlying GRS code of } %s' % self.code()._latex_())
    def grs_code(self):
        """Return the underlying GRS code."""
        return self._grs_code
    def grs_decoder(self):
        """Return the decoder of the underlying GRS code."""
        return self._grs_decoder
    def bch_word_to_grs(self, c):
        """Map a BCH word into the GRS code's ambient space via the field embedding."""
        phi = self.code().field_embedding()
        return vector([phi(x) for x in c])
    def grs_word_to_bch(self, c):
        """Map a GRS word back to the BCH base field via the embedding's section."""
        C = self.code()
        sec = C.field_embedding().section()
        return vector([sec(x) for x in c])
    def decode_to_code(self, y):
        """Decode ``y`` through the GRS decoder.

        For list decoders, each candidate is mapped back and kept only if it
        lies in the BCH code (mapping can fail with ValueError for words
        outside the subfield, which are discarded).
        """
        D = self.grs_decoder()
        ygrs = self.bch_word_to_grs(y)
        cgrs = D.decode_to_code(ygrs)
        if ('list-decoder' in D.decoder_type()):
            l = []
            for c in cgrs:
                try:
                    c_bch = self.grs_word_to_bch(c)
                    if (c_bch in self.code()):
                        l.append(c_bch)
                except ValueError:
                    pass
            return l
        return self.grs_word_to_bch(cgrs)
    def decoding_radius(self):
        """Return the decoding radius of the underlying GRS decoder."""
        return self.grs_decoder().decoding_radius()
class Decoder(nn.Module):
    """One-step seq2seq LSTM decoder with an SRS-activated embedding.

    ``forward`` consumes a single token per call (shape (N,)) plus the
    previous (hidden, cell) state and returns vocabulary logits of shape
    (N, output_size) with the updated state.
    """
    def __init__(self, input_size, embedding_size, hidden_size, output_size, num_layers, p):
        super(Decoder, self).__init__()
        self.dropout = nn.Dropout(p)
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.embedding = nn.Embedding(input_size, embedding_size)
        self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p)
        self.fc = nn.Linear(hidden_size, output_size)
        self.srs = SRS()  # custom activation, defined elsewhere in the project
    def forward(self, x, hidden, cell):
        # Add the time dimension: (N,) -> (1, N) so the LSTM sees seq_len=1.
        x = x.unsqueeze(0)
        embedding = self.dropout(self.srs(self.embedding(x)))
        (outputs, (hidden, cell)) = self.rnn(embedding, (hidden, cell))
        predictions = self.fc(outputs)
        # Drop the time dimension again: (1, N, V) -> (N, V).
        predictions = predictions.squeeze(0)
        return (predictions, hidden, cell)
def update_alpha_parameters(model, layers, p, pi, print_info=True):
    """Update per-unit alpha (keep/prune) masks from their gradients.

    Gathers the alpha gradients of every attention and MLP sub-module across
    all layers, standardizes them, and selects a pruning threshold so that
    the weighted fraction of pruned units matches the target ratio ``pi``
    (attention units are weighted 36x relative to MLP units).  Each alpha is
    then overwritten in place: kept units get 1, pruned units get
    ``1 - pi/p``.

    Args:
        model: DataParallel/DDP-wrapped model exposing ``model.module`` with
            visual_encoder and text_encoder submodules carrying ``alpha``
            parameters.
        layers: number of encoder layers to traverse.
        p: previous compression ratio (scales the pruned-unit value).
        pi: target compression ratio for this update.
        print_info: when True, print the realized attn/mlp compression ratios.
    """
    # Standardize gradients so attention and MLP scores are comparable.
    standarlization = (lambda x: ((x - torch.mean(x)) / torch.std(x)))
    # Stack alpha grads: rows = [visual attn, text self-attn, cross-attn 0, cross-attn 1].
    alpha_grad_attn = torch.stack([torch.cat([getattr(model.module.visual_encoder.blocks, str(i)).attn.alpha.grad for i in range(layers)]), torch.stack([getattr(model.module.text_encoder.encoder.layer, str(i)).attention.self.alpha.grad for i in range(layers)]), torch.stack([getattr(model.module.text_encoder.encoder.layer, str(i)).crossattention.self0.alpha.grad for i in range(layers)]), torch.stack([getattr(model.module.text_encoder.encoder.layer, str(i)).crossattention.self1.alpha.grad for i in range(layers)])])
    # Rows = [visual mlp, text intermediate].
    alpha_grad_mlp = torch.stack([torch.stack([getattr(model.module.visual_encoder.blocks, str(i)).mlp.alpha.grad for i in range(layers)]), torch.stack([getattr(model.module.text_encoder.encoder.layer, str(i)).intermediate.alpha.grad for i in range(layers)])])
    (alpha_grad_attn, alpha_grad_mlp) = (standarlization(alpha_grad_attn), standarlization(alpha_grad_mlp))
    alpha_grad = torch.cat([alpha_grad_attn.view((- 1)), alpha_grad_mlp.view((- 1))])
    (sorted_alpha_grad, indices) = torch.sort(alpha_grad, descending=True)
    # Attention units count 36x toward the compression budget — presumably
    # reflecting their parameter cost relative to one MLP unit; TODO confirm.
    compression_weight = torch.ones_like(indices)
    compression_weight[(indices < alpha_grad_attn.numel())] = 36
    # Threshold where the weighted cumulative count reaches the pi fraction.
    threshold = sorted_alpha_grad[torch.argmin(torch.abs((torch.cumsum(compression_weight, 0) - (torch.sum(compression_weight) * pi))))]
    def update(module, grad):
        # Keep units at/below threshold (and always the minimum) at value 1;
        # scale the rest down to 1 - pi/p.  Overwrites alpha in place.
        mask = ((grad <= threshold) | (grad <= torch.min(grad)))
        module.data.copy_((mask + ((~ mask) * (1 - (pi / p)))))
    for i in range(layers):
        update(getattr(model.module.visual_encoder.blocks, str(i)).attn.alpha, alpha_grad_attn[(0, i)].unsqueeze(0))
        update(getattr(model.module.text_encoder.encoder.layer, str(i)).attention.self.alpha, alpha_grad_attn[(1, i)])
        update(getattr(model.module.text_encoder.encoder.layer, str(i)).crossattention.self0.alpha, alpha_grad_attn[(2, i)])
        update(getattr(model.module.text_encoder.encoder.layer, str(i)).crossattention.self1.alpha, alpha_grad_attn[(3, i)])
        update(getattr(model.module.visual_encoder.blocks, str(i)).mlp.alpha, alpha_grad_mlp[(0, i)])
        update(getattr(model.module.text_encoder.encoder.layer, str(i)).intermediate.alpha, alpha_grad_mlp[(1, i)])
    if print_info:
        # Report the realized (mean-mask-based) compression ratios.
        (attn, mlp) = ([], [])
        for i in range(layers):
            attn.append(getattr(model.module.visual_encoder.blocks, str(i)).attn.alpha.flatten())
            attn.append(getattr(model.module.text_encoder.encoder.layer, str(i)).attention.self.alpha.flatten())
            attn.append(getattr(model.module.text_encoder.encoder.layer, str(i)).crossattention.self0.alpha.flatten())
            attn.append(getattr(model.module.text_encoder.encoder.layer, str(i)).crossattention.self1.alpha.flatten())
            mlp.append(getattr(model.module.visual_encoder.blocks, str(i)).mlp.alpha.flatten())
            mlp.append(getattr(model.module.text_encoder.encoder.layer, str(i)).intermediate.alpha.flatten())
        print('Current compression ratio of attn: ', (1 - torch.mean(torch.cat(attn))))
        print('Current compression ratio of mlp: ', (1 - torch.mean(torch.cat(mlp))))
        print('Current compression ratio: ', pi)
def smallest_poly(F, prec=53, norm_type='norm', emb=None):
    """Search the SL(2,Z) orbit of the binary form F for a representative of
    smallest size (coefficient norm or height), returning [form, matrix].

    Walks the tree generated by S (inversion) and T/T^-1 (translations),
    pruning branches whose covariant point exceeds the bound R.
    Requires a Sage environment (matrix, ZZ, CC, exp, covariant_z0,
    get_bound_poly are Sage / project names not visible here).
    """
    def insert_item(pts, item, index):
        # Binary insertion keeping `pts` sorted descending on item[index],
        # so pts.pop() removes the smallest-key element.
        N = len(pts)
        if (N == 0):
            return [item]
        elif (N == 1):
            if (item[index] > pts[0][index]):
                pts.insert(0, item)
            else:
                pts.append(item)
            return pts
        else:
            left = 1
            right = N
            mid = ((left + right) // 2)
            if (item[index] > pts[mid][index]):
                return (insert_item(pts[:mid], item, index) + pts[mid:N])
            else:
                return (pts[:mid] + insert_item(pts[mid:N], item, index))
    def coshdelta(z):
        # Hyperbolic distance proxy of z from i in the upper half plane.
        return ((z.norm() + 1) / (2 * z.imag()))
    G = F
    MG = matrix(ZZ, 2, 2, [1, 0, 0, 1])
    (x, y) = G.parent().gens()
    if (norm_type == 'norm'):
        current_size = sum([(abs(i) ** 2) for i in G.coefficients()])
    elif (norm_type == 'height'):
        current_size = exp(max([c.global_height(prec=prec) for c in G.coefficients()]))
    else:
        raise ValueError('type must be norm or height')
    (v0, th) = covariant_z0(G, prec=prec, emb=emb)
    rep = (2 * CC.gen(0))
    from math import isnan
    if isnan(v0.abs()):
        raise ValueError(('invalid covariant: %s' % v0))
    R = get_bound_poly(G, prec=prec, norm_type=norm_type)
    # Generators of SL(2,Z): S = inversion, T / TI = translation by +/-1.
    S = matrix(ZZ, 2, 2, [0, (- 1), 1, 0])
    T = matrix(ZZ, 2, 2, [1, 1, 0, 1])
    TI = matrix(ZZ, 2, 2, [1, (- 1), 0, 1])
    count = 0
    # Queue entries: [form, covariant point, representative, matrix, coshdelta, label].
    pts = [[G, v0, rep, MG, coshdelta(v0), 0]]
    current_min = [G, v0, rep, MG, coshdelta(v0)]
    while pts:
        (G, v, rep, M, D, label) = pts.pop()
        if (D > R):
            # Sorted queue: everything remaining is also beyond the bound.
            break
        count += 1
        if (norm_type == 'norm'):
            new_size = sum([(abs(i) ** 2) for i in G.coefficients()])
        else:
            new_size = exp(max([c.global_height(prec=prec) for c in G.coefficients()]))
        if (new_size < current_size):
            current_min = [G, v, rep, M, coshdelta(v)]
            current_size = new_size
            # Tighter bound from the new minimum prunes further branches.
            R = get_bound_poly(G, norm_type=norm_type, prec=prec, emb=emb)
        # Expand neighbors; labels prevent immediately undoing the last move.
        if ((label != 1) and (min((rep + 1).norm(), (rep - 1).norm()) >= 1)):
            z = ((- 1) / v)
            new_pt = [G.subs({x: (- y), y: x}), z, ((- 1) / rep), (M * S), coshdelta(z), 1]
            pts = insert_item(pts, new_pt, 4)
        if (label != 3):
            z = (v - 1)
            new_pt = [G.subs({x: (x + y)}), z, (rep - 1), (M * T), coshdelta(z), 2]
            pts = insert_item(pts, new_pt, 4)
        if (label != 2):
            z = (v + 1)
            new_pt = [G.subs({x: (x - y)}), z, (rep + 1), (M * TI), coshdelta(z), 3]
            pts = insert_item(pts, new_pt, 4)
    return [current_min[0], current_min[3]]
def makeSpiderHeader(im):
    """Build a SPIDER-format image header for `im` (an object with `.size`).

    Returns a list of 4-byte packed floats (one per header value), or an
    empty list when the computed header is too small to hold the required
    fields.
    """
    nsam, nrow = im.size
    lenbyt = nsam * 4  # record length in bytes (one float per sample)
    labrec = 1024 // lenbyt  # number of records in the label
    if 1024 % lenbyt != 0:
        labrec += 1
    labbyt = labrec * lenbyt  # total label bytes
    nvalues = labbyt // 4
    # Header starts as all zeros; only the SPIDER-defined slots get values.
    hdr = [0.0] * nvalues
    if len(hdr) < 23:
        return []
    # SPIDER fields use 1-based indexing; the leading dummy slot is dropped below.
    hdr[1] = 1.0          # nslice (number of slices)
    hdr[2] = float(nrow)  # number of rows
    hdr[5] = 1.0          # iform: 2D image
    hdr[12] = float(nsam)
    hdr[13] = float(labrec)
    hdr[22] = float(labbyt)
    hdr[23] = float(lenbyt)
    hdr = hdr[1:]
    hdr.append(0.0)
    return [struct.pack('f', value) for value in hdr]
# NOTE(review): the `@` of the decorator below appears stripped by extraction;
# presumably this was `@_utils.test(arch=[ti.cpu, ti.cuda])` — confirm upstream.
_utils.test(arch=[ti.cpu, ti.cuda])
def test_break_in_real_func():
    """Check that `break` inside a loop works in a Taichi real function."""
    # NOTE(review): likely `@_func` (ti.experimental real-function decorator).
    _func
    def bar() -> int:
        a = 0
        for i in range(10):
            if (i == 5):
                break
            a += 1
        return a
    def foo() -> int:
        return bar()
    # Loop breaks at i == 5, having incremented 5 times.
    assert (foo() == 5)
def GetCOCOCatNames():
    """Return the 80 COCO category names as a dict keyed by contiguous index 0-79.

    The mapping is identical to the original hand-written assignments; the
    list order defines the index of each category.
    """
    names = [
        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
        'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
        'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
        'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
        'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
        'scissors', 'teddy bear', 'hair drier', 'toothbrush',
    ]
    # dict(enumerate(...)) reproduces the original {0: 'person', ...} mapping.
    return dict(enumerate(names))
def setup(app: Sphinx) -> Dict[(str, Any)]:
    """Register the autodoc documenters, config values, events and helper
    extensions with the Sphinx application."""
    # Registration order is preserved from the original call sequence.
    documenters = (
        ModuleDocumenter, ClassDocumenter, ExceptionDocumenter,
        DataDocumenter, NewTypeDataDocumenter, FunctionDocumenter,
        DecoratorDocumenter, MethodDocumenter, AttributeDocumenter,
        PropertyDocumenter, NewTypeAttributeDocumenter,
    )
    for documenter in documenters:
        app.add_autodocumenter(documenter)
    # (name, default, rebuild, [allowed values]) — trailing ENUM is optional.
    config_values = (
        ('autoclass_content', 'class', True, ENUM('both', 'class', 'init')),
        ('autodoc_member_order', 'alphabetical', True, ENUM('alphabetical', 'bysource', 'groupwise')),
        ('autodoc_class_signature', 'mixed', True, ENUM('mixed', 'separated')),
        ('autodoc_default_options', {}, True),
        ('autodoc_docstring_signature', True, True),
        ('autodoc_mock_imports', [], True),
        ('autodoc_typehints', 'signature', True, ENUM('signature', 'description', 'none', 'both')),
        ('autodoc_typehints_description_target', 'all', True, ENUM('all', 'documented', 'documented_params')),
        ('autodoc_type_aliases', {}, True),
        ('autodoc_typehints_format', 'short', 'env', ENUM('fully-qualified', 'short')),
        ('autodoc_warningiserror', True, True),
        ('autodoc_inherit_docstrings', True, True),
    )
    for name, default, rebuild, *valid_types in config_values:
        app.add_config_value(name, default, rebuild, *valid_types)
    for event in (
        'autodoc-before-process-signature',
        'autodoc-process-docstring',
        'autodoc-process-signature',
        'autodoc-skip-member',
        'autodoc-process-bases',
    ):
        app.add_event(event)
    for extension in (
        'sphinx.ext.autodoc.preserve_defaults',
        'sphinx.ext.autodoc.type_comment',
        'sphinx.ext.autodoc.typehints',
    ):
        app.setup_extension(extension)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
def load_all(stream, Loader=None):
    """Parse every YAML document in `stream`, yielding one Python object per
    document.

    When no `Loader` is given, warn about the implicit choice and fall back
    to `FullLoader`. The loader is always disposed, even if the consumer
    abandons the generator early.
    """
    if Loader is None:
        load_warning('load_all')
        Loader = FullLoader
    parser = Loader(stream)
    try:
        while parser.check_data():
            yield parser.get_data()
    finally:
        parser.dispose()
def imread(fname, dtype=None, img_num=None, **kwargs):
    """Load an image via PIL and convert it to an ndarray.

    `fname` may be a filesystem path (str) or an already-open file-like
    object. Extra keyword arguments are accepted for interface compatibility
    but ignored.
    """
    if not isinstance(fname, str):
        # File-like object: hand it to PIL directly.
        return pil_to_ndarray(Image.open(fname), dtype=dtype, img_num=img_num)
    # Path: open in binary mode ourselves so the handle is closed afterwards.
    with open(fname, 'rb') as f:
        return pil_to_ndarray(Image.open(f), dtype=dtype, img_num=img_num)
def build_network(opt):
    """Instantiate a network from an options dict.

    The dict must contain a 'type' key naming a class registered in
    ARCH_REGISTRY; all remaining keys are passed to its constructor. The
    input dict is deep-copied so the caller's options are not mutated.
    """
    options = deepcopy(opt)
    network_type = options.pop('type')
    net = ARCH_REGISTRY.get(network_type)(**options)
    get_root_logger().info(f'Network [{net.__class__.__name__}] is created.')
    return net
def load_data(name):
    """Instantiate one of the density-estimation benchmark datasets by name.

    Raises:
        ValueError: when `name` is not a known dataset.
    """
    if name == 'bsds300':
        return datasets.BSDS300()
    if name == 'power':
        return datasets.POWER()
    if name == 'gas':
        return datasets.GAS()
    if name == 'hepmass':
        return datasets.HEPMASS()
    if name == 'miniboone':
        return datasets.MINIBOONE()
    raise ValueError('Unknown dataset')
class RowLogger(Logger):
    """Logger that writes each record as one CSV row in `self.fname`.

    The first row is the column header; `add()` appends one data row per call.
    """

    def __init__(self, filename, columns=None, append=False):
        super(RowLogger, self).__init__(filename, columns=columns, append=append)

    def initAppend(self, append):
        """Write the header, or (in append mode) verify the existing one.

        Raises AssertionError if an existing file's header does not match
        `self.col` (set by the base Logger — confirm against parent class).
        """
        if (append and os.path.exists(self.fname)):
            with open(self.fname, 'r') as f:
                line = f.readline()
            # BUG FIX: `line[-1]` raised IndexError on an empty file;
            # endswith() handles the empty-line case safely.
            if line.endswith('\n'):
                line = line[:(- 1)]
            pieces = line.split(',')
            assert (len(pieces) == len(self.col))
            for i in range(len(pieces)):
                assert (pieces[i] == self.col[i])
        if (not append):
            self.writeRow(self.col, 'w')

    def add(self, *args):
        """Record the values and append them as a CSV row."""
        super(RowLogger, self).add(*args)
        self.writeRow(args, 'a')

    def writeRow(self, row, mode):
        """Write one comma-separated row to the log file in `mode` ('w'/'a')."""
        with open(self.fname, mode) as f:
            # BUG FIX: str() each value — add() passes arbitrary values and
            # ','.join() previously raised TypeError on non-strings.
            f.write(','.join(str(v) for v in row))
            f.write('\n')
class StartingBlock(Block):
    """Grid-map block marking the start region; carries an explicit starting
    point that must lie inside the block (defaults to the block's center)."""

    def __init__(self, x=0, y=0, h=1, w=1, value=(- 0.1), startingPoint=None):
        super(StartingBlock, self).__init__(x, y, h, w)
        self.color = '#00FF00FF'  # rendered green
        self.name = 'StartingBlock'
        self.value = value
        # Default starting point: geometric center of the block.
        self.startingPoint = [(x + (w / 2.0)), (y + (h / 2.0))]
        if (startingPoint is not None):
            self.set_starting_point(startingPoint[0], startingPoint[1])

    def set_starting_point(self, x, y):
        """Set the starting point to (x, y), validating it is inside the block.

        Raises:
            GridMapException: when (x, y) lies outside this block.
        """
        if self.is_inside(x, y):
            self.startingPoint = [x, y]
        else:
            # BUG FIX: the format string was applied to `x` alone (raising
            # TypeError) and the remaining values were passed as extra
            # positional arguments to the exception constructor.
            raise GridMapException('Specified coordinate (%f, %f) is out of the range of the starting block ( %f <= x < %f, %f <= y < %f ).' % (x, y, self.corners[0][0], self.corners[1][0], self.corners[0][1], self.corners[3][1]))

    def get_starting_point_coor(self):
        """Return the starting point as a BlockCoor."""
        return BlockCoor(self.startingPoint[0], self.startingPoint[1])

    def get_starting_point_list(self):
        """Return the starting point as a [x, y] list."""
        return self.startingPoint
class ClipCountAcc():
    """Accumulator value parsed from a binary response stream.

    Wire format: a single unsigned byte (SIZE == 1).
    """
    SIZE = 1  # serialized size in bytes
    # NOTE(review): takes `reader`, not `self` — presumably a stripped
    # @staticmethod decorator (extraction removed decorators elsewhere in
    # this file too); confirm upstream.
    def from_reader(reader: _ResponseReader):
        assert (reader.remaining() >= ClipCountAcc.SIZE)
        rv = ClipCountAcc()
        rv.value = reader.read_u8()
        return rv
    def __repr__(self):
        return _pretty_print(self)
def compute_wer(ref_uid_to_tra, hyp_uid_to_tra, g2p):
    """Compute word error rate between reference and hypothesis transcripts.

    Args:
        ref_uid_to_tra: mapping uid -> reference transcript string.
        hyp_uid_to_tra: mapping uid -> hypothesis transcript string.
        g2p: optional grapheme-to-phoneme callable; when given, hypotheses
            are converted to phones (apostrophes/spaces dropped, trailing
            stress digits stripped) before scoring.

    Returns:
        WER as a float (total edit distance / total reference words).
    """
    total_edits = 0
    total_ref_words = 0
    total_hyp_words = 0
    for uid in hyp_uid_to_tra:
        ref_words = ref_uid_to_tra[uid].split()
        if g2p is None:
            hyp_words = hyp_uid_to_tra[uid].split()
        else:
            phones = g2p(hyp_uid_to_tra[uid])
            phones = [p for p in phones if p != "'" and p != ' ']
            # Strip trailing stress markers (e.g. 'AH0' -> 'AH').
            hyp_words = [p[:(- 1)] if p[(- 1)].isnumeric() else p for p in phones]
        total_edits += editdistance.eval(ref_words, hyp_words)
        total_ref_words += len(ref_words)
        total_hyp_words += len(hyp_words)
    wer = float(total_edits) / total_ref_words
    logger.debug(f'wer = {(wer * 100):.2f}%; num. of ref words = {total_ref_words}; num. of hyp words = {total_hyp_words}; num. of sentences = {len(ref_uid_to_tra)}')
    return wer
class CudaRemoteModuleTest(CommonRemoteModuleTest):
    """RPC tests for RemoteModule placed on a CUDA device.

    NOTE(review): the bare `_if_lt_x_gpu(1)` / `_utils.dist_init` lines below
    are presumably stripped `@` decorators (extraction removed decorators
    elsewhere in this file too); confirm upstream.
    """
    _if_lt_x_gpu(1)
    _utils.dist_init
    def test_valid_device(self):
        """A module created on '<worker>/cuda:0' reports device cuda:0."""
        # Only rank 0 drives the test; other ranks just participate in RPC.
        if (self.rank != 0):
            return
        dst_rank = ((self.rank + 1) % self.world_size)
        dst_worker_name = dist_utils.worker_name(dst_rank)
        for remote_module in self._create_remote_module_iter('{}/cuda:0'.format(dst_worker_name), modes=[ModuleCreationMode.MODULE_CTOR]):
            device = rpc.rpc_sync(dst_worker_name, remote_device, (remote_module.module_rref,))
            self.assertEqual(device.type, 'cuda')
            self.assertEqual(device.index, 0)
        # The 'rank:<n>/<device>' addressing form must behave identically.
        for remote_module in self._create_remote_module_iter('rank:{}/cuda:0'.format(dst_rank), modes=[ModuleCreationMode.MODULE_CTOR]):
            device = rpc.rpc_sync(dst_worker_name, remote_device, (remote_module.module_rref,))
            self.assertEqual(device.type, 'cuda')
            self.assertEqual(device.index, 0)
    _if_lt_x_gpu(1)
    _utils.dist_init
    def test_invalid_devices(self):
        """Malformed device strings raise with specific error messages."""
        if (self.rank != 0):
            return
        dst_worker_name = dist_utils.worker_name(((self.rank + 1) % self.world_size))
        with self.assertRaisesRegex(RuntimeError, 'Expected one of .+ device type at start of device string'):
            list((m.forward() for m in self._create_remote_module_iter('{}/foo'.format(dst_worker_name), modes=[ModuleCreationMode.MODULE_CTOR])))
        with self.assertRaisesRegex(RuntimeError, 'CUDA error: invalid device ordinal'):
            list((m.forward() for m in self._create_remote_module_iter('{}/cuda:100'.format(dst_worker_name), modes=[ModuleCreationMode.MODULE_CTOR])))
        with self.assertRaisesRegex(RuntimeError, "Invalid device string: 'cpu2'"):
            list((m.forward() for m in self._create_remote_module_iter('{}/cpu2'.format(dst_worker_name), modes=[ModuleCreationMode.MODULE_CTOR])))
        with self.assertRaisesRegex(RuntimeError, 'Device string must not be empty'):
            list((m.forward() for m in self._create_remote_module_iter('{}/'.format(dst_worker_name), modes=[ModuleCreationMode.MODULE_CTOR])))
        with self.assertRaisesRegex(ValueError, "Could not parse remote_device: worker1/cuda:0/cuda:1. The valid format is '<workername>/<device>'"):
            list((m.forward() for m in self._create_remote_module_iter('{}/cuda:0/cuda:1'.format(dst_worker_name), modes=[ModuleCreationMode.MODULE_CTOR])))
        with self.assertRaisesRegex(ValueError, "Could not parse remote_device: /. The valid format is '<workername>/<device>'"):
            list((m.forward() for m in self._create_remote_module_iter('/', modes=[ModuleCreationMode.MODULE_CTOR])))
        with self.assertRaisesRegex(ValueError, "Could not parse remote_device: /cuda:0. The valid format is '<workername>/<device>'"):
            list((m.forward() for m in self._create_remote_module_iter('/cuda:0', modes=[ModuleCreationMode.MODULE_CTOR])))
    _if_lt_x_gpu(1)
    _utils.dist_init
    def test_input_moved_to_cuda_device(self):
        """CPU args/kwargs are moved to CUDA remotely; results return on CPU."""
        if (self.rank != 0):
            return
        dst_worker_name = dist_utils.worker_name(((self.rank + 1) % self.world_size))
        t1 = torch.ones(1)
        args = (t1, 2)
        t2 = (t1 * 2)
        kwargs = dict(word=t2)
        for remote_module in self._create_remote_module_iter('{}/cuda:0'.format(dst_worker_name), modes=[ModuleCreationMode.MODULE_CTOR]):
            ret_fut = remote_module.forward_async(*args, **kwargs)
            ret = ret_fut.wait()
            # Remote module reverses (args + kwargs values); tensors come back on CPU.
            self.assertEqual(ret, tuple(reversed((args + (t2,)))))
            self.assertEqual(ret[0].device.type, 'cpu')
            self.assertEqual(ret[2].device.type, 'cpu')
            ret = remote_module.forward(*args, **kwargs)
            self.assertEqual(ret, tuple(reversed((args + (t2,)))))
            self.assertEqual(ret[0].device.type, 'cpu')
            self.assertEqual(ret[2].device.type, 'cpu')
    _if_lt_x_gpu(1)
    _utils.dist_init
    def test_input_moved_to_cuda_device_script(self):
        """Same device-movement behavior through a TorchScript interface."""
        if (self.rank != 0):
            return
        dst_worker_name = dist_utils.worker_name(((self.rank + 1) % self.world_size))
        scripted_remote_module = next(self._create_remote_module_iter('{}/cuda:0'.format(dst_worker_name), modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]))
        # NOTE(review): likely a stripped `@torch.jit.script` decorator.
        .script
        def run_forward(scripted_remote_module: MyModuleInterface):
            ret = scripted_remote_module.forward(torch.ones(1), 2, '3')
            return ret
        ret = run_forward(scripted_remote_module)
        self.assertEqual(ret, ('3', 2, torch.ones(1)))
        self.assertEqual(ret[2].device.type, 'cpu')
class Timer():
    """Context manager measuring elapsed time in milliseconds.

    With `cuda=True` a GPU timer (pyrenderer.GpuTimer) is used with explicit
    device syncs; otherwise wall-clock time is measured. With `enable=False`
    the timer is a no-op (nothing is recorded).
    """

    def __init__(self, enable, cuda):
        self._enable = enable
        self._cuda = cuda
        self._elapsed_ms = None
        if self._cuda:
            self._gpu_timer = pyrenderer.GpuTimer()

    def elapsed_ms(self):
        """Return the last recorded duration in ms; asserts one was recorded."""
        assert (self._elapsed_ms is not None), 'No timings recorded'
        return self._elapsed_ms

    def __enter__(self):
        # BUG FIX: previously returned None when disabled, so
        # `with Timer(False, ...) as t:` bound t = None. Now always binds self.
        if (not self._enable):
            return self
        if self._cuda:
            # Sync before starting so pending GPU work is not attributed to us.
            pyrenderer.sync()
            self._gpu_timer.start()
        self._start = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if (not self._enable):
            return
        if self._cuda:
            self._gpu_timer.stop()
            # Sync after stopping so the GPU timer covers all issued work.
            pyrenderer.sync()
        stop = time.time()
        if self._cuda:
            self._elapsed_ms = self._gpu_timer.elapsed_ms()
        else:
            self._elapsed_ms = ((stop - self._start) * 1000.0)
class DiagonalNoiseModel(NoiseModel):
    """Noise model with a diagonal information (inverse covariance) matrix.

    Exactly one of `information_diag` or `sqrt_information_diag` should be
    given; the stored `sqrt_information_matrix` is the diagonal square-root
    information matrix used for whitening residuals.
    """
    def __init__(self, information_diag: T.Optional[T.Sequence[sf.Scalar]]=None, sqrt_information_diag: T.Optional[T.Sequence[sf.Scalar]]=None) -> None:
        if (sqrt_information_diag is not None):
            # sqrt-information given directly; information_diag is ignored if both are set.
            self.sqrt_information_matrix = sf.Matrix.diag(sqrt_information_diag)
        else:
            assert (information_diag is not None), 'Either "information_diag" or "sqrt_information_diag" must be provided.'
            self.sqrt_information_matrix = sf.Matrix.diag(information_diag).applyfunc(sf.sqrt)
    # NOTE(review): takes `cls` — presumably a stripped @classmethod decorator
    # (extraction removed decorators elsewhere in this file); confirm upstream.
    def from_variances(cls, variances: T.Sequence[sf.Scalar]) -> DiagonalNoiseModel:
        # information = 1 / variance per dimension.
        return cls(information_diag=[(1 / v) for v in variances])
    # NOTE(review): same stripped-@classmethod situation as above.
    def from_sigmas(cls, standard_deviations: T.Sequence[sf.Scalar]) -> DiagonalNoiseModel:
        # sqrt-information = 1 / sigma per dimension.
        return cls(sqrt_information_diag=[(1 / s) for s in standard_deviations])
    def whiten(self, unwhitened_residual: sf.Matrix.MatrixT) -> sf.Matrix.MatrixT:
        """Whiten a residual by multiplying with the sqrt information matrix."""
        return T.cast(sf.Matrix.MatrixT, (self.sqrt_information_matrix * unwhitened_residual))
def get_points_from_angles(distance, elevation, azimuth, degrees=True):
    """Convert spherical camera coordinates to Cartesian (x, y, z).

    Scalar inputs return a tuple; tensor inputs return an (N, 3) tensor.
    Angles are interpreted in degrees unless `degrees=False`.
    """
    if isinstance(distance, (float, int)):
        if degrees:
            elevation = math.radians(elevation)
            azimuth = math.radians(azimuth)
        x = distance * math.cos(elevation) * math.sin(azimuth)
        y = distance * math.sin(elevation)
        z = (- distance) * math.cos(elevation) * math.cos(azimuth)
        return (x, y, z)
    # Tensor path: elementwise conversion, stacked to shape (N, 3).
    if degrees:
        deg_to_rad = math.pi / 180.0
        elevation = deg_to_rad * elevation
        azimuth = deg_to_rad * azimuth
    x = distance * torch.cos(elevation) * torch.sin(azimuth)
    y = distance * torch.sin(elevation)
    z = (- distance) * torch.cos(elevation) * torch.cos(azimuth)
    return torch.stack([x, y, z]).transpose(1, 0)
def set_seed(seed: Optional[int]) -> None:
    """Seed numpy, the stdlib `random` module and torch for reproducibility.

    A `None` seed is a no-op (leaves all generators untouched).
    """
    if seed is None:
        return
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
# NOTE(review): the bare `_kl(...)` lines below are presumably stripped
# `@register_kl(Gumbel, <dist>)` stacked decorators (extraction removed the
# `@` elsewhere in this file too); confirm upstream.
_kl(Gumbel, Beta)
_kl(Gumbel, Exponential)
_kl(Gumbel, Gamma)
_kl(Gumbel, Pareto)
_kl(Gumbel, Uniform)
def _kl_gumbel_infinity(p, q):
    """KL(Gumbel || q) for distributions with mismatched support: infinite.

    Returns a tensor of +inf shaped like p.loc.
    """
    return _infinite_like(p.loc)
def main(args):
    """BerryPI-style Berry phase computation from WIEN2k/wannier90 outputs.

    Reads <case>.win (k-mesh), optionally <case>.nnkp (with -w), and
    <case>.mmn (overlap matrices), multiplies overlaps along k-paths in the
    chosen direction, and writes per-path Berry phases to <case>.pathphase.

    Args:
        args: [case_name, direction ('x'/'y'/'z'), flags '-up'/'-dn'/'-w'].
    """
    VERBOSE = False
    parse_line_list = (lambda line, delimiter, T: [T(y) for y in [x.strip() for x in line.strip().split(delimiter)] if y])
    if (len(args) < 2):
        print('Error: no case or direction provided')
        exit(1)
    spOption = ''  # spin suffix appended to file names ('up'/'dn')
    wCalc = False  # use wannier90 nnkp pairs instead of building neighbours
    # NOTE(review): this also scans args[0] (the case name), and '-w' matches
    # any arg containing that substring — confirm intended flag parsing.
    for arg in args:
        if ('-up' in arg):
            spOption = 'up'
        elif ('-dn' in arg):
            spOption = 'dn'
        elif ('-w' in arg):
            wCalc = True
    case_name = args[0]
    direction_args = {'x': [1, 0, 0], 'y': [0, 1, 0], 'z': [0, 0, 1]}
    # NOTE(review): `len(args) > 2` silently ignores args[1] when exactly two
    # args are given — presumably should be `>= 2`; confirm against callers.
    if (len(args) > 2):
        if (args[1] not in direction_args):
            print('Error: unknown direction', args[1])
            exit(1)
        direction = direction_args[args[1]]
    else:
        direction = direction_args['x']
    Mmn = {}  # k-point -> overlap matrix M_mn(k, k+b)
    phase_sums = []
    f_win = open(((case_name + '.win') + spOption), 'r')
    kmesh = parse_win_mp_grid(f_win)
    f_win.close()
    if wCalc:
        f_nnkp = open(((case_name + '.nnkp') + spOption), 'r')
        nnkpts = parse_nnkp_nnkpts(f_nnkp)
        f_nnkp.close()
    else:
        (nnkpts, neighbour_graph) = determine_neighbours(kmesh, direction, [2, 1, 0])
    if VERBOSE:
        print(nnkpts)
    # Parse the .mmn overlap file; only keep pairs listed in nnkpts.
    f_mmn = open(((case_name + '.mmn') + spOption), 'r')
    f_mmn.readline()
    (n_energy, n_pairs, n_neighbours) = parse_mmn_info_line(f_mmn.readline())
    for i in range((n_pairs * n_neighbours)):
        (k1, k2, G) = parse_pair_info_line(f_mmn.readline())
        if ((k1, k2, G[0], G[1], G[2]) in nnkpts):
            Mmnk1k2 = numpy.zeros(shape=(n_energy, n_energy), dtype=complex)
            for a in range(n_energy):
                for b in range(n_energy):
                    element_value = parse_matrix_element_line(f_mmn.readline())
                    # File is column-major per (a, b) loop: store transposed.
                    Mmnk1k2[(b, a)] = element_value
            Mmn[k1] = Mmnk1k2
        else:
            # Skip the matrix block for unneeded pairs.
            for a in range(n_energy):
                for b in range(n_energy):
                    parse_matrix_element_line(f_mmn.readline())
    f_mmn.close()
    if wCalc:
        # Wannier-charge-center branch: multiply all overlaps in k order.
        for (i, Mmni) in Mmn.items():
            i = int(i)
            if (i == 1):
                L = Mmni
            else:
                L = numpy.matmul(L, Mmni)
        (leign, vect) = numpy.linalg.eig(L)
        del vect
        # WCCs = eigenvalue phases mapped to [0, 1), sorted ascending.
        wcc = numpy.array([((numpy.angle(z) / (2 * numpy.pi)) % 1) for z in leign])
        idx = numpy.argsort(wcc)
        wcc = wcc[idx]
        numpy.set_printoptions(linewidth=numpy.inf)
        numpy.savetxt('wcc_i.csv', [wcc], delimiter=',', footer='', comments='', fmt='%f')
        psin = ((wcc * 2) * numpy.pi)
        psi = sum(psin)
        print('[ BerryPI ]', 'Berry phase sum (rad) =', psi)
        return
    # Path branch: walk each chain of neighbours ending at a path tail.
    testdat = open('test.dat', 'w')
    for (k, neighbours) in neighbour_graph.items():
        k_prev = neighbours[0]
        k_next = neighbours[1]
        if (k_next is None):
            # k is a path end; rebuild the path backwards, then reverse it.
            kpath = []
            kpath.append(k)
            kpath.append(k_prev)
            while k_prev:
                neighbours = neighbour_graph[k_prev]
                k_prev = neighbours[0]
                kpath.append(k_prev)
            kpath = kpath[:(- 1)]
            kpath.reverse()
            # Product of overlap matrices along the path.
            L = Mmn[kpath[0]]
            if (len(kpath) > 1):
                for ki in kpath[1:]:
                    Mmni = Mmn[ki]
                    L = numpy.matmul(L, Mmni)
            (leign, vect) = numpy.linalg.eig(L)
            del vect
            psin = numpy.array([(numpy.angle(z) % (2 * numpy.pi)) for z in leign])
            psi = sum(psin)
            phase_sums.append((kpath[0], psi))
    testdat.close()
    phase_sums.sort(key=(lambda x: x[0]))
    # Write one phase per path, preceded by the count and direction.
    f_pathphase = open(((case_name + '.pathphase') + spOption), 'w')
    f_pathphase.write(('%4d\n' % len(phase_sums)))
    f_pathphase.write((' %2d %2d %2d\n' % (direction[0], direction[1], direction[2])))
    for (k, phase_sum) in phase_sums:
        f_pathphase.write((' %6d %.12f\n' % (k, phase_sum)))
    f_pathphase.close()
    return phase_sums
def to_delta_state(line):
    """Parse a dialogue-state line of the form 'k1:v1, k2:v2 ; req1, req2'
    into {'inform': {...}, 'request': [...]}.

    Any unparseable input (including non-strings) yields the empty state —
    this best-effort contract is preserved from the original, but the bare
    `except:` + `return` inside `finally` anti-patterns are replaced with an
    explicit catch-all fallback.
    """
    delta_state = {'inform': {}, 'request': {}}
    try:
        if (line == 'None') or (line.strip() == '') or (line.strip() == ';'):
            return delta_state
        # Exactly one ';' separates inform pairs from request slots.
        inform, request = [[y.strip() for y in x.strip().split(',')] for x in line.split(';')]
        inform_pairs = {}
        for item in inform:
            # Pairs must be exactly 'key:value'; malformed items are skipped.
            try:
                k, v = item.split(':')
            except ValueError:
                continue
            inform_pairs[k.strip()] = v.strip()
        return {'inform': inform_pairs, 'request': request}
    except Exception:
        # Best-effort parser: any failure returns the empty state.
        return delta_state
def lm_rank(strs, probs):
    """Re-rank candidate strings by interpolating LM score and model prob.

    Uses the module-level `lm` (language model) and `FLAGS.alpha`
    (interpolation weight). Falls back to the first candidate when no LM is
    loaded. The dead locals (`lm_score`, `nw_score`, `score`) computed but
    never used by the original have been removed.
    """
    if (lm is None):
        return strs[0]
    a = FLAGS.alpha
    # Length-normalized LM scores and model probabilities.
    lmscores = [(lm.score(s) / (1 + len(s.split()))) for s in strs]
    norm_probs = [(p / (len(s) + 1)) for (s, p) in zip(strs, probs)]
    rescores = [(((1 - a) * p) + (a * l)) for (l, p) in zip(lmscores, norm_probs)]
    # Stable ascending sort; last index is the best (ties keep the original
    # behavior of preferring the later candidate).
    rerank = [idx for (idx, _) in sorted(enumerate(rescores), key=(lambda pair: pair[1]))]
    return strs[rerank[(- 1)]]
def require_world_size(world_size):
    """Decorator factory: skip the test unless WORLD_SIZE is large enough.

    Returns a skip marker when the environment's WORLD_SIZE is below the
    requirement, otherwise an identity decorator.
    """
    available = int(os.environ['WORLD_SIZE'])
    if available < world_size:
        return sandcastle_skip('Test requires world size of %d' % world_size)
    def identity(func):
        return func
    return identity
class _GlobalPooling2D(Layer):
    """Abstract base for global 2D pooling layers.

    Reduces the two spatial dimensions of a 4D input to (batch, channels);
    subclasses implement the actual reduction in `call`.
    """
    # NOTE(review): bare line below is presumably a stripped `@` decorator
    # (extraction removed decorators elsewhere in this file); confirm upstream.
    _global_pooling_support
    def __init__(self, data_format=None, **kwargs):
        super(_GlobalPooling2D, self).__init__(**kwargs)
        self.data_format = conv_utils.normalize_data_format(data_format)
        # Require rank-4 inputs (batch + 2 spatial + channels).
        self.input_spec = InputSpec(ndim=4)
    def compute_output_shape(self, input_shape):
        # Output keeps batch and channel dims only.
        if (self.data_format == 'channels_last'):
            return (input_shape[0], input_shape[3])
        else:
            return (input_shape[0], input_shape[1])
    def call(self, inputs):
        # Subclasses supply the pooling reduction (e.g. mean or max).
        raise NotImplementedError
    def get_config(self):
        config = {'data_format': self.data_format}
        base_config = super(_GlobalPooling2D, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
def get_model_conditional(batch_size, max_seq_length, input_size, hidden_size, target_size, vocab_size, pretrain, tanhOrSoftmax, dropout):
    """Build a TF1 conditional-encoding LSTM model (e.g. stance detection).

    Encodes the first sequence, then encodes the second conditioned on the
    first's final state; the last conditional output is projected to
    `target_size` classes.

    Returns:
        (model, [inputs, inputs_cond]) — the output tensor and the two
        int32 placeholder inputs of shape [batch_size, max_seq_length].
    """
    inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])
    inputs_cond = tf.placeholder(tf.int32, [batch_size, max_seq_length])
    # With pretrained embeddings ('pre'), freeze the embedding matrix.
    cont_train = True
    if (pretrain == 'pre'):
        cont_train = False
    embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, input_size], (- 0.1), 0.1), name='embedding_matrix', trainable=cont_train)
    embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)
    embedded_inputs_cond = tf.nn.embedding_lookup(embedding_matrix, inputs_cond)
    # TF1-era split: one [batch, input_size] tensor per timestep.
    inputs_list = [tf.squeeze(x) for x in tf.split(1, max_seq_length, embedded_inputs)]
    inputs_cond_list = [tf.squeeze(x) for x in tf.split(1, max_seq_length, embedded_inputs_cond)]
    drop_prob = None
    if dropout:
        drop_prob = 0.1
    lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size, drop_prob, drop_prob)
    start_state = tf.zeros([batch_size, lstm_encoder.state_size])
    (outputs, states) = lstm_encoder(inputs_list, start_state, 'LSTM')
    # Conditional encoding: second sequence starts from the first's final state.
    (outputs_cond, states_cond) = lstm_encoder(inputs_cond_list, states[(- 1)], 'LSTMcond')
    outputs_fin = outputs_cond[(- 1)]
    if (tanhOrSoftmax == 'tanh'):
        model = Projector(target_size, non_linearity=tf.nn.tanh, bias=True)(outputs_fin)
    else:
        model = Projector(target_size, non_linearity=tf.nn.softmax, bias=True)(outputs_fin)
    return (model, [inputs, inputs_cond])
# NOTE(review): the three bare `.parametrize(...)` lines below are presumably
# stripped `@pytest.mark.parametrize` decorators (extraction removed the `@`
# and marker prefix elsewhere in this file too); confirm upstream.
.parametrize('categorical_as_dictionary', [False, True])
.parametrize('through', [through_arrow, through_parquet])
.parametrize('extensionarray', [False, True])
def test_dictionary_encoding(tmp_path, categorical_as_dictionary, through, extensionarray):
    """Round-trip a categorical (dictionary-encoded) Awkward array through
    Arrow/Parquet and check the predicted form matches the actual one."""
    akarray = ak.contents.IndexedArray(ak.index.Index64(np.array([3, 2, 2, 2, 0, 1, 3], dtype=np.uint64)), ak.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3]), parameters={'which': 'inner'}), parameters={'__array__': 'categorical', 'which': 'outer'})
    # This particular combination is excluded (unsupported path).
    if (not (extensionarray and categorical_as_dictionary and (through is through_parquet))):
        (schema_arrow, array_form) = through(akarray, extensionarray, tmp_path, categorical_as_dictionary=categorical_as_dictionary)
        predicted_form = ak._connect.pyarrow.form_handle_arrow(schema_arrow, pass_empty_field=True)
        assert (predicted_form == array_form)
class XLMTokenizationTest(CommonTestCases.CommonTokenizerTester):
    """Tokenizer tests for XLM's BPE tokenizer using a tiny toy vocabulary."""
    tokenizer_class = XLMTokenizer
    def setUp(self):
        super(XLMTokenizationTest, self).setUp()
        # Minimal vocab + merge rules sufficient to tokenize 'lower'.
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        return XLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self):
        input_text = u'lower newer'
        output_text = u'lower newer'
        return (input_text, output_text)
    def test_full_tokenizer(self):
        """'lower' must BPE-split into ['low', 'er</w>'] with correct ids."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = (tokens + ['<unk>'])
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    # NOTE(review): bare `.slow` below is presumably a stripped `@slow`
    # decorator (extraction removed decorators elsewhere in this file).
    .slow
    def test_sequence_builders(self):
        """Special-token layout: <s> text </s> [text_2 </s>] (id 1 separators)."""
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert (encoded_sentence == (([1] + text) + [1]))
        assert (encoded_pair == (((([1] + text) + [1]) + text_2) + [1]))
class StochasticFrameSkip(gym.Wrapper):
    """Gym wrapper repeating each action for `n` frames with sticky actions.

    With probability `stickprob`, the previous action 'sticks' for the first
    frame of the repeat window before the new action takes over.
    """

    def __init__(self, env, n, stickprob, seed):
        # BUG FIX: removed leftover debug `print(stickprob)` from __init__.
        gym.Wrapper.__init__(self, env)
        self.n = n  # number of frames each action is repeated
        self.stickprob = stickprob
        self.curac = None  # currently applied action
        self.rng = np.random.RandomState(seed)
        self.supports_want_render = hasattr(env, 'supports_want_render')

    def reset(self, **kwargs):
        self.curac = None
        return self.env.reset(**kwargs)

    def step(self, ac):
        done = False
        totrew = 0
        actual_rewards = []
        for i in range(self.n):
            if (self.curac is None):
                # First step after reset: always take the new action.
                self.curac = ac
            elif (i == 0):
                # First frame: keep the old action with prob. stickprob.
                if (self.rng.rand() > self.stickprob):
                    self.curac = ac
            elif (i == 1):
                # Second frame: the new action always takes effect.
                self.curac = ac
            if (self.supports_want_render and (i < (self.n - 1))):
                # Skip rendering on intermediate frames when supported.
                (ob, rew, done, info) = self.env.step(self.curac, want_render=False)
            else:
                (ob, rew, done, info) = self.env.step(self.curac)
            totrew += rew
            actual_rewards.append(rew)
            if done:
                break
        # Expose the per-frame rewards alongside the summed reward.
        info['actual_rewards'] = actual_rewards
        return (ob, totrew, done, info)

    def seed(self, s):
        self.rng.seed(s)
def train(sess, model, train_url, test_url, batch_size, vocab_size, analytical, alternate_epochs=1, lexicon=[], result_file='test.txt', B=1, warm_up_period=100):
    """Train a neural topic model with KL warm-up and early stopping.

    Splits 10% of the training data off as a dev set, trains until the dev
    perplexity stops improving for `early_stopping_iters` epochs, optionally
    evaluates on the test set each epoch, and finally reports topic coherence.

    NOTE(review): `lexicon=[]` is a mutable default argument; `result_file`
    is accepted but print_top_words is called with result_file=None.
    """
    (train_set, train_count) = utils.data_set(train_url)
    (test_set, test_count) = utils.data_set(test_url)
    train_size = len(train_set)
    # Hold out 10% of training data as the dev set.
    validation_size = int((train_size * 0.1))
    dev_set = train_set[:validation_size]
    dev_count = train_count[:validation_size]
    train_set = train_set[validation_size:]
    train_count = train_count[validation_size:]
    optimize_jointly = True
    dev_batches = utils.create_batches(len(dev_set), batch_size, shuffle=False)
    test_batches = utils.create_batches(len(test_set), batch_size, shuffle=False)
    warm_up = 0  # KL annealing factor, ramped to 1.0 over warm_up_period epochs
    min_alpha = 1e-05
    curr_B = B
    # NOTE(review): initialized to 0.0 but compared with `print_ppx < best...`
    # below — perplexity is always positive, so no improvement is ever
    # recorded and no model is ever saved before restore; presumably this
    # should be float('inf'). Confirm before changing training behavior.
    best_print_ana_ppx = .0
    early_stopping_iters = 30
    no_improvement_iters = 0
    stopped = False
    epoch = (- 1)
    while (not stopped):
        epoch += 1
        train_batches = utils.create_batches(len(train_set), batch_size, shuffle=True)
        # Linear KL warm-up schedule.
        if (warm_up < 1.0):
            warm_up += (1.0 / warm_up_period)
        else:
            warm_up = 1.0
        # NOTE(review): `switch` is undefined in this scope; the alternating
        # branches are unreachable while optimize_jointly is True.
        if optimize_jointly:
            optim = model.optim_all
            print_mode = 'updating encoder and decoder'
        elif (switch == 0):
            optim = model.optim_dec
            print_mode = 'updating decoder'
        else:
            optim = model.optim_enc
            print_mode = 'updating encoder'
        for i in range(alternate_epochs):
            # --- training pass ---
            loss_sum = 0.0
            ana_loss_sum = 0.0
            ppx_sum = 0.0
            kld_sum_train = 0.0
            ana_kld_sum_train = 0.0
            word_count = 0
            doc_count = 0
            recon_sum = 0.0
            for idx_batch in train_batches:
                (data_batch, count_batch, mask) = utils.fetch_data(train_set, train_count, idx_batch, vocab_size)
                input_feed = {model.x.name: data_batch, model.mask.name: mask, model.keep_prob.name: 0.75, model.warm_up.name: warm_up, model.min_alpha.name: min_alpha, model.B.name: curr_B}
                (_, (loss, recon, kld_train, ana_loss, ana_kld_train)) = sess.run((optim, [model.true_objective, model.recons_loss, model.kld, model.analytical_objective, model.analytical_kld]), input_feed)
                loss_sum += np.sum(loss)
                ana_loss_sum += np.sum(ana_loss)
                kld_sum_train += (np.sum(kld_train) / np.sum(mask))
                ana_kld_sum_train += (np.sum(ana_kld_train) / np.sum(mask))
                word_count += np.sum(count_batch)
                # Avoid division by zero for empty documents.
                count_batch = np.add(count_batch, 1e-12)
                ppx_sum += np.sum(np.divide(loss, count_batch))
                doc_count += np.sum(mask)
                recon_sum += np.sum(recon)
            print_loss = (recon_sum / len(train_batches))
            # Show current top words per topic from the decoder weights.
            dec_vars = utils.variable_parser(tf.trainable_variables(), 'decoder')
            phi = dec_vars[0]
            phi = sess.run(phi)
            utils.print_top_words(phi, lexicon, result_file=None)
            print_ppx = np.exp((loss_sum / word_count))
            print_ana_ppx = np.exp((ana_loss_sum / word_count))
            print_ppx_perdoc = np.exp((ppx_sum / doc_count))
            print_kld_train = (kld_sum_train / len(train_batches))
            print_ana_kld_train = (ana_kld_sum_train / len(train_batches))
            print('| Epoch train: {:d} |'.format((epoch + 1)), print_mode, '{:d}'.format(i), '| Corpus ppx: {:.5f}'.format(print_ppx), '| Per doc ppx: {:.5f}'.format(print_ppx_perdoc), '| KLD: {:.5}'.format(print_kld_train), '| Loss: {:.5}'.format(print_loss), '| ppx anal.: {:.5f}'.format(print_ana_ppx), '|KLD anal.: {:.5f}'.format(print_ana_kld_train))
        # --- dev evaluation (no dropout, full warm-up) ---
        loss_sum = 0.0
        kld_sum_dev = 0.0
        ppx_sum = 0.0
        word_count = 0
        doc_count = 0
        recon_sum = 0.0
        print_ana_ppx = 0.0
        ana_loss_sum = 0.0
        for idx_batch in dev_batches:
            (data_batch, count_batch, mask) = utils.fetch_data(dev_set, dev_count, idx_batch, vocab_size)
            input_feed = {model.x.name: data_batch, model.mask.name: mask, model.keep_prob.name: 1.0, model.warm_up.name: 1.0, model.min_alpha.name: min_alpha, model.B.name: B}
            (loss, recon, kld_dev, ana_kld, ana_loss) = sess.run([model.objective, model.recons_loss, model.kld, model.analytical_kld, model.analytical_objective], input_feed)
            loss_sum += np.sum(loss)
            ana_loss_sum += np.sum(ana_loss)
            kld_sum_dev += (np.sum(kld_dev) / np.sum(mask))
            word_count += np.sum(count_batch)
            count_batch = np.add(count_batch, 1e-12)
            ppx_sum += np.sum(np.divide(loss, count_batch))
            doc_count += np.sum(mask)
            recon_sum += np.sum(recon)
        print_ana_ppx = np.exp((ana_loss_sum / word_count))
        print_ppx = np.exp((loss_sum / word_count))
        print_ppx_perdoc = np.exp((ppx_sum / doc_count))
        print_kld_dev = (kld_sum_dev / len(dev_batches))
        print_loss = (recon_sum / len(dev_batches))
        # Early stopping on dev perplexity; save/restore the best model.
        if (print_ppx < best_print_ana_ppx):
            no_improvement_iters = 0
            best_print_ana_ppx = print_ppx
            tf.train.Saver().save(sess, 'models/improved_model')
        else:
            no_improvement_iters += 1
            print('no_improvement_iters', no_improvement_iters, 'best ppx', best_print_ana_ppx)
            if (no_improvement_iters >= early_stopping_iters):
                stopped = True
                print('stop training after', epoch, 'iterations,no_improvement_iters', no_improvement_iters)
                print('load stored model')
                tf.train.Saver().restore(sess, 'models/improved_model')
        print('| Epoch dev: {:d} |'.format((epoch + 1)), '| Perplexity: {:.9f}'.format(print_ppx), '| Per doc ppx: {:.5f}'.format(print_ppx_perdoc), '| KLD: {:.5}'.format(print_kld_dev), '| Loss: {:.5}'.format(print_loss))
        # --- optional test evaluation each epoch ---
        if FLAGS.test:
            loss_sum = 0.0
            kld_sum_test = 0.0
            ppx_sum = 0.0
            word_count = 0
            doc_count = 0
            recon_sum = 0.0
            ana_loss_sum = 0.0
            ana_kld_sum_test = 0.0
            for idx_batch in test_batches:
                (data_batch, count_batch, mask) = utils.fetch_data(test_set, test_count, idx_batch, vocab_size)
                input_feed = {model.x.name: data_batch, model.mask.name: mask, model.keep_prob.name: 1.0, model.warm_up.name: 1.0, model.min_alpha.name: min_alpha, model.B.name: B}
                (loss, recon, kld_test, ana_loss, ana_kld_test) = sess.run([model.objective, model.recons_loss, model.kld, model.analytical_objective, model.analytical_kld], input_feed)
                loss_sum += np.sum(loss)
                kld_sum_test += (np.sum(kld_test) / np.sum(mask))
                ana_loss_sum += np.sum(ana_loss)
                ana_kld_sum_test += (np.sum(ana_kld_test) / np.sum(mask))
                word_count += np.sum(count_batch)
                count_batch = np.add(count_batch, 1e-12)
                ppx_sum += np.sum(np.divide(loss, count_batch))
                doc_count += np.sum(mask)
                recon_sum += np.sum(recon)
            print_loss = (recon_sum / len(test_batches))
            print_ppx = np.exp((loss_sum / word_count))
            print_ppx_perdoc = np.exp((ppx_sum / doc_count))
            print_kld_test = (kld_sum_test / len(test_batches))
            print_ana_ppx = np.exp((ana_loss_sum / word_count))
            # NOTE(review): divides by len(train_batches), not len(test_batches) — confirm.
            print_ana_kld_test = (ana_kld_sum_test / len(train_batches))
            print('| Epoch test: {:d} |'.format((epoch + 1)), '| Perplexity: {:.9f}'.format(print_ppx), '| Per doc ppx: {:.5f}'.format(print_ppx_perdoc), '| KLD: {:.5}'.format(print_kld_test), '| Loss: {:.5}'.format(print_loss), '| ppx anal.: {:.5f}'.format(print_ana_ppx), '|KLD anal.: {:.5f}'.format(print_ana_kld_test))
        if stopped:
            print('calculate topic coherence (might take a few minutes)')
            coherence = utils.topic_coherence(test_set, phi, lexicon)
            print('topic coherence', str(coherence))
def chunk_pair_distance(chunk1: tuple, chunk2: tuple, overlap_distance: int=(- 1)):
    """Return the gap between two ``(label, start, end)`` chunks.

    If one chunk ends at or before the other starts, the result is the
    non-negative number of positions separating them (adjacent chunks
    yield 0).  If the spans overlap, ``overlap_distance`` is returned
    instead (default ``-1``).
    """
    (_, start_a, end_a) = chunk1
    (_, start_b, end_b) = chunk2
    # Disjoint: measure the gap from whichever chunk comes first.
    if end_a <= start_b:
        return start_b - end_a
    if end_b <= start_a:
        return start_a - end_b
    # Otherwise the spans intersect.
    return overlap_distance
class GATT(nn.Module):
    """Three-stage graph-attention network with neural-ODE refinement.

    Each stage normalizes the input, then integrates a graph-attention
    layer (``pde_layers``) followed by an ``ODEFunc`` (``ode_layers``)
    over t in [0, 1] with the explicit Euler method, keeping only the
    final state of each integration.

    NOTE(review): ``n_layers`` and ``dropout`` are accepted for
    interface compatibility but unused here; ``self.adj`` is built but
    not referenced in ``forward`` — presumably consumed by the attention
    layers elsewhere; confirm against GraphAttentionLayer.  The module
    hard-requires CUDA (``.cuda()``) and the global ``opt``.
    """

    def __init__(self, in_features, out_features, hidden_features, n_layers, n_heads, activation=F.leaky_relu, dropout=0.0):
        super(GATT, self).__init__()
        self.in_features = in_features
        self.hidden_features = hidden_features
        self.out_features = out_features
        self.n_heads = n_heads
        self.activation = activation
        # Dense (all-ones) adjacency over the subsequence positions.
        self.adj = torch.ones([opt.subseq_length, opt.subseq_length]).float().cuda()
        # One LayerNorm per stage: the first sees raw inputs, the later two
        # see the concatenated multi-head width (hidden_features * n_heads).
        self.norm_list = nn.ModuleList([nn.LayerNorm(self.in_features), nn.LayerNorm((self.hidden_features * self.n_heads)), nn.LayerNorm((self.hidden_features * self.n_heads))])
        self.pde_layers = nn.ModuleList([GraphAttentionLayer(in_features, hidden_features, n_heads=self.n_heads), GraphAttentionLayer((hidden_features * n_heads), hidden_features, n_heads=self.n_heads), GraphAttentionLayer((hidden_features * n_heads), hidden_features, n_heads=self.n_heads)])
        self.ode_layers = nn.ModuleList([ODEFunc(dim=(hidden_features * n_heads)), ODEFunc(dim=(hidden_features * n_heads)), ODEFunc(dim=(hidden_features * n_heads))])
        # Integrate each layer from t=0 to t=1.
        self.integration_time = torch.tensor([0, 1]).float().cuda()

    def forward(self, x):
        """Run the three norm -> attention-ODE -> ODE stages on ``x``.

        ``x`` is expected to be a rank-3 tensor (batch, subseq, features);
        the unpack below doubles as a rank check.
        """
        (b, subseq_length, c) = x.shape
        for (i, pde_layer) in enumerate(self.pde_layers):
            norm = self.norm_list[i]
            ode_layer = self.ode_layers[i]
            x = norm(x)
            # Keep only the final state ([-1]) of each Euler integration.
            x = odeint(func=pde_layer, y0=x, t=self.integration_time, method='euler')[(- 1)]
            x = odeint(func=ode_layer, y0=x, t=self.integration_time, method='euler')[(- 1)]
        return x
.parametrize('max_iter', range(1, 5))
def test_labeled_iter(max_iter):
    """Check SelfTrainingClassifier's bookkeeping of labeling iterations."""
    clf = SelfTrainingClassifier(KNeighborsClassifier(), max_iter=max_iter)
    clf.fit(X_train, y_train_missing_labels)
    # Samples labeled before training must be recorded as iteration 0.
    initially_labeled = len(clf.labeled_iter_[clf.labeled_iter_ == 0])
    assert initially_labeled == n_labeled_samples
    # No recorded iteration can exceed the number of iterations actually
    # run, which in turn is capped by max_iter.
    assert np.max(clf.labeled_iter_) <= clf.n_iter_ <= max_iter
def removing_general():
    """Build a combined whitelist and prune/downscale the ``jpg/1`` images.

    Concatenates the per-landmark ``*_ok.txt`` files into ``lab/all_ok.txt``,
    then walks ``jpg/1``: known-corrupted files are deleted, every surviving
    image is downscaled in place to fit 450x450, and 'general' images whose
    stem is not in the whitelist are removed.

    Bug fix vs. the original: corrupted files are now deleted *before*
    attempting to open them with PIL.  The original opened/saved first, so
    a corrupted image that PIL could not open raised an exception and was
    silently kept by the bare ``except``.
    """
    all_ok_files = ['defense_1_ok.txt', 'defense_2_ok.txt', 'defense_3_ok.txt', 'defense_4_ok.txt', 'defense_5_ok.txt', 'eiffel_1_ok.txt', 'eiffel_2_ok.txt', 'eiffel_3_ok.txt', 'eiffel_4_ok.txt', 'eiffel_5_ok.txt', 'invalides_1_ok.txt', 'invalides_2_ok.txt', 'invalides_3_ok.txt', 'invalides_4_ok.txt', 'invalides_5_ok.txt', 'louvre_1_ok.txt', 'louvre_2_ok.txt', 'louvre_3_ok.txt', 'louvre_4_ok.txt', 'louvre_5_ok.txt', 'moulinrouge_1_ok.txt', 'moulinrouge_2_ok.txt', 'moulinrouge_3_ok.txt', 'moulinrouge_4_ok.txt', 'moulinrouge_5_ok.txt', 'museedorsay_1_ok.txt', 'museedorsay_2_ok.txt', 'museedorsay_3_ok.txt', 'museedorsay_4_ok.txt', 'museedorsay_5_ok.txt', 'notredame_1_ok.txt', 'notredame_2_ok.txt', 'notredame_3_ok.txt', 'notredame_4_ok.txt', 'notredame_5_ok.txt', 'pantheon_1_ok.txt', 'pantheon_2_ok.txt', 'pantheon_3_ok.txt', 'pantheon_4_ok.txt', 'pantheon_5_ok.txt', 'pompidou_1_ok.txt', 'pompidou_2_ok.txt', 'pompidou_3_ok.txt', 'pompidou_4_ok.txt', 'pompidou_5_ok.txt', 'sacrecoeur_1_ok.txt', 'sacrecoeur_2_ok.txt', 'sacrecoeur_3_ok.txt', 'sacrecoeur_4_ok.txt', 'sacrecoeur_5_ok.txt', 'triomphe_1_ok.txt', 'triomphe_2_ok.txt', 'triomphe_3_ok.txt', 'triomphe_4_ok.txt', 'triomphe_5_ok.txt']
    corrupted = ['paris_louvre_000136.jpg', 'paris_louvre_000146.jpg', 'paris_moulinrouge_000422.jpg', 'paris_museedorsay_001059.jpg', 'paris_notredame_000188.jpg', 'paris_pantheon_000284.jpg', 'paris_pantheon_000960.jpg', 'paris_pantheon_000974.jpg', 'paris_pompidou_000195.jpg', 'paris_pompidou_000196.jpg', 'paris_pompidou_000201.jpg', 'paris_pompidou_000467.jpg', 'paris_pompidou_000640.jpg', 'paris_sacrecoeur_000299.jpg', 'paris_sacrecoeur_000330.jpg', 'paris_sacrecoeur_000353.jpg', 'paris_triomphe_000662.jpg', 'paris_triomphe_000833.jpg', 'paris_triomphe_000863.jpg', 'paris_triomphe_000867.jpg']
    # O(1) membership test instead of scanning the list per file.
    corrupted_set = set(corrupted)
    # Concatenate every whitelist file into one combined file.
    with open('lab/all_ok.txt', 'w+') as outfile:
        for fname in all_ok_files:
            with open(('lab/' + fname)) as infile:
                for line in infile:
                    outfile.write(line)
    with open('lab/all_ok.txt', 'r') as f:
        whitelist_lines = [line.strip('\n') for line in f]
    for filename in os.listdir('jpg/1'):
        path = 'jpg/1/' + filename
        try:
            # Delete known-corrupted files first — PIL may be unable to
            # open them at all (see docstring).
            if filename in corrupted_set:
                os.remove(path)
                continue
            # Downscale in place to fit within 450x450.
            im = Image.open(path)
            im.thumbnail((450, 450), Image.ANTIALIAS)
            im.save(path)
            stem = filename[:(- 4)]  # drop the '.jpg' extension
            # 'general' images are kept only if their stem appears in the
            # combined whitelist.
            if ('general' in stem) and (not any((stem in line) for line in whitelist_lines)):
                os.remove(path)
        except Exception:
            # Best-effort cleanup: skip unreadable files rather than abort.
            # Narrowed from a bare except so Ctrl-C still interrupts.
            pass
def main():
    """Run the Bamboogle QA benchmark against an OpenAI engine.

    Reads a prompt file, selects test questions from the Bamboogle CSV
    (all of them, a random sample, or a fixed index file), queries the
    model once per question, and streams results to a ``.jsonl`` file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--prompt_dir', default=None, type=str, required=True, help='directory to prompt file (.txt)')
    parser.add_argument('--eng', default=None, type=str, required=True, help='engine')
    parser.add_argument('--num_test', default=(- 1), type=int, help='number of samples tested. -1 if on all test samples')
    parser.add_argument('--seed', default=1357, type=int, help='random seed')
    parser.add_argument('--temp', default=0.0, type=float, help='temperature for generation')
    parser.add_argument('--max_tokens', default=256, type=int, help='max # of tokens for generation')
    parser.add_argument('--test_ind', default=None, type=str, help='dir to test indices. If not provided, randomly choose.')
    parser.add_argument('--suffix', default='', type=str, help='')
    args = parser.parse_args()
    print(args)
    file = args.prompt_dir
    assert file.endswith('.txt')
    # Prompt name = basename without the '.txt' extension; used in the
    # output filename and the result key.
    prompt_name = os.path.basename(file)[:(- 4)]
    print(file, prompt_name)
    with open(file, encoding='utf-8') as f:
        prompt = f.read().strip()
    data = pd.read_csv('Bamboogle Prerelease - Sheet1.csv', encoding='utf-8')
    qa_pairs = list(zip(data.Question, data.Answer))
    print('loading dataset complete. altogether', len(qa_pairs), 'questions')
    NUM_TEST = args.num_test
    if (NUM_TEST == (- 1)):
        # Evaluate on the full dataset.
        qa_pairs_test = qa_pairs
    elif (args.test_ind is None):
        # Seeded random subset for reproducibility.
        np.random.seed(args.seed)
        rand_indices = np.random.choice(len(qa_pairs), NUM_TEST, replace=False)
        qa_pairs_test = [qa_pairs[i] for i in rand_indices]
    else:
        # Fixed indices from a JSON file; must match --num_test exactly.
        with open(args.test_ind, 'r') as f:
            test_ind = json.load(f)
        assert (len(test_ind) == NUM_TEST)
        qa_pairs_test = [qa_pairs[i] for i in test_ind]
    print('testing on', len(qa_pairs_test), 'samples')
    with open('api_key.txt', 'r') as f:
        openai.api_key = f.read().strip()
    file_name = 'result_bamboogle/bamboogle_{}.eng{}.sample{}.seed{}.temp{}.{}.jsonl'.format(prompt_name, args.eng, NUM_TEST, args.seed, args.temp, args.suffix)
    # Robustness: create the output directory if it does not exist yet —
    # the original crashed when result_bamboogle/ was missing.
    os.makedirs(os.path.dirname(file_name), exist_ok=True)
    # Context manager so the file is closed (and partial results kept)
    # even if an API call raises mid-loop; the original leaked the writer.
    with jsonlines.open(file_name, mode='w') as writer:
        count = 0
        for (question, answer) in qa_pairs_test:
            count += 1
            print('currently', prompt_name, '#', count)
            result = dict()
            result['question'] = question
            result['answer'] = answer
            max_tokens = args.max_tokens
            if (prompt_name == 'standard'):
                # Standard prompting only needs a short direct answer.
                max_tokens = 30
            result[('ans_' + prompt_name)] = get_answer_from_gpt(prompt, question, eng=args.eng, max_tokens=max_tokens, temperature=args.temp)
            writer.write(result)
        # Record the prompt itself as the final line for provenance.
        writer.write(prompt)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.