code stringlengths 101 5.91M |
|---|
def main(args):
    """Run masked-video pre-training end to end.

    Sets up distributed state, seeding, the data pipeline, LR/WD schedules,
    then loops over epochs with checkpointing and logging.
    Note: mutates ``args`` in place (lr/min_lr/warmup_lr are rescaled,
    window_size and patch_size are attached).
    """
    utils.init_distributed_mode(args)
    print(args)
    device = torch.device(args.device)
    # per-rank seed offset so each worker draws different randomness
    seed = (args.seed + utils.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True
    model = get_model(args)
    patch_size = model.encoder.patch_embed.patch_size
    print(('Patch size = %s' % str(patch_size)))
    # (temporal, height, width) token-grid size; frames are paired (// 2)
    args.window_size = ((args.num_frames // 2), (args.input_size // patch_size[0]), (args.input_size // patch_size[1]))
    args.patch_size = patch_size
    dataset_train = build_pretraining_dataset(args)
    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    sampler_rank = global_rank
    # steps each rank runs per epoch (drop_last below matches this count)
    num_training_steps_per_epoch = ((len(dataset_train) // args.batch_size) // num_tasks)
    sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    print(('Sampler_train = %s' % str(sampler_train)))
    # only rank 0 writes tensorboard logs
    if ((global_rank == 0) and (args.log_dir is not None)):
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
    else:
        log_writer = None
    data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True, worker_init_fn=utils.seed_worker)
    model.to(device)
    model_without_ddp = model
    n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
    print(('Model = %s' % str(model_without_ddp)))
    print('number of params: {} M'.format((n_parameters / 1000000.0)))
    total_batch_size = (args.batch_size * utils.get_world_size())
    # linear LR scaling by global batch size (reference batch = 256)
    args.lr = ((args.lr * total_batch_size) / 256)
    args.min_lr = ((args.min_lr * total_batch_size) / 256)
    args.warmup_lr = ((args.warmup_lr * total_batch_size) / 256)
    print(('LR = %.8f' % args.lr))
    print(('Batch size = %d' % total_batch_size))
    print(('Number of training steps = %d' % num_training_steps_per_epoch))
    print(('Number of training examples per epoch = %d' % (total_batch_size * num_training_steps_per_epoch)))
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        model_without_ddp = model.module
    optimizer = create_optimizer(args, model_without_ddp)
    loss_scaler = NativeScaler()
    print('Use step level LR & WD scheduler!')
    # per-step cosine schedules for learning rate and weight decay
    lr_schedule_values = utils.cosine_scheduler(args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch, warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps)
    if (args.weight_decay_end is None):
        args.weight_decay_end = args.weight_decay
    wd_schedule_values = utils.cosine_scheduler(args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch)
    print(('Max WD = %.7f, Min WD = %.7f' % (max(wd_schedule_values), min(wd_schedule_values))))
    # resume from an existing checkpoint if present (presumably also sets
    # args.start_epoch -- confirm against utils.auto_load_model)
    utils.auto_load_model(args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
    torch.cuda.empty_cache()
    print(f'Start training for {args.epochs} epochs')
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # reshuffle shard assignment each epoch
            data_loader_train.sampler.set_epoch(epoch)
        if (log_writer is not None):
            log_writer.set_step((epoch * num_training_steps_per_epoch))
        train_stats = train_one_epoch(model, data_loader_train, optimizer, device, epoch, loss_scaler, args.clip_grad, log_writer=log_writer, start_steps=(epoch * num_training_steps_per_epoch), lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values, patch_size=patch_size[0], normlize_target=args.normlize_target, num_samples=args.num_samples, use_frame_diff_as_target=args.use_frame_diff_as_target, frame_diff_group_size=args.frame_diff_group_size, target_diff_weight=args.target_diff_weight)
        if args.output_dir:
            # periodic checkpoint plus a final one at the last epoch
            if ((((epoch + 1) % args.save_ckpt_freq) == 0) or ((epoch + 1) == args.epochs)):
                utils.save_model(args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch)
        log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters}
        if (args.output_dir and utils.is_main_process()):
            if (log_writer is not None):
                log_writer.flush()
            # append one JSON line per epoch
            with open(os.path.join(args.output_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
                f.write((json.dumps(log_stats) + '\n'))
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
class ResnetV2(tf.keras.Model):
    """Pre-activation ResNet-v2 built from standardized convs and GroupNorm
    (BiT-style -- confirm against the original repo).

    num_units: residual units per stage; filters_factor scales the widths
    (factor 4 gives 64/128/256/512); num_outputs of 0/None drops the head.
    """
    def __init__(self, num_units=(3, 4, 6, 3), num_outputs=1000, filters_factor=4, strides=(1, 2, 2, 2), **kwargs):
        super(ResnetV2, self).__init__(**kwargs)
        num_blocks = len(num_units)
        # widths double per stage: 16*factor, 32*factor, 64*factor, ...
        num_filters = tuple((((16 * filters_factor) * (2 ** b)) for b in range(num_blocks)))
        self._root = self._create_root_block(num_filters=num_filters[0])
        self._blocks = []
        for (b, (f, u, s)) in enumerate(zip(num_filters, num_units, strides), 1):
            n = 'block{}'.format(b)
            self._blocks.append(self._create_block(num_units=u, num_filters=f, stride=s, name=n))
        # final norm -> activation -> global pooling before the optional head
        self._pre_head = [normalization.GroupNormalization(name='group_norm'), ReLU(), tf.keras.layers.GlobalAveragePooling2D()]
        self._head = None
        if num_outputs:
            # zero-initialized classification head
            self._head = tf.keras.layers.Dense(units=num_outputs, use_bias=True, kernel_initializer='zeros', trainable=self.trainable, name='head/dense')
    def _create_root_block(self, num_filters, conv_size=7, conv_stride=2, pool_size=3, pool_stride=2):
        """Stem: pad -> 7x7 standardized conv (stride 2) -> pad -> 3x3 max-pool."""
        layers = [PaddingFromKernelSize(conv_size), StandardizedConv2D(filters=num_filters, kernel_size=conv_size, strides=conv_stride, trainable=self.trainable, use_bias=False, name='standardized_conv2d'), PaddingFromKernelSize(pool_size), tf.keras.layers.MaxPool2D(pool_size=pool_size, strides=pool_stride, padding='valid')]
        return tf.keras.Sequential(layers, name='root_block')
    def _create_block(self, num_units, num_filters, stride, name):
        """One stage of bottleneck units; only the first unit may downsample."""
        layers = []
        for i in range(1, (num_units + 1)):
            layers.append(BottleneckV2Unit(num_filters=num_filters, stride=(stride if (i == 1) else 1), name=('unit%02d' % i)))
        return tf.keras.Sequential(layers, name=name)
    def compute_output_shape(self, input_shape):
        """Thread a static shape through root, stages, pre-head and head.

        The head expects a 4-D shape, so the pooled (batch, features) shape is
        temporarily re-expanded to (batch, 1, 1, features) and squeezed back.
        """
        current_shape = self._root.compute_output_shape(input_shape)
        for block in self._blocks:
            current_shape = block.compute_output_shape(current_shape)
        for layer in self._pre_head:
            current_shape = layer.compute_output_shape(current_shape)
        if (self._head is not None):
            (batch_size, features) = current_shape.as_list()
            current_shape = (batch_size, 1, 1, features)
            current_shape = self._head.compute_output_shape(current_shape).as_list()
            current_shape = (current_shape[0], current_shape[3])
        return tf.TensorShape(current_shape)
    def call(self, x):
        """Forward pass: stem, residual stages, norm/act/pool, optional head."""
        x = self._root(x)
        for block in self._blocks:
            x = block(x)
        for layer in self._pre_head:
            x = layer(x)
        if (self._head is not None):
            x = self._head(x)
        return x
class MixedIterator(object):
    """Round-robin iterator that interleaves batches from several files.

    Each file gets a SingleFileBatchIterator; one batch is drained from each
    live iterator per cycle until all files are exhausted.
    """
    def __init__(self, files, batch_size=10000):
        self.files = files
        # distinct text types across all files, shared with every per-file iterator
        self.types = set(map((lambda x: x['text_type']), files))
        self.batch_size = batch_size
    def __iter__(self):
        iterators = [SingleFileBatchIterator(f, self.types, self.batch_size) for f in self.files]
        while iterators:
            # Bug fix: the original removed exhausted iterators from the list
            # *while iterating over it*, which silently skipped the iterator
            # following each removed one. Rebuild the live list instead.
            alive = []
            for iterator in iterators:
                it_batch = iterator.get_next_batch()
                if (not it_batch):
                    continue  # exhausted: drop it from the next cycle
                alive.append(iterator)
                for line in it_batch:
                    (yield line)
            iterators = alive
def oneHotVector(num, domain, vector):
    """Write a 6-way one-hot encoding of ``num`` into the slice of ``vector``
    reserved for ``domain`` (starting at ``domains.index(domain) * 6``).

    For the 'train' domain, counts are bucketed as 0 / 1-2 / 3-5 / 6-10 /
    11-40 / >40; for every other domain the count indexes directly, capped
    at 5. Returns ``vector`` (also modified in place).
    """
    number_of_options = 6
    # hoisted: the original computed this identically in both branches
    idx = domains.index(domain)
    if (domain != 'train'):
        if (num >= 5):
            bucket = 5
        elif (num >= 0):
            bucket = num
        else:
            # the original matched no branch for negative counts and left
            # the vector untouched; preserve that behavior
            return vector
    else:
        if (num == 0):
            bucket = 0
        elif (num <= 2):
            bucket = 1
        elif (num <= 5):
            bucket = 2
        elif (num <= 10):
            bucket = 3
        elif (num <= 40):
            bucket = 4
        else:
            bucket = 5
    one_hot = np.zeros(number_of_options, dtype=int)
    one_hot[bucket] = 1
    vector[(idx * number_of_options):((idx + 1) * number_of_options)] = one_hot
    return vector
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Register Python-binding methods for
    ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl>>.

    NOTE(review): this looks like auto-generated pybindgen registration code --
    regenerate rather than hand-edit.
    """
    cls.add_constructor([])
    # copy constructor taking a const reference
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
# NOTE(review): the next line is almost certainly a decorator whose leading
# '@' (and module prefix, e.g. dace's ``@compare_numpy_output``) was lost in
# extraction -- restore it against the original source before running.
_numpy_output(non_zero=True, positive=True)
def test_augfloordiv(A: dace.int64[(5, 5)], B: dace.int64[(5, 5)]):
    """Augmented floor-division on a 5x5 int64 array; non_zero inputs above
    guard against division by zero."""
    B //= A
    return B
class ResNet(nn.Module):
    """Narrow ResNet backbone (32->256 base channels) for single-channel input.

    Bug fix: ``forward`` referenced ``self.fc``, which was never created, so
    every forward pass raised AttributeError (and ``num_classes`` was
    silently ignored). The classifier head is now defined.

    NOTE(review): ``self.maxpool`` and ``self.avg_bn`` are constructed but
    never used in ``forward`` -- kept for interface/checkpoint compatibility;
    confirm whether they were meant to be applied.
    """
    def __init__(self, block, layers, num_classes=1000, in_channels=1):
        self.inplanes = 32
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=7, stride=1, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 32, layers[0])
        self.layer2 = self._make_layer(block, 64, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 128, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 256, layers[3], stride=2)
        # expects the feature map entering the pool to be exactly 8x38
        self.avgpool = nn.AvgPool2d((8, 38), stride=1)
        self.avg_bn = nn.BatchNorm1d(256)
        # classifier head (was missing; forward() uses self.fc)
        self.fc = nn.Linear((256 * block.expansion), num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He initialization (fan-out) for ReLU networks
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual units; add a 1x1-conv downsample branch
        when the stride or channel count changes."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # flatten to (batch, features) for the linear head
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        return x
def pop_path_info(environ, charset='utf-8', errors='replace'):
    """Pop the first segment off ``PATH_INFO`` and append it to
    ``SCRIPT_NAME``, returning the popped segment decoded to text.

    Returns None when PATH_INFO is empty or missing. Leading slashes are
    carried over to SCRIPT_NAME so the two always reassemble to the same URL.
    """
    remaining = environ.get('PATH_INFO')
    if not remaining:
        return None
    script = environ.get('SCRIPT_NAME', '')
    stripped = remaining.lstrip('/')
    # move any leading slashes from PATH_INFO onto SCRIPT_NAME
    script += '/' * (len(remaining) - len(stripped))
    segment, sep, rest = stripped.partition('/')
    # with no separator left, the whole remainder becomes the segment
    environ['PATH_INFO'] = ('/' + rest) if sep else ''
    environ['SCRIPT_NAME'] = script + segment
    return to_unicode(wsgi_get_bytes(segment), charset, errors, allow_none_charset=True)
def three_squares(n):
    """Return integers (a, b, c) with a^2 + b^2 + c^2 == n.

    Raises ValueError when no representation exists -- i.e. n < 0 or n of the
    form 4^a*(8*b + 7) (Legendre's three-square theorem). The components are
    returned in non-decreasing order (relies on ``two_squares`` returning an
    ordered pair -- confirm).
    """
    n = ZZ(n)
    if (n <= 0):
        if (n == 0):
            z = ZZ.zero()
            return (z, z, z)
        raise ValueError(('%s is not a sum of 3 squares' % n))
    # small inputs go to the fast Cython implementation
    if (n.nbits() <= 32):
        from sage.rings import sum_of_squares
        return sum_of_squares.three_squares_pyx(n)
    # strip the largest even power of 4: n = m^2 * N with N not divisible by 4
    e = (n.valuation(2) // 2)
    m = (ZZ.one() << e)
    N = (n >> (2 * e))
    # perfect square: trivially (0, 0, sqrt(n))
    (x, r) = N.sqrtrem()
    if (not r):
        z = ZZ.zero()
        return (z, z, (x * m))
    # Search downward for x such that the remainder N - x^2 (or half of it)
    # is a (pseudo)prime expressible as a sum of two squares; the parity of x
    # is chosen per residue class so the remainder lands in the right class.
    if ((N % 4) == 1):
        if (x % 2):
            x -= 1
        while (x >= 0):
            p = (N - (x * x))
            if p.is_pseudoprime():
                break
            x -= 2
    elif ((N % 4) == 2):
        if ((x % 2) == 0):
            x -= 1
        while (x >= 0):
            p = (N - (x * x))
            if p.is_pseudoprime():
                break
            x -= 2
    elif ((N % 8) == 3):
        if ((x % 2) == 0):
            x -= 1
        while (x >= 0):
            # here N - x^2 is even; test its half instead
            p = ((N - (x * x)) >> 1)
            if p.is_pseudoprime():
                break
            x -= 2
    else:
        # N % 8 == 7: excluded by the three-square theorem
        raise ValueError(('%s is not a sum of 3 squares' % n))
    if (x < 0):
        # no suitable pseudoprime found: fall back to brute force over x
        if (N > 10000):
            from warnings import warn
            warn(('Brute forcing sum of 3 squares for large N = %s' % N), RuntimeWarning)
        x = N.isqrt()
        while True:
            try:
                (a, b) = two_squares((N - (x * x)))
                break
            except ValueError:
                x -= 1
                assert (x >= 0)
    # scale back by m and sort the three components
    if (x >= b):
        return ((a * m), (b * m), (x * m))
    elif (x >= a):
        return ((a * m), (x * m), (b * m))
    else:
        return ((x * m), (a * m), (b * m))
def test_is_normalized(os_default, os_camera_full, os_structured_full, os_custom_keys_norm):
    """Normalized option-set fixtures report True; full ones report False."""
    for normalized in (os_default, os_custom_keys_norm):
        assert normalized.is_normalized()
    for unnormalized in (os_camera_full, os_structured_full):
        assert not unnormalized.is_normalized()
class SAMG(nn.Module):
    """Graph-convolution helper that infers a data-dependent adjacency from
    pairwise feature differences and mixes it with an optional static
    adjacency ``A`` before aggregating features.
    """
    def __init__(self, in_channels, out_channels, rel_reduction=8, mid_reduction=1):
        super(SAMG, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # tiny inputs (e.g. raw 3D/6D coordinates) get fixed widths instead
        # of reduction ratios
        if ((in_channels == 3) or (in_channels == 6)):
            self.rel_channels = 8
            self.mid_channels = 16
        else:
            self.rel_channels = (in_channels // rel_reduction)
            self.mid_channels = (in_channels // mid_reduction)
        self.conv1 = nn.Conv2d(self.in_channels, self.rel_channels, kernel_size=1)
        self.conv2 = nn.Conv2d(self.in_channels, self.rel_channels, kernel_size=1)
        self.conv3 = nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1)
        # maps relation channels back to output channels for the adjacency
        self.conv4 = nn.Conv2d(self.rel_channels, self.out_channels, kernel_size=1)
        self.tanh = nn.Tanh()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)
    def forward(self, x, A=None, alpha=1):
        """x: 4-D tensor; axis -2 is averaged out for the relation branches
        (presumably the temporal axis -- confirm against callers).

        Returns features aggregated over the learned adjacency via einsum.
        """
        (x1, x2, x3) = (self.conv1(x).mean((- 2)), self.conv2(x).mean((- 2)), self.conv3(x))
        # pairwise differences between all node pairs, squashed by tanh
        x1 = self.tanh((x1.unsqueeze((- 1)) - x2.unsqueeze((- 2))))
        # learned adjacency scaled by alpha, plus the static A if provided
        x1 = ((self.conv4(x1) * alpha) + (A.unsqueeze(0).unsqueeze(0) if (A is not None) else 0))
        x1 = torch.einsum('ncuv,nctv->nctu', x1, x3)
        return x1
# NOTE(review): the next line looks like a decorator whose prefix was lost in
# extraction (likely ``@pytest.mark.experimental``) -- restore before running.
.experimental
def test_raises_fit(log, user_features, item_features, model):
    """Fitting on a log whose users are not all covered by user_features must
    raise ValueError."""
    with pytest.raises(ValueError, match='features for .*'):
        model.fit(log.filter((sf.col('user_idx') != 0)), user_features.filter((sf.col('user_idx') != 1)), item_features)
def split_train_dev_test(data):
    """Split dataset entries into train/dev/test (56% / 24% / 20%).

    Returns ``((train, dev, test), (g2i, i2g, p2i, i2p))`` where the mappings
    encode gender labels and the sorted profession vocabulary.
    """
    (g2i, i2g) = ({'m': 0, 'f': 1}, {1: 'f', 0: 'm'})
    # build and sort the profession vocabulary once
    # (the original sorted the same list twice)
    professions = sorted({d['raw_title'].lower() for d in data})
    p2i = {p: i for (i, p) in enumerate(professions)}
    i2p = {i: p for (i, p) in enumerate(professions)}
    all_data = []
    for entry in data:
        (gender, prof) = (entry['gender'].lower(), entry['raw_title'].lower())
        (raw, start_index) = (entry['raw'], entry['start_pos'])
        all_data.append({'g': g2i[gender], 'p': p2i[prof], 'text': raw, 'start': start_index})
    # 80/20 split, then 70/30 of the 80% -> 56% train, 24% dev, 20% test
    (train_dev, test) = sklearn.model_selection.train_test_split(all_data, test_size=0.2, random_state=0)
    (train, dev) = sklearn.model_selection.train_test_split(train_dev, test_size=0.3, random_state=0)
    print('Train size: {}; Dev size: {}; Test size: {}'.format(len(train), len(dev), len(test)))
    return ((train, dev, test), (g2i, i2g, p2i, i2p))
# NOTE(review): the next line looks like a class decorator whose prefix was
# lost in extraction (likely ``@dace.library.environment``) -- restore it.
.environment
class ONNXRuntimeCUDA():
    """Code-generating environment descriptor for ONNX Runtime on CUDA.

    The class attributes feed a CMake/codegen pipeline; ``init_code`` returns
    the C snippet that creates CUDA memory infos, registers the CUDA
    execution provider(s) and rebuilds the kernel session.
    """
    cmake_minimum_version = None
    cmake_packages = []
    cmake_variables = {}
    cmake_compile_flags = []
    cmake_link_flags = []
    cmake_files = []
    # extra fields injected into the generated program-state struct
    state_fields = ['OrtMemoryInfo* ort_cuda_mem_info;', 'OrtMemoryInfo* ort_cuda_pinned_mem_info;']
    dependencies = [ONNXRuntime]
    cmake_libraries = []
    cmake_includes = []
    headers = {}
    # configured externally (presumably from ORT_* environment variables via
    # _setup_env -- confirm)
    max_concurrent_streams = None
    use_streams = False
    def init_code(_):
        # receiver is unused; environments appear to be invoked statically
        _setup_env()
        if (ONNXRuntimeCUDA.use_streams and (ONNXRuntimeCUDA.max_concurrent_streams == 0)):
            raise ValueError(f'When ORT_USE_STREAMS is true, the environment requires a static number of max_concurrent_streams, got {ONNXRuntimeCUDA.max_concurrent_streams}')
        if ONNXRuntimeCUDA.use_streams:
            # one provider registration per stream (range is max+1 entries)
            providers_setup_code = '\n'.join((f'''
            {{
            OrtCUDAProviderOptions options = {{
            .device_id = 0,
            .do_copy_in_default_stream = 0,
            .user_compute_stream = __state->gpu_context->streams[{i}],
            }};
            __ort_check_status(__state->ort_api,
            __state->ort_api->SessionOptionsAppendExecutionProvider_CUDA(__state->ort_session_options, &options));
            }}
            ''' for i in range((ONNXRuntimeCUDA.max_concurrent_streams + 1))))
        else:
            # -1 is the sentinel for "no static stream count"
            assert (ONNXRuntimeCUDA.max_concurrent_streams == (- 1))
            providers_setup_code = '\n    {\n    OrtCUDAProviderOptions options = {\n    .device_id = 0,\n    .has_user_compute_stream = 1,\n    .user_compute_stream = nullptr,\n    };\n    __ort_check_status(__state->ort_api,\n    __state->ort_api->SessionOptionsAppendExecutionProvider_CUDA(__state->ort_session_options, &options));\n    }\n    '
        init_code = f'''
        __ort_check_status(__state->ort_api, __state->ort_api->CreateMemoryInfo("Cuda",
        /*allocator_type=*/OrtDeviceAllocator, /*device=*/0, /*mem_type=*/OrtMemTypeDefault, &__state->ort_cuda_mem_info));
        __ort_check_status(__state->ort_api, __state->ort_api->CreateMemoryInfo("CudaPinned",
        /*allocator_type=*/OrtDeviceAllocator, /*device=*/0, /*mem_type=*/OrtMemTypeCPU, &__state->ort_cuda_pinned_mem_info));
        {providers_setup_code}
        // overwrite the CPU ORT session with the CUDA session
        __state->ort_api->ReleaseKernelSession(__state->ort_session);
        __ort_check_status(__state->ort_api,
        __state->ort_api->CreateKernelSession(__state->ort_session_options, &__state->ort_session, /*opset_version=*/12));
        '''
        return init_code
    # C snippet appended at teardown: releases both memory-info handles
    finalize_code = '\n    __state->ort_api->ReleaseMemoryInfo(__state->ort_cuda_mem_info);\n    __state->ort_api->ReleaseMemoryInfo(__state->ort_cuda_pinned_mem_info);\n    '
def espnet_hubert_base_iter0(*args, refresh=False, **kwargs):
    """Hub entry point: download the checkpoint and config, then build the
    ESPnet HuBERT upstream.

    NOTE(review): both URL string literals below were truncated in extraction
    (unterminated quotes) -- this block does not parse as-is. Restore the real
    checkpoint/config URLs from the original source.
    """
    url = '
    config_url = '
    (ckpt, config) = _urls_to_filepaths(url, config_url, refresh=refresh)
    return espnet_hubert_custom(ckpt, config)
class Tensor():
    """Lightweight tensor descriptor (shape/type/buffer) with optional
    per-tensor or per-channel quantization parameters.
    """
    # monotonically increasing id shared by all instances (not thread-safe)
    ID = 0
    def __init__(self, shape, name: str=None, ttype='neuron', data=None, dtype: str='float32', is_const=False):
        self.id = int(Tensor.ID)
        # scalars are promoted to a single-element shape list
        self.shape = (shape if isinstance(shape, list) else [shape])
        self.name = (('BMTensor' + str(self.id)) if (name is None) else name)
        assert (ttype.lower() in ['neuron', 'coeff'])
        self.ttype = ttype.lower()
        assert (dtype.lower() in ['float32', 'float16', 'int32', 'uint32', 'int16', 'uint16', 'int8', 'uint8'])
        self.dtype = dtype.lower()
        self.buffer = data
        self.is_const = is_const
        self.is_quantized: bool = False
        # initializes scale/zero_point to None via the no-arg call below
        self.quantization()
        Tensor.ID += 1
    def quantization(self, scale: Union[(float, List[float])]=None, zero_point: Union[(int, List[int])]=None):
        """Attach or merge quantization parameters.

        First call records scale/zero_point (marking the tensor quantized if
        either is given); later calls may only fill in missing values --
        conflicting non-None values trigger an assertion.
        """
        if (self.is_quantized is False):
            self.is_quantized = ((scale is not None) or (zero_point is not None))
            self.scale = scale
            self.zero_point = zero_point
        else:
            if (self.scale is None):
                self.scale = scale
            elif (scale is not None):
                assert (self.scale == scale)
            if (self.zero_point is None):
                self.zero_point = zero_point
            elif (zero_point is not None):
                assert (self.zero_point == zero_point)
    def __repr__(self):
        s = 'tensor (\n{modstr}\n)'
        modstr = [self.id, self.name, self.shape, self.ttype, self.dtype, self.buffer]
        if self.is_quantized:
            modstr += [self.scale, self.zero_point]
        return s.format(modstr=_indent(modstr, 2))
# NOTE(review): the next two lines look like decorators whose prefixes were
# lost in extraction (likely ``@pytest.mark.experimental`` and
# ``@pytest.mark.parametrize(...)``) -- restore against the original source.
.experimental
.parametrize('batch_size', BATCH_SIZES)
def test_actor_get_action(ddpg_actor_param, batch_size):
    """The DDPG actor must map an item's embedding back to that item id."""
    (actor, param) = ddpg_actor_param
    user_num = param['user_num']
    # cannot draw a batch larger than the available users
    batch_size = min(batch_size, user_num)
    item_num = param['item_num']
    items = torch.tensor(range(item_num)).repeat((batch_size, 1))
    action = torch.randint(high=item_num, size=(batch_size,))
    action_emb = actor.state_repr.item_embeddings(action)
    discrete_actions = actor.get_action(action_emb, items, torch.ones_like(items))
    # product over the whole batch: every recovered id must match
    assert (action == discrete_actions).prod()
class FeedForward(nn.Module):
    """Transformer feed-forward block: Linear -> GEGLU -> Dropout -> Linear.

    The first projection is doubled in width (presumably because GEGLU splits
    its input into value and gate halves -- confirm against GEGLU).
    """
    def __init__(self, dim, mult=4, dropout=0.0):
        super().__init__()
        hidden = dim * mult
        self.net = nn.Sequential(
            nn.Linear(dim, hidden * 2),
            GEGLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden, dim),
        )
    def forward(self, x, **kwargs):
        # extra kwargs are accepted for interface compatibility and ignored
        return self.net(x)
class DataLoaderTest(object):
    """Inference-time loader yielding batches of tokenized (claim, evidence)
    pairs plus their instance ids and raw evidence entries.

    Iterating yields ``(inp, msk, seg, ids, evi_list)`` and raises
    StopIteration after one full pass (the step counter then resets, so the
    loader can be iterated again).
    """
    def __init__(self, data_path, tokenizer, args, cuda=True, batch_size=64):
        self.cuda = cuda
        self.batch_size = batch_size
        self.tokenizer = tokenizer
        self.max_len = args.max_len
        self.evi_num = args.evi_num
        self.threshold = args.threshold
        self.data_path = data_path
        (inputs, ids, evi_list) = self.read_file(data_path)
        self.inputs = inputs
        self.ids = ids
        self.evi_list = evi_list
        self.total_num = len(inputs)
        self.total_step = np.ceil(((self.total_num * 1.0) / batch_size))
        self.step = 0
    def process_sent(self, sentence):
        """Strip PTB/FEVER bracket tokens and normalize dashes and quotes."""
        sentence = re.sub(' \\-LSB\\-.*?\\-RSB\\-', '', sentence)
        sentence = re.sub('\\-LRB\\- \\-RRB\\- ', '', sentence)
        sentence = re.sub(' -LRB-', ' ( ', sentence)
        sentence = re.sub('-RRB-', ' )', sentence)
        sentence = re.sub('--', '-', sentence)
        sentence = re.sub('``', '"', sentence)
        sentence = re.sub("''", '"', sentence)
        return sentence
    def process_wiki_title(self, title):
        """Turn an underscored wiki title with PTB tokens back into text."""
        title = re.sub('_', ' ', title)
        title = re.sub(' -LRB-', ' ( ', title)
        title = re.sub('-RRB-', ' )', title)
        title = re.sub('-COLON-', ':', title)
        return title
    def read_file(self, data_path):
        """Read a JSONL file; emit one (claim, evidence-sentence) pair per
        evidence entry of every instance."""
        inputs = list()
        ids = list()
        evi_list = list()
        # explicit encoding: the data is JSON, which is UTF-8 by convention
        with open(data_path, encoding='utf-8') as fin:
            for (step, line) in enumerate(fin):
                instance = json.loads(line.strip())
                claim = instance['claim']
                instance_id = instance['id']
                for evidence in instance['evidence']:
                    ids.append(instance_id)
                    inputs.append([self.process_sent(claim), self.process_sent(evidence[2])])
                    evi_list.append(evidence)
        return (inputs, ids, evi_list)
    def shuffle(self):
        """Shuffle inputs/ids/evi_list with one shared permutation.

        Bug fix: the original shuffled ``self.examples``, an attribute that
        is never created, so calling shuffle() raised AttributeError and
        would not have kept the three parallel lists aligned anyway.
        """
        order = np.random.permutation(self.total_num)
        self.inputs = [self.inputs[i] for i in order]
        self.ids = [self.ids[i] for i in order]
        self.evi_list = [self.evi_list[i] for i in order]
    def __iter__(self):
        return self
    def __next__(self):
        return self.next()
    def __len__(self):
        # Bug fix: the original returned ``self._n_batch``, which never exists.
        return int(self.total_step)
    def next(self):
        """Return the next batch of tensors, or reset and raise StopIteration."""
        if (self.step < self.total_step):
            start = (self.step * self.batch_size)
            end = ((self.step + 1) * self.batch_size)
            inputs = self.inputs[start:end]
            ids = self.ids[start:end]
            evi_list = self.evi_list[start:end]
            (inp, msk, seg) = tok2int_list(inputs, self.tokenizer, self.max_len, (- 1))
            inp_tensor_input = Variable(torch.LongTensor(inp))
            msk_tensor_input = Variable(torch.LongTensor(msk))
            seg_tensor_input = Variable(torch.LongTensor(seg))
            if self.cuda:
                inp_tensor_input = inp_tensor_input.cuda()
                msk_tensor_input = msk_tensor_input.cuda()
                seg_tensor_input = seg_tensor_input.cuda()
            self.step += 1
            return (inp_tensor_input, msk_tensor_input, seg_tensor_input, ids, evi_list)
        else:
            # reset so the loader can be re-iterated
            self.step = 0
            raise StopIteration()
# NOTE(review): the next line looks like a stripped registration decorator
# (e.g. ``@FUNCTIONS.register_module()``) -- restore against the original.
_module()
class ShikraTextProcess(BaseTextProcessFunc):
    """Tokenize a Shikra-style conversation for training or evaluation, with
    optional truncation that never drops image placeholder tokens."""
    def __call__(self, conv: Conversation, preprocessor: Dict[(str, Any)], mode: str, **tokenize_kwargs) -> Dict[(str, Any)]:
        tokenizer = preprocessor['text']
        assert isinstance(tokenizer, LlamaTokenizer), 'only work for LlamaTokenizer'
        _truncation_size = tokenize_kwargs.pop('truncation_size', None)
        _kwargs = {'return_tensors': 'pt'}
        _kwargs.update(tokenize_kwargs)
        if (conv.sep_style == SeparatorStyle.ADD_COLON_TWO):
            if (mode in ['train']):
                ret = self.tk_conv_colon_two_train(conv, tokenizer, **_kwargs)
            else:
                ret = self.tk_conv_colon_two_eval(conv, tokenizer, **_kwargs)
        else:
            raise ValueError(f'''unrecognized conv_style: {conv.sep_style}.
                the conv is {conv}''')
        if (_truncation_size is None):
            return ret
        if (len(ret['input_ids']) <= _truncation_size):
            return ret
        # truncate to _truncation_size while keeping image-related tokens
        origin_len = len(ret['input_ids'])
        ids_to_remove_num = (origin_len - _truncation_size)
        ids_should_not_remove = list(map(tokenizer.convert_tokens_to_ids, (DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN)))
        # fast path: the tail to cut contains no image tokens -> plain prefix
        back_no_image = all(((ids not in ids_should_not_remove) for ids in ret['input_ids'][_truncation_size:]))
        if back_no_image:
            tgt_ids = list(range(_truncation_size))
        else:
            # otherwise remove non-image tokens scanning from the back
            ids_to_remove = set()
            for idx in range((origin_len - 1), (- 1), (- 1)):
                if (ret['input_ids'][idx] not in ids_should_not_remove):
                    ids_to_remove.add(idx)
                    if (len(ids_to_remove) >= ids_to_remove_num):
                        break
            tgt_ids = [_ for _ in range(origin_len) if (_ not in ids_to_remove)]
            logger.warning(f'truncate sample size from {origin_len} to {len(tgt_ids)}.')
        assert (len(tgt_ids) == _truncation_size), f"{len(tgt_ids)}, {_truncation_size}, {ret['input_ids'].tolist()}"
        # apply the same index selection to every tensor in the result dict
        truncated_ret = {k: v[tgt_ids] for (k, v) in ret.items()}
        return truncated_ret
    def tk_conv_colon_two_train(self, conv, tokenizer, **kwargs):
        """Tokenize for training: labels mask out everything except the
        assistant replies (instruction spans set to IGNORE_INDEX)."""
        conversation = conv.get_prompt()
        input_ids = tokenizer([conversation], **kwargs).input_ids[0]
        target = copy.deepcopy(input_ids)
        assert (conv.sep_style == SeparatorStyle.ADD_COLON_TWO)
        sep = ((conv.sep + conv.roles[1]) + ': ')
        total_len = int(target.ne(tokenizer.pad_token_id).sum())
        rounds = conversation.split(conv.sep2)
        # position 0 is the BOS token, always ignored
        cur_len = 1
        target[:cur_len] = IGNORE_INDEX
        for (i, rou) in enumerate(rounds):
            if (rou == ''):
                break
            parts = rou.split(sep)
            if (len(parts) != 2):
                break
            parts[0] += sep
            round_len = len(tokenizer(rou).input_ids)
            # -2 compensates for special tokens added by the tokenizer --
            # TODO(review) confirm this offset for the tokenizer in use
            instruction_len = (len(tokenizer(parts[0]).input_ids) - 2)
            target[cur_len:(cur_len + instruction_len)] = IGNORE_INDEX
            cur_len += round_len
        target[cur_len:] = IGNORE_INDEX
        if (cur_len < tokenizer.model_max_length):
            # length bookkeeping went wrong: ignore the whole sample
            if (cur_len != total_len):
                target[:] = IGNORE_INDEX
                warnings.warn(f'''WARNING: tokenization mismatch: {cur_len} vs. {total_len}. (ignored):
                {conversation}''')
        return dict(input_ids=input_ids, attention_mask=input_ids.ne(tokenizer.pad_token_id), labels=target)
    def tk_conv_colon_two_eval(self, conv, tokenizer, **kwargs):
        """Tokenize for eval: input omits the last assistant message; labels
        are the full prompt with padding masked out."""
        assert (len(conv.messages) >= 2)
        target = conv.get_prompt()
        # blank the final reply so the model must generate it
        conv.messages[(- 1)][(- 1)] = ''
        conversation = conv.get_prompt()
        input_ids = tokenizer([conversation], **kwargs).input_ids[0]
        target = tokenizer([target], add_special_tokens=False, **kwargs).input_ids[0]
        target[(target == tokenizer.pad_token_id)] = IGNORE_INDEX
        return dict(input_ids=input_ids, attention_mask=input_ids.ne(tokenizer.pad_token_id), labels=target)
class ExtractionThread(Thread):
    """Worker thread that parses one source file with libclang and runs the
    extraction pass over its translation unit.

    A slot in the global ``job_semaphore`` is claimed at construction time
    (throttling how many jobs exist at once) and released when run() finishes.
    """
    def __init__(self, filename, parameters):
        super().__init__()
        self.filename = filename
        self.parameters = parameters
        job_semaphore.acquire()
    def run(self):
        print('Processing "%s" ..' % self.filename, file=sys.stderr)
        try:
            clang_index = cindex.Index(cindex.conf.lib.clang_createIndex(False, True))
            translation_unit = clang_index.parse(self.filename, self.parameters)
            extract(self.filename, translation_unit.cursor, '')
        finally:
            # always free the job slot, even when parsing/extraction fails
            job_semaphore.release()
def load_system(source_dir):
    """Return the ``[build-system]`` table from ``source_dir/pyproject.toml``.

    Raises FileNotFoundError if the file is absent and KeyError if the table
    is missing.
    """
    pyproject = os.path.join(source_dir, 'pyproject.toml')
    # TOML files are UTF-8 by specification; don't rely on the platform
    # default encoding (which e.g. on Windows is not UTF-8)
    with open(pyproject, encoding='utf-8') as f:
        pyproject_data = toml.load(f)
    return pyproject_data['build-system']
def crop_largest_square(image, aspect_ratio=1):
    """Center-crop *image* to the largest box with the given aspect ratio.

    aspect_ratio is width/height; the default of 1 yields the largest
    centered square. Returns the cropped image.
    """
    width, height = image.size
    target_w = min(width, int(height * aspect_ratio))
    target_h = min(height, int(width / aspect_ratio))
    # center the target box inside the original frame
    left = (width - target_w) / 2
    top = (height - target_h) / 2
    return image.crop((left, top, left + target_w, top + target_h))
class Logger():
    """Console/file logger reporting trainer statistics and validator
    perplexities, optionally dumping validator translations to disk."""
    def __init__(self, name, trainer, validators=(), output_prefix=None, encoding='utf-8'):
        self.name = name
        self.trainer = trainer
        self.validators = validators
        self.output_prefix = output_prefix
        self.encoding = encoding
    def log(self, step=0):
        """Print stats for the current step; ``step`` is used only in the
        translation output filename."""
        if ((self.trainer is not None) or (len(self.validators) > 0)):
            print('{0}'.format(self.name))
        if (self.trainer is not None):
            print(' - Training: {0:10.2f} ({1:.2f}s: {2:.2f}tok/s src, {3:.2f}tok/s trg; epoch {4} '.format(float(self.trainer.perplexity_per_word()), self.trainer.total_time(), self.trainer.words_per_second()[0], self.trainer.words_per_second()[1], self.trainer.corpus.epoch))
            # sentence stats are only printed when the trainer tracks them
            # (the original buried this in an unreadable conditional expression
            # whose else-arm printed nothing)
            if self.trainer.backbool:
                print('sentstats(ds,ts,overlap): {0} )'.format(self.trainer.sent_stats()))
            self.trainer.reset_stats()
            sys.stdout.flush()
        for (val_id, validator) in enumerate(self.validators):
            t = time.time()
            perplexity = validator.perplexity()
            print(' - Validation: {0:10.2f} ({1:.2f}s)'.format(float(perplexity), (time.time() - t)))
            if (self.output_prefix is not None):
                # context manager guarantees the file is closed even if
                # translate() raises (the original leaked the handle on error)
                with open('{0}.{1}.{2}.txt'.format(self.output_prefix, val_id, step), mode='w', encoding=self.encoding, errors='surrogateescape') as f:
                    for line in validator.translate():
                        print(line, file=f)
        sys.stdout.flush()
# NOTE(review): the next line looks like a stripped decorator (likely
# ``@test_utils.test()``) -- restore against the original source. The inner
# function below may likewise be missing a ``@ti.func`` decorator; confirm.
_utils.test()
def test_assign_ann_over():
    """Re-annotating a name with a different type must raise
    TaichiCompilationError."""
    def func_ann_over():
        my_int = ti.i32
        d: my_int = 2
        d: ti.f32 = 2.0
    with pytest.raises(ti.TaichiCompilationError):
        func_ann_over()
class TraceNode(Node):
    """Search-tree node over a packet trace; children are traces with one
    dummy packet inserted at each possible position."""
    def __init__(self, trace, depth=0):
        super().__init__(x=list(trace), depth=depth, feature_extract_fn=extract_cumul)
    def expand(self, expansions=None):
        """Return child nodes; the ``expansions`` argument is accepted for
        interface compatibility and ignored."""
        del expansions
        # record this expansion in the global counter
        ExpansionCounter.get_default().increment()
        children = []
        for position in range(len(self.src)):
            candidates = insert_dummy_packets(self.src, [position])
            # skip positions where no insertion was possible
            if candidates:
                children.append(TraceNode(candidates[0], depth=(self.depth + 1)))
        return children
def exportable_test_case(constructor_mock, function_mock):
    """Build a four-statement test case (int literal -> constructor call ->
    float literal -> function call) with one assertion on each call result,
    wrapped in a TestCaseChromosome.
    """
    test_case = dtc.DefaultTestCase(ModuleTestCluster(0))
    int_stmt = IntPrimitiveStatement(test_case, 5)
    # constructor consumes the int as keyword argument 'y'
    constructor_stmt = ConstructorStatement(test_case, constructor_mock, {'y': int_stmt.ret_val})
    constructor_stmt.add_assertion(ass.ObjectAssertion(constructor_stmt.ret_val, 5))
    float_stmt = FloatPrimitiveStatement(test_case, 42.23)
    # function consumes the float as keyword argument 'z'
    function_stmt = FunctionStatement(test_case, function_mock, {'z': float_stmt.ret_val})
    function_stmt.add_assertion(ass.FloatAssertion(function_stmt.ret_val, 42.23))
    test_case.add_statement(int_stmt)
    test_case.add_statement(constructor_stmt)
    test_case.add_statement(float_stmt)
    test_case.add_statement(function_stmt)
    return tcc.TestCaseChromosome(test_case)
def _update_playable_dice(playable_dice: Array, played_dice_num: Array, dice: Array, action: Array) -> Array:
    """Mark dice as used (-1) after a move, branch-free for JAX tracing.

    Doubles (dice[0] == dice[1]): dice are consumed positionally from the
    back, clearing slot ``3 - played_dice_num``. Non-doubles: every slot whose
    value equals ``action % 6`` is set to -1.
    NOTE(review): assumes ``playable_dice`` has exactly 4 slots -- confirm.
    """
    _n = played_dice_num
    # the die value encoded in the action, broadcast to all four slots
    die_array = jnp.array(([(action % 6)] * 4), dtype=jnp.int32)
    dice_indices: Array = jnp.array([0, 1, 2, 3], dtype=jnp.int32)
    def _update_for_diff_dice(die: Array, idx: Array, playable_dice: Array):
        # per-slot: -1 when the slot matches the played die, unchanged otherwise
        return (((die == playable_dice[idx]) * (- 1)) + ((die != playable_dice[idx]) * playable_dice[idx]))
    # select between the doubles and non-doubles results arithmetically
    return (((dice[0] == dice[1]) * playable_dice.at[(3 - _n)].set((- 1))) + ((dice[0] != dice[1]) * jax.vmap(_update_for_diff_dice)(die_array, dice_indices, jnp.tile(playable_dice, (4, 1))).astype(jnp.int32)))
def rundocs(filename=None, raise_on_error=True):
    """Run all doctests found in the module loaded from *filename*.

    With filename=None the caller's own ``__file__`` is used. When
    raise_on_error is true, collected failure output is raised as one
    AssertionError after all tests ran.
    """
    from numpy.compat import npy_load_module
    import doctest
    if (filename is None):
        # default to the immediate caller's file; the frame depth of 1 is
        # load-bearing, so this lookup must stay at this call level
        f = sys._getframe(1)
        filename = f.f_globals['__file__']
    name = os.path.splitext(os.path.basename(filename))[0]
    m = npy_load_module(name, filename)
    tests = doctest.DocTestFinder().find(m)
    runner = doctest.DocTestRunner(verbose=False)
    msg = []
    # capture failure text only when we intend to raise with it
    if raise_on_error:
        out = (lambda s: msg.append(s))
    else:
        out = None
    for test in tests:
        runner.run(test, out=out)
    if ((runner.failures > 0) and raise_on_error):
        raise AssertionError(('Some doctests failed:\n%s' % '\n'.join(msg)))
def last_boxed_only_string(string: str) -> Optional[str]:
    """Return the last ``\\boxed{...}`` (falling back to ``\\fbox{...}``)
    substring of *string*, braces included.

    Returns None when neither marker occurs or the braces never balance.
    """
    start = string.rfind('\\boxed')
    if start < 0:
        start = string.rfind('\\fbox')
        if start < 0:
            return None
    # scan forward, tracking brace depth until the opening brace closes
    depth = 0
    for pos in range(start, len(string)):
        ch = string[pos]
        if ch == '{':
            depth += 1
        elif ch == '}':
            depth -= 1
            if depth == 0:
                return string[start:(pos + 1)]
    # unbalanced braces: no complete boxed expression
    return None
class GroupViTModel(metaclass=DummyObject):
    """Placeholder class: instantiating it raises via requires_backends unless
    the 'torch' backend is installed (auto-generated dummy-object pattern --
    do not hand-edit)."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def train(train_loader, model, criterion, optimizer, epoch, opt):
    """One training epoch for two-view contrastive learning.

    Each loader item provides two augmented views which are concatenated into
    a single batch, encoded, then split back into per-view features for the
    criterion. Returns the average loss over the epoch.
    """
    model.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    end = time.time()
    for (idx, (images, ta, sa)) in enumerate(train_loader):
        data_time.update((time.time() - end))
        # stack both augmented views into one forward pass
        images = torch.cat([images[0], images[1]], dim=0)
        if torch.cuda.is_available():
            images = images.cuda(non_blocking=True)
            ta = ta.cuda(non_blocking=True)
            sa = sa.cuda(non_blocking=True)
        bsz = ta.shape[0]
        # per-iteration warmup LR adjustment
        warmup_learning_rate(opt, epoch, idx, len(train_loader), optimizer)
        features = model(images)
        # split back into the two views and pair them along a new axis
        (f1, f2) = torch.split(features, [bsz, bsz], dim=0)
        features = torch.cat([f1.unsqueeze(1), f2.unsqueeze(1)], dim=1)
        loss = criterion(features, ta, sa, opt.group_norm, opt.method, epoch)
        losses.update(loss.item(), bsz)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if (((idx + 1) % opt.print_freq) == 0):
            print('Train: [{0}][{1}/{2}]\tBT {batch_time.val:.3f} ({batch_time.avg:.3f})\tDT {data_time.val:.3f} ({data_time.avg:.3f})\tloss {loss.val:.3f} ({loss.avg:.3f})'.format(epoch, (idx + 1), len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses))
            sys.stdout.flush()
    return losses.avg
def plot_compare(input, output, reduce_x, reduce_y):
    """Display *input* next to *output* upsampled by (reduce_x, reduce_y)
    via nearest-neighbour repetition, then clear the figure."""
    upsampled = np.repeat(np.repeat(output, reduce_x, axis=0), reduce_y, axis=1)
    fig, (ax_left, ax_right) = plt.subplots(1, 2, sharey=True)
    ax_left.imshow(input)
    ax_right.imshow(upsampled)
    plt.show()
    plt.clf()
# FIX: the decorator lines were corrupted ('.parametrize', '_utils.test');
# restore the standard pytest/taichi test decorators, and mark `fill` as a
# taichi kernel (its sparse_matrix_builder/ndarray annotations only make
# sense inside @ti.kernel).
@pytest.mark.parametrize('dtype', [ti.f32])
@pytest.mark.parametrize('solver_type', ['LLT', 'LU'])
@test_utils.test(arch=ti.cuda)
def test_gpu_sparse_solver2(dtype, solver_type):
    """Solve A x = b with the GPU sparse solver and compare against NumPy."""
    np_dtype = ti.lang.util.to_numpy_type(dtype)
    n = 10
    A = np.random.rand(n, n)
    # A @ A.T + I is symmetric positive definite, so LLT factorization is valid.
    A_psd = (np.dot(A, A.transpose()) + np.eye(n)).astype(np_dtype)
    Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=300, dtype=dtype)
    b = ti.ndarray(dtype, shape=n)

    @ti.kernel
    def fill(Abuilder: ti.types.sparse_matrix_builder(), InputArray: ti.types.ndarray(), b: ti.types.ndarray()):
        for i, j in ti.ndrange(n, n):
            Abuilder[i, j] += InputArray[i, j]
        for i in range(n):
            b[i] = i + 1

    fill(Abuilder, A_psd, b)
    A = Abuilder.build()
    solver = ti.linalg.SparseSolver(dtype=dtype, solver_type=solver_type)
    solver.analyze_pattern(A)
    solver.factorize(A)
    x = solver.solve(b)
    res = np.linalg.solve(A_psd, b.to_numpy())
    for i in range(n):
        assert x[i] == test_utils.approx(res[i], rel=1.0)
def split_dataset(X, y, img_names, split=0.1):
    """Split sample indices into train/val/test subsets, grouped by patient.

    A patient id is parsed from each image name (``prefix_<pid>_...``); all
    images of one patient land in the same subset, so patients never leak
    across splits.

    Parameters
    ----------
    X, y : unused; kept for call-site compatibility.
    img_names : iterable of str with names of the form ``prefix_<pid>_...``.
    split : float, fraction of patients assigned to validation and to test
        each; the remainder goes to train (default 0.1 -> 80/10/10).
        FIX: this parameter was previously ignored — the 0.8/0.9 thresholds
        were hard-coded; the default still reproduces the old behavior.

    Returns
    -------
    (train_idx, val_idx, test_idx) : lists of integer indices into img_names.
    """
    train_frac = 1.0 - 2.0 * split  # e.g. split=0.1 -> thresholds 0.8 / 0.9
    itr, ival, ite = [], [], []
    trs, vals, tes = set(), set(), set()
    for i, name in enumerate(img_names):
        pid = int(name.split('_')[1])
        if pid in trs:
            itr.append(i)
        elif pid in vals:
            ival.append(i)
        elif pid in tes:
            ite.append(i)
        else:
            # First time we see this patient: draw its subset at random.
            rid = np.random.rand()
            if rid < train_frac:
                itr.append(i)
                trs.add(pid)
            elif rid < train_frac + split:
                ival.append(i)
                vals.add(pid)
            else:
                ite.append(i)
                tes.add(pid)
    return (itr, ival, ite)
def p_sizeof(s):
    """Parse a ``sizeof(...)`` construct: either sizeof(expression) or
    sizeof(type) with an abstract declarator."""
    pos = s.position()
    s.next()
    s.expect('(')
    if looking_at_expr(s):
        # sizeof applied to an expression.
        result = ExprNodes.SizeofVarNode(pos, operand=p_test(s))
    else:
        # sizeof applied to a type: base type then an (empty) declarator.
        base_type = p_c_base_type(s)
        result = ExprNodes.SizeofTypeNode(
            pos, base_type=base_type, declarator=p_c_declarator(s, empty=1))
    s.expect(')')
    return result
class CoerceToPyTypeNode(CoercionNode):
    """Coerce a C-typed argument node to a Python object, optionally to a
    specific Python target type (str/unicode/bytes/bytearray/complex)."""
    type = py_object_type         # result type of this node; refined in __init__
    target_type = py_object_type  # Python type requested by the caller
    is_temp = 1                   # the conversion always produces a new reference
    def __init__(self, arg, env, type=py_object_type):
        # Fail early if no C-to-Python conversion exists for the source type.
        if (not arg.type.create_to_py_utility_code(env)):
            error(arg.pos, ("Cannot convert '%s' to Python object" % arg.type))
        elif arg.type.is_complex:
            # The conversion reads the value more than once (real/imag parts),
            # so the argument must be a simple, side-effect-free expression.
            arg = arg.coerce_to_simple(env)
        CoercionNode.__init__(self, arg)
        if (type is py_object_type):
            # No explicit target type: pick the most specific Python type
            # known for the C source type.
            if (arg.type.is_string or arg.type.is_cpp_string):
                self.type = default_str_type(env)
            elif (arg.type.is_pyunicode_ptr or arg.type.is_unicode_char):
                self.type = unicode_type
            elif arg.type.is_complex:
                self.type = Builtin.complex_type
            self.target_type = self.type
        elif (arg.type.is_string or arg.type.is_cpp_string):
            # Decoding a C string to a text type needs a declared encoding;
            # raw bytes/bytearray targets do not.
            if ((type not in (bytes_type, bytearray_type)) and (not env.directives['c_string_encoding'])):
                error(arg.pos, ("default encoding required for conversion from '%s' to '%s'" % (arg.type, type)))
            self.type = self.target_type = type
        else:
            # Explicit non-string target type: keep py_object_type as the
            # node type but remember the requested target.
            self.target_type = type
    gil_message = 'Converting to Python object'
    def may_be_none(self):
        # The conversion either yields a new object or raises; never None.
        return False
    def coerce_to_boolean(self, env):
        arg_type = self.arg.type
        if ((arg_type == PyrexTypes.c_bint_type) or (arg_type.is_pyobject and (arg_type.name == 'bool'))):
            # Already boolean-like: skip the Python-object round trip.
            return self.arg.coerce_to_temp(env)
        else:
            return CoerceToBooleanNode(self, env)
    def coerce_to_integer(self, env):
        # C integers can be used directly; other types go through c_long.
        if self.arg.type.is_int:
            return self.arg
        else:
            return self.arg.coerce_to(PyrexTypes.c_long_type, env)
    def analyse_types(self, env):
        # The argument was analysed before coercion; nothing more to do.
        return self
    def generate_result_code(self, code):
        # Emit the to-Python conversion call plus the NULL/error check.
        code.putln(('%s; %s' % (self.arg.type.to_py_call_code(self.arg.result(), self.result(), self.target_type), code.error_goto_if_null(self.result(), self.pos))))
        code.put_gotref(self.py_result())
# FIX: the stray '_grad()' line was a truncated decorator; restore
# @torch.no_grad() so evaluation runs without building autograd graphs.
@torch.no_grad()
def evaluate(model, dataloader, device):
    """Evaluate a segmentation model over *dataloader*.

    Returns (pixel_acc, mean_acc, f1, mean_f1, per_class_ious, mean_iou).
    """
    print('Evaluating...')
    model.eval()
    metrics = Metrics(dataloader.dataset.n_classes, dataloader.dataset.ignore_label, device)
    for images, labels in tqdm(dataloader):
        images = images.to(device)
        labels = labels.to(device)
        # Class probabilities per pixel.
        preds = model(images).softmax(dim=1)
        metrics.update(preds, labels)
    ious, miou = metrics.compute_iou()
    acc, macc = metrics.compute_pixel_acc()
    f1, mf1 = metrics.compute_f1()
    return (acc, macc, f1, mf1, ious, miou)
def main(args):
    """Train and/or evaluate a KGE model on ogbl-biokg per the *args* flags.

    Fixes: ``save_path == None`` -> ``is None``; the 'Ramdomly'/'choosed'
    message typos; and ``learning_rate`` logged with ``%d``, which truncated
    fractional learning rates to 0.
    """
    if ((not args.do_train) and (not args.do_valid) and (not args.do_test) and (not args.evaluate_train)):
        raise ValueError('one of train/val/test mode must be chosen.')
    if args.init_checkpoint:
        override_config(args)
    args.save_path = (('log/%s/%s/%s-%s/%s' % (args.dataset, args.model, args.hidden_dim, args.gamma, time.time())) if (args.save_path is None) else args.save_path)
    writer = SummaryWriter(args.save_path)
    set_logger(args)
    dataset = LinkPropPredDataset(name='ogbl-biokg')
    split_edge = dataset.get_edge_split()
    (train_triples, valid_triples, test_triples) = (split_edge['train'], split_edge['valid'], split_edge['test'])
    nrelation = (int(max(train_triples['relation'])) + 1)
    # Map every entity type to a contiguous global-id range [start, end).
    entity_dict = dict()
    cur_idx = 0
    for key in dataset[0]['num_nodes_dict']:
        entity_dict[key] = (cur_idx, (cur_idx + dataset[0]['num_nodes_dict'][key]))
        cur_idx += dataset[0]['num_nodes_dict'][key]
    nentity = sum(dataset[0]['num_nodes_dict'].values())
    evaluator = Evaluator(name=args.dataset)
    args.nentity = nentity
    args.nrelation = nrelation
    logging.info(('Model: %s' % args.model))
    logging.info(('Dataset: %s' % args.dataset))
    logging.info(('#entity: %d' % nentity))
    logging.info(('#relation: %d' % nrelation))
    logging.info(('#train: %d' % len(train_triples['head'])))
    logging.info(('#valid: %d' % len(valid_triples['head'])))
    logging.info(('#test: %d' % len(test_triples['head'])))
    # Counts start at 4 (not 0) to smooth the self-adversarial subsampling
    # weights for rare triples — presumably following the RotatE reference
    # implementation; confirm before changing.
    (train_count, train_true_head, train_true_tail) = (defaultdict((lambda : 4)), defaultdict(list), defaultdict(list))
    for i in tqdm(range(len(train_triples['head']))):
        (head, relation, tail) = (train_triples['head'][i], train_triples['relation'][i], train_triples['tail'][i])
        (head_type, tail_type) = (train_triples['head_type'][i], train_triples['tail_type'][i])
        train_count[(head, relation, head_type)] += 1
        # Encode the inverse direction with relation id -(r+1).
        train_count[(tail, ((- relation) - 1), tail_type)] += 1
        train_true_head[(relation, tail)].append(head)
        train_true_tail[(head, relation)].append(tail)
    kge_model = KGEModel(model_name=args.model, nentity=nentity, nrelation=nrelation, hidden_dim=args.hidden_dim, gamma=args.gamma, double_entity_embedding=args.double_entity_embedding, double_relation_embedding=args.double_relation_embedding, evaluator=evaluator)
    logging.info('Model Parameter Configuration:')
    for (name, param) in kge_model.named_parameters():
        logging.info(('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad))))
    if args.cuda:
        kge_model = kge_model.cuda()
    if args.init_checkpoint:
        logging.info(('Loading checkpoint %s...' % args.init_checkpoint))
        checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))
        entity_dict = checkpoint['entity_dict']
    if args.do_train:
        # Two iterators: one corrupting heads, one corrupting tails.
        train_dataloader_head = DataLoader(TrainDataset(train_triples, nentity, nrelation, args.negative_sample_size, 'head-batch', train_count, train_true_head, train_true_tail, entity_dict), batch_size=args.batch_size, shuffle=True, num_workers=max(1, (args.cpu_num // 2)), collate_fn=TrainDataset.collate_fn)
        train_dataloader_tail = DataLoader(TrainDataset(train_triples, nentity, nrelation, args.negative_sample_size, 'tail-batch', train_count, train_true_head, train_true_tail, entity_dict), batch_size=args.batch_size, shuffle=True, num_workers=max(1, (args.cpu_num // 2)), collate_fn=TrainDataset.collate_fn)
        train_iterator = BidirectionalOneShotIterator(train_dataloader_head, train_dataloader_tail)
        current_learning_rate = args.learning_rate
        optimizer = torch.optim.Adam(filter((lambda p: p.requires_grad), kge_model.parameters()), lr=current_learning_rate)
        if args.warm_up_steps:
            warm_up_steps = args.warm_up_steps
        else:
            warm_up_steps = (args.max_steps // 2)
    if args.init_checkpoint:
        init_step = checkpoint['step']
        kge_model.load_state_dict(checkpoint['model_state_dict'])
        if args.do_train:
            current_learning_rate = checkpoint['current_learning_rate']
            warm_up_steps = checkpoint['warm_up_steps']
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    else:
        logging.info(('Randomly Initializing %s Model...' % args.model))
        init_step = 0
    step = init_step
    logging.info('Start Training...')
    logging.info(('init_step = %d' % init_step))
    logging.info(('batch_size = %d' % args.batch_size))
    logging.info(('negative_adversarial_sampling = %d' % args.negative_adversarial_sampling))
    logging.info(('hidden_dim = %d' % args.hidden_dim))
    logging.info(('gamma = %f' % args.gamma))
    logging.info(('negative_adversarial_sampling = %s' % str(args.negative_adversarial_sampling)))
    if args.negative_adversarial_sampling:
        logging.info(('adversarial_temperature = %f' % args.adversarial_temperature))
    if args.do_train:
        # FIX: was '%d', which logged fractional learning rates as 0.
        logging.info(('learning_rate = %f' % current_learning_rate))
        training_logs = []
        for step in range(init_step, args.max_steps):
            log = kge_model.train_step(kge_model, optimizer, train_iterator, args)
            training_logs.append(log)
            if (step >= warm_up_steps):
                # Decay the LR 10x and push the next decay 3x further out.
                current_learning_rate = (current_learning_rate / 10)
                logging.info(('Change learning_rate to %f at step %d' % (current_learning_rate, step)))
                optimizer = torch.optim.Adam(filter((lambda p: p.requires_grad), kge_model.parameters()), lr=current_learning_rate)
                warm_up_steps = (warm_up_steps * 3)
            if (((step % args.save_checkpoint_steps) == 0) and (step > 0)):
                save_variable_list = {'step': step, 'current_learning_rate': current_learning_rate, 'warm_up_steps': warm_up_steps, 'entity_dict': entity_dict}
                save_model(kge_model, optimizer, save_variable_list, args)
            if ((step % args.log_steps) == 0):
                metrics = {}
                for metric in training_logs[0].keys():
                    metrics[metric] = (sum([log[metric] for log in training_logs]) / len(training_logs))
                log_metrics('Train', step, metrics, writer)
                training_logs = []
            if (args.do_valid and ((step % args.valid_steps) == 0) and (step > 0)):
                logging.info('Evaluating on Valid Dataset...')
                metrics = kge_model.test_step(kge_model, valid_triples, args, entity_dict)
                log_metrics('Valid', step, metrics, writer)
        save_variable_list = {'step': step, 'current_learning_rate': current_learning_rate, 'warm_up_steps': warm_up_steps}
        save_model(kge_model, optimizer, save_variable_list, args)
    if args.do_valid:
        logging.info('Evaluating on Valid Dataset...')
        metrics = kge_model.test_step(kge_model, valid_triples, args, entity_dict)
        log_metrics('Valid', step, metrics, writer)
    if args.do_test:
        logging.info('Evaluating on Test Dataset...')
        metrics = kge_model.test_step(kge_model, test_triples, args, entity_dict)
        log_metrics('Test', step, metrics, writer)
    if args.evaluate_train:
        logging.info('Evaluating on Training Dataset...')
        # Evaluate on a random subsample of the training triples.
        small_train_triples = {}
        indices = np.random.choice(len(train_triples['head']), args.ntriples_eval_train, replace=False)
        for i in train_triples:
            if ('type' in i):
                small_train_triples[i] = [train_triples[i][x] for x in indices]
            else:
                small_train_triples[i] = train_triples[i][indices]
        metrics = kge_model.test_step(kge_model, small_train_triples, args, entity_dict, random_sampling=True)
        log_metrics('Train', step, metrics, writer)
class MultiGraphics(WithEqualityById, SageObject):
    """A figure composed of several Graphics objects, each drawn at a
    position (left, bottom, width, height) given in figure coordinates."""
    def __init__(self, graphics_list):
        # Accept either bare Graphics (default position) or
        # (Graphics, position) pairs.
        self._glist = []
        self._positions = []
        for ins in graphics_list:
            if isinstance(ins, Graphics):
                self.append(ins)
            else:
                if ((not isinstance(ins, (list, tuple))) or (len(ins) != 2)):
                    raise TypeError('a pair (Graphics, position) is expected, not {}'.format(ins))
                self.append(ins[0], pos=ins[1])
    def _repr_(self):
        """Sage string representation; delegates to __str__."""
        return str(self)
    def _rich_repr_(self, display_manager, **kwds):
        """Rich-output hook: render via the first supported image format,
        preferring raster or vector formats per user preference."""
        types = display_manager.types
        prefer_raster = (('.png', types.OutputImagePng), ('.jpg', types.OutputImageJpg), ('.gif', types.OutputImageGif))
        prefer_vector = (('.svg', types.OutputImageSvg), ('.pdf', types.OutputImagePdf))
        graphics = display_manager.preferences.graphics
        if (graphics == 'disable'):
            return
        elif ((graphics == 'raster') or (graphics is None)):
            preferred = (prefer_raster + prefer_vector)
        elif (graphics == 'vector'):
            preferred = (prefer_vector + prefer_raster)
        else:
            raise ValueError('unknown graphics output preference')
        for (file_ext, output_container) in preferred:
            if (output_container in display_manager.supported_output()):
                return display_manager.graphics_from_save(self.save, kwds, file_ext, output_container)
    def __getitem__(self, i):
        """Return the i-th Graphics element."""
        return self._glist[i]
    def __setitem__(self, i, g):
        """Replace the i-th Graphics element (its position is kept)."""
        self._glist[i] = g
    def __len__(self):
        """Number of Graphics elements."""
        return len(self._glist)
    def matplotlib(self, figure=None, figsize=None, **kwds):
        """Build (or fill) a matplotlib Figure containing all elements."""
        from matplotlib.figure import Figure
        glist = self._glist
        if (len(glist) == 0):
            # An empty multigraphics still yields a (blank) figure.
            glist = [Graphics()]
        if (figure is None):
            if (figsize is not None):
                figsize = _parse_figsize(figsize)
            figure = Figure(figsize=figsize)
        global do_verify
        do_verify = True
        for (i, g) in enumerate(glist):
            # Per-element options: class defaults, then the element's own
            # settings, then the caller's keyword overrides.
            options = {}
            options.update(Graphics.SHOW_OPTIONS)
            options['legend_options'] = Graphics.LEGEND_OPTIONS
            options.update(g._extra_kwds)
            options.update(kwds)
            # dpi/fig_tight are figure-level options handled in save().
            options.pop('dpi', None)
            options.pop('fig_tight', None)
            transparent = options.pop('transparent', None)
            subplot = self._add_subplot(figure, i)
            g.matplotlib(figure=figure, sub=subplot, verify=do_verify, **options)
            if transparent:
                subplot.set_facecolor('none')
        return figure
    def save(self, filename, figsize=None, **kwds):
        """Save the figure to *filename*; format is chosen by extension."""
        from matplotlib import rcParams
        ext = os.path.splitext(filename)[1].lower()
        if (ext in ['', '.sobj']):
            # No image extension: pickle the Sage object itself.
            SageObject.save(self, filename)
        elif (ext not in ALLOWED_EXTENSIONS):
            raise ValueError((("allowed file extensions for images are '" + "', '".join(ALLOWED_EXTENSIONS)) + "'!"))
        else:
            # Remember global matplotlib settings so they can be restored.
            rc_backup = (rcParams['ps.useafm'], rcParams['pdf.use14corefonts'], rcParams['text.usetex'])
            figure = self.matplotlib(figsize=figsize, **kwds)
            transparent = kwds.get('transparent', Graphics.SHOW_OPTIONS['transparent'])
            fig_tight = kwds.get('fig_tight', Graphics.SHOW_OPTIONS['fig_tight'])
            dpi = kwds.get('dpi', Graphics.SHOW_OPTIONS['dpi'])
            if (ext == '.pgf'):
                # PGF output needs a working LaTeX engine; pick one that is
                # actually installed.
                from sage.features.latex import xelatex, pdflatex, lualatex
                latex_implementations = []
                if xelatex().is_present():
                    latex_implementations.append('xelatex')
                if pdflatex().is_present():
                    latex_implementations.append('pdflatex')
                if lualatex().is_present():
                    latex_implementations.append('lualatex')
                if (not latex_implementations):
                    raise ValueError('Matplotlib requires either xelatex, lualatex, or pdflatex.')
                if (latex_implementations[0] == 'pdflatex'):
                    # pdflatex needs explicit input/font encodings.
                    pgf_options = {'pgf.texsystem': 'pdflatex', 'pgf.preamble': ['\\usepackage[utf8x]{inputenc}', '\\usepackage[T1]{fontenc}']}
                else:
                    pgf_options = {'pgf.texsystem': latex_implementations[0]}
                rcParams.update(pgf_options)
                from matplotlib.backends.backend_pgf import FigureCanvasPgf
                figure.set_canvas(FigureCanvasPgf(figure))
            else:
                from matplotlib.backends.backend_agg import FigureCanvasAgg
                figure.set_canvas(FigureCanvasAgg(figure))
            if isinstance(self, GraphicsArray):
                # Only regular arrays get tight_layout; free-form positions
                # would be destroyed by it.
                figure.tight_layout()
            opts = dict(dpi=dpi, transparent=transparent)
            if (fig_tight is True):
                opts['bbox_inches'] = 'tight'
            figure.savefig(filename, **opts)
            # Restore the global matplotlib settings changed above.
            (rcParams['ps.useafm'], rcParams['pdf.use14corefonts'], rcParams['text.usetex']) = rc_backup
    def save_image(self, filename=None, *args, **kwds):
        """Alias of save() for the rich-display machinery."""
        self.save(filename, *args, **kwds)
    def _latex_(self, **kwds):
        """Return the LaTeX (PGF) source of the figure as a string."""
        tmpfilename = tmp_filename(ext='.pgf')
        self.save(filename=tmpfilename, **kwds)
        with open(tmpfilename, 'r') as tmpfile:
            latex_list = tmpfile.readlines()
        return ''.join(latex_list)
    def show(self, **kwds):
        """Display the figure immediately via the Sage display manager."""
        from sage.repl.rich_output import get_display_manager
        dm = get_display_manager()
        dm.display_immediately(self, **kwds)
    def plot(self):
        """Return self (a multigraphics is already a plot)."""
        return self
    def inset(self, graphics, pos=None, fontsize=None):
        """Return a new MultiGraphics with *graphics* added as an inset at
        *pos* (default: small box in the upper right)."""
        if (pos is None):
            pos = (0.7, 0.7, 0.2, 0.2)
        if (fontsize is not None):
            graphics._extra_kwds['fontsize'] = fontsize
        elif ('fontsize' not in graphics._extra_kwds):
            # Small default font so the inset stays readable at its size.
            graphics._extra_kwds['fontsize'] = 6
        current = []
        for (i, g) in enumerate(self._glist):
            current.append((g, self.position(i)))
        resu = MultiGraphics(current)
        resu.append(graphics, pos=pos)
        return resu
    def __str__(self):
        n = len(self._glist)
        if (n <= 1):
            return 'Multigraphics with {} element'.format(n)
        return 'Multigraphics with {} elements'.format(n)
    def _add_subplot(self, figure, index, **options):
        """Create the matplotlib axes for element *index* at its position."""
        return figure.add_axes(self._positions[index], label=str(index), **options)
    def position(self, index):
        """Return the (left, bottom, width, height) of element *index*."""
        return self._positions[index]
    def append(self, graphics, pos=None):
        """Append a Graphics element; default position is matplotlib's
        standard single-subplot area."""
        from matplotlib import rcParams
        if (not isinstance(graphics, Graphics)):
            raise TypeError('a Graphics object is expected, not {}'.format(graphics))
        if (pos is None):
            left = rcParams['figure.subplot.left']
            bottom = rcParams['figure.subplot.bottom']
            width = (rcParams['figure.subplot.right'] - left)
            height = (rcParams['figure.subplot.top'] - bottom)
            pos = (left, bottom, width, height)
        elif ((not isinstance(pos, (list, tuple))) or (len(pos) != 4)):
            raise TypeError('pos must be a 4-tuple, not {}'.format(pos))
        pos = tuple((float(p) for p in pos))
        self._glist.append(graphics)
        self._positions.append(pos)
def data_cleaning(words):
    """Normalize raw text to upper-case words separated by single spaces."""
    # NOTE(review): replacing the EMPTY string inserts '00000000' between
    # every character of the input — almost certainly a mis-encoded
    # non-ASCII character lost in transit (the '0000SS0000' removal below
    # hints the original marker involved something like 'ß').
    # TODO: recover the intended source character; as written this mangles
    # every input string.
    words = words.replace('', '00000000')
    # Collapse everything except letters, digits and apostrophes to spaces.
    words = re.sub("[^'A-Za-z0-9oOaAuU]+", ' ', words).upper()
    words = words.replace("'", ' ')
    # NOTE(review): another empty-string replace — likely a lost unicode
    # character as well; verify against the original source.
    words = words.replace('', ' ')
    words = words.replace('0000SS0000', '')
    return words
def test_initialize_example_background_knowledge_1():
    """A Background built from the toy-cancer modes keeps the modes and
    renders the default parameters plus every mode declaration."""
    train, _ = load_toy_cancer()
    bk = Background(modes=train.modes)
    assert bk.modes == train.modes
    assert not bk.line_search
    assert not bk.recursion
    rendered = str(bk)
    expected_fragments = (
        'setParam: nodeSize=2.',
        'setParam: maxTreeDepth=3.',
        'setParam: numOfCycles=100.',
        'setParam: numOfClauses=100.',
        'friends(+Person,-Person).',
        'friends(-Person,+Person).',
        'smokes(+Person).',
        'cancer(+Person).',
    )
    for fragment in expected_fragments:
        assert fragment in rendered
def generate_ld_preload(scorep_config):
    """Return the LD_PRELOAD library list reported by scorep-config for the
    given configuration flags, stripped of surrounding whitespace."""
    cmd = ['scorep-config'] + scorep_config + ['--preload-libs']
    _, preload, _ = scorep.helper.call(cmd)
    return preload.strip()
def _maybe_wrap_suffix(suffix, indent, tensor_str):
    """Move *suffix* onto its own indented line when appending it to the
    last line of *tensor_str* would exceed the configured print width."""
    tail_len = len(tensor_str) - tensor_str.rfind('\n') + 1
    needs_wrap = len(suffix) > 2 and tail_len + len(suffix) > PRINT_OPTS.linewidth
    if not needs_wrap:
        return suffix
    # Drop the leading ', ' and re-emit it as ',\n' plus indentation.
    return ',\n' + ' ' * indent + suffix[2:]
def obtain_model(config, extra_path=None):
    """Build a model for config.dataset: 'cifar' or 'imagenet' only."""
    dataset = config.dataset
    if dataset == 'cifar':
        return get_cifar_models(config, extra_path)
    if dataset == 'imagenet':
        return get_imagenet_models(config)
    raise ValueError('invalid dataset in the model config : {:}'.format(config))
def _determine_cutout_reachability(ct: SDFG, sdfg: SDFG, in_translation: Dict[(Any, Any)], out_translation: Dict[(Any, Any)], state_reach: Dict[(SDFGState, Set[SDFGState])]=None) -> Tuple[(Set[SDFGState], Set[SDFGState])]:
    """Compute which original states can reach the cutout (inverse set) and
    which states the cutout can reach (forward set), excluding states that
    belong to the cutout itself."""
    if state_reach is None:
        # Derive state reachability for the SDFG the cutout was taken from.
        parent_id = out_translation[ct.sdfg_id]
        reach_by_sdfg = StateReachability().apply_pass(sdfg.sdfg_list[parent_id], None)
        state_reach = reach_by_sdfg[parent_id]
    inverse_cutout_reach: Set[SDFGState] = set()
    cutout_reach: Set[SDFGState] = set()
    cutout_states = set(ct.states())

    def _outside_cutout(st):
        # A state is outside the cutout when it has no counterpart among
        # the cutout's own states.
        return (st not in in_translation) or (in_translation[st] not in cutout_states)

    for cut_state in cutout_states:
        original = out_translation[cut_state]
        # States outside the cutout that can reach this cutout state.
        for src, reachable in state_reach.items():
            if _outside_cutout(src) and original is not None and original in reachable:
                inverse_cutout_reach.add(src)
        # States outside the cutout that this cutout state can reach.
        for dst in state_reach[original]:
            if _outside_cutout(dst):
                cutout_reach.add(dst)
    return (inverse_cutout_reach, cutout_reach)
def move_shenzhen(root_folder, destination_root):
    """Parse the Shenzhen CXR clinical readings and copy each image into
    ``destination_root/healthy`` or ``destination_root/disease``.

    Parameters
    ----------
    root_folder : str or Path — dataset root containing ``ClinicalReadings``
        (one txt per image) and ``CXR_png``.
    destination_root : str or Path — output root; subfolders are created.

    FIX: the copy calls previously did ``root_path / fname`` even though
    *fname* was already rooted at ``root_path``, doubling the path whenever
    *root_folder* was relative.
    """
    # Demographics line, e.g. "male , 34yrs"; file name, e.g. "CHNCXR_0042_1.txt".
    RE_SEX_AGE = re.compile('(?P<sex>.*al)[e]?[\\s|,]*(?P<age>[0-9]+)[yr]?[s]?')
    RE_FNAME = re.compile('CHNCXR\\_(?P<idx>[0-9]+)\\_(?P<lbl>[0|1])\\.txt')
    root_path = Path(root_folder)
    os.makedirs(destination_root, exist_ok=True)
    key_words = ['upper', 'lower', 'left', 'right', 'bilateral', 'atb', 'ptb', 'stb']
    parsed = []
    for (i, f) in enumerate(os.listdir(root_path / 'ClinicalReadings')):
        f_result = RE_FNAME.search(f)
        pid = f_result.groupdict()['idx']
        lbl = f_result.groupdict()['lbl']  # '0' = normal, '1' = TB, per file name
        with open(root_path / 'ClinicalReadings' / f, 'r') as txt:
            lines = txt.readlines()
        for l in lines:
            result = RE_SEX_AGE.search(l)
            if result:
                # Demographics line: start a fresh record, assumed healthy
                # until a finding line below says otherwise.
                age = int(result.groupdict()['age'])
                sex = result.groupdict()['sex'].lower()
                data = {'age': age, 'sex': sex, 'index': i, 'patient': pid, 'healthy': 1, 'fname': f.split('.')[0]}
                data.update({k: 0 for k in key_words})
            else:
                l = l.strip().lower()
                if len(l) > 0:
                    if 'normal' in l:
                        assert lbl == '0'
                    else:
                        if lbl != '1':
                            # NOTE(review): leftover debugging hook — a finding
                            # line in a file labelled '0' drops into pdb.
                            # Consider raising instead.
                            import pdb
                            pdb.set_trace()
                        data['healthy'] = 0
                        for k in key_words:
                            if k in l:
                                data[k] = 1
                        if 'pleuritis' in l:
                            # NOTE(review): creates an upper-case 'PTB' column
                            # distinct from the 'ptb' keyword column above —
                            # verify this distinction is intended.
                            data['PTB'] = 1
        parsed.append(data)
    df = pd.DataFrame.from_dict(parsed)
    healthy = df[df['healthy'] == 1]
    disease = df[df['healthy'] == 0]
    dst_path = Path(destination_root) / 'healthy'
    os.makedirs(str(dst_path), exist_ok=True)
    for (i, row) in tqdm(healthy.iterrows()):
        fname = root_path / 'CXR_png' / f"{row['fname']}.png"
        dst_fname = dst_path / f"{row['fname']}.png"
        # fname is already rooted at root_path — copy it directly.
        shutil.copy(str(fname), dst_fname)
    dst_path = Path(destination_root) / 'disease'
    os.makedirs(str(dst_path), exist_ok=True)
    for (i, row) in tqdm(disease.iterrows()):
        fname = root_path / 'CXR_png' / f"{row['fname']}.png"
        dst_fname = dst_path / f"{row['fname']}.png"
        shutil.copy(str(fname), dst_fname)
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
wandb.log({'blurb_task': data_args.blurb_task, 'model_name': model_args.model_name_or_path, 'per_device_train_batch_size': training_args.per_device_train_batch_size, 'learning_rate': training_args.learning_rate, 'weight_decay': training_args.weight_decay, 'adam_beta1': training_args.adam_beta1, 'adam_beta2': training_args.adam_beta2, 'adam_epsilon': training_args.adam_epsilon, 'label_smoothing_factor': training_args.label_smoothing_factor, 'num_train_epochs': training_args.num_train_epochs, 'warmup_steps': training_args.warmup_steps, 'seed': training_args.seed})
send_example_telemetry('run_glue', model_args, data_args, framework='flax')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
logger.setLevel((logging.INFO if (jax.process_index() == 0) else logging.ERROR))
if (jax.process_index() == 0):
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
if training_args.push_to_hub:
if (training_args.hub_model_id is None):
repo_name = get_full_repo_name(Path(training_args.output_dir).absolute().name, token=training_args.hub_token)
else:
repo_name = training_args.hub_model_id
create_repo(repo_name, exist_ok=True, token=training_args.hub_token)
repo = Repository(training_args.output_dir, clone_from=repo_name, token=training_args.hub_token)
if (data_args.task_name is not None):
raw_datasets = load_dataset('glue', data_args.task_name, use_auth_token=(True if model_args.use_auth_token else None))
else:
data_files = {}
if (data_args.train_file is not None):
data_files['train'] = data_args.train_file
if (data_args.validation_file is not None):
data_files['validation'] = data_args.validation_file
if (data_args.test_file is not None):
data_files['test'] = data_args.test_file
extension = (data_args.train_file if (data_args.train_file is not None) else data_args.valid_file).split('.')[(- 1)]
raw_datasets = load_dataset(extension, data_files=data_files, use_auth_token=(True if model_args.use_auth_token else None))
if (data_args.task_name is not None):
is_regression = (data_args.task_name == 'stsb')
if (not is_regression):
label_list = raw_datasets['train'].features['label'].names
num_labels = len(label_list)
else:
num_labels = 1
else:
is_regression = (raw_datasets['train'].features['label'].dtype in ['float32', 'float64'])
if is_regression:
num_labels = 1
else:
label_list = raw_datasets['train'].unique('label')
label_list.sort()
num_labels = len(label_list)
config = AutoConfig.from_pretrained(model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, use_auth_token=(True if model_args.use_auth_token else None))
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, use_fast=(not model_args.use_slow_tokenizer), use_auth_token=(True if model_args.use_auth_token else None))
model = FlaxAutoModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, config=config, use_auth_token=(True if model_args.use_auth_token else None), from_pt=True)
if (data_args.task_name is not None):
(sentence1_key, sentence2_key) = task_to_keys[data_args.task_name]
else:
non_label_column_names = [name for name in raw_datasets['train'].column_names if (name != 'label')]
if (('sentence1' in non_label_column_names) and ('sentence2' in non_label_column_names)):
(sentence1_key, sentence2_key) = ('sentence1', 'sentence2')
elif ('sentence' in non_label_column_names):
(sentence1_key, sentence2_key) = ('sentence', None)
elif (len(non_label_column_names) >= 2):
(sentence1_key, sentence2_key) = non_label_column_names[:2]
else:
(sentence1_key, sentence2_key) = (non_label_column_names[0], None)
label_to_id = None
id_to_label = None
if ((model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id) and (data_args.task_name is not None) and (not is_regression)):
label_name_to_id = {k.lower(): v for (k, v) in model.config.label2id.items()}
if (list(sorted(label_name_to_id.keys())) == list(sorted(label_list))):
logger.info(f'The configuration of the model provided the following label correspondence: {label_name_to_id}. Using it!')
label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
else:
logger.warning("Your model seems to have been trained with labels, but they don't match the dataset: ", f'''model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}.
Ignoring the model labels as a result.''')
elif ((data_args.task_name is None) and (is_regression == False)):
label_to_id = {v: i for (i, v) in enumerate(label_list)}
if (not is_regression):
id_to_label = {v: i for (v, i) in enumerate(label_to_id)}
if ('chemprot' in str(data_args.train_file).lower()):
id_to_label[0] = 'false'
if ('ddi' in str(data_args.train_file).lower()):
id_to_label[0] = 'DDI-false'
def preprocess_function(examples):
texts = ((examples[sentence1_key],) if (sentence2_key is None) else (examples[sentence1_key], examples[sentence2_key]))
result = tokenizer(*texts, padding='max_length', max_length=data_args.max_seq_length, truncation=True)
if ('label' in examples):
if (label_to_id is not None):
result['labels'] = [label_to_id[l] for l in examples['label']]
else:
result['labels'] = examples['label']
return result
processed_datasets = raw_datasets.map(preprocess_function, batched=True, remove_columns=raw_datasets['train'].column_names)
train_dataset = processed_datasets['train']
eval_dataset = processed_datasets[('validation_matched' if (data_args.task_name == 'mnli') else 'validation')]
test_dataset = processed_datasets[('test_matched' if (data_args.task_name == 'mnli') else 'test')]
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
has_tensorboard = is_tensorboard_available()
if (has_tensorboard and (jax.process_index() == 0)):
try:
from flax.metrics.tensorboard import SummaryWriter
summary_writer = SummaryWriter(training_args.output_dir)
summary_writer.hparams({**training_args.to_dict(), **vars(model_args), **vars(data_args)})
except ImportError as ie:
has_tensorboard = False
logger.warning(f'Unable to display metrics through TensorBoard because some package are not installed: {ie}')
else:
logger.warning('Unable to display metrics through TensorBoard because the package is not installed: Please run pip install tensorboard to enable.')
def compute_metrics(preds, labels):
if (data_args.task_name is not None):
metric = evaluate.load('glue', data_args.task_name)
else:
metric = evaluate.load('accuracy')
if (data_args.metric_name == 'hoc'):
from utils_hoc import eval_hoc
labels = np.array(p.label_ids).astype(int)
preds = (np.array(preds) > 0).astype(int)
ids = eval_dataset['id']
return eval_hoc(labels.tolist(), preds.tolist(), list(ids))
if (data_args.task_name is not None):
result = metric.compute(predictions=preds, references=labels)
if (len(result) > 1):
result['combined_score'] = np.mean(list(result.values())).item()
return result
elif (data_args.metric_name == 'pearsonr'):
from scipy.stats import pearsonr as scipy_pearsonr
pearsonr = float(scipy_pearsonr(labels, preds)[0])
return {'pearsonr': pearsonr}
elif (data_args.metric_name == 'PRF1'):
TP = ((preds == labels) & (preds != 0)).astype(int).sum().item()
P_total = (preds != 0).astype(int).sum().item()
L_total = (labels != 0).astype(int).sum().item()
P = ((TP / P_total) if P_total else 0)
R = ((TP / L_total) if L_total else 0)
F1 = ((((2 * P) * R) / (P + R)) if (P + R) else 0)
return {'precision': P, 'recall': R, 'F1': F1}
elif is_regression:
return {'mse': ((preds - labels) ** 2).mean().item()}
else:
return {'accuracy': (preds == labels).astype(np.float32).mean().item()}
def write_train_metric(summary_writer, train_metrics, train_time, step):
summary_writer.scalar('train_time', train_time, step)
train_metrics = get_metrics(train_metrics)
for (key, vals) in train_metrics.items():
tag = f'train_{key}'
for (i, val) in enumerate(vals):
summary_writer.scalar(tag, val, (((step - len(vals)) + i) + 1))
def write_eval_metric(summary_writer, eval_metrics, step):
for (metric_name, value) in eval_metrics.items():
summary_writer.scalar(f'eval_{metric_name}', value, step)
num_epochs = int(training_args.num_train_epochs)
rng = jax.random.PRNGKey(training_args.seed)
dropout_rngs = jax.random.split(rng, jax.local_device_count())
train_batch_size = (int(training_args.per_device_train_batch_size) * jax.local_device_count())
per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
eval_batch_size = (per_device_eval_batch_size * jax.device_count())
learning_rate_fn = create_learning_rate_fn(len(train_dataset), train_batch_size, training_args.num_train_epochs, training_args.warmup_steps, training_args.learning_rate)
state = create_train_state(model, learning_rate_fn, is_regression, num_labels=num_labels, weight_decay=training_args.weight_decay)
def train_step(state: train_state.TrainState, batch: Dict[(str, Array)], dropout_rng: PRNGKey) -> Tuple[(train_state.TrainState, float)]:
(dropout_rng, new_dropout_rng) = jax.random.split(dropout_rng)
targets = batch.pop('labels')
def loss_fn(params):
logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
loss = state.loss_fn(logits, targets)
return loss
grad_fn = jax.value_and_grad(loss_fn)
(loss, grad) = grad_fn(state.params)
grad = jax.lax.pmean(grad, 'batch')
new_state = state.apply_gradients(grads=grad)
metrics = jax.lax.pmean({'loss': loss, 'learning_rate': learning_rate_fn(state.step)}, axis_name='batch')
return (new_state, metrics, new_dropout_rng)
p_train_step = jax.pmap(train_step, axis_name='batch', donate_argnums=(0,))
def eval_step(state, batch):
    """Compute predictions for one evaluation batch (dropout disabled)."""
    model_output = state.apply_fn(**batch, params=state.params, train=False)
    # The first output is the logits; logits_fn maps them to predictions.
    return state.logits_fn(model_output[0])
# Parallel evaluation step (no gradients, no dropout).
p_eval_step = jax.pmap(eval_step, axis_name='batch')
# GLUE tasks get their dedicated metric; otherwise plain accuracy.
if (data_args.task_name is not None):
    metric = evaluate.load('glue', data_args.task_name)
else:
    metric = evaluate.load('accuracy')
logger.info(f'===== Starting training ({num_epochs} epochs) =====')
train_time = 0
# Replicate the optimizer/model state onto every local device.
state = replicate(state)
steps_per_epoch = (len(train_dataset) // train_batch_size)
total_steps = (steps_per_epoch * num_epochs)
epochs = tqdm(range(num_epochs), desc=f'Epoch ... (0/{num_epochs})', position=0, leave=True)
for epoch in epochs:
    train_start = time.time()
    train_metrics = []
    # Fresh data-shuffling RNG each epoch.
    (rng, input_rng) = jax.random.split(rng)
    train_loader = glue_train_data_collator(input_rng, train_dataset, train_batch_size)
    for (step, batch) in enumerate(tqdm(train_loader, total=steps_per_epoch, desc='Training...', position=0, leave=True)):
        (state, train_metric, dropout_rngs) = p_train_step(state, batch, dropout_rngs)
        train_metrics.append(train_metric)
        cur_step = ((epoch * steps_per_epoch) + (step + 1))
        # ---- periodic train logging --------------------------------------
        if (((cur_step % training_args.logging_steps) == 0) and (cur_step > 0)):
            train_metric = unreplicate(train_metric)
            train_time += (time.time() - train_start)
            if (has_tensorboard and (jax.process_index() == 0)):
                write_train_metric(summary_writer, train_metrics, train_time, cur_step)
            epochs.write(f"Step... ({cur_step}/{total_steps} | Training Loss: {train_metric['loss']}, Learning Rate: {train_metric['learning_rate']})")
            train_metrics = []
        # ---- periodic evaluation (also at each epoch boundary) -----------
        if ((((cur_step % training_args.eval_steps) == 0) or ((cur_step % steps_per_epoch) == 0)) and (cur_step > 0)):
            eval_loader = glue_eval_data_collator(eval_dataset, eval_batch_size)
            preds_list = []
            labels_list = []
            for batch in tqdm(eval_loader, total=math.ceil((len(eval_dataset) / eval_batch_size)), desc='Evaluating ...', position=0, leave=True):
                labels = batch.pop('labels')
                # pad_shard_unpad pads the final partial batch so it shards evenly.
                predictions = pad_shard_unpad(p_eval_step)(state, batch, min_device_batch=per_device_eval_batch_size)
                for pred_example in predictions:
                    preds_list.append(pred_example)
                for label_example in labels:
                    labels_list.append(label_example)
                # NOTE(review): `predictions` may still carry a device axis here —
                # confirm its shape lines up with `references` for add_batch.
                metric.add_batch(predictions=np.array(predictions), references=labels)
            preds_list = np.array(preds_list)
            labels_list = np.array(labels_list)
            # BLURB-style metrics computed over the full prediction set.
            blurb_result = compute_metrics(preds_list, labels_list)
            blurb_result = {('eval_' + str(key)): val for (key, val) in blurb_result.items()}
            wandb.log(blurb_result)
            eval_metric = metric.compute()
            logger.info(f'{blurb_result}| Step... ({cur_step}/{total_steps} | Eval metrics: {eval_metric}) ')
            if (has_tensorboard and (jax.process_index() == 0)):
                write_eval_metric(summary_writer, eval_metric, cur_step)
        # ---- periodic checkpointing (process 0 only) ---------------------
        if ((((cur_step % training_args.save_steps) == 0) and (cur_step > 0)) or (cur_step == total_steps)):
            if (jax.process_index() == 0):
                params = jax.device_get(unreplicate(state.params))
                model.save_pretrained(training_args.output_dir, params=params)
                tokenizer.save_pretrained(training_args.output_dir)
                if training_args.push_to_hub:
                    repo.push_to_hub(commit_message=f'Saving weights and logs of step {cur_step}', blocking=False)
    epochs.desc = f'Epoch ... {(epoch + 1)}/{num_epochs}'
# ---- final test-set inference and prediction dump -------------------------
if data_args.test_file:
    eval_loader = glue_eval_data_collator(test_dataset, eval_batch_size)
    preds_list = []
    preds_dict = {}
    labels_list = []
    for batch in tqdm(eval_loader, total=math.ceil((len(test_dataset) / eval_batch_size)), desc='Evaluating on Test Data ...', position=0, leave=True):
        labels = batch.pop('labels')
        predictions = pad_shard_unpad(p_eval_step)(state, batch, min_device_batch=per_device_eval_batch_size)
        for pred_example in predictions:
            preds_list.append(pred_example)
        for label_example in labels:
            labels_list.append(label_example)
        metric.add_batch(predictions=np.array(predictions), references=labels)
    # Map predictions back to example ids for the submission-style dump.
    raw_ids = []
    for row in raw_datasets['test']:
        raw_ids.append(row['id'])
    for (label_id, pred_value) in zip(raw_ids, preds_list):
        if is_regression:
            preds_dict[label_id] = str(pred_value)
        else:
            preds_dict[label_id] = str(id_to_label[pred_value])
    if ('/' in model_args.model_name_or_path):
        model_name_hf = model_args.model_name_or_path.split('/')[(- 1)]
    else:
        model_name_hf = model_args.model_name_or_path
    final_entry = {'dataset_name': data_args.blurb_task, 'model_name': model_name_hf, 'predictions': preds_dict}
    with open((((model_args.model_name_or_path.split('/')[(- 1)] + '_') + str(data_args.blurb_task)) + '.json'), 'w') as f:
        json.dump(final_entry, f, indent=4, sort_keys=True)
    preds_list = np.array(preds_list)
    labels_list = np.array(labels_list)
    blurb_result = compute_metrics(preds_list, labels_list)
    blurb_result = {('test_' + str(key)): val for (key, val) in blurb_result.items()}
    wandb.log(blurb_result)
    eval_metric = metric.compute()
    logger.info(f' test results : {blurb_result}')
# Persist the last computed eval metrics (process 0 only).
if (jax.process_index() == 0):
    eval_metric = {f'eval_{metric_name}': value for (metric_name, value) in eval_metric.items()}
    path = os.path.join(training_args.output_dir, 'eval_results.json')
    with open(path, 'w') as f:
        json.dump(eval_metric, f, indent=4, sort_keys=True)
def logP_benchmark(target: float) -> GoalDirectedBenchmark:
    """Goal-directed benchmark rewarding molecules whose logP is near *target*.

    Scores use a Gaussian modifier (mu=target, sigma=1) over the RDKit logP
    descriptor, contributed over the top 1/10/100 molecules.
    """
    objective = RdkitScoringFunction(descriptor=logP, score_modifier=GaussianModifier(mu=target, sigma=1))
    return GoalDirectedBenchmark(
        name=f'logP (target: {target})',
        objective=objective,
        contribution_specification=uniform_specification(1, 10, 100),
    )
def spline(x, y, n, yp0, ypn_1):
    """Second derivatives for cubic-spline interpolation of (x, y).

    Classic tridiagonal decomposition/back-substitution. A boundary slope
    greater than ~1e30 selects the "natural" condition (zero second
    derivative) at that end; otherwise the given first derivative is used.
    Returns the length-n array of second derivatives.
    """
    second = np.zeros(n)
    decomp = np.zeros(n - 1)
    # Lower boundary condition.
    if yp0 > 9.9e+29:
        second[0] = 0.0
        decomp[0] = 0.0
    else:
        second[0] = -0.5
        decomp[0] = (3.0 / (x[1] - x[0])) * ((y[1] - y[0]) / (x[1] - x[0]) - yp0)
    # Forward sweep of the tridiagonal solve.
    for j in range(1, n - 1):
        ratio = (x[j] - x[j - 1]) / (x[j + 1] - x[j - 1])
        pivot = ratio * second[j - 1] + 2.0
        second[j] = (ratio - 1.0) / pivot
        slope_diff = (y[j + 1] - y[j]) / (x[j + 1] - x[j]) - (y[j] - y[j - 1]) / (x[j] - x[j - 1])
        decomp[j] = (6.0 * slope_diff / (x[j + 1] - x[j - 1]) - ratio * decomp[j - 1]) / pivot
    # Upper boundary condition.
    if ypn_1 > 9.9e+29:
        qn_1 = 0.0
        un_1 = 0.0
    else:
        qn_1 = 0.5
        un_1 = (3.0 / (x[n - 1] - x[n - 2])) * (ypn_1 - (y[n - 1] - y[n - 2]) / (x[n - 1] - x[n - 2]))
    second[n - 1] = (un_1 - qn_1 * decomp[n - 2]) / (qn_1 * second[n - 2] + 1.0)
    # Back substitution.
    for j in range(n - 2, -1, -1):
        second[j] = second[j] * second[j + 1] + decomp[j]
    return second
def get_execution_error_thresh():
    """Error threshold from the ERROR_THRESH env var, defaulting to 0.01.

    A set-but-malformed value still raises ValueError, as before.
    """
    raw = os.environ.get('ERROR_THRESH')
    if raw is None:
        return 0.01
    return float(raw)
# NOTE(review): the two lines below look like truncated decorators (likely
# pytest marks such as '@pytest.mark.openapi_version("3.0")' and
# '@pytest.mark.operations(...)') — the '@...' prefix appears to have been
# lost in extraction; restore before running.
.openapi_version('3.0')
.operations('create_user', 'get_user', 'update_user')
def test_step_override(testdir, app_schema, base_url):
    # A user-overridden `step` that raises must surface as a failed test
    # with the original error message in the pytest output.
    testdir.make_test(f'''
schema.base_url = "{base_url}"
class APIWorkflow(schema.as_state_machine()):
    def step(self, case, previous=None):
        raise ValueError("ERROR FOUND!")
TestStateful = APIWorkflow.TestCase
TestStateful.settings = settings(
    max_examples=1,
    deadline=None,
    derandomize=True,
    suppress_health_check=list(HealthCheck),
)
''', schema=app_schema)
    result = testdir.runpytest()
    result.assert_outcomes(failed=1)
    result.stdout.re_match_lines(['.+ValueError: ERROR FOUND!'])
class TomlTz(tzinfo):
    """Fixed-offset timezone parsed from a TOML offset string ('Z' or '+HH:MM')."""

    def __init__(self, toml_offset):
        # Normalize the UTC shorthand 'Z' to an explicit '+00:00'.
        offset = '+00:00' if toml_offset == 'Z' else toml_offset
        self._raw_offset = offset
        self._sign = -1 if offset[0] == '-' else 1
        self._hours = int(offset[1:3])
        self._minutes = int(offset[4:6])

    def tzname(self, dt):
        return 'UTC' + self._raw_offset

    def utcoffset(self, dt):
        return self._sign * timedelta(hours=self._hours, minutes=self._minutes)

    def dst(self, dt):
        # TOML offsets carry no DST information.
        return timedelta(0)
def create_train_and_eval_tmp_table(train_select, valid_select, datasource):
    """Materialize both SELECTs as temp tables; return (train, valid) names.

    The training table is created first, then the validation table.
    """
    return (
        create_tmp_table_from_select(train_select, datasource),
        create_tmp_table_from_select(valid_select, datasource),
    )
def get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size, num_workers, args):
    """Build YOLO3 training and validation DataLoaders.

    Training either uses the fixed *data_shape* or (default) randomly
    switches input scale among 320..608 every 10 batches.
    """
    width = height = data_shape
    # Six stacked target arrays plus one -1-padded field per batch element.
    train_batchify = Tuple(*([Stack() for _ in range(6)] + [Pad(axis=0, pad_val=-1)]))
    if args.no_random_shape:
        fixed_transform = YOLO3DefaultTrainTransform(width, height, net, mixup=args.mixup)
        train_loader = gluon.data.DataLoader(train_dataset.transform(fixed_transform), batch_size, True, batchify_fn=train_batchify, last_batch='rollover', num_workers=num_workers)
    else:
        # Multi-scale training: one transform per input size 320, 352, ..., 608.
        scale_transforms = [YOLO3DefaultTrainTransform(s * 32, s * 32, net, mixup=args.mixup) for s in range(10, 20)]
        train_loader = RandomTransformDataLoader(scale_transforms, train_dataset, batch_size=batch_size, interval=10, last_batch='rollover', shuffle=True, batchify_fn=train_batchify, num_workers=num_workers)
    val_batchify = Tuple(Stack(), Pad(pad_val=-1))
    val_loader = gluon.data.DataLoader(val_dataset.transform(YOLO3DefaultValTransform(width, height)), batch_size, False, batchify_fn=val_batchify, last_batch='keep', num_workers=num_workers)
    return (train_loader, val_loader)
class RegNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> activation block.

    Padding of kernel_size // 2 keeps the spatial size when stride == 1.
    With activation=None an identity is used instead of an ACT2FN entry.
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = 'relu'):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,  # BatchNorm provides the affine shift
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        if activation is None:
            self.activation = nn.Identity()
        else:
            self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        out = self.convolution(hidden_state)
        out = self.normalization(out)
        return self.activation(out)
def test_CDF_TT2000_to_UTC_EPOCH16(lib):
    # Convert a TT2000 time into an EPOCH16 pair and print expected vs actual.
    # NOTE(review): the right-hand side of this assignment is missing
    # (syntax error) — the TT2000 input value was lost in extraction and
    # must be restored from the original test.
    tt =
    # Output buffer the C call fills in place; pre-filled with sentinels.
    epoch16 = (ctypes.c_double * 2)((- 1.0), (- 1.0))
    res = lib.CDF_TT2000_to_UTC_EPOCH16(tt, epoch16)
    print('Expect (.0, 1000.0)')
    print('Actual ({}, {})'.format(epoch16[0], epoch16[1]))
def _maybe_real(A, B, tol=None):
    """Return B with a negligible imaginary part dropped when A is real.

    Only acts when A is real-typed and B is complex-typed; otherwise B is
    returned unchanged. With tol=None a precision-dependent default is
    looked up from the module-level feps/eps constants.
    """
    if not (np.isrealobj(A) and np.iscomplexobj(B)):
        return B
    if tol is None:
        # Default tolerance by precision class (single vs double).
        tol = {0: feps * 1000.0, 1: eps * 1000000.0}[_array_precision[B.dtype.char]]
    if np.allclose(B.imag, 0.0, atol=tol):
        B = B.real
    return B
# NOTE(review): '_function' below looks like a truncated decorator (likely
# '@cached_function') — the '@...' prefix appears to have been lost.
_function
def barycentric_projection_matrix(n, angle=0):
    """Return the n x (n+1) matrix projecting barycentric coordinates.

    Built recursively so the images of the n+1 unit vectors sum to zero
    (checked by the assertion below). For n == 2, *angle* rotates the
    resulting projection.
    """
    from sage.matrix.constructor import matrix
    from sage.misc.functional import sqrt
    n = ZZ(n)
    if (n == 0):
        return matrix(QQ, 0, 1)
    # Place one vertex along the last axis, recurse for the remaining n.
    a = (1 / n)
    b = sqrt((1 - (a ** 2)))
    result = (b * barycentric_projection_matrix((n - 1)))
    result = result.augment(vector(([0] * (n - 1))))
    result = result.stack(matrix([(([a] * n) + [(- 1)])]))
    # The projected vertices must be centred at the origin.
    assert sum(result.columns()).is_zero()
    if (angle and (n == 2)):
        from sage.functions.trig import sin
        from sage.functions.trig import cos
        rotation = matrix([[sin(angle), cos(angle)], [(- cos(angle)), sin(angle)]])
        result = (rotation * result)
    result.set_immutable()
    return result
def McGeeGraph(embedding=2):
    """Return the McGee graph (24 vertices, LCF [12, 7, -7]^8).

    embedding=1 keeps the default layout; embedding=2 (default) arranges the
    vertices on three concentric circles. Any other value raises ValueError.
    """
    from sage.graphs.generators.families import LCFGraph
    g = LCFGraph(24, [12, 7, (- 7)], 8)
    g.name('McGee graph')
    if embedding == 1:
        return g
    if embedding == 2:
        layers = [
            [7, 2, 13, 8, 19, 14, 1, 20],
            [5, 4, 11, 10, 17, 16, 23, 22],
            [3, 12, 9, 18, 15, 0, 21, 6],
        ]
        g._circle_embedding(layers[0], radius=1.5)
        g._circle_embedding(layers[1], radius=3, shift=(- 0.5))
        g._circle_embedding(layers[2], radius=2.25, shift=0.5)
        return g
    raise ValueError('the value of embedding must be 1 or 2')
# NOTE(review): '.register('boe')' below looks like a truncated decorator
# (likely '@Seq2VecEncoder.register("boe")') — the '@...' prefix appears to
# have been lost in extraction.
.register('boe')
class BagOfEmbeddingsEncoder(Seq2VecEncoder):
    """Seq2Vec encoder that sums (or averages) token embeddings over time."""

    def __init__(self, embedding_dim: int, averaged: bool=False) -> None:
        super(BagOfEmbeddingsEncoder, self).__init__()
        self._embedding_dim = embedding_dim
        self._averaged = averaged

    def get_input_dim(self) -> int:
        return self._embedding_dim

    def get_output_dim(self) -> int:
        return self._embedding_dim

    def forward(self, tokens: torch.Tensor, mask: torch.Tensor=None):
        # Zero out padded positions before summing over the time axis.
        if (mask is not None):
            tokens = (tokens * mask.unsqueeze((- 1)).float())
        summed = tokens.sum(1)
        if self._averaged:
            if (mask is not None):
                lengths = get_lengths_from_binary_sequence_mask(mask)
                length_mask = (lengths > 0)
                # Clamp lengths to >= 1 to avoid division by zero below.
                lengths = torch.max(lengths, Variable(lengths.data.new().resize_(1).fill_(1)))
            else:
                # No mask: every sequence has the full (padded) length.
                lengths = Variable(tokens.data.new().resize_(1).fill_(tokens.size(1)), requires_grad=False)
                length_mask = None
            summed = (summed / lengths.unsqueeze((- 1)).float())
            if (length_mask is not None):
                # Force entirely-masked sequences back to zero vectors.
                summed = (summed * (length_mask > 0).float().unsqueeze((- 1)))
        return summed

    # NOTE(review): 'cls' as the first parameter suggests a '@classmethod'
    # decorator was lost here — restore it before use.
    def from_params(cls, params: Params) -> 'BagOfEmbeddingsEncoder':
        embedding_dim = params.pop('embedding_dim')
        averaged = params.pop('averaged', default=None)
        return cls(embedding_dim=embedding_dim, averaged=averaged)
def test_maxpool1d_padding_same():
    """rf.max_pool1d with padding='same' over a dynamic time dim."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: rf.Tensor, *, in_spatial_dim: Dim) -> Tuple[(Tensor, Dim)]:
            return rf.max_pool1d(x, pool_size=3, padding='same', in_spatial_dim=in_spatial_dim)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        (out, out_spatial_dim) = model(extern_data['data'], in_spatial_dim=time_dim)
        out.mark_as_default_output(shape=(batch_dim, out_spatial_dim, in_dim))

    # Run with two different max time sizes to exercise dynamic shapes.
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 7})
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 9})
class PoseEncoderModel(nn.Module):
    """Transformer encoder over flattened pose-keypoint sequences.

    Each frame's (points, dims) keypoints are flattened, linearly projected
    to *hidden_dim*, combined with learned positional embeddings, and run
    through a TransformerEncoder. An optional extra embedded sequence can be
    concatenated along time before encoding.
    """

    def __init__(self, pose_dims: (int, int)=(137, 2), hidden_dim: int=128, encoder_depth=4, encoder_heads=2, encoder_dim_feedforward=2048, max_seq_size: int=1000, dropout=0.5):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.max_seq_size = max_seq_size
        self.pose_dims = pose_dims
        # Flattened per-frame feature size.
        self.pose_dim = int(np.prod(pose_dims))
        self.positional_embeddings = nn.Embedding(num_embeddings=max_seq_size, embedding_dim=hidden_dim)
        self.pose_projection = nn.Linear(self.pose_dim, hidden_dim)
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=hidden_dim, nhead=encoder_heads, dim_feedforward=encoder_dim_feedforward, batch_first=True)
        self.pose_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=encoder_depth)

    def forward(self, pose, additional_sequence=None):
        batch_size, seq_length = pose['data'].shape[:2]
        dropped = self.dropout(pose['data'])
        # Collapse (points, dims) into one feature vector per frame.
        frames = dropped.reshape(batch_size, seq_length, -1)
        step_ids = torch.arange(0, seq_length, dtype=torch.int, device=dropped.device)
        tokens = self.pose_projection(frames) + self.positional_embeddings(step_ids)
        padding_mask = pose['mask']
        if additional_sequence is not None:
            # Append the extra sequence (and its mask) along the time axis.
            tokens = torch.cat([tokens, additional_sequence['data']], dim=1)
            padding_mask = torch.cat([padding_mask, additional_sequence['mask']], dim=1)
        return self.pose_encoder(tokens, src_key_padding_mask=padding_mask)
def translate(language_code: str) -> PerturbationSpec:
    """Build the PerturbationSpec for the translate perturbation into *language_code*."""
    return PerturbationSpec(
        class_name='helm.benchmark.augmentations.translate_perturbation.TranslatePerturbation',
        args={'language_code': language_code},
    )
def re(R_est, R_gt):
    """Rotational error in degrees between two 3x3 rotation matrices.

    Uses trace(R_est * R_gt^-1) = 1 + 2*cos(angle); the cosine is clamped
    to [-1, 1] before acos for numerical safety.
    """
    assert R_est.shape == R_gt.shape == (3, 3)
    cos_angle = 0.5 * (np.trace(R_est.dot(np.linalg.inv(R_gt))) - 1.0)
    cos_angle = min(1.0, max(-1.0, cos_angle))
    angle_rad = math.acos(cos_angle)
    return (180.0 * angle_rad) / np.pi
class Scale(Resize):
    """Deprecated alias for Resize, kept for backward compatibility.

    Emits a deprecation warning and otherwise behaves exactly like Resize.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn('The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.')
        super(Scale, self).__init__(*args, **kwargs)
class AdvLoss(nn.Module):
    """Adversarial loss: feature-matching L1 term minus the value loss."""

    def __init__(self):
        super(AdvLoss, self).__init__()
        # size_average=True keeps the legacy mean reduction.
        self.criterion = nn.L1Loss(size_average=True)

    def forward(self, fake_feature, real_feature, v_loss):
        # Each matched layer contributes with weight 10 / num_features.
        # NOTE: the final entry is excluded (range stops at len - 1), as in
        # the original — presumably the discriminator output rather than a
        # feature map.
        layer_weight = 10.0 / len(fake_feature)
        fm_loss = 0
        for idx in range(len(fake_feature) - 1):
            fm_loss += layer_weight * self.criterion(fake_feature[idx], real_feature[idx].detach())
        return fm_loss - v_loss
def random_image(shape=(128, 128)):
    """Generate a random connected-component-labeled blob image.

    Gaussian-smoothed noise is thresholded at its 80th percentile and the
    resulting blobs are labeled; labels above 255 are folded back into the
    range 1..254 via modulo.
    """
    smoothed = gaussian_filter(np.random.normal(size=shape), min(shape) / 20)
    blobs = smoothed > np.percentile(smoothed, 80)
    labeled = label(blobs)
    overflow = labeled > 255
    labeled[overflow] = (labeled[overflow] % 254) + 1
    return labeled
def prepare_librimix(datapath, savepath, n_spks=2, skip_prep=False, librimix_addnoise=False, fs=8000):
    """Prepare LibriMix CSV manifests for 2- or 3-speaker separation.

    No-op when skip_prep is set. Raises ValueError for a non-LibriMix path
    or an unsupported speaker count; asserts that the path matches n_spks.
    """
    if skip_prep:
        return
    if 'Libri' not in datapath:
        raise ValueError('Unsupported Dataset')
    if n_spks == 2:
        assert 'Libri2Mix' in datapath, 'Inconsistent number of speakers and datapath'
        create_libri2mix_csv(datapath, savepath, addnoise=librimix_addnoise)
    elif n_spks == 3:
        assert 'Libri3Mix' in datapath, 'Inconsistent number of speakers and datapath'
        create_libri3mix_csv(datapath, savepath, addnoise=librimix_addnoise)
    else:
        raise ValueError('Unsupported Number of Speakers')
# NOTE(review): '.unit' and '.cartographer' below look like truncated
# decorators (likely pytest marks such as '@pytest.mark.unit') — the '@...'
# prefixes appear to have been lost in extraction.
.unit
.cartographer
def test_img_layer_dict_to_str():
    # img_layer_dict_to_str should render a leaflet 'L.tileLayer' statement
    # from the layer dict.
    min_zoom = 0
    max_zoom = 2
    name = 'test'
    layer_dict = dict(directory=(name + '/{z}/{y}/{x}.png'), name=name, min_zoom=min_zoom, max_zoom=(max_zoom + 5), max_native_zoom=max_zoom)
    actual_str = c.img_layer_dict_to_str(layer_dict)
    # NOTE(review): the expected-string expression below appears garbled
    # (unbalanced quotes in the attribution fragment) — reconstruct it from
    # the original test before trusting this assertion.
    expected_str = ''.join([('const ' + layer_dict['name']), ((' = L.tileLayer("' + layer_dict['directory']) + '"'), ', { ', (('attribution:"' + "<a href=' + '", '), (('minZoom: ' + str(layer_dict['min_zoom'])) + ', '), (('maxZoom: ' + str(layer_dict['max_zoom'])) + ', '), (('maxNativeZoom: ' + str(layer_dict['max_native_zoom'])) + ' '), '});'])
    assert (expected_str == actual_str)
# NOTE(review): '_model' below looks like a truncated decorator (likely
# '@register_model') — the '@...' prefix appears to have been lost.
_model
def seresnet18(pretrained=False, **kwargs):
    """Build an SE-ResNet-18: BasicBlock x [2, 2, 2, 2] with SE attention."""
    model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnet18', pretrained, **model_args)
class FunctionSignature(Signature):
    """Signature of a function-valued primitive: typed arguments, optional symmetry."""

    def __init__(self, id_, return_type, arg_types, arg_pluralities=None, is_symmetric=False, name=None):
        super(FunctionSignature, self).__init__(id_, return_type, len(arg_types), name=name)
        self.arg_types = arg_types
        # Default: every argument is singular (not plural).
        self.arg_pluralities = arg_pluralities if arg_pluralities is not None else (False,) * len(arg_types)
        self.is_symmetric = is_symmetric
        self.valence = len(arg_types)

    def __repr__(self):
        return self.name

    def simple_repr(self):
        return self.name

    def serialized(self):
        data = super(FunctionSignature, self).serialized()
        data['arg_types'] = list(self.arg_types)
        return data
def _pmu_row(row_no, engine_name, inst_counter, pmu_records, pmu_pos, cmd_info, with_bank_conflict):
    """Build one CSV row for an issued command, matching it to a PMU record.

    Returns (row, advanced): *advanced* tells the caller whether the PMU
    cursor consumed a record (its inst_id matched the command counter).
    """
    if pmu_pos >= len(pmu_records):
        # No PMU data left for this engine: emit an empty timing row.
        row = [str(row_no), inst_counter, engine_name, '', '', '', '', '', '']
        row += cmd_info
        return (row, False)
    record = pmu_records[pmu_pos]
    record_inst_id = record[0]
    if str(inst_counter) == record_inst_id:
        # Matched record: thread id, cycle, start/end/interval and (TPU only)
        # the bank-conflict column.
        tail = record[6] if with_bank_conflict else ''
        row = [str(row_no), record_inst_id, engine_name, record[1], record[2], record[3], record[4], record[5], tail]
        row += cmd_info
        return (row, True)
    if inst_counter < int(record_inst_id):
        # The command has no PMU record yet; keep the cursor in place.
        row = [str(row_no), inst_counter, engine_name, '', '', '', '', '', '']
        row += cmd_info
        return (row, False)
    # A PMU record older than the command counter means inconsistent input.
    assert 0


def write_csv_file(cmds_dict: dict, pmu_profile_info: dict, save_file: str):
    """Merge per-core command lists with PMU timing records into a CSV.

    cmds_dict maps a core key to a list of (EngineType, cmd_info) entries;
    pmu_profile_info maps the same key to three record lists (GDMA, TPU,
    SDMA). Rows are emitted in command order; commands without a matching
    PMU record get empty timing columns.

    Refactor: the three near-identical GDMA/TPU/SDMA branches of the
    original are collapsed into one table-driven path via _pmu_row.
    """
    # Engine -> (display name, index into pmu_profile_info[k], has bank-conflict column).
    engine_layout = {
        EngineType.GDMA: ('GDMA', 0, False),
        EngineType.TPU: ('TPU', 1, True),
        EngineType.SDMA: ('SDMA', 2, False),
    }
    save_content = []
    for core_key, core_cmds in cmds_dict.items():
        # Per-core, per-engine running counters.
        pmu_cursor = {engine: 0 for engine in engine_layout}
        inst_counter = {engine: 0 for engine in engine_layout}
        for row_no, (engine_type, cmd_info) in enumerate(core_cmds):
            # Unknown engine types are a hard error, as in the original.
            assert engine_type in engine_layout
            (engine_name, pmu_idx, with_bc) = engine_layout[engine_type]
            pmu_records = pmu_profile_info[core_key][pmu_idx]
            (row, advanced) = _pmu_row(row_no, engine_name, inst_counter[engine_type], pmu_records, pmu_cursor[engine_type], cmd_info, with_bc)
            if advanced:
                pmu_cursor[engine_type] += 1
            save_content.append(row)
            inst_counter[engine_type] += 1
    with open(save_file, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['No', 'inst_id', 'cmd_type', 'thread_id', 'cycle', 'start(us)', 'end(us)', 'interval(us)', 'bank_conflict', 'cmd_name', 'id_dep_info', 'res0_info', 'opd0_info', 'opd1_info', 'opd2_info', 'opd3_info', 'opd4_info', 'opd5_info'])
        for row in save_content:
            writer.writerow([str(value) for value in row])
    print('save file finished')
def test_1d_clustering():
    """Smoke-test 1-D NCVis embedding on two overlapping Gaussian clusters."""
    np.random.seed(42)
    samples_per_cluster = 50
    X = np.concatenate((
        np.random.normal((- 1), 1.5, (samples_per_cluster, 1)),
        np.random.normal(1, 1.5, (samples_per_cluster, 1)),
    ))
    embedder = ncvis.NCVis(n_neighbors=15, M=16, ef_construction=200, d=1, n_init_epochs=20, n_epochs=50, min_dist=0.4, n_threads=(- 1), distance='euclidean', random_seed=42)
    embedding = embedder.fit_transform(X).ravel()
    # A roughly balanced split around the mean indicates the two clusters
    # remained separable in the embedding.
    positives = np.count_nonzero((embedding - embedding.mean()) > 0)
    assert np.abs(positives - samples_per_cluster) < 5, 'Clustering quality is too poor'
def _generating_function_of_integral_points_(polyhedron, indices=None, **kwds):
    """Generate the integral-point generating function of *polyhedron*.

    Validates the H-representation (inequalities/equations only, matching
    coefficient lengths), then splits by the moduli of the equations and
    delegates each congruence class to the worker function. Returns a tuple
    of factorizations (one per modulus); the empty polyhedron yields a
    single zero factorization.
    """
    import logging
    logger = logging.getLogger(__name__)
    logger.info('using polyhedron %s', polyhedron.Hrepresentation_str(**Hrepresentation_str_options))

    if polyhedron.is_empty():
        from sage.structure.factorization import Factorization
        return (Factorization([], unit=0),)

    representation = polyhedron.Hrepresentation()
    inequalities = tuple(tuple(entry) for entry in representation if entry.is_inequality())
    equations = tuple(tuple(entry) for entry in representation if entry.is_equation())
    if len(inequalities) + len(equations) != len(representation):
        raise ValueError('cannot handle {}.'.format(polyhedron))
    if not inequalities:
        raise NotImplementedError('no inequality given')

    if indices is None:
        indices = range(len(inequalities[0]) - 1)
    expected_length = len(indices) + 1
    if any(len(entry) != expected_length for entry in inequalities):
        raise ValueError('not all coefficient vectors of the inequalities have the same length')
    if any(len(entry) != expected_length for entry in equations):
        raise ValueError('not all coefficient vectors of the equations have the same length')

    mods = _TransformMod.generate_mods(equations)
    logger.debug('splitting by moduli %s', mods)
    return tuple(
        __generating_function_of_integral_points__(indices, inequalities, equations, mod, **kwds)
        for mod in mods
    )
def mb_return(state, dynamical_model, reward_model, policy, num_steps=1, gamma=1.0, value_function=None, num_samples=1, entropy_reg=0.0, reward_transformer=RewardTransformer(), termination_model=None, reduction='none'):
    """Estimate the model-based return of *state* via model rollouts.

    The state is replicated *num_samples* times, rolled out for *num_steps*
    with the learned dynamics/reward models, and the Monte-Carlo return is
    computed over the stacked trajectory.
    NOTE(review): the RewardTransformer() default is evaluated once at
    import time and shared across calls — fine only if it is stateless.
    """
    # Replicate so each sample gets its own rollout.
    sampled_states = repeat_along_dimension(state, number=num_samples, dim=0)
    trajectory = rollout_model(dynamical_model=dynamical_model, reward_model=reward_model, policy=policy, initial_state=sampled_states, max_steps=num_steps, termination_model=termination_model)
    observation = stack_list_of_tuples(trajectory, dim=sampled_states.ndim - 1)
    value_estimate = mc_return(observation=observation, gamma=gamma, value_function=value_function, entropy_regularization=entropy_reg, reward_transformer=reward_transformer, reduction=reduction)
    return MBValueReturn(value_estimate, observation)
def find_model_using_name(model_name):
    """Import models/<model_name>_model.py and return its BaseModel subclass.

    The module must define a class whose lowercased name equals
    '<model_name>model' (underscores removed) and which subclasses
    BaseModel. Exits the process when no such class is found.
    """
    model_filename = 'models.' + model_name + '_model'
    modellib = importlib.import_module(model_filename)
    target_model_name = model_name.replace('_', '') + 'model'
    model = None
    for name, cls in modellib.__dict__.items():
        if name.lower() == target_model_name.lower() and issubclass(cls, BaseModel):
            model = cls
    if model is None:
        print('In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase.' % (model_filename, target_model_name))
        # Bug fix: this is an error path, but the original called exit(0),
        # reporting success to the calling shell; exit non-zero instead.
        exit(1)
    return model
def register_Ns3UplinkLteGlobalPathlossDatabase_methods(root_module, cls):
    """Register constructors and methods for ns3::UplinkLteGlobalPathlossDatabase.

    PyBindGen-style generated registration: the default and copy
    constructors plus the virtual UpdatePathloss(context, txPhy, rxPhy,
    lossDb) method.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UplinkLteGlobalPathlossDatabase const &', 'arg0')])
    cls.add_method('UpdatePathloss', 'void', [param('std::string', 'context'), param('ns3::Ptr< ns3::SpectrumPhy const >', 'txPhy'), param('ns3::Ptr< ns3::SpectrumPhy const >', 'rxPhy'), param('double', 'lossDb')], is_virtual=True)
    return
class ScipyOptimizeInterfaceDomain(PythonDomain):
    """Sphinx PythonDomain clone whose 'function' directive is name-mangled."""

    name = 'scipy-optimize'

    def __init__(self, *a, **kw):
        super().__init__(*a, **kw)
        # Copy before mutating so the base class directive table is untouched.
        self.directives = dict(self.directives)
        self.directives['function'] = wrap_mangling_directive(self.directives['function'])
def check_rule_validity(context_type, rule_parts):
    """Check whether a parsed rule's hyperparameters are valid for *context_type*.

    rule_parts[3] is the context length (normalized by 4072 — presumably
    the maximum context size; confirm), rule_parts[-1] the prompt
    separator, and rule_parts[4:-1] the context-formatting tokens. All
    three must appear in the allowed sets for the context type.
    """
    valid_hyperparams = rule_hyperparams[context_type]
    try:
        rule_context_ratio = float(rule_parts[3]) / 4072
    # Bug fix: the original bare `except:` also swallowed KeyboardInterrupt
    # and SystemExit; catch only the parsing/indexing failures.
    except (IndexError, ValueError, TypeError):
        return False
    rule_prompt_separator = rule_parts[-1]
    rule_context_formatting = '_'.join(rule_parts[4:-1])
    return (
        rule_context_ratio in valid_hyperparams['context_ratio']
        and rule_prompt_separator in valid_hyperparams['prompt_separator']
        and rule_context_formatting in valid_hyperparams['rule_context_formatting']
    )
def get_matcher(vgg, opt):
    """Attach forward hooks on the requested VGG layers, feeding a Matcher.

    Every hooked layer forwards its output into the shared matcher instance,
    which is returned to the caller.
    """
    matcher = Matcher(opt['what'], 'mse', opt['map_idx'])

    def record_output(module, module_input, module_output):
        matcher(module, module_output)

    for layer_name in opt['layers']:
        vgg._modules[layer_name].register_forward_hook(record_output)
    return matcher
class ParsedRequirement(object):
    """One requirement parsed from a requirements file.

    Carries the raw requirement string, whether it is editable or a
    constraint, its origin, any per-line options, and the source line text.
    """

    def __init__(self, requirement, is_editable, comes_from, constraint, options=None, line_source=None):
        self.requirement = requirement
        self.is_editable = is_editable
        self.comes_from = comes_from
        self.constraint = constraint
        self.options = options
        self.line_source = line_source
def test_report_constant(constantdf: pd.DataFrame) -> None:
    """create_report should handle an all-constant DataFrame in basic mode."""
    from sys import platform
    # On macOS, force a non-interactive matplotlib backend so the test can
    # run headless.
    if platform == 'darwin':
        import matplotlib
        matplotlib.use('PS')
    create_report(constantdf, mode='basic')
class DaCeMLBackend(base.Backend):
    """ONNX backend that imports models into DaCeML for execution."""

    # NOTE(review): 'cls' as the first parameter and the super().prepare call
    # suggest a '@classmethod' decorator was lost here — restore it.
    def prepare(cls, model, device='CPU', **kwargs):
        # Validate via the base backend, then import the ONNX model into DaCe.
        super().prepare(model, device, **kwargs)
        dace_model = onnx_importer.ONNXModel('backend_model', model, cuda=(device == 'CUDA'), onnx_simplify=False, storage=dtypes.StorageType.Default)
        return DaCeMLBackendRep(dace_model)
def test_mean_reduce_symbolic_shape():
    """np.mean over a symbolically-shaped array should match plain NumPy."""
    N = dace.symbol('N')

    # NOTE(review): the dace-typed annotation and symbolic N suggest this
    # inner function should carry a '@dace.program' decorator — it appears
    # to have been lost in extraction.
    def mean_reduce_symbolic_shape(A: dace.float64[(10, N, 3)]):
        return np.mean(A, axis=((- 2), 0))

    X = np.random.normal(scale=10, size=(10, 12, 3)).astype(np.float64)
    dace_result = mean_reduce_symbolic_shape(A=X)
    numpy_result = np.mean(X, axis=((- 2), 0))
    assert np.allclose(dace_result, numpy_result)
def draw_rect(im, rect, color=(1.0, 1.0, 1.0)):
    """Draw a rectangle outline on a uint8 image; returns a new array.

    rect is (x, y, width, height); color channels are floats in [0, 1],
    scaled to 0..255 for drawing. The input array is not modified.
    """
    if im.dtype != np.uint8:
        raise ValueError('The image must be of type uint8.')
    canvas = Image.fromarray(im)
    painter = ImageDraw.Draw(canvas)
    x, y, w, h = rect[0], rect[1], rect[2], rect[3]
    outline_color = tuple(int(c * 255) for c in color)
    painter.rectangle((x, y, x + w, y + h), outline=outline_color, fill=None)
    del painter
    return np.asarray(canvas)
class AutoIterative(AutoFallbackSolver):
    # Iterative linear solver with automatic fallback.
    name = 'ls.auto_iterative'
    # Candidate solver configurations, in order of preference.
    # NOTE(review): fallback semantics come from AutoFallbackSolver —
    # confirm the first creatable entry wins.
    _ls_solvers = [('ls.petsc', {'method': 'cg', 'precond': 'icc'}), ('ls.scipy_iterative', {'method': 'cg'})]
def validate_it_aic(df: Union[str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame], column: str = '') -> Union[bool, pd.Series, pd.DataFrame]:
    """Validate Italian AIC codes element-wise.

    A Series yields a boolean Series; a DataFrame validates *column* when
    given, otherwise every cell; any other input yields a single bool.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(aic.is_valid)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if column != '':
            return df[column].apply(aic.is_valid)
        return df.applymap(aic.is_valid)
    return aic.is_valid(df)
def separated(values, *, limit, stringify, sep):
    """Join *values* with *sep*, truncating to at most *limit* items.

    When truncation occurs, '<sep>... (K more)' is appended, where K is the
    number of omitted items. limit=None disables truncation.
    Fix: removed the original's redundant inner re-check of `count > limit`
    (already guaranteed by the branch condition).
    """
    count = len(values)
    if limit is not None and count > limit:
        shown = values[:limit]
        continuation = f'{sep}... ({count - limit} more)'
    else:
        shown = values
        continuation = ''
    return sep.join(stringify(x) for x in shown) + continuation
class ApiManager(metaclass=Singleton):
    """Singleton that issues chat completions and tracks token usage and cost."""

    def __init__(self):
        # Running usage totals accumulated across all completion calls.
        self.total_prompt_tokens = 0
        self.total_completion_tokens = 0
        self.total_cost = 0
        self.total_budget = 0
        # Lazily populated model list (see get_models).
        self.models: Optional[list[Model]] = None

    def reset(self):
        """Clear all accumulated usage counters and the cached model list."""
        self.total_prompt_tokens = 0
        self.total_completion_tokens = 0
        self.total_cost = 0
        self.total_budget = 0.0
        self.models = None

    def create_chat_completion(self, messages: list[MessageDict], model: (str | None)=None, temperature: (float | None)=None, max_tokens: (int | None)=None, deployment_id=None):
        """Create a chat completion (OpenAI, or Anthropic for claude models) and record its cost."""
        cfg = Config()
        # NOTE(review): model.startswith raises if model is None despite the
        # Optional annotation — confirm callers always pass a model name.
        if model.startswith('claude'):
            return self.create_chat_completion_non_openai(messages, model, temperature, max_tokens)
        if (temperature is None):
            temperature = cfg.temperature
        if (deployment_id is not None):
            response = openai.ChatCompletion.create(deployment_id=deployment_id, model=model, messages=messages, temperature=temperature, max_tokens=max_tokens, api_key=cfg.openai_api_key)
        else:
            response = openai.ChatCompletion.create(model=model, messages=messages, temperature=temperature, max_tokens=max_tokens, api_key=cfg.openai_api_key)
        # Only successful responses contribute to the cost totals.
        if (not hasattr(response, 'error')):
            logger.debug(f'Response: {response}')
            prompt_tokens = response.usage.prompt_tokens
            completion_tokens = response.usage.completion_tokens
            self.update_cost(prompt_tokens, completion_tokens, model)
        return response

    def create_chat_completion_non_openai(self, messages: list[MessageDict], model: (str | None)=None, temperature: (float | None)=None, max_tokens: (int | None)=None):
        """Create a completion via the Anthropic API, mimicking the OpenAI response shape."""
        cfg = Config()
        if (temperature is None):
            temperature = cfg.temperature
        log_file = os.path.join(LOG_DIR, (((model + '_') + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) + '.log'))
        try:
            # Flatten the chat history into Anthropic's human/assistant prompt format.
            prompt = ''
            for (idx, m) in enumerate(messages):
                if (m['role'] in ['user', 'system']):
                    if ((idx != 0) and (messages[(idx - 1)]['role'] not in ['user', 'system'])):
                        prompt += (anthropic.HUMAN_PROMPT + ' ')
                    prompt += (m['content'] + '\n')
                else:
                    prompt += ((anthropic.AI_PROMPT + m['content']) + '\n')
            completion = complete_text_claude(prompt=prompt, model=model, temperature=temperature, max_tokens_to_sample=max_tokens, log_file=log_file)
        except Exception as e:
            # Surface the failure in the attribute-style shape callers check.
            return Namespace(**{'error': str(e)})
        # Recover token counts from the log file complete_text_claude wrote.
        # NOTE(review): assumes the log has a 'tokens' section with
        # 'prompt: N' / 'completion: N' style lines — confirm the format.
        with open(log_file, 'r') as f:
            content = f.read()
        tokens = content.split('tokens')[1].strip().split('\n')
        prompt_tokens = int(tokens[0].split(':')[1].strip())
        completion_tokens = int(tokens[1].split(':')[1].strip())
        self.update_cost(prompt_tokens, completion_tokens, model)
        # Mimic the OpenAI ChatCompletion response structure via Namespaces.
        response = {'choices': [Namespace(**{'finish_reason': 'stop', 'index': 0, 'message': Namespace(**{'content': completion, 'role': 'assistant'})})], 'created': time.time(), 'id': 'chatcmpl', 'model': model, 'object': 'chat.completion', 'usage': Namespace(**{'completion_tokens': completion_tokens, 'prompt_tokens': prompt_tokens, 'total_tokens': (prompt_tokens + completion_tokens)})}
        response = Namespace(**response)
        logger.debug(f'Response: {response}')
        return response

    def update_cost(self, prompt_tokens, completion_tokens, model: str):
        """Accumulate token counts and dollar cost for *model*."""
        # Strip the '-v2' suffix so both versions share one pricing entry.
        model = (model[:(- 3)] if model.endswith('-v2') else model)
        # NOTE(review): this lookup will KeyError for claude models routed
        # through update_cost — confirm OPEN_AI_MODELS includes them.
        model_info = OPEN_AI_MODELS[model]
        self.total_prompt_tokens += prompt_tokens
        self.total_completion_tokens += completion_tokens
        # Costs are stored per 1000 tokens.
        self.total_cost += ((prompt_tokens * model_info.prompt_token_cost) / 1000)
        if issubclass(type(model_info), CompletionModelInfo):
            self.total_cost += ((completion_tokens * model_info.completion_token_cost) / 1000)
        logger.debug(f'Total running cost: ${self.total_cost:.3f}')

    def set_total_budget(self, total_budget):
        """Set the user-defined spending limit."""
        self.total_budget = total_budget

    def get_total_prompt_tokens(self):
        """Total prompt tokens consumed so far."""
        return self.total_prompt_tokens

    def get_total_completion_tokens(self):
        """Total completion tokens produced so far."""
        return self.total_completion_tokens

    def get_total_cost(self):
        """Total accumulated dollar cost."""
        return self.total_cost

    def get_total_budget(self):
        """The configured spending limit."""
        return self.total_budget

    def get_models(self) -> List[Model]:
        """Return cached GPT models from the OpenAI API plus the Claude entry."""
        if (self.models is None):
            all_models = openai.Model.list()['data']
            self.models = [model for model in all_models if ('gpt' in model['id'])]
        # NOTE(review): mixes Model objects with a plain dict for claude.
        return (self.models + [{'id': 'claude-v1'}])
class VQModel(pl.LightningModule):
    """VQGAN-style autoencoder: Encoder -> VectorQuantizer -> Decoder.

    Trained adversarially with two alternating optimizers (autoencoder vs.
    discriminator), selected by ``optimizer_idx`` in ``training_step``.
    """

    def __init__(self, ddconfig, lossconfig, n_embed, embed_dim, ckpt_path=None, ignore_keys=(), image_key='image', colorize_nlabels=None, monitor=None, remap=None, sane_index_shape=False):
        """Build the model.

        Args:
            ddconfig: Encoder/Decoder hyperparameters; must contain 'z_channels'.
            lossconfig: config instantiated into the adversarial loss module.
            n_embed: codebook size.
            embed_dim: codebook embedding dimension.
            ckpt_path: optional checkpoint path to restore weights from.
            ignore_keys: state-dict key prefixes to drop when restoring.
                (Default changed from a shared mutable ``[]`` to ``()``;
                behavior is identical — it is only iterated.)
            image_key: batch key holding the input images.
            colorize_nlabels: if given, number of segmentation labels for the
                random RGB projection used by ``to_rgb``.
            monitor: metric name for Lightning checkpoint monitoring.
            remap / sane_index_shape: forwarded to VectorQuantizer.
        """
        super().__init__()
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, remap=remap, sane_index_shape=sane_index_shape)
        self.quant_conv = torch.nn.Conv2d(ddconfig['z_channels'], embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig['z_channels'], 1)
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
        # (Original assigned self.image_key a second time here — removed.)
        if colorize_nlabels is not None:
            # isinstance replaces the original type(...) == int comparison.
            assert isinstance(colorize_nlabels, int)
            self.register_buffer('colorize', torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor
        # Manual counter driving periodic checkpointing in training_step.
        self.current_step = 0

    def init_from_ckpt(self, path, ignore_keys=()):
        """Load weights from ``path``, dropping keys matching ``ignore_keys`` prefixes.

        Uses ``strict=False`` so partially matching checkpoints still load.
        """
        sd = torch.load(path, map_location='cpu')['state_dict']
        for k in list(sd.keys()):
            # any() ensures each key is deleted at most once; the original
            # nested loop could `del sd[k]` twice (KeyError) when two
            # ignore prefixes matched the same key.
            if any(k.startswith(ik) for ik in ignore_keys):
                print('Deleting key {} from state_dict.'.format(k))
                del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f'Restored from {path}')

    def encode(self, x):
        """Encode images to quantized latents; returns (quant, emb_loss, info)."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        (quant, emb_loss, info) = self.quantize(h)
        return (quant, emb_loss, info)

    def decode(self, quant):
        """Decode quantized latents back to image space."""
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec

    def decode_code(self, code_b):
        """Decode directly from codebook indices."""
        quant_b = self.quantize.embed_code(code_b)
        dec = self.decode(quant_b)
        return dec

    def forward(self, input):
        """Full autoencode pass; returns (reconstruction, quantization_loss)."""
        (quant, diff, _) = self.encode(input)
        dec = self.decode(quant)
        return (dec, diff)

    def get_input(self, batch, k):
        """Fetch batch[k] as a float NCHW tensor (adds a channel dim if missing)."""
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
        return x.float()

    def training_step(self, batch, batch_idx, optimizer_idx):
        """One optimization step: autoencoder (idx 0) or discriminator (idx 1)."""
        x = self.get_input(batch, self.image_key)
        (xrec, qloss) = self(x)
        if optimizer_idx == 0:
            (aeloss, log_dict_ae) = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
            self.log('train/aeloss', aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss
        if optimizer_idx == 1:
            (discloss, log_dict_disc) = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
            self.log('train/discloss', discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            self.current_step += 1
            # Periodic manual checkpoint every 1000 discriminator steps.
            if (self.current_step % 1000) == 0:
                ckpt = os.path.join(self.trainer.logdir, 'checkpoints', f'{self.current_step:04d}.ckpt')
                self.trainer.save_checkpoint(ckpt)
            return discloss

    def validation_step(self, batch, batch_idx):
        """Log both loss heads on the validation split."""
        x = self.get_input(batch, self.image_key)
        (xrec, qloss) = self(x)
        (aeloss, log_dict_ae) = self.loss(qloss, x, xrec, 0, self.global_step, last_layer=self.get_last_layer(), split='val')
        (discloss, log_dict_disc) = self.loss(qloss, x, xrec, 1, self.global_step, last_layer=self.get_last_layer(), split='val')
        rec_loss = log_dict_ae['val/rec_loss']
        self.log('val/rec_loss', rec_loss, prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        self.log('val/aeloss', aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        # NOTE(review): this returns the bound log_dict *method*, not the
        # logged values — looks unintended, but Lightning ignores the return
        # value here, so it is kept for behavioral compatibility; confirm
        # before changing.
        return self.log_dict

    def on_validation_end(self, **kwargs):
        """Save a rolling 'latest.ckpt' after each validation pass."""
        ckpt = os.path.join(self.trainer.logdir, 'checkpoints', 'latest.ckpt')
        self.trainer.save_checkpoint(ckpt)

    def configure_optimizers(self):
        """Return [autoencoder_optimizer, discriminator_optimizer]."""
        lr = self.learning_rate
        ae_params = (list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(self.quantize.parameters()) + list(self.quant_conv.parameters()) + list(self.post_quant_conv.parameters()))
        opt_ae = torch.optim.Adam(ae_params, lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9))
        return ([opt_ae, opt_disc], [])

    def get_last_layer(self):
        """Last decoder layer's weight, used for adaptive GAN loss weighting."""
        return self.decoder.conv_out.weight

    def log_images(self, batch, **kwargs):
        """Return a dict of input / reconstruction tensors for image logging."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        (xrec, _) = self(x)
        if x.shape[1] > 3:
            # Segmentation-style multi-channel input: project to RGB for viewing.
            assert xrec.shape[1] > 3
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log['inputs'] = x
        log['reconstructions'] = xrec
        return log

    def to_rgb(self, x):
        """Project a one-hot segmentation tensor to RGB via a fixed random conv,
        rescaled to [-1, 1]."""
        assert self.image_key == 'segmentation'
        if not hasattr(self, 'colorize'):
            self.register_buffer('colorize', torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = (2.0 * (x - x.min())) / (x.max() - x.min()) - 1.0
        return x
@_connect.numpy.implements('argmin')
def _nep_18_impl_argmin(a, axis=None, out=UNSUPPORTED, *, keepdims=False):
    """NEP-18 override routing ``numpy.argmin`` to this library's ``argmin``.

    The registration call was a bare statement (its return value discarded),
    so the override was never installed; restored as a proper ``@`` decorator.
    ``out`` exists only for signature compatibility (marked UNSUPPORTED).
    """
    return argmin(a, axis=axis, keepdims=keepdims)
def register_Ns3DefaultDeleter__Ns3Ipv6Route_methods(root_module, cls):
    """Register constructors and the static Delete method for
    ns3::DefaultDeleter<ns3::Ipv6Route> on the binding class ``cls``."""
    # Default constructor and copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::Ipv6Route > const &', 'arg0')])
    # static void Delete(ns3::Ipv6Route* object)
    cls.add_method('Delete', 'void', [param('ns3::Ipv6Route *', 'object')], is_static=True)
def _ell(A, m):
    """Scaling diagnostic used by expm-style routines.

    Computes ``max(ceil(log2(alpha / u) / (2 m)), 0)`` where ``alpha``
    compares the 1-norm bound of ``|A|^(2m+1)`` against the leading Pade
    truncation constant and ``u`` is double-precision unit roundoff.

    Raises:
        ValueError: if ``A`` is not square (2-D with equal dimensions).
    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    # Reciprocal magnitude of the leading Pade coefficient c_{2m+1}.
    c_recip = float(scipy.special.comb(2 * m, m, exact=True) * math.factorial(2 * m + 1))
    # IEEE-754 double unit roundoff.
    unit_roundoff = 2 ** (-53)
    norm_abs_power = _onenorm_matrix_power_nnm(abs(A), 2 * m + 1)
    if not norm_abs_power:
        # A zero bound means no extra scaling is needed.
        return 0
    alpha = norm_abs_power / (_onenorm(A) * c_recip)
    extra_squarings = int(np.ceil(np.log2(alpha / unit_roundoff) / (2 * m)))
    return max(extra_squarings, 0)
def enroll_per_utt(test_DB, model):
    """Compute a d-vector embedding for every utterance in ``test_DB``.

    Returns a dict mapping the last three path components of each file
    (extension normalized to '.wav') to its embedding. Prints a progress
    line per utterance.
    """
    embeddings = {}
    total_len = len(test_DB)
    pad_width = len(str(total_len))
    with torch.no_grad():
        for i in range(total_len):
            # Positional lookup kept as in the original (container type is
            # not visible here — presumably a DataFrame; TODO confirm).
            tmp_filename = test_DB['filename'][i]
            (embedding, _) = get_d_vector(tmp_filename, model)
            key = os.sep.join(tmp_filename.split(os.sep)[-3:])
            key = os.path.splitext(key)[0] + '.wav'
            embeddings[key] = embedding
            print(('[%s/%s] Embedding for "%s" is saved' % (str(i).zfill(pad_width), total_len, key)))
    return embeddings
class BaseDataset(Dataset):
    """Image-classification dataset reading image files under a root directory.

    Each item is an (image, label) pair; images are converted to RGB before
    the optional transform is applied.
    """

    def __init__(self, dataset_path, image_files, labels, transform=None):
        super(BaseDataset, self).__init__()
        self.dataset_path = dataset_path
        self.image_files = image_files
        self.labels = labels
        self.transform = transform

    def __len__(self):
        """Number of samples."""
        return len(self.image_files)

    def __getitem__(self, idx):
        """Load the idx-th image, force RGB, apply transform if set."""
        full_path = os.path.join(self.dataset_path, self.image_files[idx])
        image = Image.open(full_path)
        if image.mode != 'RGB':
            image = image.convert('RGB')
        if self.transform:
            image = self.transform(image)
        return (image, self.labels[idx])
class conv1x1(nn.Module):
    """1x1 convolution adapter whose wiring depends on ``config_task.mode``.

    - 'series_adapters': BatchNorm + 1x1 conv, applied residually in forward.
    - 'parallel_adapters': 1x1 conv that may change width and stride.
    - anything else: plain width-preserving 1x1 conv.
    """

    def __init__(self, planes, out_planes=None, stride=1):
        super(conv1x1, self).__init__()
        mode = config_task.mode
        if mode == 'series_adapters':
            self.conv = nn.Sequential(nn.BatchNorm2d(planes), conv1x1_fonc(planes))
        elif mode == 'parallel_adapters':
            self.conv = conv1x1_fonc(planes, out_planes, stride)
        else:
            self.conv = conv1x1_fonc(planes)

    def forward(self, x):
        out = self.conv(x)
        # Series adapters are residual: add the input back in place.
        if config_task.mode == 'series_adapters':
            out += x
        return out
def test_corrupted_flow_args():
    """flow() on a CorruptedGenerator must accept data plus an ``args`` kwarg
    when the wrapped generator validates flow arguments."""
    corrupted = CorruptedGenerator(DummyGenerator([[0]], check_flow_args=True))
    corrupted.flow('some', args=1)
def combine_partial_results(partial_results) -> List:
    """Flatten per-worker record lists and return predictions ordered by id.

    Args:
        partial_results: iterable of lists of record dicts, each carrying
            'id' and 'pred' keys.

    Returns:
        The 'pred' values sorted by ascending 'id'.
    """
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    # In-place sort avoids the redundant list(sorted(...)) copy.
    records.sort(key=lambda record: record['id'])
    return [record['pred'] for record in records]
def run_epochs(model, model_bert, opt, opt_bert, bert_config, tokenizer, path_wikisql, model_path, train_loader, train_table, dev_loader, dev_table, test_loader, test_table, early_stop_ep=None, bool_eval=True, startime_time=None):
    """Train a WikiSQL text-to-SQL model with dev-based early stopping, then
    evaluate the best checkpoint on dev and test.

    Per epoch: run train_fast, evaluate on dev with test_fast, and save
    'model_best.pt' / 'model_bert_best.pt' whenever dev logical-form accuracy
    improves. Stops early after `early_stop_ep` epochs without improvement.
    Finally reloads the best checkpoints and re-evaluates dev and test with
    execution accuracy enabled (bool_ex=True).

    Returns:
        (dev_acc_lx_best, dev_acc_ex_best, test_acc_lx_best, test_acc_ex_best)
        — best logical-form and execution accuracies on dev and test.

    Note: `startime_time` (sic) is the training start timestamp used only for
    elapsed-time logging. Requires `bool_eval` to be True (asserted).
    """
    # Hard-coded training schedule: 100 epochs max, gradients accumulated
    # over 4 steps.
    tepoch = 100
    accumulate_gradients = 4
    assert bool_eval
    print(('## Actual tepoch %d, accumulate_gradients %d ' % (tepoch, accumulate_gradients)))
    print('## Early stop epoch: {}'.format(early_stop_ep))
    # Fixed BERT input length and number of top encoder layers fed downstream.
    max_seq_length = 222
    num_target_layers = 2
    acc_lx_t_best = (- 1)
    acc_ex_t_best = (- 1)
    epoch_best = (- 1)
    patience_counter = 0
    for epoch in range(tepoch):
        (acc_train, aux_out_train) = train_fast(train_loader, train_table, model, model_bert, opt, bert_config, tokenizer, max_seq_length, num_target_layers, accumulate_gradients, opt_bert=opt_bert, st_pos=0, path_db=path_wikisql, dset_name='train')
        print_result(epoch, acc_train, 'train')
        if bool_eval:
            with torch.no_grad():
                # bool_ex=False: skip (slow) execution accuracy during epoch-level dev eval.
                (acc_dev, results_dev, cnt_list) = test_fast(dev_loader, dev_table, model, model_bert, bert_config, tokenizer, max_seq_length, num_target_layers, detail=False, path_db=path_wikisql, st_pos=0, dset_name='dev', EG=False, bool_ex=False)
            print_result(epoch, acc_dev, 'dev')
            # Last two entries of acc_dev: logical-form acc, execution acc.
            acc_lx_t = acc_dev[(- 2)]
            acc_ex_t = acc_dev[(- 1)]
            if (acc_lx_t > acc_lx_t_best):
                # New best dev logical-form accuracy: checkpoint both models.
                acc_lx_t_best = acc_lx_t
                acc_ex_t_best = acc_ex_t
                epoch_best = epoch
                patience_counter = 0
                state = {'model': model.state_dict()}
                torch.save(state, os.path.join(model_path, 'model_best.pt'))
                state = {'model_bert': model_bert.state_dict()}
                torch.save(state, os.path.join(model_path, 'model_bert_best.pt'))
            else:
                patience_counter += 1
            if ((early_stop_ep is not None) and (patience_counter == early_stop_ep)):
                print(' Early stop!')
                break
            print(f' Best Dev lx acc: {acc_lx_t_best} at epoch: {epoch_best}')
            print(' Time stamp: {}'.format(datetime.datetime.now()))
            if (startime_time is not None):
                print(' Time spent: {}'.format((datetime.datetime.now() - startime_time)))
            sys.stdout.flush()
    # Reload the best checkpoints saved above for the final evaluation.
    print('Loading back best checkpoints...')
    if torch.cuda.is_available():
        res = torch.load(os.path.join(model_path, 'model_bert_best.pt'))
    else:
        res = torch.load(os.path.join(model_path, 'model_bert_best.pt'), map_location='cpu')
    model_bert.load_state_dict(res['model_bert'])
    # NOTE(review): `device` is not defined in this function — presumably a
    # module-level global; confirm it exists at import scope.
    model_bert.to(device)
    if torch.cuda.is_available():
        res = torch.load(os.path.join(model_path, 'model_best.pt'))
    else:
        res = torch.load(os.path.join(model_path, 'model_best.pt'), map_location='cpu')
    # NOTE(review): unlike model_bert, `model` is not moved to `device` after
    # loading — verify it is already on the right device in the caller.
    model.load_state_dict(res['model'])
    with torch.no_grad():
        # Final evals use bool_ex=True to also measure execution accuracy.
        (acc_dev, results_dev, cnt_list) = test_fast(dev_loader, dev_table, model, model_bert, bert_config, tokenizer, max_seq_length, num_target_layers, detail=False, path_db=path_wikisql, st_pos=0, dset_name='dev', EG=False, bool_ex=True)
        print_result((- 1), acc_dev, 'dev')
        dev_acc_lx_t_best = acc_dev[(- 2)]
        dev_acc_ex_t_best = acc_dev[(- 1)]
        (acc_test, results_test, cnt_list) = test_fast(test_loader, test_table, model, model_bert, bert_config, tokenizer, max_seq_length, num_target_layers, detail=False, path_db=path_wikisql, st_pos=0, dset_name='test', EG=False, bool_ex=True)
        print_result((- 1), acc_test, 'test')
        test_acc_lx_t_best = acc_test[(- 2)]
        test_acc_ex_t_best = acc_test[(- 1)]
    return (dev_acc_lx_t_best, dev_acc_ex_t_best, test_acc_lx_t_best, test_acc_ex_t_best)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.