code stringlengths 101 5.91M |
|---|
def prologue(args):
    """Prepare a training run from parsed CLI args.

    Builds the output directory, snapshots the code, constructs data
    loaders, model, loss, optimizer and LR scheduler, and optionally
    resumes from a checkpoint.

    Returns:
        Tuple of (train_loader, test_loader, criterion, model, optimizer,
        scheduler, starting_epoch, logfilename, model_path, device, writer).

    NOTE(review): `device` is not defined in this function — presumably a
    module-level global (e.g. torch.device); confirm before reuse.
    """
    # Assign a random run id when the caller did not provide one.
    if ((not hasattr(args, 'id')) or (args.id is None)):
        args.id = np.random.randint(10000)
    # Run artifacts live under <outdir>/<arch>/<id>/.
    args.outdir = (args.outdir + f'/{args.arch}/{args.id}/')
    if (not os.path.exists(args.outdir)):
        os.makedirs(args.outdir)
    # Snapshot the current source tree into the run directory for reproducibility.
    copy_code(args.outdir)
    train_dataset = get_dataset(args.dataset, 'train')
    test_dataset = get_dataset(args.dataset, 'test')
    # Pinned host memory speeds up host->GPU copies; only worthwhile for imagenet here.
    pin_memory = (args.dataset == 'imagenet')
    train_loader = DataLoader(train_dataset, shuffle=True, batch_size=args.batch, num_workers=args.workers, pin_memory=pin_memory)
    test_loader = DataLoader(test_dataset, shuffle=False, batch_size=args.batch, num_workers=args.workers, pin_memory=pin_memory)
    if (args.pretrained_model != ''):
        # Fine-tuning path: load a pretrained checkpoint and replace the final
        # classifier layer (64 -> num CIFAR-10 classes).
        assert (args.arch == 'cifar_resnet110'), 'Unsupported architecture for pretraining'
        checkpoint = torch.load(args.pretrained_model)
        model = get_architecture(checkpoint['arch'], args.dataset)
        model.load_state_dict(checkpoint['state_dict'])
        # model is presumably a Sequential whose second element is the resnet — TODO confirm.
        model[1].fc = nn.Linear(64, get_num_classes('cifar10')).to(device)
    else:
        model = get_architecture(args.arch, args.dataset)
    logfilename = os.path.join(args.outdir, 'log.txt')
    init_logfile(logfilename, 'epoch\ttime\tlr\ttrain loss\ttrain acc\ttestloss\ttest acc')
    writer = SummaryWriter(args.outdir)
    criterion = CrossEntropyLoss().to(device)
    optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    scheduler = StepLR(optimizer, step_size=args.lr_step_size, gamma=args.gamma)
    starting_epoch = 0
    model_path = os.path.join(args.outdir, 'checkpoint.pth.tar')
    if args.resume:
        if os.path.isfile(model_path):
            print("=> loading checkpoint '{}'".format(model_path))
            # map_location keeps tensors on CPU regardless of where they were saved.
            checkpoint = torch.load(model_path, map_location=(lambda storage, loc: storage))
            starting_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(model_path, checkpoint['epoch']))
        else:
            # Best-effort resume: fall through to a fresh run when no checkpoint exists.
            print("=> no checkpoint found at '{}'".format(model_path))
    return (train_loader, test_loader, criterion, model, optimizer, scheduler, starting_epoch, logfilename, model_path, device, writer)
def build_optimizer(config, model):
    """Build an SGD or Adam optimizer from a config dict.

    Bias parameters get their own parameter group so their learning rate
    and weight decay can be scaled independently via
    `bias_lr_multiplier` / `bias_wd_multiplier`.

    Args:
        config: dict with at least 'opt_type' and 'base_lr'; optional keys:
            'base_wd', 'bias_lr_multiplier', 'bias_wd_multiplier', plus
            optimizer-specific knobs ('momentum', 'betas', ...).
        model: module whose `named_parameters()` are optimized.

    Returns:
        A configured torch optimizer.

    Raises:
        ValueError: when 'opt_type' is not in `_ALLOWED_OPT_TYPES`.
        NotImplementedError: when the type is allowed but not handled here.
    """
    assert isinstance(config, dict)
    opt_type = config['opt_type'].upper()
    base_lr = config['base_lr']
    base_wd = config.get('base_wd', 0.0)
    bias_lr_multiplier = config.get('bias_lr_multiplier', 1.0)
    bias_wd_multiplier = config.get('bias_wd_multiplier', 1.0)
    if (opt_type not in _ALLOWED_OPT_TYPES):
        # Fix: the original message had no space after '!' ("!Allowed"),
        # producing a garbled error string.
        raise ValueError(f'Invalid optimizer type `{opt_type}`! Allowed types: {_ALLOWED_OPT_TYPES}.')
    model_params = []
    for (param_name, param) in model.named_parameters():
        param_group = {'params': [param]}
        if param.requires_grad:
            if ('bias' in param_name):
                param_group['lr'] = (base_lr * bias_lr_multiplier)
                param_group['weight_decay'] = (base_wd * bias_wd_multiplier)
            else:
                param_group['lr'] = base_lr
                param_group['weight_decay'] = base_wd
        # NOTE(review): frozen params (requires_grad=False) are still appended
        # without explicit lr/wd and fall back to the optimizer defaults —
        # preserved as-is; confirm this is intended.
        model_params.append(param_group)
    if (opt_type == 'SGD'):
        return torch.optim.SGD(params=model_params, lr=base_lr, momentum=config.get('momentum', 0.9), dampening=config.get('dampening', 0), weight_decay=base_wd, nesterov=config.get('nesterov', False))
    if (opt_type == 'ADAM'):
        return AdamOptimizer(params=model_params, lr=base_lr, betas=config.get('betas', (0.9, 0.999)), eps=config.get('eps', 1e-08), weight_decay=base_wd, amsgrad=config.get('amsgrad', False))
    raise NotImplementedError(f'Not implemented optimizer type `{opt_type}`!')
def test_dataset(csv_file_path: str):
    """Load a tweet CSV (columns 0..3) and print how many rows are
    labeled rumor (tag == 1) vs non-rumor."""
    df = load_matrix_from_csv(csv_file_path, start_col_index=0, end_col_index=4, header=0)
    rumor_num = 0
    non_rumor_num = 0
    for row in df[:]:
        # Column 3 holds the label; 1 marks a rumor.
        if int(row[3]) == 1:
            rumor_num += 1
        else:
            non_rumor_num += 1
    print('num of rumor instance: ', rumor_num)
    print('num of non-rumor instance: ', non_rumor_num)
class BernoulliLayer(Initializable, ProbabilisticLayer):
    """Bernoulli observation layer: X | Y ~ Bernoulli(sigmoid(W Y + b)).

    Fix: the bare `(inputs=[...], outputs=[...])` lines in the original were
    syntax errors — they are `@application(...)` decorators (blocks library)
    whose `@application` name was stripped. Restored here so the methods are
    registered as blocks applications again.
    """

    def __init__(self, dim_X, dim_Y, **kwargs):
        super(BernoulliLayer, self).__init__(**kwargs)
        self.dim_X = dim_X
        self.dim_Y = dim_Y
        # Single affine map from the conditioning variable Y to X logits.
        self.linear_transform = Linear(name=(self.name + '_linear'), input_dim=dim_Y, output_dim=dim_X, weights_init=self.weights_init, biases_init=self.biases_init, use_bias=self.use_bias)
        self.children = [self.linear_transform]

    @application(inputs=['Y'], outputs=['X_expected'])
    def sample_expected(self, Y):
        """Return E[X | Y] = sigmoid(W Y + b)."""
        return tensor.nnet.sigmoid(self.linear_transform.apply(Y))

    @application(inputs=['Y'], outputs=['X', 'log_prob'])
    def sample(self, Y):
        """Draw X ~ Bernoulli(E[X|Y]) via uniform thresholding; also return log p(X|Y)."""
        prob_X = self.sample_expected(Y)
        U = self.theano_rng.uniform(size=prob_X.shape, nstreams=N_STREAMS)
        X = tensor.cast((U <= prob_X), floatX)
        return (X, self.log_prob(X, Y))

    @application(inputs=['X', 'Y'], outputs=['log_prob'])
    def log_prob(self, X, Y):
        """Bernoulli log-likelihood log p(X|Y), summed over the feature axis."""
        prob_X = self.sample_expected(Y)
        log_prob = ((X * tensor.log(prob_X)) + ((1.0 - X) * tensor.log((1 - prob_X))))
        return log_prob.sum(axis=1)
class Res_CBAM_block(nn.Module):
    """Residual conv block refined by CBAM-style channel then spatial attention.

    Two 3x3 conv+BN stages with a (possibly projected) identity shortcut;
    the output is re-weighted by channel and spatial attention before the
    residual addition and final ReLU.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(Res_CBAM_block, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # A 1x1 projection is needed whenever the spatial size or channel
        # count of the identity path does not match the main path.
        needs_projection = (stride != 1) or (out_channels != in_channels)
        if needs_projection:
            self.shortcut = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride), nn.BatchNorm2d(out_channels))
        else:
            self.shortcut = None
        self.ca = ChannelAttention(out_channels)
        self.sa = SpatialAttention()

    def forward(self, x):
        # Identity branch (projected when shapes differ).
        identity = x if self.shortcut is None else self.shortcut(x)
        # Main branch: conv-bn-relu, conv-bn.
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        # Attention re-weighting: channel first, then spatial.
        y = self.ca(y) * y
        y = self.sa(y) * y
        y = y + identity
        return self.relu(y)
def output_results(results, output=None):
    """Print a summary table of benchmark results and optionally write a CSV.

    Args:
        results: mapping from scenario name to an object exposing
            `get_formatted_summary()` (an iterable of string columns).
        output: optional path; when given, the rows are also written as CSV.
    """
    headers = ['Scenario Name', 'Steps', 'Total Reward']
    rows = [[name, *results[name].get_formatted_summary()] for name in AVAIL_BENCHMARKS]
    table = PrettyTable(headers)
    for row in rows:
        table.add_row(row)
    # Fix: the table was built but never rendered, making it dead code —
    # display it so the function actually outputs the results.
    print(table)
    if (output is not None):
        with open(output, 'w') as fout:
            fout.write((','.join(headers) + '\n'))
            for row in rows:
                fout.write((','.join(row) + '\n'))
def collect_env_info():
    """Collect a human-readable report of the runtime environment.

    Probes Python, PyTorch, CUDA/ROCm, detectron2 and common companion
    libraries, and returns everything as one formatted string. Every probe
    is best-effort: missing packages or failing subprocesses are reported
    as text rather than raised.
    """
    has_gpu = torch.cuda.is_available()
    torch_version = torch.__version__
    # Imported lazily: cpp_extension pulls in compiler probing machinery.
    from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
    has_rocm = False
    # A ROCm build exposes torch.version.hip; require ROCM_HOME too.
    if ((getattr(torch.version, 'hip', None) is not None) and (ROCM_HOME is not None)):
        has_rocm = True
    has_cuda = (has_gpu and (not has_rocm))
    data = []
    data.append(('sys.platform', sys.platform))
    data.append(('Python', sys.version.replace('\n', '')))
    data.append(('numpy', np.__version__))
    try:
        import detectron2
        data.append(('detectron2', ((detectron2.__version__ + ' ') + os.path.dirname(detectron2.__file__))))
    except ImportError:
        data.append(('detectron2', 'failed to import'))
    except AttributeError:
        # Importable but missing __version__/__file__ — e.g. a stray source dir.
        data.append(('detectron2', 'imported a wrong installation'))
    try:
        import detectron2._C as _C
    except ImportError as e:
        # The native extension failed to load: report compiler/CUDA details
        # that usually explain why the build is broken.
        data.append(('detectron2._C', f'not built correctly: {e}'))
        if (sys.platform != 'win32'):
            try:
                cxx = os.environ.get('CXX', 'c++')
                cxx = subprocess.check_output("'{}' --version".format(cxx), shell=True)
                cxx = cxx.decode('utf-8').strip().split('\n')[0]
            except subprocess.SubprocessError:
                cxx = 'Not found'
            data.append(('Compiler ($CXX)', cxx))
        if (has_cuda and (CUDA_HOME is not None)):
            try:
                nvcc = os.path.join(CUDA_HOME, 'bin', 'nvcc')
                nvcc = subprocess.check_output("'{}' -V".format(nvcc), shell=True)
                # Last line of `nvcc -V` carries the release string.
                nvcc = nvcc.decode('utf-8').strip().split('\n')[(- 1)]
            except subprocess.SubprocessError:
                nvcc = 'Not found'
            data.append(('CUDA compiler', nvcc))
        if (has_cuda and (sys.platform != 'win32')):
            try:
                so_file = importlib.util.find_spec('detectron2._C').origin
            except (ImportError, AttributeError):
                pass
            else:
                data.append(('detectron2 arch flags', detect_compute_compatibility(CUDA_HOME, so_file)))
    else:
        # Native extension loaded fine: it can report the toolchain it was built with.
        data.append(('Compiler', _C.get_compiler_version()))
        data.append(('CUDA compiler', _C.get_cuda_version()))
        if (has_cuda and getattr(_C, 'has_cuda', (lambda : True))()):
            data.append(('detectron2 arch flags', detect_compute_compatibility(CUDA_HOME, _C.__file__)))
    data.append(get_env_module())
    data.append(('PyTorch', ((torch_version + ' ') + os.path.dirname(torch.__file__))))
    data.append(('PyTorch debug build', torch.version.debug))
    if (not has_gpu):
        has_gpu_text = 'No: torch.cuda.is_available() == False'
    else:
        has_gpu_text = 'Yes'
    data.append(('GPU available', has_gpu_text))
    if has_gpu:
        # Group device indices by (name, compute capability) so identical
        # GPUs collapse into one row, e.g. "GPU 0,1".
        devices = defaultdict(list)
        for k in range(torch.cuda.device_count()):
            cap = '.'.join((str(x) for x in torch.cuda.get_device_capability(k)))
            name = (torch.cuda.get_device_name(k) + f' (arch={cap})')
            devices[name].append(str(k))
        for (name, devids) in devices.items():
            data.append((('GPU ' + ','.join(devids)), name))
        if has_rocm:
            msg = (' - invalid!' if (not (ROCM_HOME and os.path.isdir(ROCM_HOME))) else '')
            data.append(('ROCM_HOME', (str(ROCM_HOME) + msg)))
        else:
            try:
                from torch.utils.collect_env import get_nvidia_driver_version, run as _run
                data.append(('Driver version', get_nvidia_driver_version(_run)))
            except Exception:
                pass
            msg = (' - invalid!' if (not (CUDA_HOME and os.path.isdir(CUDA_HOME))) else '')
            data.append(('CUDA_HOME', (str(CUDA_HOME) + msg)))
            cuda_arch_list = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
            if cuda_arch_list:
                data.append(('TORCH_CUDA_ARCH_LIST', cuda_arch_list))
    data.append(('Pillow', PIL.__version__))
    try:
        data.append(('torchvision', ((str(torchvision.__version__) + ' ') + os.path.dirname(torchvision.__file__))))
        if has_cuda:
            try:
                torchvision_C = importlib.util.find_spec('torchvision._C').origin
                msg = detect_compute_compatibility(CUDA_HOME, torchvision_C)
                data.append(('torchvision arch flags', msg))
            except (ImportError, AttributeError):
                data.append(('torchvision._C', 'Not found'))
    except AttributeError:
        data.append(('torchvision', 'unknown'))
    try:
        import fvcore
        data.append(('fvcore', fvcore.__version__))
    except (ImportError, AttributeError):
        pass
    try:
        import iopath
        data.append(('iopath', iopath.__version__))
    except (ImportError, AttributeError):
        pass
    try:
        import cv2
        data.append(('cv2', cv2.__version__))
    except (ImportError, AttributeError):
        data.append(('cv2', 'Not found'))
    env_str = (tabulate(data) + '\n')
    env_str += collect_torch_env()
    return env_str
class MobileNetV3(nn.Module):
    """MobileNetV3 feature extractor + classifier.

    The backbone is a Sequential of an init conv block, one Sequential per
    stage of MobileNetV3 units, a final SE-capable block and a 7x7 average
    pool; a classifier head maps pooled features to class logits.
    Submodule names ('init_block', 'stage{i}', 'unit{j}', 'final_block',
    'final_pool') are preserved for state_dict compatibility.
    """

    def __init__(self, channels, exp_channels, init_block_channels, final_block_channels, classifier_mid_channels, kernels3, use_relu, use_se, first_stride, final_use_se, in_channels=3, in_size=(224, 224), num_classes=1000):
        super(MobileNetV3, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module('init_block', conv3x3_block(in_channels=in_channels, out_channels=init_block_channels, stride=2, activation='hswish'))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, unit_out_channels in enumerate(stage_channels):
                # First unit of each stage (except possibly stage 0) downsamples.
                downsample = (unit_idx == 0) and ((stage_idx != 0) or first_stride)
                unit = MobileNetV3Unit(
                    in_channels=in_channels,
                    out_channels=unit_out_channels,
                    exp_channels=exp_channels[stage_idx][unit_idx],
                    use_kernel3=(kernels3[stage_idx][unit_idx] == 1),
                    stride=(2 if downsample else 1),
                    activation=('relu' if (use_relu[stage_idx][unit_idx] == 1) else 'hswish'),
                    use_se=(use_se[stage_idx][unit_idx] == 1))
                stage.add_module('unit{}'.format(unit_idx + 1), unit)
                in_channels = unit_out_channels
            self.features.add_module('stage{}'.format(stage_idx + 1), stage)
        self.features.add_module('final_block', MobileNetV3FinalBlock(in_channels=in_channels, out_channels=final_block_channels, use_se=final_use_se))
        in_channels = final_block_channels
        self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=7, stride=1))
        self.output = MobileNetV3Classifier(in_channels=in_channels, out_channels=num_classes, mid_channels=classifier_mid_channels, dropout_rate=0.2)
        self._init_params()

    def _init_params(self):
        """Kaiming-uniform init for every conv weight, zeros for conv biases."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        features = self.features(x)
        logits = self.output(features)
        # Flatten to (batch, num_classes).
        return logits.view(logits.size(0), -1)
class Normalize(object):
    """Transform that normalizes an image with fixed per-channel mean/std;
    the accompanying target is passed through untouched."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, image, target):
        normalized = F.normalize(image, mean=self.mean, std=self.std)
        return (normalized, target)
def init_random_seed(random_seed):
    """Seed every RNG in use (python `random`, numpy, torch CPU and all
    CUDA devices) so runs are reproducible."""
    import random
    import numpy as np
    # Apply the same seed to each library's global generator.
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(random_seed)
def txt_logger(txtname, log):
    """Append one line to `txtname`: each item of `log` followed by a comma
    (trailing comma included), terminated with a newline."""
    with open(txtname, 'a') as handle:
        line = ''.join(entry + ',' for entry in log)
        handle.write(line + '\n')
def test_attribute_unpacking_no_overwrite():
    """Staleness-detection test for tuple-unpacking onto attributes.

    Exercises a notebook-tracing framework (`run_cell` / `assert_detected` /
    `assert_not_detected` — defined elsewhere): after `x.x, y.x = w + 2, z + 3`,
    x.x depends on w and y.x depends on z. Updating w must flag x.x as stale
    without flagging y.x, and updating z must flag y.x.
    """
    run_cell('\n class Foo:\n def __init__(self, x):\n self.x = x\n ')
    run_cell('x = Foo(5)')
    run_cell('y = Foo(6)')
    run_cell('w = 42')
    run_cell('z = 43')
    # Unpacking assignment: x.x <- w, y.x <- z (distinct dependencies).
    run_cell('x.x, y.x = w + 2, z + 3')
    run_cell('s, t = 12, 13')
    # Self-referencing update must keep the w/z dependencies alive.
    run_cell('x.x, y.x = x.x + s, y.x + t')
    run_cell('w = 101')
    run_cell('logging.info(x.x)')
    # x.x depends on w, which changed -> stale.
    assert_detected()
    run_cell('logging.info(y.x)')
    # y.x does not depend on w -> must NOT be flagged.
    assert_not_detected()
    run_cell('z = 103')
    run_cell('logging.info(y.x)')
    # y.x depends on z, which changed -> stale.
    assert_detected()
def store_standard_system(polsys, **nbvar):
    """Store the polynomial system `polsys` (list of strings) in the phcpy
    standard-precision systems container.

    With no keyword argument, each polynomial is stored in `len(polsys)`
    variables; otherwise the single keyword value gives the variable count.
    Returns the first nonzero failure code, or 0 on success.
    """
    from phcpy.phcpy2c3 import py2c_syscon_clear_standard_system
    from phcpy.phcpy2c3 import py2c_syscon_initialize_number_of_standard_polynomials
    from phcpy.phcpy2c3 import py2c_syscon_store_standard_polynomial
    py2c_syscon_clear_standard_system()
    dim = len(polsys)
    py2c_syscon_initialize_number_of_standard_polynomials(dim)
    # Number of variables: the system dimension unless overridden by the caller.
    nvr = dim if (len(nbvar) == 0) else list(nbvar.values())[0]
    fail = 0
    for position, pol in enumerate(polsys, start=1):
        fail = py2c_syscon_store_standard_polynomial(len(pol), nvr, position, pol)
        if fail != 0:
            # Stop at the first polynomial that fails to store.
            break
    return fail
class QasmParser():
    """OPENQASM 2.0 parser built on PLY yacc.

    Maintains a global and (during gate definitions) a local symbol table,
    verifies gate calls, register references and bit lists as rules reduce,
    and produces an AST rooted at a `node.Program`.

    NOTE(review): PLY's yacc derives grammar productions from the docstrings
    of the `p_*` handler methods; those docstrings are absent in this copy
    (apparently stripped), which would make `yacc.yacc()` fail. They are NOT
    reconstructed here — restore them from the original source. For the same
    reason no docstrings are added to any `p_*` method below.

    Fix applied: `run()` parsed its input twice back-to-back; the redundant
    second parse was removed.
    """

    def __init__(self, filename):
        if (filename is None):
            filename = ''
        self.lexer = QasmLexer(filename)
        self.tokens = self.lexer.tokens
        # yacc writes its parser tables into a throwaway directory,
        # removed in __exit__.
        self.parse_dir = tempfile.mkdtemp(prefix='qiskit')
        self.precedence = (('left', '+', '-'), ('left', '*', '/'), ('left', 'negative', 'positive'), ('right', '^'))
        self.parser = yacc.yacc(module=self, debug=False, outputdir=self.parse_dir)
        self.qasm = None
        self.parse_deb = False
        self.global_symtab = {}
        self.current_symtab = self.global_symtab
        # Stack of saved symbol tables for nested scopes.
        self.symbols = []
        # Math functions callable inside expressions; reserved names.
        self.external_functions = ['sin', 'cos', 'tan', 'exp', 'ln', 'sqrt', 'acos', 'atan', 'asin']

    def __enter__(self):
        return self

    def __exit__(self, *args):
        """Clean up the temporary yacc table directory."""
        if os.path.exists(self.parse_dir):
            shutil.rmtree(self.parse_dir)

    def update_symtab(self, obj):
        """Register `obj` in the current scope; duplicate names are errors."""
        if (obj.name in self.current_symtab):
            prev = self.current_symtab[obj.name]
            raise QasmError('Duplicate declaration for', (((obj.type + " '") + obj.name) + "' at line"), (str(obj.line) + ', file'), (obj.file + '.\nPrevious occurrence at line'), (str(prev.line) + ', file'), prev.file)
        self.current_symtab[obj.name] = obj

    def verify_declared_bit(self, obj):
        """Check that `obj` names a bit declared in the gate's argument list."""
        if (obj.name not in self.current_symtab):
            raise QasmError((("Cannot find symbol '" + obj.name) + "' in argument list for gate, line"), str(obj.line), 'file', obj.file)
        sym = self.current_symtab[obj.name]
        if (not ((sym.type == 'id') and sym.is_bit)):
            raise QasmError('Bit', obj.name, 'is not declared as a bit in the gate.')

    def verify_bit_list(self, obj):
        """verify_declared_bit for every child of `obj`."""
        for children in obj.children:
            self.verify_declared_bit(children)

    def verify_exp_list(self, obj):
        """Check every Id in an expression tree is an external function or in scope."""
        if (obj.children is not None):
            for children in obj.children:
                if isinstance(children, node.Id):
                    if (children.name in self.external_functions):
                        continue
                    if (children.name not in self.current_symtab):
                        raise QasmError(((("Argument '" + children.name) + "' in expression cannot be ") + 'found, line'), str(children.line), 'file', children.file)
                elif hasattr(children, 'children'):
                    self.verify_exp_list(children)

    def verify_as_gate(self, obj, bitlist, arglist=None):
        """Check `obj` refers to a declared gate/opaque with matching arity."""
        if (obj.name not in self.global_symtab):
            raise QasmError((("Cannot find gate definition for '" + obj.name) + "', line"), str(obj.line), 'file', obj.file)
        g_sym = self.global_symtab[obj.name]
        if (not ((g_sym.type == 'gate') or (g_sym.type == 'opaque'))):
            raise QasmError((((((("'" + obj.name) + "' is used as a gate ") + 'or opaque call but the symbol is neither;') + " it is a '") + g_sym.type) + "' line"), str(obj.line), 'file', obj.file)
        if (g_sym.n_bits() != bitlist.size()):
            raise QasmError((("Gate or opaque call to '" + obj.name) + "' uses"), str(bitlist.size()), 'qubits but is declared for', str(g_sym.n_bits()), 'qubits', 'line', str(obj.line), 'file', obj.file)
        if arglist:
            if (g_sym.n_args() != arglist.size()):
                raise QasmError((("Gate or opaque call to '" + obj.name) + "' uses"), str(arglist.size()), 'qubits but is declared for', str(g_sym.n_args()), 'qubits', 'line', str(obj.line), 'file', obj.file)
        elif (g_sym.n_args() > 0):
            raise QasmError((("Gate or opaque call to '" + obj.name) + "' has no arguments but is declared for"), str(g_sym.n_args()), 'qubits', 'line', str(obj.line), 'file', obj.file)

    def verify_reg(self, obj, object_type):
        """Check `obj` is a declared register of `object_type` with an in-range index."""
        if (obj.name not in self.global_symtab):
            raise QasmError('Cannot find definition for', object_type, (("'" + obj.name) + "'"), 'at line', str(obj.line), 'file', obj.file)
        g_sym = self.global_symtab[obj.name]
        if (g_sym.type != object_type):
            raise QasmError((((((("Type for '" + g_sym.name) + "' should be '") + object_type) + "' but was found to be '") + g_sym.type) + "'"), 'line', str(obj.line), 'file', obj.file)
        if (obj.type == 'indexed_id'):
            bound = g_sym.index
            ndx = obj.index
            if ((ndx < 0) or (ndx >= bound)):
                raise QasmError((("Register index for '" + g_sym.name) + "' out of bounds. Index is"), str(ndx), 'bound is 0 <= index <', str(bound), 'at line', str(obj.line), 'file', obj.file)

    def verify_reg_list(self, obj, object_type):
        """verify_reg for every child of `obj`."""
        for children in obj.children:
            self.verify_reg(children, object_type)

    def id_tuple_list(self, id_node):
        """Expand an Id node into (name, index) tuples — one per register bit,
        or a single (name, -1) for a non-register symbol."""
        if (id_node.type != 'id'):
            raise QasmError('internal error, id_tuple_list')
        bit_list = []
        try:
            g_sym = self.current_symtab[id_node.name]
        except KeyError:
            g_sym = self.global_symtab[id_node.name]
        if ((g_sym.type == 'qreg') or (g_sym.type == 'creg')):
            for idx in range(g_sym.index):
                bit_list.append((id_node.name, idx))
        else:
            bit_list.append((id_node.name, (- 1)))
        return bit_list

    def verify_distinct(self, list_of_nodes):
        """Check no bit is referenced twice across the given operand nodes."""
        bit_list = []
        line_number = (- 1)
        filename = ''
        for node_ in list_of_nodes:
            if (node_.type == 'id'):
                bit_list.extend(self.id_tuple_list(node_))
                line_number = node_.line
                filename = node_.file
            elif (node_.type == 'indexed_id'):
                bit_list.append((node_.name, node_.index))
                line_number = node_.line
                filename = node_.file
            elif (node_.type == 'primary_list'):
                for child in node_.children:
                    if (child.type == 'id'):
                        bit_list.extend(self.id_tuple_list(child))
                    else:
                        bit_list.append((child.name, child.index))
                    line_number = child.line
                    filename = child.file
            elif (node_.type == 'id_list'):
                for child in node_.children:
                    bit_list.extend(self.id_tuple_list(child))
                    line_number = child.line
                    filename = child.file
            else:
                raise QasmError('internal error, verify_distinct')
        if (len(bit_list) != len(set(bit_list))):
            raise QasmError(('duplicate identifiers at line %d file %s' % (line_number, filename)))

    def pop_scope(self):
        """Restore the previous symbol table (leave a gate body)."""
        self.current_symtab = self.symbols.pop()

    def push_scope(self):
        """Save the current symbol table and open a fresh scope (enter a gate body)."""
        self.symbols.append(self.current_symtab)
        self.current_symtab = {}

    # Start symbol for the yacc grammar.
    start = 'main'

    # ---- grammar actions (PLY p_* handlers; docstrings intentionally omitted,
    # see class NOTE) ----
    def p_main(self, program):
        self.qasm = program[1]

    def p_program_0(self, program):
        program[0] = node.Program([program[1]])

    def p_program_1(self, program):
        program[0] = program[1]
        program[0].add_child(program[2])

    def p_statement(self, program):
        if (len(program) > 2):
            if (program[2] != ';'):
                raise QasmError(("Missing ';' at end of statement; " + 'received'), str(program[2].value))
        program[0] = program[1]

    def p_format(self, program):
        program[0] = node.Format(program[1])

    def p_format_0(self, program):
        version = '2.0;'
        raise QasmError((("Invalid version string. Expected '" + version) + "'. Is the semicolon missing?"))

    def p_id(self, program):
        program[0] = program[1]

    def p_id_e(self, program):
        raise QasmError((("Expected an ID, received '" + str(program[1].value)) + "'"))

    def p_indexed_id(self, program):
        if (len(program) == 4):
            raise QasmError('Expecting an integer index; received', str(program[3].value))
        if (program[4] != ']'):
            raise QasmError("Missing ']' in indexed ID; received", str(program[4].value))
        program[0] = node.IndexedId([program[1], node.Int(program[3])])

    def p_primary(self, program):
        program[0] = program[1]

    def p_id_list_0(self, program):
        program[0] = node.IdList([program[1]])

    def p_id_list_1(self, program):
        program[0] = program[1]
        program[0].add_child(program[3])

    def p_gate_id_list_0(self, program):
        program[0] = node.IdList([program[1]])
        self.update_symtab(program[1])

    def p_gate_id_list_1(self, program):
        program[0] = program[1]
        program[0].add_child(program[3])
        self.update_symtab(program[3])

    def p_bit_list_0(self, program):
        program[0] = node.IdList([program[1]])
        program[1].is_bit = True
        self.update_symtab(program[1])

    def p_bit_list_1(self, program):
        program[0] = program[1]
        program[0].add_child(program[3])
        program[3].is_bit = True
        self.update_symtab(program[3])

    def p_primary_list_0(self, program):
        program[0] = node.PrimaryList([program[1]])

    def p_primary_list_1(self, program):
        program[0] = program[1]
        program[1].add_child(program[3])

    def p_decl(self, program):
        if (len(program) > 2):
            if (program[2] != ';'):
                raise QasmError((("Missing ';' in qreg or creg declaration. Instead received '" + program[2].value) + "'"))
        program[0] = program[1]

    def p_qreg_decl(self, program):
        program[0] = node.Qreg([program[2]])
        if (program[2].name in self.external_functions):
            raise QasmError(((('QREG names cannot be reserved words. ' + "Received '") + program[2].name) + "'"))
        if (program[2].index == 0):
            raise QasmError('QREG size must be positive')
        self.update_symtab(program[0])

    def p_qreg_decl_e(self, program):
        raise QasmError(('Expecting indexed id (ID[int]) in QREG' + ' declaration; received'), program[2].value)

    def p_creg_decl(self, program):
        program[0] = node.Creg([program[2]])
        if (program[2].name in self.external_functions):
            raise QasmError(((('CREG names cannot be reserved words. ' + "Received '") + program[2].name) + "'"))
        if (program[2].index == 0):
            raise QasmError('CREG size must be positive')
        self.update_symtab(program[0])

    def p_creg_decl_e(self, program):
        raise QasmError(('Expecting indexed id (ID[int]) in CREG' + ' declaration; received'), program[2].value)

    def p_gate_decl_0(self, program):
        program[0] = node.Gate([program[2], program[4], program[5]])
        if (program[2].name in self.external_functions):
            raise QasmError(((('GATE names cannot be reserved words. ' + "Received '") + program[2].name) + "'"))
        self.pop_scope()
        self.update_symtab(program[0])

    def p_gate_decl_1(self, program):
        program[0] = node.Gate([program[2], program[6], program[7]])
        if (program[2].name in self.external_functions):
            raise QasmError(((('GATE names cannot be reserved words. ' + "Received '") + program[2].name) + "'"))
        self.pop_scope()
        self.update_symtab(program[0])

    def p_gate_decl_2(self, program):
        program[0] = node.Gate([program[2], program[5], program[7], program[8]])
        if (program[2].name in self.external_functions):
            raise QasmError(((('GATE names cannot be reserved words. ' + "Received '") + program[2].name) + "'"))
        self.pop_scope()
        self.update_symtab(program[0])

    def p_gate_scope(self, program):
        del program
        self.push_scope()

    def p_gate_body_0(self, program):
        if (program[2] != '}'):
            raise QasmError((("Missing '}' in gate definition; received'" + str(program[2].value)) + "'"))
        program[0] = node.GateBody(None)

    def p_gate_body_1(self, program):
        program[0] = node.GateBody(program[2])

    def p_gate_op_list_0(self, program):
        program[0] = [program[1]]

    def p_gate_op_list_1(self, program):
        program[0] = program[1]
        program[0].append(program[2])

    def p_unitary_op_0(self, program):
        program[0] = node.UniversalUnitary([program[3], program[5]])
        self.verify_reg(program[5], 'qreg')
        self.verify_exp_list(program[3])

    def p_unitary_op_1(self, program):
        program[0] = node.Cnot([program[2], program[4]])
        self.verify_reg(program[2], 'qreg')
        self.verify_reg(program[4], 'qreg')
        self.verify_distinct([program[2], program[4]])

    def p_unitary_op_2(self, program):
        program[0] = node.CustomUnitary([program[1], program[2]])
        self.verify_as_gate(program[1], program[2])
        self.verify_reg_list(program[2], 'qreg')
        self.verify_distinct([program[2]])

    def p_unitary_op_3(self, program):
        program[0] = node.CustomUnitary([program[1], program[4]])
        self.verify_as_gate(program[1], program[4])
        self.verify_reg_list(program[4], 'qreg')
        self.verify_distinct([program[4]])

    def p_unitary_op_4(self, program):
        program[0] = node.CustomUnitary([program[1], program[3], program[5]])
        self.verify_as_gate(program[1], program[5], arglist=program[3])
        self.verify_reg_list(program[5], 'qreg')
        self.verify_exp_list(program[3])
        self.verify_distinct([program[5]])

    def p_gate_op_0(self, program):
        program[0] = node.UniversalUnitary([program[3], program[5]])
        self.verify_declared_bit(program[5])
        self.verify_exp_list(program[3])

    def p_gate_op_0e1(self, p):
        raise QasmError(('Invalid U inside gate definition. ' + "Missing bit id or ';'"))

    def p_gate_op_0e2(self, program):
        raise QasmError("Missing ')' in U invocation in gate definition.")

    def p_gate_op_1(self, program):
        program[0] = node.Cnot([program[2], program[4]])
        self.verify_declared_bit(program[2])
        self.verify_declared_bit(program[4])
        self.verify_distinct([program[2], program[4]])

    def p_gate_op_1e1(self, program):
        raise QasmError(((('Invalid CX inside gate definition. ' + "Expected an ID or ',', received '") + str(program[2].value)) + "'"))

    def p_gate_op_1e2(self, program):
        raise QasmError(((('Invalid CX inside gate definition. ' + "Expected an ID or ';', received '") + str(program[4].value)) + "'"))

    def p_gate_op_2(self, program):
        program[0] = node.CustomUnitary([program[1], program[2]])
        self.verify_as_gate(program[1], program[2])
        self.verify_bit_list(program[2])
        self.verify_distinct([program[2]])

    def p_gate_op_2e(self, program):
        raise QasmError('Invalid gate invocation inside gate definition.')

    def p_gate_op_3(self, program):
        program[0] = node.CustomUnitary([program[1], program[4]])
        self.verify_as_gate(program[1], program[4])
        self.verify_bit_list(program[4])
        self.verify_distinct([program[4]])

    def p_gate_op_4(self, program):
        program[0] = node.CustomUnitary([program[1], program[3], program[5]])
        self.verify_as_gate(program[1], program[5], arglist=program[3])
        self.verify_bit_list(program[5])
        self.verify_exp_list(program[3])
        self.verify_distinct([program[5]])

    def p_gate_op_4e0(self, program):
        raise QasmError(('Invalid bit list inside gate definition or' + " missing ';'"))

    def p_gate_op_4e1(self, program):
        raise QasmError(('Unmatched () for gate invocation inside gate' + ' invocation.'))

    def p_gate_op_5(self, program):
        program[0] = node.Barrier([program[2]])
        self.verify_bit_list(program[2])
        self.verify_distinct([program[2]])

    def p_gate_op_5e(self, program):
        raise QasmError('Invalid barrier inside gate definition.')

    def p_opaque_0(self, program):
        program[0] = node.Opaque([program[2], program[4]])
        if (program[2].name in self.external_functions):
            raise QasmError(((('OPAQUE names cannot be reserved words. ' + "Received '") + program[2].name) + "'"))
        self.pop_scope()
        self.update_symtab(program[0])

    def p_opaque_1(self, program):
        program[0] = node.Opaque([program[2], program[6]])
        self.pop_scope()
        self.update_symtab(program[0])

    def p_opaque_2(self, program):
        program[0] = node.Opaque([program[2], program[5], program[7]])
        if (program[2].name in self.external_functions):
            raise QasmError(((('OPAQUE names cannot be reserved words. ' + "Received '") + program[2].name) + "'"))
        self.pop_scope()
        self.update_symtab(program[0])

    def p_opaque_1e(self, program):
        raise QasmError('Poorly formed OPAQUE statement.')

    def p_measure(self, program):
        program[0] = node.Measure([program[2], program[4]])
        self.verify_reg(program[2], 'qreg')
        self.verify_reg(program[4], 'creg')

    def p_measure_e(self, program):
        raise QasmError(('Illegal measure statement.' + str(program[3].value)))

    def p_barrier(self, program):
        program[0] = node.Barrier([program[2]])
        self.verify_reg_list(program[2], 'qreg')
        self.verify_distinct([program[2]])

    def p_reset(self, program):
        program[0] = node.Reset([program[2]])
        self.verify_reg(program[2], 'qreg')

    def p_if(self, program):
        # Shorter productions signal specific syntax errors before the
        # full-length IF reduces successfully.
        if (len(program) == 3):
            raise QasmError(('Ill-formed IF statement. Perhaps a' + " missing '('?"))
        if (len(program) == 5):
            raise QasmError((("Ill-formed IF statement. Expected '==', " + "received '") + str(program[4].value)))
        if (len(program) == 6):
            raise QasmError((('Ill-formed IF statement. Expected a number, ' + "received '") + str(program[5].value)))
        if (len(program) == 7):
            raise QasmError("Ill-formed IF statement, unmatched '('")
        if (program[7].type == 'if'):
            raise QasmError('Nested IF statements not allowed')
        if (program[7].type == 'barrier'):
            raise QasmError('barrier not permitted in IF statement')
        program[0] = node.If([program[3], node.Int(program[5]), program[7]])

    def p_quantum_op(self, program):
        program[0] = program[1]

    def p_unary_0(self, program):
        program[0] = node.Int(program[1])

    def p_unary_1(self, program):
        program[0] = node.Real(sympy.Number(program[1]))

    def p_unary_2(self, program):
        program[0] = node.Real(sympy.pi)

    def p_unary_3(self, program):
        program[0] = program[1]

    def p_unary_4(self, program):
        program[0] = program[2]

    def p_unary_6(self, program):
        if (program[1].name not in self.external_functions):
            raise QasmError('Illegal external function call: ', str(program[1].name))
        program[0] = node.External([program[1], program[3]])

    def p_expression_1(self, program):
        program[0] = node.Prefix([node.UnaryOperator(program[1]), program[2]])

    def p_expression_0(self, program):
        program[0] = node.BinaryOp([node.BinaryOperator(program[2]), program[1], program[3]])

    def p_expression_2(self, program):
        program[0] = program[1]

    def p_exp_list_0(self, program):
        program[0] = node.ExpressionList([program[1]])

    def p_exp_list_1(self, program):
        program[0] = program[1]
        program[0].add_child(program[3])

    def p_ignore(self, program):
        pass

    def p_error(self, program):
        if (not program):
            raise QasmError(('Error at end of file. ' + "Perhaps there is a missing ';'"))
        col = self.find_column(self.lexer.data, program)
        print('Error near line', str(self.lexer.lineno), 'Column', col)

    def find_column(self, input_, token):
        """Return the 1-based-ish column of `token` within `input_` (0 when no token)."""
        if (token is None):
            return 0
        last_cr = input_.rfind('\n', 0, token.lexpos)
        if (last_cr < 0):
            last_cr = 0
        column = ((token.lexpos - last_cr) + 1)
        return column

    def get_tokens(self):
        """Yield lexer tokens until exhaustion; QasmError ends the stream with a message."""
        try:
            while True:
                token = self.lexer.token()
                if (not token):
                    break
                (yield token)
        except QasmError as e:
            print('Exception tokenizing qasm file:', e.msg)

    def parse_debug(self, val):
        """Enable/disable yacc debug output; `val` must be a bool."""
        if (val is True):
            self.parse_deb = True
        elif (val is False):
            self.parse_deb = False
        else:
            raise QasmError((("Illegal debug value '" + str(val)) + "' must be True or False."))

    def parse(self, data):
        """Parse `data` and return the AST; raise if nothing was produced."""
        self.parser.parse(data, lexer=self.lexer, debug=self.parse_deb)
        if (self.qasm is None):
            raise QasmError(('Uncaught exception in parser; ' + 'see previous messages for details.'))
        return self.qasm

    def print_tree(self):
        """Dump the parsed AST, if any."""
        if (self.qasm is not None):
            self.qasm.to_string(0)
        else:
            print('No parsed qasm to print')

    def run(self, data):
        """Parse `data` in debug mode and dump the resulting AST."""
        # Fix: the original invoked self.parser.parse(data, debug=True) twice
        # in a row; a single parse suffices.
        ast = self.parser.parse(data, debug=True)
        ast.to_string(0)
class SemanticSegmentationModelOutput(ModelOutput):
    """Output container for semantic-segmentation models.

    A ModelOutput (dict/tuple hybrid) holding the loss, per-pixel logits
    and, optionally, intermediate hidden states and attention maps.
    """
    # Segmentation loss — presumably only populated when labels are supplied; confirm against the model's forward.
    loss: Optional[torch.FloatTensor] = None
    # Per-pixel classification logits.
    logits: torch.FloatTensor = None
    # Hidden states of each layer, when the model is asked to return them.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Attention weights of each layer, when the model is asked to return them.
    attentions: Optional[Tuple[torch.FloatTensor]] = None
def mit_convert(ckpt):
    """Convert an official MiT (SegFormer backbone) checkpoint to mmseg naming.

    Per stage i (1-based in the source checkpoint):
      patch_embed{i}.* -> layers.{i-1}.0.*  (proj -> projection)
      block{i}.*       -> layers.{i-1}.1.*  (attention/ffn renames below)
      norm{i}.*        -> layers.{i-1}.2.*
    Classification-head weights are dropped; unknown keys pass through.

    FIX: removed the unused `string` debug accumulator the original built
    inside the mlp branch (dead code left over from debugging).
    """
    new_ckpt = OrderedDict()
    for (k, v) in ckpt.items():
        if k.startswith('head'):
            # Classification head is not part of the segmentation backbone.
            continue
        elif k.startswith('patch_embed'):
            stage_i = int(k.split('.')[0].replace('patch_embed', ''))
            new_k = k.replace(f'patch_embed{stage_i}', f'layers.{(stage_i - 1)}.0')
            new_v = v
            if ('proj.' in new_k):
                new_k = new_k.replace('proj.', 'projection.')
        elif k.startswith('block'):
            stage_i = int(k.split('.')[0].replace('block', ''))
            new_k = k.replace(f'block{stage_i}', f'layers.{(stage_i - 1)}.1')
            new_v = v
            if ('attn.q.' in new_k):
                # Fuse the q and kv projections into a single in_proj tensor.
                sub_item_k = k.replace('q.', 'kv.')
                new_k = new_k.replace('q.', 'attn.in_proj_')
                new_v = torch.cat([v, ckpt[sub_item_k]], dim=0)
            elif ('attn.kv.' in new_k):
                # Already folded into the matching attn.q. entry above.
                continue
            elif ('attn.proj.' in new_k):
                new_k = new_k.replace('proj.', 'attn.out_proj.')
            elif ('attn.sr.' in new_k):
                # Spatial-reduction conv keeps its name (kept for symmetry).
                new_k = new_k.replace('sr.', 'sr.')
            elif ('mlp.' in new_k):
                new_k = new_k.replace('mlp.', 'ffn.layers.')
                if (('fc1.weight' in new_k) or ('fc2.weight' in new_k)):
                    # Linear weights become 1x1 conv weights.
                    new_v = v.reshape((*v.shape, 1, 1))
                new_k = new_k.replace('fc1.', '0.')
                new_k = new_k.replace('dwconv.dwconv.', '1.')
                new_k = new_k.replace('fc2.', '4.')
        elif k.startswith('norm'):
            stage_i = int(k.split('.')[0].replace('norm', ''))
            new_k = k.replace(f'norm{stage_i}', f'layers.{(stage_i - 1)}.2')
            new_v = v
        else:
            new_k = k
            new_v = v
        new_ckpt[new_k] = new_v
    return new_ckpt
class LocalSearch(Algorithm[(S, R)], threading.Thread):
    """Single-solution local search.

    Each step mutates a copy of the incumbent, evaluates it, and keeps the
    better of the two; ties are resolved by a fair coin flip.
    """

    def __init__(self, problem: Problem[S], mutation: Mutation, termination_criterion: TerminationCriterion=store.default_termination_criteria, comparator: Comparator=store.default_comparator):
        super(LocalSearch, self).__init__()
        self.problem = problem
        self.mutation = mutation
        self.comparator = comparator
        self.termination_criterion = termination_criterion
        # The termination criterion subscribes to progress notifications.
        self.observable.register(termination_criterion)

    def create_initial_solutions(self) -> List[S]:
        """Seed the search with a single freshly created solution."""
        self.solutions.append(self.problem.create_solution())
        return self.solutions

    def evaluate(self, solutions: List[S]) -> List[S]:
        """Evaluate (only) the first solution of the given list."""
        return [self.problem.evaluate(solutions[0])]

    def stopping_condition_is_met(self) -> bool:
        return self.termination_criterion.is_met

    def init_progress(self) -> None:
        self.evaluations = 0

    def step(self) -> None:
        """One local-search move: mutate, evaluate, maybe replace the incumbent."""
        candidate = copy.deepcopy(self.solutions[0])
        candidate = self.mutation.execute(candidate)
        candidate = self.evaluate([candidate])[0]
        verdict = self.comparator.compare(candidate, self.solutions[0])
        if (verdict == 1):
            # Candidate is worse: keep the incumbent.
            return
        if (verdict == (- 1)) or (random.random() < 0.5):
            # A better candidate always wins; ties win half the time.
            self.solutions[0] = candidate

    def update_progress(self) -> None:
        self.evaluations += 1
        self.observable.notify_all(**self.observable_data())

    def observable_data(self) -> dict:
        elapsed = (time.time() - self.start_computing_time)
        return {'PROBLEM': self.problem, 'EVALUATIONS': self.evaluations, 'SOLUTIONS': self.get_result(), 'COMPUTING_TIME': elapsed}

    def get_result(self) -> R:
        return self.solutions[0]

    def get_name(self) -> str:
        return 'LS'
_searchspace('continuous')
class ContinuousSearchSpace(BaseSearchSpace):
    """Search space over a continuous interval ``bound = (low, high)``.

    NOTE(review): the bare ``_searchspace('continuous')`` call above looks
    like a stripped registration decorator (e.g. ``@..._searchspace``);
    confirm against the original source.
    """
    def __init__(self, bound, interval=None, value=None, type=None):
        # `type` is ignored and forced to 'continuous' for the base class.
        super().__init__(bound, interval, value, 'continuous')
    def get_value(self):
        """Draw a random value strictly inside (bound[0], bound[1]).

        NOTE(review): when bound[1] > 1 the integer part may be drawn equal
        to int(bound[1]); the rejection loop below can then never satisfy
        value < bound[1] and may spin indefinitely — verify upstream intent.
        """
        if (self.bound[1] > 1):
            int_num = random.randrange(int(self.bound[0]), (int(self.bound[1]) + 1))
        else:
            int_num = 0
        # Rejection-sample the fractional part until the sum lands inside the open interval.
        while True:
            value = (random.random() * self.bound[1])
            value = (int_num + value)
            if ((value > self.bound[0]) and (value < self.bound[1])):
                break
        return value
def output_real_images(dataloader, num_imgs, real_dir):
    """Save roughly `num_imgs` real images from `dataloader` into `real_dir`.

    Images are written as zero-padded `NNNNN.jpg`, normalized from [-1, 1].
    Only full batches are consumed, so up to batch_size-1 images may be skipped.
    """
    saved = 0
    batch_size = dataloader.batch_size
    batches = iter(dataloader)
    for _ in range(num_imgs // batch_size):
        (real_imgs, _) = next(batches)
        for img in real_imgs:
            target = os.path.join(real_dir, f'{saved:0>5}.jpg')
            save_image(img, target, normalize=True, range=((- 1), 1))
            saved += 1
def load_from_pretrained(args, pretrained_model_name_or_path):
    """Load (config, tokenizer, model) for sequence classification.

    `num_labels` is taken from args when present; TF checkpoints are detected
    by '.ckpt' in args.model_name_or_path.
    """
    num_labels = args.num_labels if hasattr(args, 'num_labels') else None
    config = AutoConfig.from_pretrained(
        pretrained_model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name.lower(),
        cache_dir=args.cache_dir)
    tokenizer = AutoTokenizer.from_pretrained(
        pretrained_model_name_or_path,
        use_fast=args.use_fast_tokenizer,
        cache_dir=args.cache_dir)
    from_tf = bool('.ckpt' in args.model_name_or_path)
    model = AutoModelForSequenceClassification.from_pretrained(
        pretrained_model_name_or_path,
        from_tf=from_tf,
        config=config,
        ignore_mismatched_sizes=True,
        cache_dir=args.cache_dir)
    return (config, tokenizer, model)
class BaseModel():
    """Base class for BasicSR-style models.

    Owns the option dict, device selection, and optimizer/scheduler lists,
    and provides checkpoint save/load, LR scheduling with linear warmup, and
    distributed (DDP/DP) helpers. Subclasses implement feed_data /
    optimize_parameters / get_current_visuals / save and the
    dist_/nondist_validation hooks.
    """
    def __init__(self, opt):
        self.opt = opt
        # Any non-zero num_gpu selects CUDA; 0 falls back to CPU.
        self.device = torch.device(('cuda' if (opt['num_gpu'] != 0) else 'cpu'))
        self.is_train = opt['is_train']
        self.schedulers = []
        self.optimizers = []
    def feed_data(self, data):
        # Subclass hook: receive one batch of input data.
        pass
    def optimize_parameters(self):
        # Subclass hook: perform one optimization step.
        pass
    def get_current_visuals(self):
        # Subclass hook: return images for logging/visualization.
        pass
    def save(self, epoch, current_iter):
        # Subclass hook: persist networks and training state.
        pass
    def validation(self, dataloader, current_iter, tb_logger, save_img=False):
        """Dispatch validation to the distributed or single-process variant."""
        if self.opt['dist']:
            self.dist_validation(dataloader, current_iter, tb_logger, save_img)
        else:
            self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
    def get_current_log(self):
        # `log_dict` is populated by reduce_loss_dict / subclass code.
        return self.log_dict
    def model_to_device(self, net):
        """Move `net` to self.device and wrap for DDP or DataParallel as configured."""
        net = net.to(self.device)
        if self.opt['dist']:
            find_unused_parameters = self.opt.get('find_unused_parameters', False)
            net = DistributedDataParallel(net, device_ids=[torch.cuda.current_device()], find_unused_parameters=find_unused_parameters)
        elif (self.opt['num_gpu'] > 1):
            net = DataParallel(net)
        return net
    def setup_schedulers(self):
        """Create one LR scheduler per optimizer from opt['train']['scheduler'].

        Note: pops 'type' out of the scheduler options dict (mutates opt).
        """
        train_opt = self.opt['train']
        scheduler_type = train_opt['scheduler'].pop('type')
        if (scheduler_type in ['MultiStepLR', 'MultiStepRestartLR']):
            for optimizer in self.optimizers:
                self.schedulers.append(lr_scheduler.MultiStepRestartLR(optimizer, **train_opt['scheduler']))
        elif (scheduler_type == 'CosineAnnealingRestartLR'):
            for optimizer in self.optimizers:
                self.schedulers.append(lr_scheduler.CosineAnnealingRestartLR(optimizer, **train_opt['scheduler']))
        else:
            raise NotImplementedError(f'Scheduler {scheduler_type} is not implemented yet.')
    def get_bare_model(self, net):
        """Unwrap a DataParallel/DistributedDataParallel module, if wrapped."""
        if isinstance(net, (DataParallel, DistributedDataParallel)):
            net = net.module
        return net
    # NOTE(review): bare `_only` below appears to be a stripped decorator
    # (upstream BasicSR uses `@master_only` here) — confirm before relying on it.
    _only
    def print_network(self, net):
        """Log the network class name(s), parameter count and structure."""
        if isinstance(net, (DataParallel, DistributedDataParallel)):
            net_cls_str = f'{net.__class__.__name__} - {net.module.__class__.__name__}'
        else:
            net_cls_str = f'{net.__class__.__name__}'
        net = self.get_bare_model(net)
        net_str = str(net)
        net_params = sum(map((lambda x: x.numel()), net.parameters()))
        logger.info(f'Network: {net_cls_str}, with parameters: {net_params:,d}')
        logger.info(net_str)
    def _set_lr(self, lr_groups_l):
        """Set learning rates; one list of LRs per optimizer, one LR per param group."""
        for (optimizer, lr_groups) in zip(self.optimizers, lr_groups_l):
            for (param_group, lr) in zip(optimizer.param_groups, lr_groups):
                param_group['lr'] = lr
    def _get_init_lr(self):
        """Return each optimizer's per-group initial learning rates."""
        init_lr_groups_l = []
        for optimizer in self.optimizers:
            init_lr_groups_l.append([v['initial_lr'] for v in optimizer.param_groups])
        return init_lr_groups_l
    def update_learning_rate(self, current_iter, warmup_iter=(- 1)):
        """Step all schedulers, then apply linear warmup for the first `warmup_iter` iters."""
        if (current_iter > 1):
            for scheduler in self.schedulers:
                scheduler.step()
        if (current_iter < warmup_iter):
            init_lr_g_l = self._get_init_lr()
            warm_up_lr_l = []
            for init_lr_g in init_lr_g_l:
                # Linearly scale each initial LR by current_iter / warmup_iter.
                warm_up_lr_l.append([((v / warmup_iter) * current_iter) for v in init_lr_g])
            self._set_lr(warm_up_lr_l)
    def get_current_learning_rate(self):
        """Current LR of every param group of the first optimizer."""
        return [param_group['lr'] for param_group in self.optimizers[0].param_groups]
    # NOTE(review): bare `_only` below appears to be a stripped decorator
    # (likely `@master_only`) — confirm upstream.
    _only
    def save_network(self, net, net_label, current_iter, param_key='params'):
        """Save one or more networks to '<models>/<label>_<iter>.pth'.

        `net` and `param_key` may be lists (saved under multiple keys);
        iter == -1 is mapped to 'latest'. 'module.' prefixes are stripped.
        """
        if (current_iter == (- 1)):
            current_iter = 'latest'
        save_filename = f'{net_label}_{current_iter}.pth'
        save_path = os.path.join(self.opt['path']['models'], save_filename)
        net = (net if isinstance(net, list) else [net])
        param_key = (param_key if isinstance(param_key, list) else [param_key])
        assert (len(net) == len(param_key)), 'The lengths of net and param_key should be the same.'
        save_dict = {}
        for (net_, param_key_) in zip(net, param_key):
            net_ = self.get_bare_model(net_)
            state_dict = net_.state_dict()
            for (key, param) in state_dict.items():
                if key.startswith('module.'):
                    key = key[7:]
                # Move to CPU so checkpoints load on any device.
                state_dict[key] = param.cpu()
            save_dict[param_key_] = state_dict
        torch.save(save_dict, save_path)
    def _print_different_keys_loading(self, crt_net, load_net, strict=True):
        """Warn about key/shape mismatches between current and loaded state dicts.

        In non-strict mode, size-mismatched keys are renamed with a '.ignore'
        suffix so load_state_dict(strict=False) will skip them.
        """
        crt_net = self.get_bare_model(crt_net)
        crt_net = crt_net.state_dict()
        crt_net_keys = set(crt_net.keys())
        load_net_keys = set(load_net.keys())
        if (crt_net_keys != load_net_keys):
            logger.warning('Current net - loaded net:')
            for v in sorted(list((crt_net_keys - load_net_keys))):
                logger.warning(f'  {v}')
            logger.warning('Loaded net - current net:')
            for v in sorted(list((load_net_keys - crt_net_keys))):
                logger.warning(f'  {v}')
        if (not strict):
            common_keys = (crt_net_keys & load_net_keys)
            for k in common_keys:
                if (crt_net[k].size() != load_net[k].size()):
                    logger.warning(f'Size different, ignore [{k}]: crt_net: {crt_net[k].shape}; load_net: {load_net[k].shape}')
                    load_net[(k + '.ignore')] = load_net.pop(k)
    def load_network(self, net, load_path, strict=True, param_key='params'):
        """Load `param_key` weights from `load_path`, stripping 'module.' prefixes."""
        net = self.get_bare_model(net)
        logger.info(f'Loading {net.__class__.__name__} model from {load_path}.')
        load_net = torch.load(load_path, map_location=(lambda storage, loc: storage))[param_key]
        # Iterate a copy since keys are mutated during normalization.
        for (k, v) in deepcopy(load_net).items():
            if k.startswith('module.'):
                load_net[k[7:]] = v
                load_net.pop(k)
        self._print_different_keys_loading(net, load_net, strict)
        net.load_state_dict(load_net, strict=strict)
    # NOTE(review): bare `_only` below appears to be a stripped decorator
    # (likely `@master_only`) — confirm upstream.
    _only
    def save_training_state(self, epoch, current_iter):
        """Save resumable training state (epoch, iter, optimizers, schedulers)."""
        if (current_iter != (- 1)):
            state = {'epoch': epoch, 'iter': current_iter, 'optimizers': [], 'schedulers': []}
            for o in self.optimizers:
                state['optimizers'].append(o.state_dict())
            for s in self.schedulers:
                state['schedulers'].append(s.state_dict())
            save_filename = f'{current_iter}.state'
            save_path = os.path.join(self.opt['path']['training_states'], save_filename)
            torch.save(state, save_path)
    def resume_training(self, resume_state):
        """Restore optimizer/scheduler state saved by save_training_state."""
        resume_optimizers = resume_state['optimizers']
        resume_schedulers = resume_state['schedulers']
        assert (len(resume_optimizers) == len(self.optimizers)), 'Wrong lengths of optimizers'
        assert (len(resume_schedulers) == len(self.schedulers)), 'Wrong lengths of schedulers'
        for (i, o) in enumerate(resume_optimizers):
            self.optimizers[i].load_state_dict(o)
        for (i, s) in enumerate(resume_schedulers):
            self.schedulers[i].load_state_dict(s)
    def reduce_loss_dict(self, loss_dict):
        """Average losses across ranks (in dist mode) and return scalar log dict."""
        with torch.no_grad():
            if self.opt['dist']:
                keys = []
                losses = []
                for (name, value) in loss_dict.items():
                    keys.append(name)
                    losses.append(value)
                losses = torch.stack(losses, 0)
                # Sum-reduce onto rank 0, then divide by world size there.
                torch.distributed.reduce(losses, dst=0)
                if (self.opt['rank'] == 0):
                    losses /= self.opt['world_size']
                loss_dict = {key: loss for (key, loss) in zip(keys, losses)}
            log_dict = OrderedDict()
            for (name, value) in loss_dict.items():
                log_dict[name] = value.mean().item()
            return log_dict
class MSC(nn.Module):
    """Multi-scale wrapper around a base network.

    FIX: removed a stray `import pdb` debug leftover from forward().
    NOTE(review): `scales` is stored but unused by forward(), which currently
    runs the base model at a single scale only — confirm intended behavior.
    """

    def __init__(self, base, scales=None):
        """
        Args:
            base: the wrapped model/callable.
            scales: optional list of pyramid scales; defaults to [0.5, 0.75].
        """
        super(MSC, self).__init__()
        self.base = base
        self.scales = scales if scales else [0.5, 0.75]

    def forward(self, x):
        """Run the base model on `x` and return its logits."""
        logits = self.base(x)
        return logits
class Xception(nn.Module):
    """Xception (Chollet, 2017) image classifier.

    BUG FIX: in forward(), `F.dropout` was called without assigning its
    result, so `drop_rate` was silently ignored; the result is now assigned.
    """

    def __init__(self, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'):
        """
        Args:
            num_classes: classifier output size.
            in_chans: number of input image channels.
            drop_rate: dropout probability applied before the classifier.
            global_pool: pooling type for SelectAdaptivePool2d.
        """
        super(Xception, self).__init__()
        self.drop_rate = drop_rate
        self.global_pool = global_pool
        self.num_classes = num_classes
        self.num_features = 2048
        # Entry-flow stem.
        self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, 3, bias=False)
        self.bn2 = nn.BatchNorm2d(64)
        # Entry-flow blocks (progressively downsampling).
        self.block1 = Block(64, 128, 2, 2, start_with_relu=False, grow_first=True)
        self.block2 = Block(128, 256, 2, 2, start_with_relu=True, grow_first=True)
        self.block3 = Block(256, 728, 2, 2, start_with_relu=True, grow_first=True)
        # Middle flow: eight identical non-downsampling blocks.
        self.block4 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block5 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block6 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block7 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block8 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block9 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block10 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block11 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        # Exit flow.
        self.block12 = Block(728, 1024, 2, 2, start_with_relu=True, grow_first=False)
        self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1)
        self.bn3 = nn.BatchNorm2d(1536)
        self.conv4 = SeparableConv2d(1536, self.num_features, 3, 1, 1)
        self.bn4 = nn.BatchNorm2d(self.num_features)
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        self.fc = nn.Linear((self.num_features * self.global_pool.feat_mult()), num_classes)
        # Kaiming init for convs, constant init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def get_classifier(self):
        """Return the classification head module."""
        return self.fc

    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace the pooling/classifier head for a new number of classes."""
        self.num_classes = num_classes
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        if num_classes:
            num_features = (self.num_features * self.global_pool.feat_mult())
            self.fc = nn.Linear(num_features, num_classes)
        else:
            self.fc = nn.Identity()

    def forward_features(self, x):
        """Backbone forward pass up to (and including) the final conv features."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.relu(x)
        return x

    def forward(self, x):
        x = self.forward_features(x)
        x = self.global_pool(x).flatten(1)
        if self.drop_rate:
            # BUG FIX: assign the result; previously discarded (dropout no-op).
            x = F.dropout(x, self.drop_rate, training=self.training)
        x = self.fc(x)
        return x
class TestPolicy(unittest.TestCase):
    """Tests for GaussianMLPPolicy against a DummyEnv (TF1 graph-mode API)."""
    def setUp(self):
        # Ensure a default TF session exists for tests that rely on it.
        sess = tf.get_default_session()
        if (sess is None):
            tf.InteractiveSession()
    def test_output_sym(self):
        """distribution_info_sym output should match get_actions' agent_infos."""
        with tf.Session() as sess:
            obs_dim = 23
            action_dim = 7
            self.env = DummyEnv(obs_dim, action_dim)
            self.policy = GaussianMLPPolicy(obs_dim, action_dim, name='test_policy_output_sym', hidden_sizes=(64, 64))
            obs_ph_1 = tf.placeholder(dtype=tf.float32, name='obs_ph_1', shape=((None,) + self.env.observation_space.shape))
            output_sym_1 = self.policy.distribution_info_sym(obs_ph_1)
            sess.run(tf.global_variables_initializer())
            n_obs = self.env.get_obs(n=100)
            (action, agent_infos) = self.policy.get_actions(n_obs)
            agent_infos_output_sym = sess.run(output_sym_1, feed_dict={obs_ph_1: n_obs})
            for k in agent_infos.keys():
                self.assertTrue(np.allclose(agent_infos[k], agent_infos_output_sym[k], rtol=1e-05, atol=1e-05))
    def test_get_action(self):
        """get_action on one obs should agree with get_actions on a batch of one."""
        with tf.Session() as sess:
            obs_dim = 23
            action_dim = 7
            self.env = DummyEnv(obs_dim, action_dim)
            self.policy = GaussianMLPPolicy(obs_dim, action_dim, name='test_policy_get_action', hidden_sizes=(64, 64))
            sess.run(tf.global_variables_initializer())
            obs = self.env.get_obs()
            (action, agent_infos) = self.policy.get_action(obs)
            (actions, agents_infos) = self.policy.get_actions(np.expand_dims(obs, 0))
            for k in agent_infos.keys():
                self.assertTrue(np.allclose(agent_infos[k], agents_infos[k], rtol=1e-05, atol=1e-05))
    def testSerialize1(self):
        """Round-trip the policy parameters through get/set_param_values."""
        obs_dim = 23
        action_dim = 7
        self.env = DummyEnv(obs_dim, action_dim)
        self.policy = GaussianMLPPolicy(obs_dim, action_dim, name='test_policy_serialize', hidden_sizes=(64, 64))
        sess = tf.get_default_session()
        sess.run(tf.global_variables_initializer())
        all_param_values = self.policy.get_param_values()
        self.policy.set_params(all_param_values)
    def testSerialize2(self):
        """Pickle/unpickle the policy and verify agent_infos are preserved."""
        obs_dim = 2
        action_dim = 7
        env = DummyEnv(obs_dim, action_dim)
        policy = GaussianMLPPolicy(obs_dim, action_dim, name='test_policy_serialize2', hidden_sizes=(54, 23))
        sess = tf.get_default_session()
        sess.run(tf.global_variables_initializer())
        obs = env.get_obs()
        (_, pre_agent_infos) = policy.get_action(obs)
        pkl_str = pickle.dumps(policy)
        # Rebuild the graph from scratch before unpickling.
        tf.reset_default_graph()
        with tf.Session() as sess:
            policy_unpickled = pickle.loads(pkl_str)
            (_, post_agent_infos) = policy_unpickled.get_action(obs)
            for key in pre_agent_infos.keys():
                self.assertTrue(np.allclose(pre_agent_infos[key], post_agent_infos[key]))
class Bottleneck(nn.Module):
    """3D ResNet bottleneck: 1x1 reduce -> 3x3 spatial -> 1x1 expand (x4)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Module creation order is preserved (parameter naming / init RNG order).
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm3d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Residual forward pass; the shortcut is downsampled when configured."""
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
def prep_command_tokens(tokenlist, token_format=token_format):
    """Build CommandToken objects from (name, id) pairs using `token_format`."""
    prepped = []
    for tok in tokenlist:
        name, identity = tok[0], tok[1]
        prepped.append(CommandToken(name, token_format.format(name), identity))
    return prepped
class ShuffleNet(nn.Module):
    """ShuffleNet (Zhang et al., 2018) classification network.

    Args:
        channels: per-stage lists of unit output channels.
        init_block_channels: output channels of the stem block.
        groups: group count for grouped 1x1 convolutions.
        in_channels: input image channels.
        in_size: expected spatial input size (informational only).
        num_classes: classifier output size.
    """
    def __init__(self, channels, init_block_channels, groups, in_channels=3, in_size=(224, 224), num_classes=1000):
        super(ShuffleNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module('init_block', ShuffleInitBlock(in_channels=in_channels, out_channels=init_block_channels))
        in_channels = init_block_channels
        for (i, channels_per_stage) in enumerate(channels):
            stage = nn.Sequential()
            for (j, out_channels) in enumerate(channels_per_stage):
                # First unit of each stage downsamples; the very first unit of
                # the network skips grouped conv (ignore_group), per the paper.
                downsample = (j == 0)
                ignore_group = ((i == 0) and (j == 0))
                stage.add_module('unit{}'.format((j + 1)), ShuffleUnit(in_channels=in_channels, out_channels=out_channels, groups=groups, downsample=downsample, ignore_group=ignore_group))
                in_channels = out_channels
            self.features.add_module('stage{}'.format((i + 1)), stage)
        # 7x7 average pool assumes 224x224 input reaching 7x7 feature maps.
        self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=7, stride=1))
        self.output = nn.Linear(in_features=in_channels, out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for every conv weight; zero the bias if present.
        for (name, module) in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if (module.bias is not None):
                    init.constant_(module.bias, 0)
    def forward(self, x):
        """Features -> flatten -> linear classifier."""
        x = self.features(x)
        x = x.view(x.size(0), (- 1))
        x = self.output(x)
        return x
def contrastive_loss(labels, embeddings_anchor, embeddings_positive, margin=1.0):
    """Contrastive loss (Hadsell et al., 2006) over anchor/positive embeddings.

    Positive pairs (label 1) are penalized by squared distance; negative pairs
    (label 0) by squared hinge on (margin - distance).
    """
    diff = embeddings_anchor - embeddings_positive
    distances = math_ops.sqrt(math_ops.reduce_sum(math_ops.square(diff), 1))
    labels_f = math_ops.to_float(labels)
    pos_term = labels_f * math_ops.square(distances)
    neg_term = (1.0 - labels_f) * math_ops.square(math_ops.maximum(margin - distances, 0.0))
    return math_ops.reduce_mean(pos_term + neg_term, name='contrastive_loss')
def setup_task(cfg):
    """Instantiate the task named in cfg.run_cfg.task via the registry."""
    assert ('task' in cfg.run_cfg), 'Task name must be provided.'
    name = cfg.run_cfg.task
    task_cls = registry.get_task_class(name)
    task = task_cls.setup_task(cfg=cfg)
    assert (task is not None), 'Task {} not properly registered.'.format(name)
    return task
def imagenet_dino_small_pretrained(output_dim, checkpoint_path='/scratch/nvg7279/dino_models/dino_deitsmall16_pretrain.pth'):
    """Build a DINO ViT-S/16 pretrained model with a fresh output head.

    GENERALIZATION: the checkpoint location, previously hard-coded, is now an
    optional parameter (default preserves the original behavior).

    Args:
        output_dim: size of the replacement fc head.
        checkpoint_path: path to the DINO pretrained weights.
    """
    model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, num_classes=output_dim)
    model = load_dino_model(checkpoint_path, **model_kwargs)
    return _vit_replace_fc(model, output_dim)
@dataclass
class FlaxSeq2SeqSequenceClassifierOutput(ModelOutput):
    """Outputs of a Flax seq2seq sequence-classification model.

    NOTE(review): the original first line read `.dataclass` — a garbled
    decorator. Restored here as `@dataclass`; upstream transformers uses
    `@flax.struct.dataclass` for Flax outputs, so confirm the exact form.
    """
    # Classification logits.
    logits: jnp.ndarray = None
    # Cached decoder key/values for fast autoregressive decoding.
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
    encoder_last_hidden_state: Optional[jnp.ndarray] = None
    encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
def pidfile_taken(path, verbose=False, force=False):
    """Try to take an exclusive pidfile lock at `path`.

    Returns None on success (lock acquired, pid/host/screen recorded, and
    cleanup registered via atexit). When the file already exists, returns a
    string describing the current holder; with `force=True` the existing file
    is removed and the lock is retried once.
    """
    try:
        os.makedirs(os.path.dirname(path), exist_ok=True)
        # O_CREAT|O_EXCL makes the creation atomic: exactly one process wins.
        fd = os.open(path, ((os.O_CREAT | os.O_EXCL) | os.O_RDWR))
    except OSError as e:
        if (e.errno == errno.EEXIST):
            conflicter = 'race'
            try:
                with open(path, 'r') as lockfile:
                    conflicter = (lockfile.read().strip() or 'empty')
            except Exception:
                # Best effort only: the holder description is advisory.
                pass
            if force:
                if verbose:
                    print(('Removing %s from %s' % (path, conflicter)))
                os.remove(path)
                return pidfile_taken(path, verbose=verbose, force=False)
            if verbose:
                print(('%s held by %s' % (path, conflicter)))
            return conflicter
        else:
            raise
    lockfile = os.fdopen(fd, 'r+')
    atexit.register(delete_pidfile, lockfile, path)
    # BUG FIX: the original format string '%%s %s\n' only consumed one of the
    # three arguments and raised TypeError; record pid, hostname and screen
    # session ($STY) as intended.
    lockfile.write(('%s %s %s\n' % (os.getpid(), socket.gethostname(), os.getenv('STY', ''))))
    lockfile.flush()
    os.fsync(lockfile)
    return None
class NullMutation(Mutation[Solution]):
    """Mutation operator that never changes a solution (probability fixed at 0)."""

    def __init__(self):
        super(NullMutation, self).__init__(probability=0)

    def execute(self, solution: Solution) -> Solution:
        # Identity operation: the solution is returned untouched.
        return solution

    def get_name(self):
        return 'Null mutation'
class VocabUtility():
    """Helpers for splitting a vocabulary range across model-parallel ranks.

    FIX: restored the `@staticmethod` decorators (the methods take no `self`
    and are called on the class; upstream Megatron-LM declares them static).
    """

    @staticmethod
    def vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank, world_size):
        """Return the [first, last) vocab indices owned by `rank` for a fixed partition size."""
        index_f = (rank * per_partition_vocab_size)
        index_l = (index_f + per_partition_vocab_size)
        return (index_f, index_l)

    @staticmethod
    def vocab_range_from_global_vocab_size(global_vocab_size, rank, world_size):
        """Return the [first, last) vocab indices owned by `rank` for an evenly divided vocab."""
        per_partition_vocab_size = divide(global_vocab_size, world_size)
        return VocabUtility.vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank, world_size)
@pytest.mark.skipif(Box2D is None, reason='Box2D not installed')
def test_lunar_lander():
    """Smoke-test LunarLander with a fixed seed (skipped when Box2D is absent).

    NOTE(review): the original first line read `.skipif(...)` — a stripped
    decorator restored here as `@pytest.mark.skipif`; confirm against the
    upstream test suite.
    """
    _test_lander(LunarLander(), seed=0)
class TensorboardLogger(object):
    """Thin wrapper around SummaryWriter with an internal global-step counter."""

    def __init__(self, log_dir):
        self.writer = SummaryWriter(logdir=log_dir)
        self.step = 0

    def set_step(self, step=None):
        """Jump to `step`, or advance the counter by one when called without arguments."""
        if step is None:
            self.step += 1
        else:
            self.step = step

    def update(self, head='scalar', step=None, **kwargs):
        """Log every non-None kwarg as a scalar under '<head>/<name>'.

        Tensors are converted to Python scalars; values must be float or int.
        """
        for name, value in kwargs.items():
            if value is None:
                continue
            if isinstance(value, torch.Tensor):
                value = value.item()
            assert isinstance(value, (float, int))
            tag = (head + '/') + name
            self.writer.add_scalar(tag, value, step if step is not None else self.step)

    def flush(self):
        """Flush pending events to disk."""
        self.writer.flush()
def tensor_size(array):
    """Return the number of elements of a tensor, framework-agnostically.

    Supports numpy arrays, torch tensors, TensorFlow tensors and JAX arrays.

    Raises:
        ValueError: if `array` is none of the supported tensor types.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        # BUG FIX: the error message previously named `expand_dims`
        # (copy-paste from a sibling helper); it now names this function.
        raise ValueError(f'Type not supported for tensor_size: {type(array)}.')
def auto_augment_transform(config_str, hparams):
    """Create an AutoAugment transform from a config string like 'v0-mstd0.5'.

    The first dash-separated section names the policy; remaining sections are
    key/value pairs (currently only 'mstd' → magnitude_std is recognized).
    """
    sections = config_str.split('-')
    policy_name = sections[0]
    for section in sections[1:]:
        parts = re.split('(\\d.*)', section)
        if len(parts) < 2:
            continue
        key, val = parts[:2]
        if key == 'mstd':
            # Noise applied to the magnitude of each op.
            hparams.setdefault('magnitude_std', float(val))
        else:
            assert False, 'Unknown AutoAugment config section'
    aa_policy = auto_augment_policy(policy_name, hparams=hparams)
    return AutoAugment(aa_policy)
def analyze(prediction_file, gold_file):
    """Interactively inspect HotpotQA-style predictions against gold answers.

    For each gold example, prints the gold and predicted answers together
    with their F1/EM scores, then waits for <Enter> before continuing
    (a manual inspection loop; nothing is returned).
    """
    with open(prediction_file) as f:
        prediction = json.load(f)
    with open(gold_file) as f:
        gold = json.load(f)
    # Mirrors the official evaluator's accumulator; update_answer mutates it
    # and returns the per-example (em, prec, recall).
    metrics = {'em': 0, 'f1': 0, 'prec': 0, 'recall': 0, 'sp_em': 0, 'sp_f1': 0, 'sp_prec': 0, 'sp_recall': 0, 'joint_em': 0, 'joint_f1': 0, 'joint_prec': 0, 'joint_recall': 0}
    for dp in gold:
        cur_id = dp['_id']
        (em, prec, recall) = update_answer(metrics, prediction['answer'][cur_id], dp['answer'])
        if ((prec + recall) == 0):
            f1 = 0
        else:
            f1 = (((2 * prec) * recall) / (prec + recall))
        print(dp['answer'], prediction['answer'][cur_id])
        print(f1, em)
        a = input()  # pause for manual inspection; the value is unused
def test_nms_device_and_dtypes_cpu():
    """CPU nms should preserve the input dtype for numpy and torch inputs."""
    iou_thr = 0.7
    base_dets = np.array([[49.1, 32.4, 51.0, 35.9, 0.9], [49.3, 32.9, 51.0, 35.3, 0.9], [35.3, 11.5, 39.9, 14.5, 0.4], [35.2, 11.7, 39.7, 15.7, 0.3]])
    # numpy float32 / float64 inputs.
    for dets in (base_dets.astype(np.float32), base_dets.astype(np.float64)):
        suppressed, inds = nms(dets, iou_thr)
        assert dets.dtype == suppressed.dtype
        assert len(inds) == len(suppressed) == 3
    # torch float / double inputs.
    for dets in (torch.FloatTensor(base_dets), torch.DoubleTensor(base_dets)):
        suppressed, inds = nms(dets, iou_thr)
        assert dets.dtype == suppressed.dtype
        assert len(inds) == len(suppressed) == 3
def process_tokens(temp_tokens):
    """Split each token on separator characters, keeping the separators as
    tokens of their own and dropping empty fragments.

    FIXES: removed the unused `flag` local, and hoisted the loop-invariant
    separator tuple / compiled regex out of the per-token loop.

    NOTE(review): several entries of the separator tuple are empty strings —
    they look like non-ASCII characters lost in an encoding round-trip; they
    are preserved verbatim here, but confirm against the original source.
    """
    l = ('-', '', '', '', '/', '~', '"', "'", '', '', '', '', '')
    pattern = re.compile('([{}])'.format(''.join(l)))
    tokens = []
    for token in temp_tokens:
        for piece in pattern.split(token):
            if piece != '':
                tokens.append(piece)
    return tokens
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
    """Build the sharded TPU evaluation graph.

    Wraps the single-step eval function in a device-side training loop that
    runs `iterations_per_loop` steps per session call, shards the loop across
    all replicas, and returns (loss, host_calls, scaffold, eval_hooks).
    """
    iterations_per_loop_var = _create_or_get_iterations_per_loop()
    (single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)
    def multi_tpu_eval_steps_on_single_shard():
        # Loop state starts with a zero loss; an eval-cache fn may append
        # extra per-shard loop variables sized by the per-shard batch.
        loop_vars = [_ZERO_LOSS]
        if (model_fn_wrapper._eval_cache_fn is not None):
            batch_size = ctx.global_batch_size
            num_shards = ctx._config._tpu_config.num_shards
            loop_vars += model_fn_wrapper._eval_cache_fn((batch_size // num_shards))
        return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step, loop_vars)
    # Only shard 0's outputs are needed; the loss is the first loop variable.
    ret = tpu.shard(multi_tpu_eval_steps_on_single_shard, inputs=[], num_shards=ctx.num_replicas, outputs_from_all_shards=False, device_assignment=ctx.device_assignment)
    loss = ret[0]
    scaffold = _get_scaffold(captured_scaffold_fn)
    return (loss, host_calls, scaffold, captured_eval_hooks.get())
class FurthestPointSamplingWithDist(Function):
    """Furthest point sampling driven by a precomputed (B, N, N) distance matrix.

    FIX: restored the `@staticmethod` decorators — torch.autograd.Function
    requires static `forward`/`backward` methods (they take `ctx`, not `self`);
    the bare defs looked like stripped decorators.
    """

    @staticmethod
    def forward(ctx, points_dist: torch.Tensor, num_points: int) -> torch.Tensor:
        """Return (B, num_points) int32 indices of the sampled points.

        Args:
            points_dist: contiguous (B, N, N) pairwise distance matrix.
            num_points: number of points to sample per batch element.
        """
        assert points_dist.is_contiguous()
        (B, N, _) = points_dist.size()
        output = points_dist.new_zeros([B, num_points], dtype=torch.int32)
        # Scratch buffer of running distances consumed by the CUDA/C++ kernel.
        # NOTE(review): initialized to 0.0 here; upstream variants initialize
        # this buffer to a large value (e.g. 1e10) — confirm kernel contract.
        temp = points_dist.new_zeros([B, N]).fill_(.0)
        ext_module.furthest_point_sampling_with_dist_forward(points_dist, temp, output, b=B, n=N, m=num_points)
        if (torch.__version__ != 'parrots'):
            ctx.mark_non_differentiable(output)
        return output

    @staticmethod
    def backward(xyz, a=None):
        # Sampled indices are not differentiable.
        return (None, None)
def test_double_Laurent_polynomial(vrblvl=0):
    """Round-trip a Laurent polynomial through the double-precision container.

    Sets the ambient dimension to 2, stores 'x*y^(-3) - 1;', retrieves the
    polynomial and the symbol table, and returns 0 when exactly two symbols
    are found (nonzero signals failure). `vrblvl` is the verbosity level
    passed through to the wrapped library calls.
    """
    set_double_Laurent_dimension(2, vrblvl)
    dim = get_double_Laurent_dimension(vrblvl)
    print('the dimension :', dim)
    org = 'x*y^(-3) - 1;'
    idx = 1
    set_double_Laurent_polynomial(idx, dim, org, vrblvl)
    pol = get_double_Laurent_polynomial(idx, vrblvl)
    print('the retrieved polynomial :', pol)
    # 100 is the buffer size for the symbol-table string.
    smb = string_of_symbols(100, vrblvl)
    print('the list of symbols :', smb)
    # Success criterion: the two variables x and y were registered.
    return int((len(smb) != 2))
def linspace(start: torch.Tensor, stop: torch.Tensor, num: int):
    """Batched linspace: `num` evenly spaced tensors between start and stop.

    Returns a tensor of shape (num, *start.shape) with out[0] == start and
    out[-1] == stop for num >= 2.

    FIX: num == 1 previously divided by zero (num - 1), yielding NaN/inf
    steps; it now returns just `start` (matching torch.linspace semantics).
    """
    denom = max(num - 1, 1)  # guard against num == 1
    steps = torch.arange(num, dtype=torch.float32, device=start.device) / denom
    # Append one trailing axis per start-dimension so steps broadcasts.
    for _ in range(start.ndim):
        steps = steps.unsqueeze((- 1))
    out = (start[None] + (steps * (stop - start)[None]))
    return out
class MPDataset(IterableDataset):
    """Iterable image-classification dataset with built-in multi-GPU sharding.

    A sampler splits the index space across `num_gpus` ranks; worker
    processes further stride over the rank's indices by worker id.
    KEY_DATASET / KEY_CLASSES name the packed data and class files in `root`.
    """
    def __init__(self, root, transform=None, target_transform=None, top_k=(1, 5), keep_rgb: bool=False, shuffle: bool=False, num_gpus: int=1, rank_id: int=0, epoch: int=0, drop_last: bool=False):
        # NOTE(review): `super(MPDataset).__init__()` lacks `self` — it builds
        # an unbound super proxy and never calls IterableDataset.__init__;
        # likely intended as super(MPDataset, self).__init__(). Confirm.
        super(MPDataset).__init__()
        self.root = root
        self.transform = transform
        self.target_transform = target_transform
        self.keep_rgb = keep_rgb
        self.shuffle = shuffle
        self.rank = rank_id
        self.num_replicas = num_gpus
        self.data_path = os.path.join(self.root, KEY_DATASET)
        self.cls_path = os.path.join(self.root, KEY_CLASSES)
        (self.classes, self.length) = get_base_info(self.cls_path, self.data_path)
        self.sampler = build_sampler(self, num_gpus=self.num_replicas, random_sample=self.shuffle, rank_id=self.rank, drop_last=drop_last)
        self.set_epoch(epoch)
        self._update_evaluator(top_k)
    def parse_file(self, img_list, label_list):
        """Yield (image, target) pairs, applying the configured transforms."""
        for (img_path, target) in zip(img_list, label_list):
            image = default_loader(img_path, rgb=self.keep_rgb)
            if (self.transform is not None):
                image = self.transform(image)
            if (self.target_transform is not None):
                target = self.target_transform(target)
            (yield (image, target))
    def get_indices(self):
        """Return this process's slice of the rank's indices (strided by worker id)."""
        worker_info = torch.utils.data.get_worker_info()
        if (worker_info is not None):
            worker_id = worker_info.id
            indices = self.indices[worker_id:self.indices_length:worker_info.num_workers]
        else:
            indices = self.indices
        return indices
    def __iter__(self):
        indices = self.get_indices()
        (img_list, label_list) = get_subset_data(self.data_path, indices)
        assert (len(img_list) == len(label_list))
        return iter(self.parse_file(img_list, label_list))
    def __len__(self):
        # Per-rank length under multi-GPU sharding, full length otherwise.
        return (self.indices_length if (self.num_replicas > 1) else self.length)
    def _update_evaluator(self, top_k):
        # Rebuild the evaluator for the configured top-k accuracy metrics.
        self.evaluator = GeneralEvaluator(self.classes, top_k=top_k)
    def set_epoch(self, epoch: int) -> None:
        """Reshuffle (when enabled) and re-materialize indices for `epoch`."""
        shuffle_dataset(self.sampler, epoch, self.shuffle)
        self.indices = list(self.sampler)
        self.indices_length = len(self.indices)
def trigger_loss4(model, criterion, inputs, backdoor_inputs, backdoor_labels, pattern, extractor, device, grads):
    """Score a backdoor trigger `pattern` against the model's first conv layer.

    Builds a single-output conv from the (resized, range-matched) pattern,
    compares its post-BN/ReLU activations on `backdoor_inputs` with the
    model's own first-ReLU activations, and rewards activation separation
    while penalizing distance from the average existing filter.

    Returns (rewards, grads): grads becomes d(rewards)/d(pattern) when the
    incoming `grads` flag is truthy, otherwise the flag is returned as-is.

    NOTE(review): `criterion`, `inputs` and `backdoor_labels` are unused;
    `clean_outputs` is computed only for its side effect of populating
    `extractor._extracted_activations['relu']` — confirm the hook setup.
    """
    convs = model.state_dict()['conv1.weight']
    avg_convs = convs.mean([0])
    # Kernel width of the stem conv; this loss assumes a 7x7 stem (ResNet-style).
    w = convs[(0, ...)].size()[1]
    assert (w == 7)
    resize = transforms.Resize((w, w))
    backdoor_conv_weight = resize(pattern)
    # Rescale the pattern into the value range of the existing conv weights.
    (lmin, lmax) = (backdoor_conv_weight.min(), backdoor_conv_weight.max())
    (cmin, cmax) = (convs.min(), convs.max())
    backdoor_conv_weight = ((((backdoor_conv_weight - lmin) / (lmax - lmin)) * (cmax - cmin)) + cmin)
    backdoor_conv = nn.Conv2d(3, 1, kernel_size=7, stride=2, padding=3, bias=False).to(device)
    backdoor_conv.weight = torch.nn.Parameter(backdoor_conv_weight.unsqueeze(0))
    backdoor_norm = nn.BatchNorm2d(1).to(device)
    backdoor_relu = nn.ReLU(inplace=True).to(device)
    # Mean over batch and channel dims -> spatial activation map.
    backdoor_activations = backdoor_relu(backdoor_norm(backdoor_conv(backdoor_inputs))).mean([0, 1])
    # Forward pass populates the extractor's hooked activations.
    clean_outputs = model(backdoor_inputs)
    clean_activations = extractor._extracted_activations['relu'].clone().detach().mean([0, 1])
    activations = (backdoor_activations - clean_activations)
    euclid_dist = (backdoor_conv_weight - avg_convs)
    # Reward activation separation; 0.002 weights the filter-similarity penalty.
    rewards = (torch.sum((activations * activations)) - (0.002 * torch.sum((euclid_dist * euclid_dist))))
    if grads:
        grads = torch.autograd.grad(rewards, pattern, retain_graph=True)
    return (rewards, grads)
class BasicBlock(nn.Module):
    """Standard two-conv ResNet basic block (3x3 -> 3x3 with identity shortcut)."""
    expansion = 1
    num_layers = 2

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # Module creation order preserved (parameter naming / init RNG order).
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Residual forward pass; the shortcut is downsampled when configured."""
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)

    def block_conv_info(self):
        """Kernel sizes, strides and paddings of the block's two convolutions."""
        return ([3, 3], [self.stride, 1], [1, 1])
def isclose(a, b, rel_tol=1e-05, abs_tol=0.0):
    """Float comparison with relative and absolute tolerance (PEP 485).

    Delegates to the stdlib math.isclose, which implements the exact formula
    the original hand-rolled expression used:
        abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
    and additionally handles infinities correctly (isclose(inf, inf) is True,
    whereas the hand-rolled inf - inf produced NaN and returned False).
    """
    import math
    return math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol)
def load_embedding(dataset: str, architecture: str, seed: int, step: int, layer: int) -> np.ndarray:
    """Load a cached layer representation, computing and caching it on first use."""
    folder_path = get_embedding_folder(dataset, architecture, seed, step, layer)
    if os.path.exists(folder_path):
        print('Representation already exists...loading...')
        return np.load((folder_path / pathlib.Path('rep.npy')))
    print('Computing representations for model')
    os.makedirs(folder_path)
    return compute_embeddings(dataset, architecture, seed, step, layer)
def export_saved_model(self, export_dir_base, serving_input_receiver_fn, assets_extra=None, as_text=False, checkpoint_path=None, experimental_mode=ModeKeys.PREDICT, save_incr_model=True):
    """Export the estimator as a SavedModel for the given mode."""
    # A receiver fn is mandatory: it defines the serving-time input signature.
    if not serving_input_receiver_fn:
        raise ValueError('An input_receiver_fn must be defined.')
    return self._export_all_saved_models(
        export_dir_base,
        {experimental_mode: serving_input_receiver_fn},
        assets_extra=assets_extra,
        as_text=as_text,
        checkpoint_path=checkpoint_path,
        strip_default_attrs=True,
        save_incr_model=save_incr_model)
def main(args):
    """Evaluate "localism" of a fairseq translation ensemble.

    For each evaluation sample, compares the prediction obtained by
    translating the original source directly against the prediction obtained
    by unrolling the derivation step by step, and reports the agreement rate.
    """
    # Clamp buffering/batching options to usable minimums.
    if (args.buffer_size < 1):
        args.buffer_size = 1
    if ((args.max_tokens is None) and (args.max_sentences is None)):
        args.max_sentences = 1
    assert ((not args.sampling) or (args.nbest == args.beam)), '--sampling requires --nbest to be equal to --beam'
    assert ((not args.max_sentences) or (args.max_sentences <= args.buffer_size)), '--max-sentences/--batch-size cannot be larger than --buffer-size'
    use_cuda = (torch.cuda.is_available() and (not args.cpu))
    task = tasks.setup_task(args)
    print('| loading model(s) from {}'.format(args.path))
    model_paths = args.path.split(':')
    # NOTE(security): eval() on a CLI string executes arbitrary code; this is
    # only acceptable because --model-overrides is operator-supplied.
    (models, model_args) = utils.load_ensemble_for_inference(model_paths, task, model_arg_overrides=eval(args.model_overrides))
    tgt_dict = task.target_dictionary
    for model in models:
        model.make_generation_fast_(beamable_mm_beam_size=(None if args.no_beamable_mm else args.beam), need_attn=args.print_alignment)
        if args.fp16:
            model.half()
    translator = SequenceGenerator(models, tgt_dict, beam_size=args.beam, minlen=args.min_len, stop_early=(not args.no_early_stop), normalize_scores=(not args.unnormalized), len_penalty=args.lenpen, unk_penalty=args.unkpen, sampling=args.sampling, sampling_topk=args.sampling_topk, sampling_temperature=args.sampling_temperature, diverse_beam_groups=args.diverse_beam_groups, diverse_beam_strength=args.diverse_beam_strength)
    if use_cuda:
        translator.cuda()
    align_dict = utils.load_align_dict(args.replace_unk)

    def make_result(src_str, hypos):
        """Format the n-best hypotheses for one source into a Translation."""
        result = Translation(src_str='O\t{}'.format(src_str), hypos=[], pos_scores=[], alignments=[])
        for hypo in hypos[:min(len(hypos), args.nbest)]:
            (hypo_tokens, hypo_str, alignment) = utils.post_process_prediction(hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=(hypo['alignment'].int().cpu() if (hypo['alignment'] is not None) else None), align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=args.remove_bpe)
            result.hypos.append('H\t{}\t{}'.format(hypo['score'], hypo_str))
            result.pos_scores.append('P\t{}'.format(' '.join(map((lambda x: '{:.4f}'.format(x)), hypo['positional_scores'].tolist()))))
            result.alignments.append(('A\t{}'.format(' '.join(map((lambda x: str(utils.item(x))), alignment))) if args.print_alignment else None))
        return result

    def process_batch(batch):
        """Run the generator on one batch and format each translation."""
        tokens = batch.tokens
        lengths = batch.lengths
        if use_cuda:
            tokens = tokens.cuda()
            lengths = lengths.cuda()
        encoder_input = {'src_tokens': tokens, 'src_lengths': lengths}
        translations = translator.generate(encoder_input, maxlen=int(((args.max_len_a * tokens.size(1)) + args.max_len_b)))
        return [make_result(batch.srcs[i], t) for (i, t) in enumerate(translations)]

    def translate_one(source, args, task, max_positions):
        """Translate a single source string; return its best hypothesis text."""
        for (batch, batch_indices) in make_batches([source], args, task, max_positions):
            result = process_batch(batch)[0]
            for (hypo, pos_scores, align) in zip(result.hypos, result.pos_scores, result.alignments):
                # 'H\t<score>\t<text>' -> keep only the text field.
                hypo = hypo.split('\t')[(- 1)]
                break
        return hypo

    def process_unrolled(steps, args, task, max_positions):
        """Execute unrolled derivation steps, substituting earlier outcomes.

        Returns (final_prediction, [(source, prediction), ...]) once a step
        with a non-placeholder target is reached; logs an error (and
        implicitly returns None) if no step terminates the unrolling.
        """
        pairs = []
        collect_outcomes = dict()
        for (source, target) in steps:
            # Splice previously collected outcomes into '*' placeholder tokens.
            for (i, token) in enumerate(source):
                if ('*' in token):
                    source[i] = collect_outcomes[token]
            # Flatten: substituted outcomes are token lists.
            flat = []
            for token in source:
                if (type(token) != list):
                    flat.append(token)
                else:
                    flat.extend(token)
            source = ' '.join(flat)
            predicted_target = translate_one(source, args, task, max_positions)
            pairs.append((source, predicted_target))
            if ('*' in target[0]):
                # Intermediate step: remember the outcome under its placeholder.
                collect_outcomes[target[0]] = predicted_target
            else:
                return (predicted_target, pairs)
        logging.error('Unrolling stopped prematurely.')

    max_positions = utils.resolve_max_positions(task.max_positions(), *[model.max_positions() for model in models])
    print(translate_one('copy G17 P19 Z18 E1 S13 J15 A3 A3', args, task, max_positions))
    data = []
    with open(args.src) as f:
        sample = {'unrolled': [], 'original': ('', '')}
        for line in f:
            [line_type, source, target] = line.split('\t')
            source = source.strip().split()
            target = target.strip().split()
            if (line_type == 'unrolled'):
                sample[line_type].append((source, target))
            else:
                # An 'original' line closes the current sample.
                sample[line_type] = (source, target)
                data.append(sample)
                sample = {'unrolled': [], 'original': ('', '')}
    predictions_equal = []
    scores_per_input_length = defaultdict(list)
    scores_per_target_length = defaultdict(list)
    all_pairs = []
    random.shuffle(data)
    for sample in tqdm(data[:1000]):
        # FIX: process_unrolled returns a 2-tuple; the original code unpacked
        # three values here (an `is_long` that was never returned), which
        # raised ValueError on the first iteration.
        (unrolled_predicted, pairs) = process_unrolled(sample['unrolled'], args, task, max_positions)
        (source, target) = sample['original']
        target = ' '.join(target)
        source = ' '.join(source)
        original_predicted = translate_one(source, args, task, max_positions)
        local_score = (original_predicted == unrolled_predicted)
        all_pairs.append((pairs, local_score))
        predictions_equal.append(local_score)
        scores_per_input_length[len(source)].append(local_score)
        scores_per_target_length[len(target)].append(local_score)
    print(f'Localism {np.mean(predictions_equal)}')
    with open('trace_localism.txt', 'w') as f:
        for (pairs, score) in all_pairs:
            f.write('\n')
            for (s, t) in pairs:
                f.write('{} -> {}\n'.format(s, t))
            f.write('{}'.format(score))
def result(args, records):
    """Pretty-print benchmark results for the configured run stage.

    Args:
        args: run configuration (model, stage, dataset, cores, lookback,
            horizon, inference_framework, metrics, ...).
        records: dict mapping metric keys (e.g. '<framework>_latency') to values.

    Output uses ANSI SGR sequences for bold/colored field labels.
    """
    print(' test-run information ')
    print('\x1b[1m\tModel\x1b[0m: \x1b[0;31m' + args.model + '\x1b[0m')
    print('\x1b[1m\tStage\x1b[0m: \x1b[0;31m' + args.stage + '\x1b[0m')
    print('\x1b[1m\tDataset\x1b[0m: \x1b[0;31m' + args.dataset + '\x1b[0m')
    if args.cores:
        # NOTE(review): tests args.cores but prints args.core — looks like a
        # typo for the same attribute; confirm against the argument parser.
        print('\x1b[1m\tCores\x1b[0m: \x1b[0;31m' + str(args.core) + '\x1b[0m')
    else:
        core_num = (psutil.cpu_count(logical=False) * int(subprocess.getoutput('cat /proc/cpuinfo | grep "physical id" | sort -u | wc -l')))
        # FIX: core_num is an int; concatenating it to a str raised TypeError.
        print('\x1b[1m\tCores\x1b[0m: \x1b[0;31m' + str(core_num) + '\x1b[0m')
    # Defensive str(): lookback/horizon may be parsed as ints.
    print('\x1b[1m\tLookback\x1b[0m: \x1b[0;31m' + str(args.lookback) + '\x1b[0m')
    print('\x1b[1m\tHorizon\x1b[0m: \x1b[0;31m' + str(args.horizon) + '\x1b[0m')
    if (args.stage == 'train'):
        print('\n train result ')
        # FIX: the ESC characters were missing from this line (literal '[1m'
        # instead of '\x1b[1m' as used everywhere else in this function).
        print(f"\x1b[1m avg throughput\x1b[0m: \x1b[0;34m{records['train_throughput']}\x1b[0m")
        print(' train result ')
    elif (args.stage == 'latency'):
        for framework in args.inference_framework:
            print('\n {} latency result '.format(framework))
            print('avg latency: {}ms'.format(records[(framework + '_latency')] * 1000))
            # Percentile latencies are stored as [p50, p90, p95, p99].
            for (label, idx) in (('p50', 0), ('p90', 1), ('p95', 2), ('p99', 3)):
                print('{} latency: {}ms'.format(label, records[(framework + '_percentile_latency')][idx] * 1000))
            print(' {} latency result '.format(framework))
    elif (args.stage == 'throughput'):
        for framework in args.inference_framework:
            print('\n {} throughput result '.format(framework))
            print('avg throughput: {}'.format(records[(framework + '_infer_throughput')]))
            print(' {} throughput result '.format(framework))
    elif (args.stage == 'accuracy'):
        print('\n accuracy result ')
        for metric in args.metrics:
            print('{}: {}'.format(metric, records[metric]))
        print(' accuracy result ')
def _copy_dir(files, run_dir):
    """Snapshot the given files/directories into <run_dir>/src."""
    dest_root = os.path.join(run_dir, 'src')
    if not os.path.exists(dest_root):
        os.makedirs(dest_root)
    for file_name in files:
        target = os.path.join(dest_root, file_name)
        # Directories are copied recursively; plain files byte-for-byte.
        copy = shutil.copytree if os.path.isdir(file_name) else shutil.copyfile
        copy(file_name, target)
def get_matched_ner_from_file(gold_file, pred_file, up_ignore_layer=0):
    """Collect gold, predicted, and matched entities from two NER files.

    The files must contain one sentence per line and be aligned
    line-by-line.

    Args:
        gold_file: path to the gold-annotation file.
        pred_file: path to the prediction file.
        up_ignore_layer: passed through to filter_entity.

    Returns:
        (gold_entity, pred_entity, match_entity) as flat lists over all
        sentences.
    """
    # FIX: read via context managers so file handles are always closed
    # (the original leaked both open file objects).
    with open(gold_file, encoding='utf-8') as f:
        gold_lines = f.readlines()
    with open(pred_file, encoding='utf-8') as f:
        pred_lines = f.readlines()
    sentence_num = len(gold_lines)
    assert (sentence_num == len(pred_lines))
    gold_entity = []
    pred_entity = []
    match_entity = []
    # (Removed a dead start_line/end_line window: start_line was 0 and
    # end_line 1_000_000, so the skip conditions could never trigger.)
    for idx in range(sentence_num):
        gold_filter_entity = filter_entity(get_ner_from_sentence(gold_lines[idx]), up_ignore_layer)
        pred_filter_entity = filter_entity(get_ner_from_sentence(pred_lines[idx]), up_ignore_layer)
        # Entities present in both gold and prediction count as matches.
        match = list(set(gold_filter_entity).intersection(set(pred_filter_entity)))
        gold_entity += gold_filter_entity
        pred_entity += pred_filter_entity
        match_entity += match
    return (gold_entity, pred_entity, match_entity)
def dubbing_video(video_path, out_video_path, text_info, font_size=0.5, font_v_pos=0.95, font_color=(0, 0, 255)):
    """Burn timed subtitles into a video and re-attach its audio track.

    Args:
        video_path: input video file.
        out_video_path: destination for the subtitled video.
        text_info: iterable of (start_sec, end_sec, text) tuples; a text is
            drawn on every frame whose timestamp falls in [start, end].
        font_size: cv2 font scale.
        font_v_pos: vertical text position as a fraction of frame height.
        font_color: BGR color tuple.
    """
    extract_audio(video_path, './temp_audio.wav')
    video = cv2.VideoCapture(video_path)
    frame_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = video.get(cv2.CAP_PROP_FPS)
    output_video = cv2.VideoWriter('./temp_video.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (frame_width, frame_height))
    # Loop-invariant drawing parameters, hoisted out of the per-frame loop.
    # The x offset was int(frame_width * 0.0), i.e. text starts at the left edge.
    text_position = (0, int(frame_height * font_v_pos))
    font = cv2.FONT_HERSHEY_SIMPLEX
    line_type = 1
    current_frame = 0
    while video.isOpened():
        (ret, frame) = video.read()
        if not ret:
            break
        current_time = current_frame / fps
        for (text_start, text_end, text) in text_info:
            # FIX: dropped the no-op `font_color = font_color` self-assignment.
            if text_start <= current_time <= text_end:
                cv2.putText(frame, text, text_position, font, font_size, font_color, line_type, cv2.LINE_AA)
        output_video.write(frame)
        current_frame += 1
    video.release()
    output_video.release()
    combine_audio_video('./temp_video.mp4', './temp_audio.wav', out_video_path)
    # Clean up the intermediate artifacts.
    os.remove('./temp_video.mp4')
    os.remove('./temp_audio.wav')
def main(_run, seed, test_mode, evaluation_metric, minimize, total_trials, parameterization):
    """Run Ax hyper-parameter optimization and record the best result in _run."""
    ((train_dl, val_dl, test_dl), input_dim, output_dim, static_dim, model_interpolation, return_sequences) = load_data(test_mode=test_mode)

    def train_evaluate(parameterization):
        """Train one configuration; return {metric: (value, sem)} for Ax."""
        (model_params, trainer_params) = handle_parameterization(parameterization, _run)
        (model, prepare_batch) = setup_model(input_dim, output_dim, static_dim, model_interpolation, **model_params, return_sequences=return_sequences, train_dl=train_dl)
        try:
            (model, results, _) = train(_run, model, train_dl, val_dl, test_dl, prepare_batch=prepare_batch, verbose=1)
            metric_value = results['{}.val'.format(evaluation_metric)]
        except Exception:
            # FIX: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt. A failed trial gets a worst-case score so the
            # optimizer steers away from it.
            metric_value = (1000 if minimize else 0)
        ax_output = {evaluation_metric: (metric_value, 0.0)}
        return ax_output
    (_, v, experiment, _) = optimize(parameters=parameterization, evaluation_function=train_evaluate, experiment_name='hyperopt', objective_name=evaluation_metric, total_trials=total_trials, minimize=minimize, random_seed=seed)
    (best_params, metric_info) = get_best_raw_objective_point(experiment)
    metric = metric_info[evaluation_metric][0]
    # Persist the winning configuration and its score in the sacred run info.
    _run.info['best_parameters'] = undo_dunder(best_params)
    _run.info[evaluation_metric] = metric
def get_pyramidnet(blocks, alpha, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Build a PyramidNet of the requested depth.

    Args:
        blocks: total block count selecting the stage layout (10..200).
        alpha: total channel growth, distributed linearly over all units.
        model_name: weight-file identifier, required when pretrained=True.
        pretrained: download and load pretrained weights when True.
        root: local directory for downloaded weights.
        **kwargs: forwarded to the PyramidNet constructor.

    Returns:
        The constructed (optionally pretrained) PyramidNet.

    Raises:
        ValueError: for an unsupported `blocks` value, or when `pretrained`
            is set without a `model_name`.
    """
    # Stage layouts keyed by depth (replaces the original 10-arm if/elif chain).
    depth_to_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        14: [2, 2, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks not in depth_to_layers:
        raise ValueError('Unsupported ResNet with number of blocks: {}'.format(blocks))
    layers = depth_to_layers[blocks]
    init_block_channels = 64
    # Channel count grows linearly by growth_add per unit across the network.
    growth_add = (float(alpha) / float(sum(layers)))
    from functools import reduce
    channels = reduce((lambda xi, yi: (xi + [[(((i + 1) * growth_add) + xi[(- 1)][(- 1)]) for i in list(range(yi))]])), layers, [[init_block_channels]])[1:]
    channels = [[int(round(cij)) for cij in ci] for ci in channels]
    # Depths below 50 use basic blocks; 50+ use bottlenecks (4x expansion).
    bottleneck = (blocks >= 50)
    if bottleneck:
        channels = [[(cij * 4) for cij in ci] for ci in channels]
    net = PyramidNet(channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, **kwargs)
    if pretrained:
        if ((model_name is None) or (not model_name)):
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
def const_or_evo_func(x):
    """Wrap x as a callable: callables pass through, constants are lifted."""
    if not callable(x):
        # (y * 0) + x keeps broadcasting semantics for array-like arguments.
        return lambda y: (y * 0) + x
    return x
class FlaxBartForConditionalGeneration(metaclass=DummyObject):
    # Placeholder emitted when the 'flax' backend is not installed;
    # any instantiation raises a helpful error via requires_backends.
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def training_loss_2nd_item_task(data, batch_index, model, sess, train_data, is_training):
    """Accumulate the 2nd-order item-task loss over the given batches.

    NOTE: the denominator is the full number of oracle batches, not
    len(batch_index) — preserved from the original implementation.
    """
    total_loss = 0.0
    num_batch = data.oracle_num_items // setting.batch_size
    for index in batch_index:
        (b_target_item, b_k_shot_user, b_second_order_items, b_third_order_users,
         b_oracle_item_ebd, b_mask_num_second_order_item,
         b_mask_num_third_order_user) = data.batch_gen_3rd_item_task(train_data, index)
        feed_dict = {
            model.target_item: b_oracle_item_ebd,
            model.support_user_1st_pos: b_k_shot_user,
            model.training_phrase_user_task: is_training,
            model.support_item_2nd_pos: b_second_order_items,
            model.training_phrase_item_task: is_training,
        }
        total_loss += sess.run(model.loss_2nd_item_pos, feed_dict)
    return total_loss / num_batch
class AvgMeter(object):
    """Tracks a running average plus a windowed history of recorded values."""

    def __init__(self, num=40):
        # `num` is the window size used by show().
        self.num = num
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
        self.losses = []

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
        self.losses.append(val)

    def show(self):
        """Mean of the most recent `num` recorded values, as a tensor."""
        start = np.maximum(len(self.losses) - self.num, 0)
        return torch.mean(torch.stack(self.losses[start:]))
def unit_postprocessing(unit, vid_size=None):
    """Convert a [-1, 1] CHW tensor to an HWC uint8 image, resizing if asked."""
    arr = unit.squeeze().cpu().detach().numpy()
    arr = np.clip(arr, -1, 1)
    # Map [-1, 1] -> [0, 255] and reorder channels from CHW to HWC.
    img = np.round((np.transpose(arr, (1, 2, 0)) + 1.0) * 127.5).astype(np.uint8)
    # (shape[:2][::-1] is (width, height), matching cv2's size convention.)
    if vid_size is not None and img.shape[:2][::-1] != vid_size:
        img = cv2.resize(img, vid_size, interpolation=cv2.INTER_CUBIC)
    return img
def convert(src, dst, depth):
    """Convert a Caffe2/Detectron ResNet checkpoint into a PyTorch one."""
    if depth not in arch_settings:
        raise ValueError('Only support ResNet-50 and ResNet-101 currently')
    block_nums = arch_settings[depth]
    caffe_model = mmcv.load(src, encoding='latin1')
    blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model
    state_dict = OrderedDict()
    converted_names = set()
    # Stem: first conv plus its batch norm.
    convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
    convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
    for i in range(1, len(block_nums) + 1):
        for j in range(block_nums[i - 1]):
            if j == 0:
                # The first block of every stage carries the projection shortcut.
                convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1', f'layer{i}.{j}.downsample.0', converted_names)
                convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn', f'layer{i}.{j}.downsample.1', converted_names)
            # Bottleneck convs a/b/c map to conv1/conv2/conv3 (+ matching bn).
            for k, letter in enumerate(['a', 'b', 'c']):
                convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch2{letter}', f'layer{i}.{j}.conv{k + 1}', converted_names)
                convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch2{letter}_bn', f'layer{i}.{j}.bn{k + 1}', converted_names)
    # Report any source blobs that were not mapped.
    for key in blobs:
        if key not in converted_names:
            print(f'Not Convert: {key}')
    torch.save({'state_dict': state_dict}, dst)
def validate_status_exec(g_code, library, device='gpu') -> (ExecutionStatus, str):
    """Execute generated framework code and classify the outcome.

    Args:
        g_code: generated source code to run.
        library: 'tf' for TensorFlow, anything else selects PyTorch.
        device: device hint forwarded to wrap_code_with_device.

    Returns:
        (status, error_message); error_message is '' on success.
    """
    write_code = wrap_code_with_device(g_code, library, device)
    # Keep an on-disk copy of what was executed, for post-mortem debugging.
    # FIX: write with an explicit encoding instead of the platform default.
    with open('/tmp/tmp{}.py'.format(CURRENT_TIME), 'w', encoding='utf-8') as f:
        f.write(write_code)
    try:
        if (library == 'tf'):
            import tensorflow as tf
            execGlobals = {'tf': tf, 'np': np}
        else:
            import torch
            execGlobals = {'torch': torch, 'np': np}
        # NOTE(security): exec() runs arbitrary generated code; only use with
        # trusted/generated inputs, never user-supplied code.
        exec(write_code, execGlobals)
    except Exception as e:
        error_msg = ((type(e).__name__ + ' ') + str(e))
        # A sentinel error raised by the harness means the target API
        # was never invoked by the generated program.
        if ('TargetNotCalledError' in error_msg):
            return (ExecutionStatus.NOTCALL, error_msg)
        return (ExecutionStatus.EXCEPTION, error_msg)
    else:
        return (ExecutionStatus.SUCCESS, '')
class DepthwiseConv2d(Conv2d):
    """Depthwise variant of Conv2d: each input channel gets its own
    `num_bases` filter bank instead of mixing channels."""
    def convolve(self, x: TensorType) -> tf.Tensor:
        """Apply the depthwise convolution to `x`.

        Inducing-image inputs are convolved directly with tf.nn; anything
        else is delegated to the kernel's own convolve().
        """
        if isinstance(x, inducing_variables.DepthwiseInducingImages):
            return tf.nn.depthwise_conv2d(input=x.as_images, filter=self.filters, strides=(1, 1, 1, 1), padding='VALID')
        return self.kernel.convolve(input=x, filters=self.filters)
    def initialize(self, x, dtype: Any=None):
        """Initialize biases and filters from a (possibly inducing) input batch."""
        if isinstance(x, inducing_variables.InducingImages):
            x = x.as_images
        if (dtype is None):
            dtype = x.dtype
        # One bias per output channel: channels_in * num_bases.
        channels_out = (self.kernel.channels_in * self.num_bases)
        self._biases = bias_initializer(self.kernel.kernel, channels_out, dtype=dtype)
        patch_size = (self.kernel.patch_shape[0] * self.kernel.patch_shape[1])
        batch_shape = [self.kernel.channels_in, self.num_bases]
        weights = weight_initializer(self.kernel.kernel, patch_size, batch_shape=batch_shape, dtype=dtype)
        # Reshape flat patch weights to (patch_h, patch_w, channels_in, num_bases).
        self._filters = tf.reshape(move_axis(weights, (- 1), 0), (self.kernel.patch_shape + batch_shape))
    def filters(self):
        # NOTE(review): this looks like it was meant to be a @property —
        # convolve() above reads `self.filters` without calling it. Confirm
        # against the parent Conv2d class / original source.
        if (self._filters is None):
            return None
        shape = (list(self.kernel.patch_shape) + [self.kernel.channels_in, 1])
        # Scale filters by inverse lengthscales (per-dimension when ARD is on).
        inv_ls = tf.math.reciprocal(self.kernel.kernel.lengthscales)
        if self.kernel.kernel.ard:
            coeffs = tf.reshape(tf.transpose(inv_ls), shape)
        else:
            coeffs = tf.fill(shape, inv_ls)
        return (coeffs * self._filters)
    def output_scale(self):
        # Normalization factor sqrt(2 * variance / num_output_features).
        num_features_out = (self.num_bases * self.kernel.channels_in)
        return tf.sqrt(((2 * self.kernel.kernel.variance) / num_features_out))
def _calculate_expected_aligned_error(alignment_confidence_breaks: torch.Tensor, aligned_distance_error_probs: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
    """Expected aligned error (probability-weighted bin centers) and max bin."""
    bin_centers = _calculate_bin_centers(alignment_confidence_breaks)
    # Expectation over the error distribution along the last axis.
    expected = torch.sum(aligned_distance_error_probs * bin_centers, dim=-1)
    return (expected, bin_centers[-1])
def contrastStretching(img, saturated_pixel=0.004):
    """Contrast-stretch an image the way ImageJ does.

    The darkest and brightest `saturated_pixel` fraction of pixels are
    saturated; the remaining intensity range is rescaled linearly to
    [0, 255]. (FIX: the original docstring was an unterminated string
    literal — a syntax error — and misspelled "contrast".)

    Args:
        img: numeric ndarray image.
        saturated_pixel: fraction of pixels to saturate at each end.

    Returns:
        Float ndarray with values clipped to [0.0, 255.0].
    """
    values = np.sort(img, axis=None)
    nr_pixels = np.size(values)
    # Index of the percentile used as the new black / white point.
    lim = int(np.round(saturated_pixel * nr_pixels))
    v_min = values[lim]
    v_max = values[(- lim) - 1]
    # Linear rescale; NOTE: a constant image (v_max == v_min) divides by zero.
    img = ((img - v_min) * 255.0) / (v_max - v_min)
    return np.minimum(255.0, np.maximum(0.0, img))
class LeanParser():
lean_file: LeanFile
token_lines: List[List[Token]]
line: int
column: int
pos: int
current_token: Token
parameter_positions: Dict[(Tuple[(int, int)], List[Tuple[(int, int)]])]
tactic_block_positions: Set[Tuple[(int, int)]]
    def __init__(self, lean_file: LeanFile, line: int, column: int, parameter_positions=None, tactic_block_positions=None):
        """Position a parser on the token starting at (line, column).

        parameter_positions / tactic_block_positions are pre-computed maps
        consumed while reading tactics; both default to empty.
        """
        self.lean_file = lean_file
        self.token_lines = lean_file.lines
        self.line = line
        self.column = column
        # Locate the token whose start column equals `column` on this line;
        # the asserts below require that exactly such a token exists.
        pos = None
        current = None
        for (i, t) in enumerate(self.token_lines[self.line]):
            if (t.column == column):
                pos = i
                current = t
        assert (pos is not None)
        assert (current is not None)
        self.pos = pos
        self.current_token = current
        self.parameter_positions = ({} if (parameter_positions is None) else parameter_positions)
        self.tactic_block_positions = (set() if (tactic_block_positions is None) else tactic_block_positions)
    def peek(self) -> Token:
        """Return the current token without consuming it."""
        return self.current_token
    def next(self) -> Token:
        """Consume and return the current token, advancing the cursor.

        Advances within the current token line when possible, otherwise
        moves to the first token of the following line. Must not be called
        at EOF.
        """
        assert (self.current_token.type != TokenType.EOF)
        token = self.current_token
        length = (self.current_token.end_column - self.current_token.column)
        if ((self.pos + 1) < len(self.token_lines[self.line])):
            self.pos += 1
            self.column += length
        else:
            # Wrap to the start of the next line of tokens.
            self.line += 1
            self.pos = 0
            self.column = 0
        self.current_token = self.token_lines[self.line][self.pos]
        # Sanity-check that the tracked position matches the token's own.
        assert (self.current_token.line == self.line), (self.line, self.current_token)
        assert (self.current_token.column == self.column), (self.column, self.current_token)
        return token
    def is_eof(self) -> bool:
        """True if the cursor sits on the end-of-file token."""
        return (self.current_token.type == TokenType.EOF)
    def format_file_location(self) -> str:
        """Render 'file:line:col' plus the source line with a caret marker."""
        line_str = ''.join((t.string for t in self.token_lines[self.line]))
        file_location = f'{self.lean_file.filename}:{(self.line + 1)}:{(self.column + 1)}'
        return f'''{file_location}
{self.line:04}: {line_str} {(' ' * self.column)}{('^' * len(self.current_token.string))}'''
    def raise_error(self, msg: str):
        """Raise a generic parse error annotated with the file location."""
        raise Exception(f'''{msg}:
{self.format_file_location()}''')
    def start_pos(self) -> Tuple[(int, int)]:
        """(line, column) where the current token starts."""
        return (self.current_token.line, self.current_token.column)
    def end_pos(self) -> Tuple[(int, int)]:
        """(line, column) just past the end of the current token."""
        return (self.current_token.line, self.current_token.end_column)
    def read_next(self) -> str:
        """Consume the next token and return its text; parse error at EOF."""
        if self.is_eof():
            self.raise_error('Expected token but EOF')
        return self.next().string
    def read_token(self, expected_string: str) -> str:
        """Consume the next token, requiring its text to be `expected_string`."""
        t = self.next()
        if (t.string != expected_string):
            self.raise_error('Expected {} but found {}'.format(repr(expected_string), repr(t.string)))
        return t.string
def is_token(self, expected_string: str) -> bool:
t = self.peek()
return (t.string == expected_string)
    def read_token_in(self, expected_strings: Set[str]) -> str:
        """Consume the next token, requiring its text to be in the given set."""
        t = self.next()
        if (t.string not in expected_strings):
            self.raise_error('Expected {} but found {}'.format(repr(expected_strings), repr(t.string)))
        return t.string
def is_token_in(self, expected_strings: Set[str]) -> bool:
t = self.peek()
return (t.string in expected_strings)
    def read_alphanum(self) -> str:
        """Consume the next token, requiring it to be alphanumeric."""
        t = self.next()
        if (t.type != TokenType.ALPHANUMERIC):
            self.raise_error('Expected alphanumeric but found {}'.format(repr(t.string)))
        return t.string
def is_alphanum(self) -> bool:
t = self.peek()
return (t.type == TokenType.ALPHANUMERIC)
def consume_space(self):
while ((not self.is_eof()) and (self.peek().type in [TokenType.WHITESPACE, TokenType.LINE_COMMENT, TokenType.BLOCK_COMMENT])):
self.next()
    def read_univs(self) -> AST.Univs:
        """Parse a universe-variable block: '{' name* '}'."""
        (line, column) = self.start_pos()
        self.read_token('{')
        self.consume_space()
        univs = []
        while (not self.is_token('}')):
            u = self.read_alphanum()
            univs.append(u)
            self.consume_space()
        self.read_token('}')
        self.consume_space()
        (end_line, end_column) = self.start_pos()
        return AST.Univs(univs=univs, line=line, column=column, end_line=end_line, end_column=end_column)
    def read_name_part(self) -> str:
        """Read one dot-separated component of a name.

        NOTE(review): the read_token('') calls compare against EMPTY strings
        and so the quoted branch can essentially never match — these look
        like mangled French-quote delimiters («»), which Lean uses for
        escaped identifiers. Confirm against the original source file.
        """
        if self.is_token(''):
            ll = self.read_token('')
            nn = self.read_alphanum()
            rr = self.read_token('')
            return ((ll + nn) + rr)
        else:
            return self.read_alphanum()
    def read_namespace(self) -> str:
        """Read a single namespace component followed by its trailing dot."""
        name = self.read_name_part()
        self.read_token('.')
        return name
    def read_full_name(self) -> AST.Name:
        """Read a fully qualified, dot-separated name."""
        (line, column) = self.start_pos()
        name_path = []
        name = self.read_name_part()
        name_path.append(name)
        # Keep consuming '.'-separated components.
        while self.is_token('.'):
            self.read_token('.')
            name = self.read_name_part()
            name_path.append(name)
        (end_line, end_column) = self.start_pos()
        return AST.Name(name_path=name_path, line=line, column=column, end_line=end_line, end_column=end_column)
    def read_expr_until(self, end_tokens: Set[str]) -> AST.Expr:
        """Parse an expression as a sequence of parts until an end token.

        Recognizes binders, bracketed groups, if/then/else, let, match,
        begin..end proofs, calc chains, do blocks, and `by` proofs; any
        other token becomes a plain TokenExprPart. The terminating token
        itself is NOT consumed.
        """
        self.consume_space()
        expr_parts = []
        (expr_line, expr_column) = self.start_pos()
        while (not self.is_token_in(end_tokens)):
            if self.is_token_in(BINDERS):
                # Binder: '<binder> <bound vars>, <body>'.
                (line, column) = self.start_pos()
                binder = self.read_token_in(BINDERS)
                bound_part = self.read_expr_until({','})
                self.read_token(',')
                expr = self.read_expr_until(end_tokens)
                (end_line, end_column) = self.start_pos()
                part = AST.BoundExprPart(binder=binder, bound_part=bound_part, expr=expr, line=line, column=column, end_line=end_line, end_column=end_column)
            elif self.is_token_in(LEFT_BRACKETS):
                # Bracketed group with comma-separated sub-expressions.
                (line, column) = self.start_pos()
                left_bracket = self.read_token_in(LEFT_BRACKETS)
                right_bracket = BRACKETS[left_bracket]
                sub_exprs = []
                sub_expr = self.read_expr_until({',', right_bracket})
                sub_exprs.append(sub_expr)
                while self.is_token(','):
                    self.read_token(',')
                    sub_expr = self.read_expr_until({',', right_bracket})
                    sub_exprs.append(sub_expr)
                self.read_token(right_bracket)
                (end_line, end_column) = self.start_pos()
                part = AST.BracketExprPart(brackets=(left_bracket, right_bracket), exprs=sub_exprs, line=line, column=column, end_line=end_line, end_column=end_column)
            elif self.is_token('if'):
                # 'if ... then ... else ...' conditional.
                (line, column) = self.start_pos()
                self.read_token('if')
                if_part = self.read_expr_until({'then'})
                self.read_token('then')
                then_part = self.read_expr_until({'else'})
                self.read_token('else')
                else_part = self.read_expr_until(end_tokens)
                (end_line, end_column) = self.start_pos()
                part = AST.ITEExprPart(if_expr=if_part, then_expr=then_part, else_expr=else_part, line=line, column=column, end_line=end_line, end_column=end_column)
            elif self.is_token('let'):
                # 'let <var> := <expr> in <body>'.
                (line, column) = self.start_pos()
                self.read_token('let')
                var_part = self.read_expr_until({':='})
                self.read_token(':=')
                expr_part = self.read_expr_until({'in'})
                self.read_token('in')
                body_part = self.read_expr_until(end_tokens)
                (end_line, end_column) = self.start_pos()
                part = AST.LetExprPart(var=var_part, expr=expr_part, body=body_part, line=line, column=column, end_line=end_line, end_column=end_column)
            elif self.is_token('match'):
                # 'match <expr> with | pat := body ... end'.
                (line, column) = self.start_pos()
                self.read_token('match')
                match_part = self.read_expr_until({'with'})
                self.read_token('with')
                self.consume_space()
                cases = []
                first = True
                while (not self.is_token('end')):
                    (case_line, case_column) = self.start_pos()
                    if (not first):
                        self.read_token('|')
                    else:
                        # The leading '|' before the first case is optional.
                        first = False
                        if self.is_token('|'):
                            self.read_token('|')
                    self.consume_space()
                    case_start = self.read_expr_until({':='})
                    case_body = self.read_expr_until({'|', 'end'})
                    (case_end_line, case_end_column) = self.start_pos()
                    cases.append(AST.MatchCase(pattern=case_start, expr=case_body, line=case_line, column=case_column, end_line=case_end_line, end_column=case_end_column))
                self.read_token('end')
                (end_line, end_column) = self.start_pos()
                part = AST.MatchExprPart(match_expr=match_part, cases=cases, line=line, column=column, end_line=end_line, end_column=end_column)
            elif self.is_token('begin'):
                # 'begin ... end' tactic proof block.
                (line, column) = self.start_pos()
                proof = self.read_begin()
                (end_line, end_column) = self.start_pos()
                part = AST.BeginExprPart(proof=proof, line=line, column=column, end_line=end_line, end_column=end_column)
            elif self.is_token('calc'):
                # 'calc <step> ... <step>' chain separated by '...'.
                (line, column) = self.start_pos()
                self.read_token('calc')
                calc_parts = []
                calc_part = self.read_expr_until(({'...'} | end_tokens))
                calc_parts.append(calc_part)
                while self.is_token('...'):
                    self.read_token('...')
                    calc_part = self.read_expr_until(({'...'} | end_tokens))
                    calc_parts.append(calc_part)
                (end_line, end_column) = self.start_pos()
                part = AST.CalcExprPart(parts=calc_parts, line=line, column=column, end_line=end_line, end_column=end_column)
            elif self.is_token('do'):
                # 'do <stmt>, <stmt>, ...' monadic block.
                (line, column) = self.start_pos()
                self.read_token('do')
                do_parts = []
                do_part = self.read_expr_until(({','} | end_tokens))
                do_parts.append(do_part)
                while self.is_token(','):
                    self.read_token(',')
                    do_part = self.read_expr_until(({','} | end_tokens))
                    do_parts.append(do_part)
                (end_line, end_column) = self.start_pos()
                part = AST.DoExprPart(parts=do_parts, line=line, column=column, end_line=end_line, end_column=end_column)
            elif self.is_token('by'):
                # 'by <tactic>' single-tactic proof.
                (line, column) = self.start_pos()
                proof = self.read_by()
                (end_line, end_column) = self.start_pos()
                part = AST.ByExprPart(proof=proof, line=line, column=column, end_line=end_line, end_column=end_column)
            elif self.is_token_in((COMMANDS | RIGHT_BRACKETS)):
                # A command keyword or stray closing bracket means the
                # expression ran past its expected terminator.
                s = self.peek().string
                self.raise_error(f'Expected expression to end {end_tokens} but found {repr(s)}')
            else:
                # Any other token is kept verbatim.
                (line, column) = self.start_pos()
                s = self.read_next()
                (end_line, end_column) = self.start_pos()
                part = AST.TokenExprPart(t_string=s, line=line, column=column, end_line=end_line, end_column=end_column)
            expr_parts.append(part)
        (expr_end_line, expr_end_column) = self.start_pos()
        return AST.Expr(expr_parts=expr_parts, line=expr_line, column=expr_column, end_line=expr_end_line, end_column=expr_end_column)
    def read_parameter_block(self) -> AST.ParamBlock:
        """Parse one bracketed parameter block: '(vars : type := default)'.

        The ': type' and ':= default' segments are both optional.
        """
        (line, column) = self.start_pos()
        left_bracket = self.read_token_in(LEFT_BRACKETS)
        right_bracket = BRACKETS[left_bracket]
        self.consume_space()
        expr = self.read_expr_until({':', ':=', right_bracket})
        if self.is_token(':'):
            # 'vars : type' — what we first read were the variable names.
            self.read_token(':')
            vars = expr
            expr = self.read_expr_until({':=', right_bracket})
        else:
            vars = None
        if self.is_token(':='):
            self.read_token(':=')
            default_expr = self.read_expr_until({right_bracket})
        else:
            default_expr = None
        self.read_token(right_bracket)
        self.consume_space()
        (end_line, end_column) = self.start_pos()
        return AST.ParamBlock(brackets=(left_bracket, right_bracket), vars=vars, type_expr=expr, default_expr=default_expr, line=line, column=column, end_line=end_line, end_column=end_column)
    def read_signature_type(self) -> AST.SignatureType:
        """Parse an arrow-separated type: 'A -> B -> ... -> Result'."""
        (line, column) = self.start_pos()
        arg_types = []
        type_expr = self.read_expr_until((ARROWS | {':=', '|'}))
        # Each arrow pushes the previous expression onto the argument list;
        # the final expression is the result type.
        while self.is_token_in(ARROWS):
            self.read_token_in(ARROWS)
            arg_types.append(type_expr)
            type_expr = self.read_expr_until((ARROWS | {':=', '|'}))
        (end_line, end_column) = self.start_pos()
        return AST.SignatureType(arg_types=arg_types, result_type=type_expr, line=line, column=column, end_line=end_line, end_column=end_column)
    def read_signature(self) -> AST.Signature:
        """Parse a signature: zero or more parameter blocks, optional ': type'."""
        (line, column) = self.start_pos()
        params = []
        while self.is_token_in(LEFT_BRACKETS):
            param = self.read_parameter_block()
            params.append(param)
            self.consume_space()
        if self.is_token(':'):
            self.read_token(':')
            self.consume_space()
            sign_type = self.read_signature_type()
        else:
            sign_type = None
        (end_line, end_column) = self.start_pos()
        return AST.Signature(params=params, signature_type=sign_type, line=line, column=column, end_line=end_line, end_column=end_column)
    def read_body(self) -> AST.Body:
        """Parse a definition body: an expression running up to the next command."""
        (line, column) = self.start_pos()
        expr = self.read_expr_until(COMMANDS)
        (end_line, end_column) = self.start_pos()
        return AST.Body(expr=expr, line=line, column=column, end_line=end_line, end_column=end_column)
    def read_def(self) -> AST.DefLike:
        """Parse a def-like header: optional universes, name, and signature."""
        (line, column) = self.start_pos()
        self.consume_space()
        if self.is_token('{'):
            univs = self.read_univs()
        else:
            univs = None
        name = self.read_full_name()
        self.consume_space()
        signature = self.read_signature()
        (end_line, end_column) = self.start_pos()
        return AST.DefLike(univ=univs, name=name, signature=signature, line=line, column=column, end_line=end_line, end_column=end_column)
    def consume_matching_brackets(self) -> None:
        """Consume a bracketed region, tracking nesting until it balances."""
        self.read_token_in(LEFT_BRACKETS)
        depth = 1
        while True:
            if self.is_token_in(LEFT_BRACKETS):
                self.read_token_in(LEFT_BRACKETS)
                depth += 1
            elif self.is_token_in(RIGHT_BRACKETS):
                self.read_token_in(RIGHT_BRACKETS)
                depth -= 1
                # Depth back to zero: the opening bracket is fully matched.
                if (not depth):
                    return None
            else:
                self.read_next()
    def read_named_tactic(self) -> AST.NamedTactic:
        """Parse a named tactic and its parameters.

        Parameter boundaries come primarily from ``self.parameter_positions``
        (a map from a parameter's start position to its possible end
        positions) and ``self.tactic_block_positions``; when no recorded
        position matches, heuristic fallbacks consume bracketed groups,
        numbers, or bare names, each with a warning.
        """
        (line, column) = self.start_pos()
        tactic_name = self.read_full_name()
        self.consume_space()
        parameters = []
        visted_parameters = set()
        while True:
            # Case 1: a recorded parameter starts here (and we have not
            # already consumed a parameter from this position).
            if ((self.start_pos() in self.parameter_positions) and (self.start_pos() not in visted_parameters)):
                (param_line, param_column) = self.start_pos()
                visted_parameters.add((param_line, param_column))
                for (param_end_line, param_end_column) in self.parameter_positions[(param_line, param_column)]:
                    # An interactive-tactic block parameter begins exactly here.
                    if (((param_end_line, param_end_column) > self.start_pos()) and (self.start_pos() in self.tactic_block_positions)):
                        itactic = self.read_itactic()
                        self.consume_space()
                        # The itactic must end exactly where the recorded
                        # parameter boundary says it does.
                        assert ((param_end_line, param_end_column) == self.start_pos())
                        parameters.append(AST.ITacticTacticParam(tactic=itactic, line=param_line, column=param_column, end_line=param_end_line, end_column=param_end_column))
                    else:
                        # Opaque parameter: skip tokens until the recorded end.
                        while (self.start_pos() < (param_end_line, param_end_column)):
                            self.read_next()
                        if (self.start_pos() != (param_end_line, param_end_column)):
                            self.raise_error(f'End of parameter is in middle of a token. Expected parameter to end at {(param_end_line, param_end_column)}')
                        parameters.append(AST.TacticParam(line=param_line, column=param_column, end_line=param_end_line, end_column=param_end_column))
                self.consume_space()
            # Case 2: an interactive tactic block with no recorded parameter.
            elif (self.start_pos() in self.tactic_block_positions):
                (param_line, param_column) = self.start_pos()
                itactic = self.read_itactic()
                self.consume_space()
                (param_end_line, param_end_column) = self.start_pos()
                parameters.append(AST.ITacticTacticParam(tactic=itactic, line=param_line, column=param_column, end_line=param_end_line, end_column=param_end_column))
            # Case 3 (fallback): a bracketed group with no position data.
            elif self.is_token_in(LEFT_BRACKETS):
                (param_line, param_column) = self.start_pos()
                print('WARNING: Non-interactive parameter. Check that this is not a parsing error.')
                print(self.format_file_location())
                self.consume_matching_brackets()
                self.consume_space()
                (param_end_line, param_end_column) = self.start_pos()
                parameters.append(AST.TacticParam(line=param_line, column=param_column, end_line=param_end_line, end_column=param_end_column))
            # Case 4 (fallback): a numeric literal parameter.
            elif (self.is_alphanum() and self.peek().string.isnumeric()):
                (param_line, param_column) = self.start_pos()
                print('WARNING: Non-interactive parameter. Check that this is not a parsing error.')
                print(self.format_file_location())
                self.read_alphanum()
                self.consume_space()
                (param_end_line, param_end_column) = self.start_pos()
                parameters.append(AST.TacticParam(line=param_line, column=param_column, end_line=param_end_line, end_column=param_end_column))
            # A command keyword (or 'else'/'in') terminates the tactic.
            elif self.is_token_in((COMMANDS | {'else', 'in'})):
                break
            # Case 5 (fallback): a bare name parameter.
            elif self.is_alphanum():
                (param_line, param_column) = self.start_pos()
                print('WARNING: Non-interactive parameter. Check that this is not a parsing error.')
                print(self.format_file_location())
                self.read_full_name()
                self.consume_space()
                (param_end_line, param_end_column) = self.start_pos()
                parameters.append(AST.TacticParam(line=param_line, column=param_column, end_line=param_end_line, end_column=param_end_column))
            else:
                break
        (end_line, end_column) = self.start_pos()
        return AST.NamedTactic(tactic_name=tactic_name, args=parameters, line=line, column=column, end_line=end_line, end_column=end_column)
def read_single_tactic(self) -> Union[(AST.NamedTactic, AST.Solve1Tactic, AST.CalcTactic)]:
(line, column) = self.start_pos()
if self.is_token_in({'{', 'begin'}):
t_list = self.read_tactic_list()
self.consume_space()
(end_line, end_column) = self.start_pos()
return AST.Solve1Tactic(tactics=t_list, line=line, column=column, end_line=end_line, end_column=end_column)
elif self.is_token('by'):
self.read_token('by')
self.consume_space()
tactic = self.read_maybe_semicolon_tactic()
(end_line, end_column) = self.start_pos()
return AST.Solve1Tactic(tactics=[tactic], line=line, column=column, end_line=end_line, end_column=end_column)
elif self.is_token('calc'):
self.read_expr_until((({'end', ';', ',', '|', '<|>'} | RIGHT_BRACKETS) | COMMANDS))
self.consume_space()
(end_line, end_column) = self.start_pos()
return AST.CalcTactic(line=line, column=column, end_line=end_line, end_column=end_column)
elif self.is_token('do'):
self.raise_error('Parsing "do" tactic in interactive mode not yet implemented')
else:
return self.read_named_tactic()
def read_maybe_alt_tactic(self) -> Union[(AST.AlternativeTactic, AST.NamedTactic, AST.Solve1Tactic, AST.CalcTactic)]:
(line, column) = self.start_pos()
first_tactic = self.read_single_tactic()
if self.is_token('<|>'):
(alternative_line, alternative_column) = self.start_pos()
self.read_token('<|>')
self.consume_space()
second_tactic = self.read_maybe_alt_tactic()
(end_line, end_column) = self.start_pos()
return AST.AlternativeTactic(tactic1=first_tactic, tactic2=second_tactic, alternative_line=alternative_line, alternative_column=alternative_column, line=line, column=column, end_line=end_line, end_column=end_column)
else:
return first_tactic
def read_tactic_list(self) -> List[AST.Tactic]:
left = self.read_next()
if (left not in ['begin', '{', '[']):
self.raise_error('Expected "begin", "{", or "[".')
right = {'begin': 'end', '{': '}', '[': ']'}[left]
tactics = []
while True:
self.consume_space()
if self.is_token(right):
break
t = self.read_maybe_semicolon_tactic()
tactics.append(t)
if self.is_token(','):
self.read_token(',')
continue
elif self.is_token(right):
break
else:
self.raise_error(f'Expected "," or "{right}"')
self.read_token(right)
self.consume_space()
return tactics
def read_maybe_semicolon_tactic(self) -> AST.Tactic:
(line, column) = self.start_pos()
tactic = self.read_maybe_alt_tactic()
first_semicolon_pos = None
semicolon_count = 0
while self.is_token(';'):
semicolon_pos = self.start_pos()
semicolon_count += 1
if (first_semicolon_pos is None):
first_semicolon_pos = semicolon_pos
self.read_token(';')
self.consume_space()
if self.is_token('['):
t_list = self.read_tactic_list()
self.consume_space
(end_line, end_column) = self.start_pos()
tactic = AST.SemicolonListTactic(tactic1=tactic, tactic_list=t_list, first_semicolon_line=first_semicolon_pos[0], first_semicolon_column=first_semicolon_pos[1], semicolon_line=semicolon_pos[0], semicolon_column=semicolon_pos[1], semicolon_count=semicolon_count, line=line, column=column, end_line=end_line, end_column=end_column)
else:
tactic2 = self.read_maybe_alt_tactic()
(end_line, end_column) = self.start_pos()
tactic = AST.SemicolonTactic(tactic1=tactic, tactic2=tactic2, first_semicolon_line=first_semicolon_pos[0], first_semicolon_column=first_semicolon_pos[1], semicolon_line=semicolon_pos[0], semicolon_column=semicolon_pos[1], semicolon_count=semicolon_count, line=line, column=column, end_line=end_line, end_column=end_column)
return tactic
def read_by(self) -> AST.ByProof:
(line, column) = self.start_pos()
self.read_token('by')
self.consume_space()
tactic = self.read_maybe_semicolon_tactic()
(end_line, end_column) = self.start_pos()
return AST.ByProof(tactic=tactic, line=line, column=column, end_line=end_line, end_column=end_column)
def read_begin(self) -> AST.BeginProof:
(line, column) = self.start_pos()
if (not self.is_token('begin')):
self.raise_error('Expected "begin"')
tactics = self.read_tactic_list()
(end_line, end_column) = self.start_pos()
return AST.BeginProof(tactics=tactics, line=line, column=column, end_line=end_line, end_column=end_column)
def read_bracket_proof(self) -> AST.BracketProof:
(line, column) = self.start_pos()
if (not self.is_token('{')):
self.raise_error('Expected "{"')
tactics = self.read_tactic_list()
(end_line, end_column) = self.start_pos()
return AST.BracketProof(tactics=tactics, line=line, column=column, end_line=end_line, end_column=end_column)
def read_itactic(self) -> AST.ITactic:
(line, column) = self.start_pos()
if (not self.is_token_in({'{', 'begin'})):
self.raise_error('Expected "{" or "begin"')
tactics = self.read_tactic_list()
(end_line, end_column) = self.start_pos()
return AST.ITactic(tactics=tactics, line=line, column=column, end_line=end_line, end_column=end_column) |
def build_fake_yaml2(sigopt_api_token, sigopt_project_id):
    """Write ``fake_yaml2.yaml``: a sigopt tuning config with the given
    API token and project id substituted into the template.

    The template is round-tripped through yaml.load/yaml.dump so the file
    on disk is normalized YAML.
    """
    fake_yaml = '\n model:\n name: fake_yaml\n framework: tensorflow\n inputs: x\n outputs: op2_to_store\n device: cpu\n evaluation:\n accuracy:\n metric:\n topk: 1\n tuning:\n strategy:\n name: sigopt\n sigopt_api_token: {}\n sigopt_project_id: {}\n sigopt_experiment_name: nc-tune\n exit_policy:\n max_trials: 3\n accuracy_criterion:\n relative: -0.01\n workspace:\n path: saved\n '.format(sigopt_api_token, sigopt_project_id)
    y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
    with open('fake_yaml2.yaml', 'w', encoding='utf-8') as f:
        yaml.dump(y, f)
    # Removed the redundant f.close(): the with-statement already closes
    # the file on exit.
def parse_args():
    """Parse command-line arguments for MMDet model testing/evaluation.

    Also exports LOCAL_RANK for distributed launchers that rely on the
    environment variable, and maps the deprecated --options flag onto
    --eval-options (raising if both are given).
    """
    parser = argparse.ArgumentParser(description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--work-dir', help='the directory to save the file containing evaluation metrics')
    parser.add_argument('--out', help='output result file in pickle format')
    # Fixed help-text typo: "increasethe" -> "increase the".
    parser.add_argument('--fuse-conv-bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increase the inference speed')
    parser.add_argument('--gpu-ids', type=int, nargs='+', help='(Deprecated, please use --gpu-id) ids of gpus to use (only applicable to non-distributed training)')
    parser.add_argument('--gpu-id', type=int, default=0, help='id of gpu to use (only applicable to non-distributed testing)')
    # Fixed help-text typos: "without perform evaluation. It isuseful".
    parser.add_argument('--format-only', action='store_true', help='Format the output results without performing evaluation. It is useful when you want to format the result to a specific format and submit it to the test server')
    parser.add_argument('--eval', type=str, nargs='+', help='evaluation metrics, which depends on the dataset, e.g., "bbox", "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--show-dir', help='directory where painted images will be saved')
    parser.add_argument('--show-score-thr', type=float, default=0.3, help='score threshold (default: 0.3)')
    parser.add_argument('--gpu-collect', action='store_true', help='whether to use gpu to collect results.')
    parser.add_argument('--tmpdir', help='tmp directory used for collecting results from multiple workers, available when gpu-collect is not specified')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function (deprecate), change to --eval-options instead.')
    parser.add_argument('--eval-options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if ('LOCAL_RANK' not in os.environ):
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    if (args.options and args.eval_options):
        raise ValueError('--options and --eval-options cannot be both specified, --options is deprecated in favor of --eval-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args
class MetricBase():
    """Base class for StyleGAN2-style evaluation metrics.

    Subclasses implement ``_evaluate(Gs, Gs_kwargs, num_gpus)`` and report
    values via ``_report_result``.  An instance is reusable: ``run()``
    resets state, loads the network pickle, evaluates, and optionally logs
    the formatted result line.
    """

    def __init__(self, name):
        # Metric identifier used in log filenames and autosummary tags.
        self.name = name
        self._dataset_obj = None
        # Progress-reporting window; all None until configured.
        self._progress_lo = None
        self._progress_hi = None
        self._progress_max = None
        self._progress_sec = None
        self._progress_time = None
        self._reset()

    def close(self):
        """Release the dataset object and clear all per-run state."""
        self._reset()

    def _reset(self, network_pkl=None, run_dir=None, data_dir=None, dataset_args=None, mirror_augment=None):
        """Clear per-run state and store the new run configuration.

        When dataset_args/mirror_augment are not given but run_dir is,
        they are recovered from the previous run's stored config.
        """
        if (self._dataset_obj is not None):
            self._dataset_obj.close()
        self._network_pkl = network_pkl
        self._data_dir = data_dir
        self._dataset_args = dataset_args
        self._dataset_obj = None
        self._mirror_augment = mirror_augment
        self._eval_time = 0
        self._results = []
        if (((dataset_args is None) or (mirror_augment is None)) and (run_dir is not None)):
            run_config = misc.parse_config_for_previous_run(run_dir)
            self._dataset_args = dict(run_config['dataset'])
            # Evaluation should see the dataset in deterministic order.
            self._dataset_args['shuffle_mb'] = 0
            self._mirror_augment = run_config['train'].get('mirror_augment', False)

    def configure_progress_reports(self, plo, phi, pmax, psec=15):
        """Enable progress reporting: map [0, 1] progress onto [plo, phi]
        out of pmax, throttled to one update every psec seconds."""
        self._progress_lo = plo
        self._progress_hi = phi
        self._progress_max = pmax
        self._progress_sec = psec

    def run(self, network_pkl, run_dir=None, data_dir=None, dataset_args=None, mirror_augment=None, num_gpus=1, tf_config=None, log_results=True, Gs_kwargs=None):
        """Evaluate the metric on the given network pickle.

        Gs_kwargs defaults to ``dict(is_validation=True)``.  BUG FIX: the
        original used a mutable dict as the default argument, which is
        shared across all calls; any mutation would leak between runs.
        """
        if (Gs_kwargs is None):
            Gs_kwargs = dict(is_validation=True)
        self._reset(network_pkl=network_pkl, run_dir=run_dir, data_dir=data_dir, dataset_args=dataset_args, mirror_augment=mirror_augment)
        time_begin = time.time()
        with tf.Graph().as_default(), tflib.create_session(tf_config).as_default():
            self._report_progress(0, 1)
            (_G, _D, Gs) = misc.load_pkl(self._network_pkl)
            self._evaluate(Gs, Gs_kwargs=Gs_kwargs, num_gpus=num_gpus)
            self._report_progress(1, 1)
        self._eval_time = (time.time() - time_begin)
        if log_results:
            if (run_dir is not None):
                log_file = os.path.join(run_dir, ('metric-%s.txt' % self.name))
                with dnnlib.util.Logger(log_file, 'a'):
                    print(self.get_result_str().strip())
            else:
                print(self.get_result_str().strip())

    def get_result_str(self):
        """Format one line: network name, eval time, then each result."""
        network_name = os.path.splitext(os.path.basename(self._network_pkl))[0]
        # Keep the name column a fixed width by truncating from the left.
        if (len(network_name) > 29):
            network_name = ('...' + network_name[(- 26):])
        result_str = ('%-30s' % network_name)
        result_str += (' time %-12s' % dnnlib.util.format_time(self._eval_time))
        for res in self._results:
            result_str += (((' ' + self.name) + res.suffix) + ' ')
            result_str += (res.fmt % res.value)
        return result_str

    def update_autosummaries(self):
        """Push all reported results into TensorFlow autosummaries."""
        for res in self._results:
            tflib.autosummary.autosummary((('Metrics/' + self.name) + res.suffix), res.value)

    def _evaluate(self, Gs, Gs_kwargs, num_gpus):
        # Subclasses must implement the actual metric computation.
        raise NotImplementedError

    def _report_result(self, value, suffix='', fmt='%-10.4f'):
        """Record one result value with an optional name suffix and format."""
        self._results += [dnnlib.EasyDict(value=value, suffix=suffix, fmt=fmt)]

    def _report_progress(self, pcur, pmax, status_str=''):
        """Report pcur/pmax progress, throttled; no-op until configured."""
        if ((self._progress_lo is None) or (self._progress_hi is None) or (self._progress_max is None)):
            return
        t = time.time()
        # Throttle: skip if the last update was less than _progress_sec ago.
        if ((self._progress_sec is not None) and (self._progress_time is not None) and (t < (self._progress_time + self._progress_sec))):
            return
        self._progress_time = t
        val = (self._progress_lo + ((pcur / pmax) * (self._progress_hi - self._progress_lo)))
        dnnlib.RunContext.get().update(status_str, int(val), self._progress_max)

    def _get_cache_file_for_reals(self, extension='pkl', **kwargs):
        """Build a cache path keyed on an MD5 of all dataset/metric args."""
        all_args = dnnlib.EasyDict(metric_name=self.name, mirror_augment=self._mirror_augment)
        all_args.update(self._dataset_args)
        all_args.update(kwargs)
        md5 = hashlib.md5(repr(sorted(all_args.items())).encode('utf-8'))
        dataset_name = (self._dataset_args.get('tfrecord_dir', None) or self._dataset_args.get('h5_file', None))
        dataset_name = os.path.splitext(os.path.basename(dataset_name))[0]
        return os.path.join('.stylegan2-cache', ('%s-%s-%s.%s' % (md5.hexdigest(), self.name, dataset_name, extension)))

    def _get_dataset_obj(self):
        # Lazily open the dataset on first use.
        if (self._dataset_obj is None):
            self._dataset_obj = dataset.load_dataset(data_dir=self._data_dir, **self._dataset_args)
        return self._dataset_obj

    def _iterate_reals(self, minibatch_size):
        """Yield an endless stream of real image minibatches."""
        dataset_obj = self._get_dataset_obj()
        while True:
            (images, _labels) = dataset_obj.get_minibatch_np(minibatch_size)
            if self._mirror_augment:
                images = misc.apply_mirror_augment(images)
            (yield images)

    def _iterate_fakes(self, Gs, minibatch_size, num_gpus):
        """Yield an endless stream of generated image minibatches."""
        while True:
            latents = np.random.randn(minibatch_size, *Gs.input_shape[1:])
            fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
            images = Gs.run(latents, None, output_transform=fmt, is_validation=True, num_gpus=num_gpus, assume_frozen=True)
            (yield images)

    def _get_random_labels_tf(self, minibatch_size):
        """Return a TF tensor of random labels from the dataset."""
        return self._get_dataset_obj().get_random_labels_tf(minibatch_size)
_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """Pipeline for sequence-to-sequence text generation.

    Prepends the model's configured prefix to each input, generates with
    the seq2seq model, and decodes the outputs as text or token ids.
    """

    # Prefix used for output keys, e.g. 'generated_text'.
    return_name = 'generated'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type((TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if (self.framework == 'tf') else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING))

    def _sanitize_parameters(self, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, truncation=None, stop_sequence=None, **generate_kwargs):
        """Split call-time kwargs into (preprocess, forward, postprocess)
        parameter dicts, per the Pipeline contract."""
        preprocess_params = {}
        if (truncation is not None):
            preprocess_params['truncation'] = truncation
        # Any remaining kwargs are forwarded verbatim to model.generate().
        forward_params = generate_kwargs
        postprocess_params = {}
        if ((return_tensors is not None) and (return_type is None)):
            return_type = (ReturnType.TENSORS if return_tensors else ReturnType.TEXT)
        if (return_type is not None):
            postprocess_params['return_type'] = return_type
        if (clean_up_tokenization_spaces is not None):
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if (stop_sequence is not None):
            # Only a single-token stop sequence is supported; longer ones
            # fall back to their first token with a warning.
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if (len(stop_sequence_ids) > 1):
                warnings.warn('Stopping on a multiple token sequence is not yet supported on transformers. The first token of the stop sequence will be used as the stop sequence string in the interim.')
            generate_kwargs['eos_token_id'] = stop_sequence_ids[0]
        return (preprocess_params, forward_params, postprocess_params)

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Hook for subclasses to validate generation lengths; the base
        implementation accepts everything."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        """Prefix and tokenize a single string or a list of strings.

        Lists are padded as a batch (requires a pad token); single strings
        are not padded.
        """
        prefix = (self.model.config.prefix if (self.model.config.prefix is not None) else '')
        if isinstance(args[0], list):
            if (self.tokenizer.pad_token_id is None):
                raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input')
            args = ([(prefix + arg) for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = ((prefix + args[0]),)
            padding = False
        else:
            raise ValueError(f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`')
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # Seq2seq models do not take token_type_ids.
        if ('token_type_ids' in inputs):
            del inputs['token_type_ids']
        return inputs

    def __call__(self, *args, **kwargs):
        """Generate from one string or a batch; flattens singleton result
        lists when the input was a list of plain strings."""
        result = super().__call__(*args, **kwargs)
        if (isinstance(args[0], list) and all((isinstance(el, str) for el in args[0])) and all(((len(res) == 1) for res in result))):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        """Tokenize inputs (no truncation by default)."""
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        """Run model.generate and reshape outputs to
        (batch, num_return_sequences, seq_len)."""
        if (self.framework == 'pt'):
            (in_b, input_length) = model_inputs['input_ids'].shape
        elif (self.framework == 'tf'):
            (in_b, input_length) = tf.shape(model_inputs['input_ids']).numpy()
        generate_kwargs['min_length'] = generate_kwargs.get('min_length', self.model.config.min_length)
        generate_kwargs['max_length'] = generate_kwargs.get('max_length', self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs['min_length'], generate_kwargs['max_length'])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        # generate() returns (batch * num_return_sequences, seq_len); group
        # the return sequences back under their input row.
        if (self.framework == 'pt'):
            output_ids = output_ids.reshape(in_b, (out_b // in_b), *output_ids.shape[1:])
        elif (self.framework == 'tf'):
            output_ids = tf.reshape(output_ids, (in_b, (out_b // in_b), *output_ids.shape[1:]))
        return {'output_ids': output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        """Decode generated ids to text records, or pass through raw ids."""
        records = []
        for output_ids in model_outputs['output_ids'][0]:
            if (return_type == ReturnType.TENSORS):
                record = {f'{self.return_name}_token_ids': output_ids}
            elif (return_type == ReturnType.TEXT):
                record = {f'{self.return_name}_text': self.tokenizer.decode(output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces)}
            records.append(record)
        return records
_module()
class CityscapesDataset(CocoDataset):
    """Cityscapes instance-segmentation dataset exposed through the COCO
    dataset interface."""

    METAINFO = {'classes': ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle'), 'palette': [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32)]}

    def filter_data(self) -> List[dict]:
        """Filter images that are too small or (optionally) have only
        crowd/empty annotations.

        Returns the filtered data list; in test mode or when no filter
        config is set, the list is returned unchanged.
        """
        if self.test_mode:
            return self.data_list
        if (self.filter_cfg is None):
            return self.data_list
        filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False)
        min_size = self.filter_cfg.get('min_size', 0)
        # Images that carry at least one annotation.
        ids_with_ann = {data_info['img_id'] for data_info in self.data_list}
        # Images containing at least one instance of a used category,
        # restricted to annotated images.  (Cleaned up: the original used
        # enumerate() here but never used the index.)
        ids_in_cat = set()
        for class_id in self.cat_ids:
            ids_in_cat |= set(self.cat_img_map[class_id])
        ids_in_cat &= ids_with_ann
        valid_data_infos = []
        for data_info in self.data_list:
            img_id = data_info['img_id']
            all_is_crowd = all([(instance['ignore_flag'] == 1) for instance in data_info['instances']])
            if (filter_empty_gt and ((img_id not in ids_in_cat) or all_is_crowd)):
                continue
            if (min(data_info['width'], data_info['height']) >= min_size):
                valid_data_infos.append(data_info)
        return valid_data_infos
class AutoConfigTest(unittest.TestCase):
    """Tests for transformers.AutoConfig resolution, registration, and
    error reporting."""

    def test_module_spec(self):
        """The auto-config module is importable and has a spec."""
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto'))

    def test_config_from_model_shortcut(self):
        """A hub model id resolves to its concrete config class."""
        config = AutoConfig.from_pretrained('bert-base-uncased')
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        """A local config file resolves via its model_type field."""
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        """A hub identifier with an unknown name still resolves by model_type."""
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        """for_model() maps a model-type string to its config class."""
        config = AutoConfig.for_model('roberta')
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        """With an empty config.json, the folder name pattern ('-roberta')
        decides the config class."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            folder = os.path.join(tmp_dir, 'fake-roberta')
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, 'config.json'), 'w') as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        """Custom configs can be registered, round-tripped through
        save/load, and duplicate registrations are rejected."""
        try:
            AutoConfig.register('custom', CustomConfig)
            # Registering under a reserved or existing model type must fail.
            with self.assertRaises(ValueError):
                AutoConfig.register('model', CustomConfig)
            with self.assertRaises(ValueError):
                AutoConfig.register('bert', BertConfig)
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            # Undo the registration so other tests are unaffected.
            if ('custom' in CONFIG_MAPPING._extra_content):
                del CONFIG_MAPPING._extra_content['custom']

    def test_repo_not_found(self):
        """A nonexistent repo id raises a helpful EnvironmentError."""
        with self.assertRaisesRegex(EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'):
            _ = AutoConfig.from_pretrained('bert-base')

    def test_revision_not_found(self):
        """A bad git revision raises a helpful EnvironmentError."""
        with self.assertRaisesRegex(EnvironmentError, 'aaaaaa is not a valid git identifier \\(branch name, tag name or commit id\\)'):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_configuration_not_found(self):
        """A repo without config.json raises a helpful EnvironmentError."""
        with self.assertRaisesRegex(EnvironmentError, 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.'):
            _ = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo')

    def test_from_pretrained_dynamic_config(self):
        """Remote-code configs load with trust_remote_code and survive a
        save/load round trip."""
        config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, 'NewModelConfig')
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, 'NewModelConfig')
def training_loss_3rd_user_task(data, batch_index, model, sess, train_data, is_training):
    """Accumulate the third-order user-task loss over the given batches.

    Runs ``model.loss_3rd_user`` once per index in ``batch_index`` and
    returns the summed loss divided by ``data.oracle_num_users //
    setting.batch_size``.

    NOTE(review): the divisor is the dataset-wide batch count, not
    ``len(batch_index)`` — confirm callers always pass a full epoch of
    batch indices, otherwise the returned mean is mis-scaled.
    """
    train_loss = 0.0
    num_batch = (data.oracle_num_users // setting.batch_size)
    for index in batch_index:
        # Each batch: target user embedding plus k-shot items and
        # second/third-order neighborhood tensors with padding masks.
        (b_target_user, b_k_shot_item, b_second_order_users, b_third_order_items, b_oracle_user_ebd, b_mask_num_second_order_user, b_mask_num_third_order_item) = data.batch_gen_3rd_user_task(train_data, index)
        feed_dict = {model.target_user: b_oracle_user_ebd, model.support_item_1st_: b_k_shot_item, model.training_phrase_user_task: is_training, model.support_user_2nd_: b_second_order_users, model.training_phrase_item_task: is_training, model.support_item_3rd: b_third_order_items}
        train_loss += sess.run(model.loss_3rd_user, feed_dict)
    return (train_loss / num_batch)
def cfg_from_list(cfg_list):
    """Set config values from a flat list of alternating key/value strings.

    Keys are dotted paths into the global ``__C`` config; values are parsed
    with ast.literal_eval (falling back to the raw string) and must match
    the type of the value they replace.
    """
    from ast import literal_eval
    assert ((len(cfg_list) % 2) == 0)
    for (k, v) in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = k.split('.')
        d = __C
        # Walk down to the dict that owns the final key.
        for subkey in key_list[:(- 1)]:
            assert (subkey in d)
            d = d[subkey]
        subkey = key_list[(- 1)]
        assert (subkey in d)
        try:
            value = literal_eval(v)
        # BUG FIX: narrowed from a bare `except:` — literal_eval raises
        # ValueError/SyntaxError on non-literal input; a bare except also
        # swallowed KeyboardInterrupt, MemoryError, etc.
        except (ValueError, SyntaxError):
            value = v
        assert (type(value) == type(d[subkey])), 'type {} does not match original type {}'.format(type(value), type(d[subkey]))
        d[subkey] = value
class ShearX(object):
    """Horizontal shear augmentation with a randomly chosen sign, applied
    via a PIL affine transform."""

    def __init__(self, fillcolor=(128, 128, 128)):
        # Color for pixels exposed by the shear.
        self.fillcolor = fillcolor

    def __call__(self, x, magnitude):
        sign = random.choice([(- 1), 1])
        # Affine matrix (a, b, c, d, e, f): x' = a*x + b*y + c, y' = d*x + e*y + f.
        coeffs = (1, (magnitude * sign), 0, 0, 1, 0)
        return x.transform(x.size, Image.AFFINE, coeffs, Image.BICUBIC, fillcolor=self.fillcolor)
def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
    """Set every param group's learning rate on a half-cosine decay from
    init_lr at epoch 0 down to min_lr at epoch max_epoch."""
    cosine = math.cos((math.pi * epoch) / max_epoch)
    lr = (((init_lr - min_lr) * 0.5) * (1.0 + cosine)) + min_lr
    for group in optimizer.param_groups:
        group['lr'] = lr
def test_version_used_when_live():
    """Liveness-based slicing: a use of ``x`` inside a non-taken branch
    must still pull in the cell that defined the version of ``x`` that was
    live at that time (cell 1), alongside the later redefinition."""
    run_cell('x = 0')
    run_cell('\n if True:\n y = 7\n else:\n # even though this branch is not taken,\n # liveness-based usage should detect the\n # version of `x` used at the time it was\n # live, meaning cell 1 should get included\n # in the slice\n logging.info(x)\n ')
    run_cell('x = 5')
    run_cell('logging.info(x + y)')
    # The slice of cell 4 should include every cell, including cell 1.
    deps = set(compute_unparsed_slice(4).keys())
    assert (deps == {1, 2, 3, 4}), ('got %s' % deps)
    slice_size = num_stmts_in_slice(4)
    assert (slice_size == 4), ('got %d' % slice_size)
def _infunc(x_val, func, c, d, more_args, epsrel=1.49e-08):
myargs = ((x_val,) + more_args)
return integrate.quad(func, c, d, args=myargs, epsrel=epsrel, limit=2000)[0] |
class CIFAR10(data.Dataset):
    """CIFAR-10 dataset (torchvision-style).

    Loads the pickled python batches into a single (N, 32, 32, 3) uint8
    array, optionally downloading and verifying the archive first.
    Setting the PYDEBUG=1 environment variable shrinks the reported
    length to 1/50th for fast debugging runs.
    """

    base_folder = 'cifar-10-batches-py'
    # BUG FIX: the original line was `url = '` — an unterminated string
    # literal (syntax error).  Restored the canonical CIFAR-10 URL.
    url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    filename = 'cifar-10-python.tar.gz'
    tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
    # Per-file MD5 checksums.  NOTE(review): the data_batch_4 checksum in
    # the original was truncated to 24 hex digits ('634dddfa80567beed471001a');
    # restored to the 32-digit value published with torchvision — confirm
    # against the downloaded archive.
    train_list = [['data_batch_1', 'c99cafc152244af753f735de768cd75f'], ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'], ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'], ['data_batch_4', '634d18415352ddfa80567beed471001a'], ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb']]
    test_list = [['test_batch', '40351d587109b95175f43aff81a1287e']]
    meta = {'filename': 'batches.meta', 'key': 'label_names', 'md5': '5ff9c542aee3614f3951f8cda6e48888'}

    def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
        """Load the train or test split from ``root``.

        Raises RuntimeError when the files are missing or corrupted and
        download is False.
        """
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.train = train
        # Debug mode shrinks __len__ for quick iteration.
        self.debug: bool = (os.environ.get('PYDEBUG') == '1')
        if download:
            self.download()
        if (not self._check_integrity()):
            raise RuntimeError(('Dataset not found or corrupted.' + ' You can use download=True to download it'))
        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list
        self.data = []
        self.targets = []
        for (file_name, checksum) in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                # The pickles were written by Python 2; latin1 keeps byte
                # strings intact under Python 3.
                if (sys.version_info[0] == 2):
                    entry = pickle.load(f)
                else:
                    entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                # CIFAR-10 batches use 'labels'; CIFAR-100 uses 'fine_labels'.
                if ('labels' in entry):
                    self.targets.extend(entry['labels'])
                else:
                    self.targets.extend(entry['fine_labels'])
        # Reshape flat rows to NCHW, then transpose to HWC for PIL.
        self.data = np.vstack(self.data).reshape((- 1), 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))
        self._load_meta()

    def _load_meta(self):
        """Load class names from the meta file and build the name->index map."""
        path = os.path.join(self.root, self.base_folder, self.meta['filename'])
        if (not check_integrity(path, self.meta['md5'])):
            raise RuntimeError(('Dataset metadata file not found or corrupted.' + ' You can use download=True to download it'))
        with open(path, 'rb') as infile:
            if (sys.version_info[0] == 2):
                data = pickle.load(infile)
            else:
                data = pickle.load(infile, encoding='latin1')
            self.classes = data[self.meta['key']]
        self.class_to_idx = {_class: i for (i, _class) in enumerate(self.classes)}

    def __getitem__(self, index):
        """Return (image, target) at ``index``, applying any transforms.

        The raw array is converted to a PIL Image so transforms behave
        consistently with other torchvision datasets.
        """
        (img, target) = (self.data[index], self.targets[index])
        img = Image.fromarray(img)
        if (self.transform is not None):
            img = self.transform(img)
        if (self.target_transform is not None):
            target = self.target_transform(target)
        return (img, target)

    def __len__(self):
        # In debug mode expose only 1/50th of the data.
        if self.debug:
            return int((len(self.data) / 50))
        return int(len(self.data))

    def _check_integrity(self):
        """Return True when every batch file exists with the right MD5."""
        root = self.root
        for fentry in (self.train_list + self.test_list):
            (filename, md5) = (fentry[0], fentry[1])
            fpath = os.path.join(root, self.base_folder, filename)
            if (not check_integrity(fpath, md5)):
                return False
        return True

    def download(self):
        """Download and extract the archive unless it is already present."""
        import tarfile
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_url(self.url, self.root, self.filename, self.tgz_md5)
        with tarfile.open(os.path.join(self.root, self.filename), 'r:gz') as tar:
            tar.extractall(path=self.root)

    def __repr__(self):
        fmt_str = (('Dataset ' + self.__class__.__name__) + '\n')
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        tmp = ('train' if (self.train is True) else 'test')
        fmt_str += '    Split: {}\n'.format(tmp)
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp)))))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp)))))
        return fmt_str
def mk_vis_txt_pair_datalist(anno_path, data_ratio=1.0, vis_id_key='coco_id', txt_key='caption'):
    """Load (visual id, caption) pairs from an annotation file and group
    them by visual id.

    Returns a defaultdict mapping each vis_id to the list of entry dicts
    (each with 'txt' and 'vis_id' keys) that share it.
    """
    raw_datalist = load_datalist_with_ratio(anno_path, data_ratio)
    grouped = defaultdict(list)
    for raw_d in raw_datalist:
        entry = dict(txt=raw_d[txt_key], vis_id=raw_d[vis_id_key])
        grouped[entry['vis_id']].append(entry)
    return grouped
def test_kinetic_energy_shape():
    """The vmapped Laplacian kinetic-energy function should return one
    scalar per batch row of x."""
    (_, log_f) = make_dummy_log_f()
    x = make_dummy_x()
    kinetic_energy_fn = physics.kinetic.create_laplacian_kinetic_energy(log_f)
    # Batch over x (axis 0) with shared (None) parameters.
    kinetic_energy_fn = jax.vmap(kinetic_energy_fn, in_axes=(None, 0), out_axes=0)
    kinetic_energies = kinetic_energy_fn(None, x)
    assert (kinetic_energies.shape == (x.shape[0],))
class Trainer(object):
    """Selects a training routine based on ``args.algorithm`` and runs it
    when the trainer is called with a node."""

    def __init__(self, args):
        if (args.algorithm == 'fed_mutual'):
            self.train = train_mutual
        elif (args.algorithm == 'fed_avg'):
            self.train = train_avg
        elif (args.algorithm == 'normal'):
            self.train = train_normal
        else:
            # BUG FIX: previously an unrecognized algorithm left self.train
            # unset, deferring the failure to a confusing AttributeError at
            # call time; fail fast with a clear message instead.
            raise ValueError(f'unknown algorithm: {args.algorithm!r}')

    def __call__(self, node):
        # Delegate to the routine selected at construction time.
        self.train(node)
class WeightedSumTestCases(unittest.TestCase):
    """Tests for the WeightedSum aggregative function."""

    def test_should_aggregative_sum_work_properly_with_2D_vectors(self) -> None:
        """A weight vector selects (unit weights) or blends (0.5/0.5) the
        two objective values."""
        aggregative_function = WeightedSum()
        self.assertEqual(1.5, aggregative_function.compute([1.5, 2.9], [1.0, 0.0]))
        self.assertEqual(2.9, aggregative_function.compute([1.5, 2.9], [0.0, 1.0]))
        self.assertEqual(((1.5 / 2.0) + (2.9 / 2.0)), aggregative_function.compute([1.5, 2.9], [0.5, 0.5]))
def gender(word, pos=NOUN):
    """Guess the grammatical gender of *word* from its suffix.

    Only nouns are classified; for any other part of speech the function
    returns None, as it does when no suffix rule matches.
    """
    w = word.lower()
    if pos != NOUN:
        return None
    if w.endswith(gender_masculine):
        return MASCULINE
    if w.endswith(gender_feminine):
        return FEMININE
    if w.endswith(gender_neuter):
        return NEUTER
    # Fall back to majority-vote suffix tables, first match wins.
    for label, suffixes in gender_majority_vote.items():
        if w.endswith(suffixes):
            return label
    return None
class LeakyParallel(nn.Module):
    """Parallel leaky integrate-and-fire neurons built on a ReLU ``nn.RNN``.

    The RNN's recurrent weight matrix is (by default) masked down to its
    diagonal, whose entries act as the per-neuron membrane decay ``beta``;
    the RNN output is thresholded into spikes through a surrogate-gradient
    function (arctan by default).
    """

    def __init__(self, input_size, hidden_size, beta=None, bias=True, threshold=1.0, dropout=0.0, spike_grad=None, surrogate_disable=False, learn_beta=False, learn_threshold=False, graded_spikes_factor=1.0, learn_graded_spikes_factor=False, weight_hh_enable=False, device=None, dtype=None):
        super(LeakyParallel, self).__init__()
        # A single-layer ReLU RNN performs the leaky integration in parallel.
        self.rnn = nn.RNN(input_size, hidden_size, num_layers=1, nonlinearity='relu', bias=bias, batch_first=False, dropout=dropout, device=device, dtype=dtype)
        self._beta_buffer(beta, learn_beta)
        self.hidden_size = hidden_size
        if self.beta is not None:
            # A decay rate outside [0, 1] is not meaningful.
            self.beta = self.beta.clamp(0, 1)
        if spike_grad is None:
            # Default surrogate gradient: arctan (nested ATan below).
            self.spike_grad = self.ATan.apply
        else:
            self.spike_grad = spike_grad
        self._beta_to_weight_hh()
        if weight_hh_enable is False:
            # Mask recurrent weights to a diagonal so each neuron only
            # feeds back onto itself.
            self.weight_hh_enable()
        if learn_beta:
            # Keep gradients diagonal so training cannot re-introduce
            # cross-neuron recurrence.
            self.rnn.weight_hh_l0.register_hook(self.grad_hook)
        if not learn_beta:
            self.rnn.weight_hh_l0.requires_grad_(False)
        self._threshold_buffer(threshold, learn_threshold)
        self._graded_spikes_buffer(graded_spikes_factor, learn_graded_spikes_factor)
        self.surrogate_disable = surrogate_disable
        if self.surrogate_disable:
            # Plain Heaviside with no surrogate gradient (e.g. for export).
            self.spike_grad = self._surrogate_bypass

    def forward(self, input_):
        """Run the RNN over ``input_`` (time-major) and emit graded spikes."""
        mem = self.rnn(input_)
        # nn.RNN returns (output, h_n); threshold the full output sequence.
        mem_shift = mem[0] - self.threshold
        spk = self.spike_grad(mem_shift)
        spk = spk * self.graded_spikes_factor
        return spk

    @staticmethod
    def _surrogate_bypass(input_):
        # BUG FIX: must be a @staticmethod. Without the decorator,
        # ``self._surrogate_bypass`` is a bound method, so ``self`` is
        # passed as ``input_`` and every forward with
        # surrogate_disable=True raises TypeError.
        return (input_ > 0).float()

    class ATan(torch.autograd.Function):
        """Heaviside forward pass with an arctan-shaped surrogate gradient."""

        @staticmethod
        def forward(ctx, input_, alpha=2.0):
            # BUG FIX: torch.autograd.Function requires a static forward.
            ctx.save_for_backward(input_)
            ctx.alpha = alpha
            out = (input_ > 0).float()
            return out

        @staticmethod
        def backward(ctx, grad_output):
            # BUG FIX: torch.autograd.Function requires a static backward.
            (input_,) = ctx.saved_tensors
            grad_input = grad_output.clone()
            # Surrogate derivative: (alpha/2) / (1 + ((pi/2)*alpha*x)^2)
            grad = ((ctx.alpha / 2) / (1 + (((torch.pi / 2) * ctx.alpha) * input_).pow_(2))) * grad_input
            return (grad, None)

    def weight_hh_enable(self):
        """Mask the recurrent weight matrix down to its diagonal."""
        mask = torch.eye(self.hidden_size, self.hidden_size)
        self.rnn.weight_hh_l0.data = self.rnn.weight_hh_l0.data * mask

    def grad_hook(self, grad):
        """Zero every off-diagonal gradient entry of the recurrent weights."""
        device = grad.device
        # The mask must live on the same device as the incoming gradient.
        mask = torch.eye(self.hidden_size, self.hidden_size, device=device)
        return grad * mask

    def _beta_to_weight_hh(self):
        """Copy the user-supplied beta value(s) onto ``weight_hh_l0``."""
        with torch.no_grad():
            if self.beta is not None:
                if isinstance(self.beta, float) or isinstance(self.beta, int):
                    # One shared decay for every neuron.
                    self.rnn.weight_hh_l0.fill_(self.beta)
                elif isinstance(self.beta, torch.Tensor) or isinstance(self.beta, torch.FloatTensor):
                    if len(self.beta) == 1:
                        self.rnn.weight_hh_l0.fill_(self.beta[0])
                    elif len(self.beta) == self.hidden_size:
                        # Per-neuron decay: one value per weight row.
                        for i in range(self.hidden_size):
                            self.rnn.weight_hh_l0.data[i].fill_(self.beta[i])
                    else:
                        raise ValueError("Beta must be either a single value or of length 'hidden_size'.")

    def _beta_buffer(self, beta, learn_beta):
        # Store beta as a non-trainable buffer; None is allowed and means
        # "keep the RNN's own recurrent-weight initialization".
        if not isinstance(beta, torch.Tensor):
            if beta is not None:
                beta = torch.as_tensor([beta])
        self.register_buffer('beta', beta)

    def _graded_spikes_buffer(self, graded_spikes_factor, learn_graded_spikes_factor):
        # Graded-spike scaling: learnable parameter or fixed buffer.
        if not isinstance(graded_spikes_factor, torch.Tensor):
            graded_spikes_factor = torch.as_tensor(graded_spikes_factor)
        if learn_graded_spikes_factor:
            self.graded_spikes_factor = nn.Parameter(graded_spikes_factor)
        else:
            self.register_buffer('graded_spikes_factor', graded_spikes_factor)

    def _threshold_buffer(self, threshold, learn_threshold):
        # Firing threshold: learnable parameter or fixed buffer.
        if not isinstance(threshold, torch.Tensor):
            threshold = torch.as_tensor(threshold)
        if learn_threshold:
            self.threshold = nn.Parameter(threshold)
        else:
            self.register_buffer('threshold', threshold)
def _rm_hp(cs, k):
if (k in cs._hyperparameters):
cs._hyperparameters.pop(k)
for hp in cs.get_hyperparameters():
if hp.name.startswith('{}'.format(k)):
cs._hyperparameters.pop(hp.name) |
def test_attribute_unpacking():
    """Staleness tracking through tuple unpacking onto object attributes.

    Verifies that ``x.x, y.x = w + 2, z + 3`` links each attribute target
    only to its own right-hand side: rebinding ``z`` must flag ``y.x`` (and
    ``y``) stale while ``x.x`` stays fresh, and symmetrically for ``w``.
    """
    run_cell('\n class Foo:\n def __init__(self, x):\n self.x = x\n ')
    run_cell('x = Foo(5)')
    run_cell('y = Foo(6)')
    run_cell('w = 42')
    run_cell('z = 43')
    # Unpacking assignment: x.x depends on w, y.x depends on z.
    run_cell('x.x, y.x = w + 2, z + 3')
    run_cell('z = 9001')
    # Only z changed, so x.x / x must NOT be flagged ...
    run_cell('logging.info(x.x)')
    assert_not_detected()
    run_cell('logging.info(x)')
    assert_not_detected()
    # ... while y.x / y (derived from the old z) must be flagged.
    run_cell('logging.info(y.x)')
    assert_detected()
    run_cell('logging.info(y)')
    assert_detected()
    # Refreshing y.x from the new z clears its staleness.
    run_cell('y.x = z + 3')
    run_cell('logging.info(y.x)')
    assert_not_detected()
    # Now invalidate the other side: w feeds only x.x.
    run_cell('w = 99')
    run_cell('logging.info(x.x)')
    assert_detected()
    run_cell('logging.info(x)')
    assert_detected()
    run_cell('logging.info(y.x)')
    assert_not_detected()
    run_cell('logging.info(y)')
    assert_not_detected()
class Cat(nn.Module):
    """Module wrapper around ``torch.cat``: concatenates a sequence of
    tensors along a fixed dimension."""

    def __init__(self, dim=1):
        # ``dim`` is the axis along which inputs are joined (default: 1).
        super(Cat, self).__init__()
        self.dim = dim

    def forward(self, x):
        """Concatenate the sequence of tensors *x* along ``self.dim``."""
        joined = torch.cat(x, dim=self.dim)
        return joined
def approx_corr(D, X, Y):
    """Approximate transport-style losses between sample batches X and Y.

    For a potential network ``D`` this computes:
      * ``W_loss_XY`` — differentiable surrogate ``mean(D(X) - D(Y_inv))``,
        where ``Y_inv`` matches each y in Y to the x in X maximizing
        ``<x, y> - D(x)`` (a c-transform-style argmax);
      * ``W_loss_nograd_XY`` — a detached closed-form estimate built from
        the same matched pairs.

    Returns the tuple ``(W_loss_XY, W_loss_nograd_XY)``.
    """
    D_X = D(X)
    with torch.no_grad():
        # Pairwise inner products <x_i, y_j>. The original line was
        # garbled ("X Y.transpose(1, 0)") — restored as a matrix product,
        # consistent with the argmax over inner products below.
        XY = X @ Y.transpose(1, 0)
        # For each column (each y), pick the row (x) maximizing <x,y> - D(x).
        idx_Y = torch.argmax(XY - D_X, dim=0)
    Y_inv = X[idx_Y]
    W_loss_XY = (D_X - D(Y_inv)).mean()
    with torch.no_grad():
        W_loss_nograd_XY = (((-(X ** 2).sum(dim=1)) / 2).mean() + ((Y_inv * Y).sum(dim=1) - ((Y_inv ** 2).sum(dim=1) / 2)).mean())
    return (W_loss_XY, W_loss_nograd_XY)
class InvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual block.

    Optionally expands the channel count by ``expand_ratio`` with a 1x1
    conv, applies a 3x3 depthwise conv, projects back to ``oup`` channels,
    and adds a skip connection when stride is 1 and channels are unchanged.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = (self.stride == 1) and (inp == oup)
        layers = []
        if expand_ratio != 1:
            # Pointwise expansion only when the ratio actually grows
            # channels (with ratio 1, hidden_dim == inp already).
            layers.extend([
                Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
            ])
        # Depthwise 3x3 conv followed by a linear pointwise projection.
        layers.extend([
            Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
            BatchNorm2d(hidden_dim),
            nn.ReLU6(inplace=True),
            Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            BatchNorm2d(oup),
        ])
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        return self.conv(x)
def process_entities(query, doc, mentioned_time: dict) -> dict:
    """Collect named entities from a spaCy-style doc and merge time context.

    Returns a dict with deduplicated location strings, person/organization
    entities (note: GPE/LOC are stored as plain text while PERSON/ORG keep
    the span objects — behavior preserved from the original), plus the time
    points and paired from/to periods taken from *mentioned_time*.
    """
    location = []
    name = []
    organization = []
    s_time = []
    for ent in doc.ents:
        label = ent.label_
        if label in ('GPE', 'LOC'):
            location.append(ent.text)
        elif label == 'PERSON':
            name.append(ent)
        elif label == 'ORG':
            organization.append(ent)
        elif label in ('DATE', 'TIME'):
            s_time.append(ent)
    if not s_time:
        # No date/time entity in this doc: drop any stale time context.
        mentioned_time = {'time': [], 'period': []}
    location = list(set(location))
    # Pair consecutive period entries as (from, to) ranges.
    result_period = [
        {'from': mentioned_time['period'][2 * i], 'to': mentioned_time['period'][(2 * i) + 1]}
        for i in range(len(mentioned_time['period']) // 2)
    ]
    if 'last month' in query:
        result_period = post_process_last_month()
    if 'last week' in query:
        result_period = post_process_last_week()
    result = {'period': result_period, 'time': mentioned_time['time'], 'location': location, 'name': name, 'organization': organization}
    return result
# NOTE(review): the bare `_experiment` below looks like a truncated
# decorator (garage examples decorate experiment functions, e.g.
# `@wrap_experiment`) — confirm against the original file before running.
_experiment
def vpgis_inverted_pendulum(ctxt=None, seed=1):
    """Train VPG with an importance-sampling sampler on InvertedPendulum-v2.

    Args:
        ctxt: experiment context / snapshot configuration handed to
            LocalTFRunner.
        seed: RNG seed for reproducibility.
    """
    set_seed(seed)
    with LocalTFRunner(ctxt) as runner:
        env = GarageEnv(normalize(gym.make('InvertedPendulum-v2')))
        policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        algo = VPG(env_spec=env.spec, policy=policy, baseline=baseline, max_path_length=100, discount=0.99, max_kl_step=0.01)
        # ISSampler reuses previously collected trajectories via
        # importance sampling (looking back n_backtrack iterations).
        runner.setup(algo, env, sampler_cls=ISSampler, sampler_args=dict(n_backtrack=1))
        runner.train(n_epochs=40, batch_size=4000)
class Frame(BaseJsonLogger):
    """A video frame holding a set of uniquely-identified bounding boxes."""

    def __init__(self, frame_id: int, timestamp: float=None):
        self.frame_id = frame_id
        self.timestamp = timestamp
        # Bbox objects attached to this frame; ids must be unique per frame.
        self.bboxes = []

    def add_bbox(self, bbox_id: int, top: int, left: int, width: int, height: int):
        """Attach a new Bbox; raises ValueError on a duplicate bbox_id."""
        bboxes_ids = [bbox.bbox_id for bbox in self.bboxes]
        if bbox_id not in bboxes_ids:
            self.bboxes.append(Bbox(bbox_id, top, left, width, height))
        else:
            raise ValueError('Frame with id: {} already has a Bbox with id: {}'.format(self.frame_id, bbox_id))

    def add_label_to_bbox(self, bbox_id: int, category: str, confidence: float):
        """Add a (category, confidence) label to the bbox with *bbox_id*.

        Raises ValueError when no such bbox exists on this frame.
        """
        # BUG FIX: this dict was keyed on ``bbox.id``, but Bbox exposes
        # ``bbox_id`` (as add_bbox above demonstrates), so every call
        # raised AttributeError.
        bboxes = {bbox.bbox_id: bbox for bbox in self.bboxes}
        if bbox_id in bboxes.keys():
            res = bboxes.get(bbox_id)
            res.add_label(category, confidence)
        else:
            raise ValueError('the bbox with id: {} does not exists!'.format(bbox_id))
class SimulationActorAction(AbstractAction):
    """Action container for a simulated actor arm driven through pybullet."""

    def __init__(self):
        # Default pybullet control mode used for the arm joints.
        self.arm_mode = pb.POSITION_CONTROL
        # Joint command buffer, filled in by the caller before execution.
        self.arm_cmd = []
def get_learning_rate_scheduler(optimizer, args):
    """Build the annealing learning-rate scheduler described by *args*.

    Uses ``args.lr_decay_iters`` as the decay horizon when provided,
    otherwise falls back to the full ``args.train_iters``. Warmup length is
    a fraction (``args.warmup``) of that horizon.
    """
    num_iters = args.train_iters if args.lr_decay_iters is None else args.lr_decay_iters
    warmup_iter = args.warmup * num_iters
    # last_iter=-1 presumably signals a fresh, unstarted schedule to
    # AnnealingLR — confirm against its implementation.
    init_step = -1
    scheduler = AnnealingLR(optimizer, start_lr=args.lr, warmup_iter=warmup_iter, num_iters=num_iters, decay_style=args.lr_decay_style, last_iter=init_step, min_lr=args.min_lr, use_checkpoint_lr_scheduler=args.use_checkpoint_lr_scheduler, override_lr_scheduler=args.override_lr_scheduler)
    return scheduler
def train_d4rl_sbc(args):
    """Train a stochastic behavioral-cloning (SBC) agent on a D4RL dataset.

    Builds the evaluation environment named by ``args.env_id``, loads the
    matching offline Q-learning dataset into a replay buffer, and hands
    everything to ``dc.sbc.sbc`` together with all remaining CLI args.
    """
    test_env = gym.make(args.env_id)
    test_env.seed(args.seed)
    state_space = test_env.observation_space
    action_space = test_env.action_space
    # Policy log-std bounds come from the CLI; input/output sizes from the
    # environment's observation and action spaces.
    agent = dc.sbc.SBCAgent(state_space.shape[0], action_space.shape[0], args.log_std_low, args.log_std_high)
    dset = d4rl.qlearning_dataset(test_env)
    dset_size = dset['observations'].shape[0]
    # Buffer sized to hold the full offline dataset exactly once.
    buffer = dc.replay.ReplayBuffer(size=dset_size, state_shape=state_space.shape, state_dtype=float, action_shape=action_space.shape)
    buffer.load_experience(dset['observations'], dset['actions'], dset['rewards'], dset['next_observations'], dset['terminals'])
    # Remaining args (epochs, batch size, ...) are forwarded as keywords.
    dc.sbc.sbc(agent=agent, test_env=test_env, buffer=buffer, **vars(args))
def num_verb_phrases(const_pt):
    """Count the verb phrases found in a constituency parse tree.

    The VP tag is chosen per configured language: French has its own entry
    in VERB_PHRASE_LANGUAGE_MAP, everything else falls back to 'default'.
    """
    lang = settings.LANGUAGE if settings.LANGUAGE in ['fr'] else 'default'
    vp_tag = VERB_PHRASE_LANGUAGE_MAP[lang]
    # One token chunk per matching VP subtree; only the count is returned.
    vp_chunks = [leaf.leaves() for leaf in _leaves(const_pt, vp_tag)]
    return len(vp_chunks)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.