code stringlengths 101 5.91M |
|---|
class TrainCommand(BaseTransformersCLICommand):
    """CLI command that fine-tunes a model on a task from a CSV dataset."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the 'train' subcommand and its arguments on `parser`.

        NOTE(review): the original definition took only `parser` (no self/cls),
        so the stripped `@staticmethod` decorator is restored here.
        """
        train_parser = parser.add_parser('train', help='CLI tool to train a model on a task.')
        train_parser.add_argument('--train_data', type=str, required=True, help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.')
        train_parser.add_argument('--column_label', type=int, default=0, help='Column of the dataset csv file with example labels.')
        train_parser.add_argument('--column_text', type=int, default=1, help='Column of the dataset csv file with example texts.')
        train_parser.add_argument('--column_id', type=int, default=2, help='Column of the dataset csv file with example ids.')
        train_parser.add_argument('--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).')
        train_parser.add_argument('--validation_data', type=str, default='', help='path to validation dataset.')
        train_parser.add_argument('--validation_split', type=float, default=0.1, help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.')
        train_parser.add_argument('--output', type=str, default='./', help='path to saved the trained model.')
        train_parser.add_argument('--task', type=str, default='text_classification', help='Task to train the model on.')
        train_parser.add_argument('--model', type=str, default='bert-base-uncased', help="Model's name or path to stored model.")
        train_parser.add_argument('--train_batch_size', type=int, default=32, help='Batch size for training.')
        train_parser.add_argument('--valid_batch_size', type=int, default=64, help='Batch size for validation.')
        train_parser.add_argument('--learning_rate', type=float, default=3e-05, help='Learning rate.')
        train_parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for Adam optimizer.')
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        """Build the pipeline and load the train/validation datasets.

        Raises:
            NotImplementedError: for tasks other than text_classification.
        """
        self.logger = logging.get_logger('transformers-cli/training')
        # Prefer TensorFlow when it is installed; otherwise fall back to torch.
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info('Loading {} pipeline for {}'.format(args.task, args.model))
        if args.task == 'text_classification':
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == 'token_classification':
            raise NotImplementedError
        elif args.task == 'question_answering':
            raise NotImplementedError
        self.logger.info('Loading dataset from {}'.format(args.train_data))
        self.train_dataset = Processor.create_from_csv(args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row)
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info('Loading validation dataset from {}'.format(args.validation_data))
            self.valid_dataset = Processor.create_from_csv(args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row)
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        """Dispatch training to the detected framework."""
        if self.framework == 'tf':
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        # PyTorch training is not implemented for this command.
        raise NotImplementedError

    def run_tf(self):
        """Fit the TF pipeline on the loaded datasets and save the result."""
        self.pipeline.fit(self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size)
        self.pipeline.save_pretrained(self.output)
def compute_loss(predictions, labels, loss_wts=None):
    """Compute a weighted multi-task loss over the heads present in `labels`.

    Args:
        predictions: dict of model output tensors, keyed by head name
            ('malware', 'count', 'tags').
        labels: dict of target tensors; only the heads present here
            contribute to the loss.
        loss_wts: optional per-head weights; defaults to
            {'malware': 1.0, 'count': 0.1, 'tags': 0.1}.
            (Fixed: the original used a mutable dict as the default argument.)

    Returns:
        dict with one entry per computed head loss (as plain floats) plus
        'total', the weighted sum (a tensor suitable for backward()).
    """
    if loss_wts is None:
        loss_wts = {'malware': 1.0, 'count': 0.1, 'tags': 0.1}
    loss_dict = {'total': 0.0}
    if 'malware' in labels:
        malware_labels = labels['malware'].float().to(device)
        malware_loss = F.binary_cross_entropy(predictions['malware'].reshape(malware_labels.shape), malware_labels)
        weight = loss_wts.get('malware', 1.0)
        # .item() already returns a plain float; the original deepcopy was a no-op.
        loss_dict['malware'] = malware_loss.item()
        loss_dict['total'] += malware_loss * weight
    if 'count' in labels:
        count_labels = labels['count'].float().to(device)
        # Counts are modeled with a Poisson negative log-likelihood.
        count_loss = torch.nn.PoissonNLLLoss()(predictions['count'].reshape(count_labels.shape), count_labels)
        weight = loss_wts.get('count', 1.0)
        loss_dict['count'] = count_loss.item()
        loss_dict['total'] += count_loss * weight
    if 'tags' in labels:
        tag_labels = labels['tags'].float().to(device)
        tags_loss = F.binary_cross_entropy(predictions['tags'], tag_labels)
        weight = loss_wts.get('tags', 1.0)
        loss_dict['tags'] = tags_loss.item()
        loss_dict['total'] += tags_loss * weight
    return loss_dict
class Engine(object):
    """Toy 2-D point-mass environment that renders each position as a circle
    and saves state/action trajectories to disk."""

    def __init__(self, test_id=1, mass=1.0):
        self.mass = mass
        self.xy_limit = 1          # positions are clipped to [0, xy_limit]
        self.action_limit = 0.2    # max magnitude of a sampled action component
        self.pos = np.array([0.0, 0.0])
        self.data_path = os.path.join('./tmp/data/test{}'.format(test_id))
        # makedirs creates intermediate directories too; the original os.mkdir
        # failed when ./tmp/data did not already exist.
        os.makedirs(self.data_path, exist_ok=True)
        self.img_path = os.path.join(self.data_path, 'imgs')
        os.makedirs(self.img_path, exist_ok=True)

    def step(self, action):
        """Apply `action` (scaled by 1/mass) and return the new clipped position."""
        action = action / self.mass
        self.pos = (self.pos + action).clip(min=0, max=self.xy_limit)
        return self.pos

    def sample_action(self):
        """Return a uniform random action in [-action_limit, action_limit]^2, scaled by mass."""
        return ((np.random.random(2) - 0.5) * 2) * self.action_limit * self.mass

    def render_data(self, num=10):
        """Roll out `num` random steps, render each position, and save trajectories.

        Bug fix: the original body referenced a global `env` instead of `self`.
        """
        state_buffer = []
        action_buffer = []
        last_pos = self.pos
        state_buffer.append(last_pos)
        self.plot_circle(last_pos, os.path.join(self.img_path, '0.jpg'))
        for i in tqdm(range(num)):
            action = self.sample_action()
            pos = self.step(action)
            self.plot_circle(pos, os.path.join(self.img_path, '{}.jpg'.format(i + 1)))
            action_buffer.append(action)
            state_buffer.append(pos)
            last_pos = pos
        np.save(os.path.join(self.data_path, 'state.npy'), np.array(state_buffer))
        np.save(os.path.join(self.data_path, 'action.npy'), np.array(action_buffer))

    def plot_circle(self, pos=None, img_path=None):
        """Draw `pos` as a circle on a fixed [-0.2, 1.2]^2 canvas and save to `img_path`."""
        plt.rcParams['figure.figsize'] = (4.0, 4.0)
        fig, ax = plt.subplots()
        ax.set_xlim(-0.2, 1.2)
        ax.set_ylim(-0.2, 1.2)
        patches = [Circle((pos[0], pos[1]), 0.1)]
        colors = 100 * np.random.rand(len(patches))
        collection = PatchCollection(patches, alpha=0.4)
        collection.set_array(np.array(colors))
        ax.add_collection(collection)
        plt.axis('off')
        plt.savefig(img_path)
        plt.cla()
        plt.clf()
class GroupSampler(Sampler):
    """Sampler that yields indices grouped by ``dataset.flag``.

    Each group is padded (by re-drawing members) up to a multiple of
    ``samples_per_gpu`` so that every consecutive batch of
    ``samples_per_gpu`` indices comes from a single group.
    """

    def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        # One integer flag per sample; group i = indices where flag == i.
        self.flag = dataset.flag.astype(np.int64)
        self.group_sizes = np.bincount(self.flag)
        self.num_samples = 0
        for (i, size) in enumerate(self.group_sizes):
            # Round each group size up to a multiple of samples_per_gpu.
            self.num_samples += (int(np.ceil((size / self.samples_per_gpu))) * self.samples_per_gpu)

    def __iter__(self):
        indices = []
        for (i, size) in enumerate(self.group_sizes):
            if (size == 0):
                continue
            indice = np.where((self.flag == i))[0]
            assert (len(indice) == size)
            np.random.shuffle(indice)
            # Pad the group with randomly re-drawn members of the same group
            # so its length is a full multiple of samples_per_gpu.
            num_extra = ((int(np.ceil((size / self.samples_per_gpu))) * self.samples_per_gpu) - len(indice))
            indice = np.concatenate([indice, np.random.choice(indice, num_extra)])
            indices.append(indice)
        indices = np.concatenate(indices)
        # Shuffle at batch granularity: permute whole samples_per_gpu-sized
        # slices so each batch still contains a single group.
        indices = [indices[(i * self.samples_per_gpu):((i + 1) * self.samples_per_gpu)] for i in np.random.permutation(range((len(indices) // self.samples_per_gpu)))]
        indices = np.concatenate(indices)
        indices = indices.astype(np.int64).tolist()
        assert (len(indices) == self.num_samples)
        return iter(indices)

    def __len__(self):
        return self.num_samples
class BiFpn(nn.Module):
    """Bidirectional Feature Pyramid Network (EfficientDet-style).

    Builds resampling modules for pyramid levels that the backbone does not
    provide, then stacks ``config.fpn_cell_repeats`` BiFPN layers.
    """

    def __init__(self, config, feature_info):
        super(BiFpn, self).__init__()
        self.num_levels = config.num_levels
        norm_layer = (config.norm_layer or nn.BatchNorm2d)
        if config.norm_kwargs:
            # Bind normalization kwargs once so every layer shares them.
            norm_layer = partial(norm_layer, **config.norm_kwargs)
        act_layer = (get_act_layer(config.act_type) or _ACT_LAYER)
        fpn_config = (config.fpn_config or get_fpn_config(config.fpn_name, min_level=config.min_level, max_level=config.max_level))
        feat_sizes = get_feat_sizes(config.image_size, max_level=config.max_level)
        prev_feat_size = feat_sizes[config.min_level]
        self.resample = nn.ModuleDict()
        for level in range(config.num_levels):
            feat_size = feat_sizes[(level + config.min_level)]
            if (level < len(feature_info)):
                # Level supplied by the backbone: record channels and size.
                in_chs = feature_info[level]['num_chs']
                feature_info[level]['size'] = feat_size
            else:
                # Level beyond the backbone: resample (downsample) from the
                # previous level's output. `in_chs` carries over from the
                # last backbone level on the first extra level.
                self.resample[str(level)] = ResampleFeatureMap(in_channels=in_chs, out_channels=config.fpn_channels, input_size=prev_feat_size, output_size=feat_size, pad_type=config.pad_type, downsample=config.downsample_type, upsample=config.upsample_type, norm_layer=norm_layer, apply_bn=config.apply_resample_bn, redundant_bias=config.redundant_bias)
                in_chs = config.fpn_channels
                feature_info.append(dict(num_chs=in_chs, size=feat_size))
            prev_feat_size = feat_size
        self.cell = SequentialList()
        for rep in range(config.fpn_cell_repeats):
            logging.debug('building cell {}'.format(rep))
            fpn_layer = BiFpnLayer(feature_info=feature_info, feat_sizes=feat_sizes, fpn_config=fpn_config, fpn_channels=config.fpn_channels, num_levels=config.num_levels, pad_type=config.pad_type, downsample=config.downsample_type, upsample=config.upsample_type, norm_layer=norm_layer, act_layer=act_layer, separable_conv=config.separable_conv, apply_resample_bn=config.apply_resample_bn, pre_act=(not config.conv_bn_relu_pattern), redundant_bias=config.redundant_bias)
            self.cell.add_module(str(rep), fpn_layer)
            # Each cell may change per-level channel info; chain it forward.
            feature_info = fpn_layer.feature_info

    def forward(self, x: List[torch.Tensor]):
        # Extend the input pyramid in place with the extra resampled levels,
        # each computed from the current last level.
        for resample in self.resample.values():
            x.append(resample(x[(- 1)]))
        x = self.cell(x)
        return x
def kl_binary(p_logit, q_logit):
    """KL divergence between two Bernoulli distributions given by single logits.

    Each input is augmented with a zero logit column so the binary case can
    be delegated to the categorical KL implementation.
    """
    raw = p_logit.data if isinstance(p_logit, chainer.Variable) else p_logit
    xp = cuda.get_array_module(raw)
    pad_p = xp.zeros(p_logit.shape, xp.float32)
    pad_q = xp.zeros(q_logit.shape, xp.float32)
    return kl_categorical(F.concat([p_logit, pad_p], 1), F.concat([q_logit, pad_q], 1))
def export_model_run_task(run_dir: str, score: Dict[(str, any)]):
    """Export one task's prediction file from the checkpoints tree into `run_dir`.

    Locates ``<score['id']>.txt`` under ``../checkpoints`` (restricted to the
    task's output directory), prepends the task's target header line, and
    writes the result as ``test_pred_<name>.tsv``.

    Args:
        run_dir: destination directory for the exported TSV.
        score: mapping with at least 'task' and 'id' keys.
    """
    task_name = score['task']
    task = TASKS[task_name]()
    filename = score['id'] + '.txt'
    pred_path = None
    script_dir = os.path.dirname(os.path.realpath(__file__))
    checkpoints_dir = os.path.join(script_dir, os.path.pardir, 'checkpoints')
    for (root, dirs, files) in os.walk(checkpoints_dir):
        if (filename in files) and (task.spec().output_dir in root):
            pred_path = os.path.join(root, filename)
            break
    # NOTE(review): assert-based validation; stripped under `python -O`.
    assert (pred_path is not None)
    task_info = TASK_NAME_MAPPING.get(task_name, None)
    if task_info is None:
        # Task is not exported; nothing to write.
        return
    with open(pred_path, 'r', encoding='utf-8') as input_file:
        lines = input_file.readlines()
    # Header row: the task's target column name.
    lines.insert(0, f"{task_info['target']}\n")
    output_filename = f"test_pred_{task_info['name']}.tsv"
    output_path = os.path.join(run_dir, output_filename)
    with open(output_path, 'w', encoding='utf-8') as output_file:
        # writelines batches the output instead of one write per line.
        output_file.writelines(lines)
def get_features_from_audio(audio, tuning_offset, visualize=False):
    """Compute chroma and onset features for music synchronization.

    Relies on module-level ``Fs`` (sampling rate) and ``feature_rate``.

    Args:
        audio: audio signal samples.
        tuning_offset: tuning offset (passed to the pitch extractors).
        visualize: forwarded as the verbose/visualize flag of the helpers.

    Returns:
        (f_chroma_quantized, f_DLNCO): quantized chroma features and
        decaying locally-normalized chroma onset (DLNCO) features.
    """
    f_pitch = audio_to_pitch_features(f_audio=audio, Fs=Fs, tuning_offset=tuning_offset, feature_rate=feature_rate, verbose=visualize)
    f_chroma = pitch_to_chroma(f_pitch=f_pitch)
    f_chroma_quantized = quantize_chroma(f_chroma=f_chroma)
    f_pitch_onset = audio_to_pitch_onset_features(f_audio=audio, Fs=Fs, tuning_offset=tuning_offset, verbose=visualize)
    # DLNCO features are cut to the chroma sequence length so both streams align.
    f_DLNCO = pitch_onset_features_to_DLNCO(f_peaks=f_pitch_onset, feature_rate=feature_rate, feature_sequence_length=f_chroma_quantized.shape[1], visualize=visualize)
    return (f_chroma_quantized, f_DLNCO)
def test_minimum_spanning_tree():
    """Exercise minimum_spanning_tree on a fixed graph and on random graphs."""
    # Two disconnected components; the MST keeps the cheapest edges of each.
    dense = np.asarray([[0, 1, 0, 0, 0],
                        [1, 0, 0, 0, 0],
                        [0, 0, 0, 8, 5],
                        [0, 0, 8, 0, 1],
                        [0, 0, 5, 1, 0]])
    want = np.asarray([[0, 1, 0, 0, 0],
                       [0, 0, 0, 0, 0],
                       [0, 0, 0, 0, 5],
                       [0, 0, 0, 0, 1],
                       [0, 0, 0, 0, 0]])
    sparse = csr_matrix(dense)
    tree = minimum_spanning_tree(sparse)
    npt.assert_array_equal(tree.toarray(), want, 'Incorrect spanning tree found.')
    npt.assert_array_equal(sparse.toarray(), dense, 'Original graph was modified.')
    # With overwrite=True the input may be reused, but the MST is unchanged.
    tree = minimum_spanning_tree(sparse, overwrite=True)
    npt.assert_array_equal(tree.toarray(), want, 'Graph was not properly modified to contain MST.')
    np.random.seed(1234)
    for N in (5, 10, 15, 20):
        # Random weights in (3, 4): the MST has fewer than N edges.
        dense = 3 + np.random.random((N, N))
        tree = minimum_spanning_tree(csr_matrix(dense))
        assert_(tree.nnz < N)
        # Overlay a weight-1 superdiagonal path; it must become the whole MST.
        diag = np.arange(N - 1)
        dense[diag, diag + 1] = 1
        tree = minimum_spanning_tree(csr_matrix(dense))
        want = np.zeros((N, N))
        want[diag, diag + 1] = 1
        npt.assert_array_equal(tree.toarray(), want, 'Incorrect spanning tree found.')
def pivot_dataframe_batch(list_files, settings):
    """Pivot a list of dataframe files, in parallel chunks of ~10 files.

    In debug mode the files are processed sequentially in-process instead.

    Args:
        list_files: sequence of file paths to pivot.
        settings: configuration object; ``settings.debug`` selects the mode.
    """
    num_elem = len(list_files)
    num_chunks = (num_elem // 10) + 1
    list_chunks = np.array_split(np.arange(num_elem), num_chunks)
    if not settings.debug:
        # Leave two cores free, but never drop below one worker
        # (cpu_count() - 2 would be <= 0 on small machines and make
        # ProcessPoolExecutor raise ValueError).
        max_workers = max(1, multiprocessing.cpu_count() - 2)
        # Loop-invariant: bind settings once instead of per chunk.
        parallel_fn = partial(pivot_dataframe_single, settings=settings)
        for chunk_idx in tqdm(list_chunks, desc='Pivoting dataframes', ncols=100):
            with ProcessPoolExecutor(max_workers=max_workers) as executor:
                start, end = chunk_idx[0], chunk_idx[-1] + 1
                # NOTE(review): map results are never consumed, so worker
                # exceptions are silently dropped — confirm this is intended.
                executor.map(parallel_fn, list_files[start:end])
    else:
        logging_utils.print_yellow('Beware debugging mode (loop over pivot)')
        for fil in list_files:
            pivot_dataframe_single(fil, settings)
    logging_utils.print_green('Finished pivot')
def test():
    """Evaluate the global `model` on `test_loader` and print loss/accuracy.

    Uses legacy (pre-0.4) PyTorch APIs throughout: ``Variable(volatile=True)``,
    ``.data[0]`` and ``CrossEntropyLoss(size_average=False)``. Evaluation is
    truncated after TEST_BATCH_COUNT batches; relies on module-level
    ``model``, ``test_loader``, ``GPU_ID``, ``TEST_BATCH_COUNT`` and
    ``TEST_BATCH_SIZE``.
    """
    model.eval()
    # Sum (not average) the per-sample losses; normalized manually below.
    criterion = nn.CrossEntropyLoss(size_average=False)
    test_loss = 0
    correct = 0
    for (batch_idx, (data, target)) in enumerate(test_loader):
        (data, target) = (data.cuda(GPU_ID), target.cuda(GPU_ID))
        (data, target) = (Variable(data, volatile=True), Variable(target))
        # The model returns a tuple; logits are the first element.
        output = model(data)[0]
        test_loss += criterion(output, target).data[0]
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()
        # Stop after a fixed number of batches.
        if (batch_idx > TEST_BATCH_COUNT):
            break
    test_loss /= (TEST_BATCH_COUNT * TEST_BATCH_SIZE)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(float(test_loss), correct, (TEST_BATCH_COUNT * TEST_BATCH_SIZE), float(((100.0 * correct) / (TEST_BATCH_COUNT * TEST_BATCH_SIZE)))))
def freeze_bn(m):
    """Put any BatchNorm-like module into eval mode and freeze its parameters.

    Intended for use with ``model.apply(freeze_bn)``; non-BatchNorm modules
    are left untouched.
    """
    if 'BatchNorm' in m.__class__.__name__:
        m.eval()
        freeze_params(m)
def BatchNorm(nf, ndim=2, norm_type=NormType.Batch, **kwargs):
    """Build a BatchNorm layer with `nf` features for `ndim`-dimensional input.

    `NormType.BatchZero` requests zero-initialized scale weights.
    """
    zero_init = norm_type == NormType.BatchZero
    return _get_norm('BatchNorm', nf, ndim, zero=zero_init, **kwargs)
class RequestField(object):
    """A single field of a multipart/form-data request body.

    Holds the field name, its data, an optional filename and headers, and
    knows how to render its header section.
    """

    def __init__(self, name, data, filename=None, headers=None):
        self._name = name
        self._filename = filename
        self.data = data
        self.headers = {}
        if headers:
            # Copy so the caller's dict is never mutated.
            self.headers = dict(headers)

    @classmethod
    def from_tuples(cls, fieldname, value):
        """Build a multipart RequestField from a (filename, data[, content_type])
        tuple or from plain data.

        NOTE(review): the original used `cls` without a decorator; the
        stripped `@classmethod` (as in upstream urllib3) is restored.
        """
        if isinstance(value, tuple):
            if len(value) == 3:
                filename, data, content_type = value
            else:
                filename, data = value
                content_type = guess_content_type(filename)
        else:
            filename = None
            content_type = None
            data = value
        request_param = cls(fieldname, data, filename=filename)
        request_param.make_multipart(content_type=content_type)
        return request_param

    def _render_part(self, name, value):
        """Render one `name="value"` header parameter."""
        return format_header_param(name, value)

    def _render_parts(self, header_parts):
        """Render a dict or iterable of (name, value) pairs, skipping None values."""
        parts = []
        iterable = header_parts
        if isinstance(header_parts, dict):
            iterable = header_parts.items()
        for (name, value) in iterable:
            if value is not None:
                parts.append(self._render_part(name, value))
        return '; '.join(parts)

    def render_headers(self):
        """Render the field's headers as a CRLF-joined block (sorted keys first)."""
        lines = []
        # Well-known headers come first, in this fixed order.
        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
        for sort_key in sort_keys:
            if self.headers.get(sort_key, False):
                lines.append(('%s: %s' % (sort_key, self.headers[sort_key])))
        for (header_name, header_value) in self.headers.items():
            if header_name not in sort_keys:
                if header_value:
                    lines.append(('%s: %s' % (header_name, header_value)))
        # Blank line terminating the header section.
        lines.append('\r\n')
        return '\r\n'.join(lines)

    def make_multipart(self, content_disposition=None, content_type=None, content_location=None):
        """Fill in the headers needed to use this field in a multipart body."""
        self.headers['Content-Disposition'] = (content_disposition or 'form-data')
        self.headers['Content-Disposition'] += '; '.join(['', self._render_parts((('name', self._name), ('filename', self._filename)))])
        self.headers['Content-Type'] = content_type
        self.headers['Content-Location'] = content_location
class ReconciliationProblem2Test(AbstractTest):
    """End-to-end check of ReconciliationProblem2 under a fixed 2+2 partition."""

    def __init__(self):
        super().__init__()
        self.problem = ReconciliationProblem2()

    def name(self):
        """Short identifier used to refer to this test."""
        return 'recon2'

    def run(self):
        """Solve with a hard-coded [0, 0, 1, 1] partition and verify objectives."""
        flow = NcfEpi.new_total_flow(4)
        partitioning = HardCodedPartitioning(partition_vector=[0, 0, 1, 1])
        flow.solve(self.problem, partitioning)
        self.assert_feasibility(flow)
        # Expected objective values for this partition, checked in order.
        self.assert_eq_epsilon(flow.r1_obj_val, 40.0)
        self.assert_eq_epsilon(flow.intra_obj_vals[0], 0.0)
        self.assert_eq_epsilon(flow.intra_obj_vals[1], 0.0)
        self.assert_eq_epsilon(flow.r3_obj_val, 10.0)
        self.assert_eq_epsilon(flow.obj_val, 10.0)
def _include_file_data(login, record):
    """Ensure a deposition `record` carries its 'files' listing.

    When the record has no 'files' entry, fetch it from the API — using the
    record's explicit files link when present, otherwise building the URL
    from the record id. Best-effort: a non-200 response (or a record with
    neither link nor id) leaves the record unchanged.

    Returns:
        The (possibly updated) record.
    """
    if ('files' not in record):
        if ('files' in record.get('links', {})):
            url = record['links']['files']
        elif ('id' in record):
            # presumably a Zenodo-style deposit API — verify against login.base_url
            url = (login.base_url + 'api/deposit/depositions/{0}/files'.format(record['id']))
        else:
            # No way to locate the files endpoint for this record.
            return record
        r = login.session.get(url)
        if (r.status_code == 200):
            r_json = r.json()
            record['files'] = r_json
    return record
def elliptic_curve():
    """Run the elliptic-curve benchmark/test suite with fixed workloads."""
    for size in (100000, 500000):
        EllipticCurveTraces(size).run()
    Divpoly(59).run()
    for count in (1000, 2000, 2500):
        EllipticCurvePointMul(count).run()
    EllipticCurveMW([5, 6, 7, 8, 9]).run()
    EllipticCurveMW([50, 6, 7, 8, 9]).run()
    # These curves are expensive; run a single trial each.
    EllipticCurveMW([1, -1, 0, -79, 289]).run(trials=1)
    EllipticCurveMW([0, 0, 1, -79, 342]).run(trials=1)
class MAP_L21NormPrior(Prior):
    """MAP prior implementing the L2,1 group-norm penalty.

    Groups run along ``axis`` of a tensor of shape ``size``; ``gamma`` is the
    regularization strength. Posterior updates use group soft-thresholding.
    """

    def __init__(self, size, gamma=1, axis=0, isotropic=True):
        assert ((type(size) == tuple) and (len(size) > 1)), 'size must be a tuple of length > 1'
        self.size = size
        self.gamma = gamma
        self.axis = axis
        self.isotropic = isotropic
        self.repr_init()
        self.N = np.prod(size)      # total number of variables
        self.d = size[axis]         # group dimension

    def sample(self):
        # Sampling from this prior is not implemented; emit a placeholder.
        warnings.warn('MAP_L21NormPrior.sample not implemented return zero array as a placeholder')
        return np.zeros(self.size)

    def math(self):
        """LaTeX representation of the penalty."""
        return '$\\Vert . \\Vert_{2,1}$'

    def second_moment(self):
        raise NotImplementedError

    def forward_second_moment_FG(self, tx_hat):
        raise NotImplementedError

    def compute_forward_posterior(self, ax, bx):
        """Posterior mean/variance via group soft-thresholding of bx."""
        shrunk = group_soft_threshold(bx, self.gamma, self.axis)
        rx = (1 / ax) * shrunk
        vx = (1 / ax) * v_group_soft_threshold(bx, self.gamma, self.axis)
        if self.isotropic:
            vx = vx.mean()
        return (rx, vx)

    def compute_log_partition(self, ax, bx):
        """Average log-partition of the MAP posterior."""
        rx = (1 / ax) * group_soft_threshold(bx, self.gamma, self.axis)
        A_sum = (np.sum(((bx * rx) - ((0.5 * ax) * (rx ** 2)))) - (self.gamma * l21_norm(rx, self.axis)))
        return (A_sum / self.N)

    def b_measure(self, mx_hat, qx_hat, tx0_hat, f):
        raise NotImplementedError

    def bx_measure(self, mx_hat, qx_hat, tx0_hat, f):
        raise NotImplementedError

    def beliefs_measure(self, ax, f):
        raise NotImplementedError

    def measure(self, f):
        raise NotImplementedError
def train(arg1, arg2=None, arg3=None):
    """Train a liblinear model.

    Two call forms are accepted:
      - train(y, x[, options]): labels, instances and an options string.
      - train(prob[, param]):   a prepared `problem` and `parameter`.

    Returns:
        A trained model, or the cross-validation MSE/accuracy when
        param.cross_validation is set.

    Raises:
        TypeError: when the arguments match neither call form.
        ValueError: when liblinear rejects the parameters.
    """
    prob, param = None, None
    if isinstance(arg1, (list, tuple)):
        assert isinstance(arg2, (list, tuple))
        y, x, options = arg1, arg2, arg3
        prob = problem(y, x)
        param = parameter(options)
    elif isinstance(arg1, problem):
        prob = arg1
        if isinstance(arg2, parameter):
            param = arg2
        else:
            param = parameter(arg2)
    # Fixed: identity comparison with None (was `== None`).
    if prob is None or param is None:
        raise TypeError('Wrong types for the arguments')
    prob.set_bias(param.bias)
    liblinear.set_print_string_function(param.print_func)
    err_msg = liblinear.check_parameter(prob, param)
    if err_msg:
        raise ValueError('Error: %s' % err_msg)
    if param.cross_validation:
        l, nr_fold = prob.l, param.nr_fold
        target = (c_double * l)()
        liblinear.cross_validation(prob, param, nr_fold, target)
        ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
        # Regression solvers report MSE/SCC; classifiers report accuracy.
        if param.solver_type in [L2R_L2LOSS_SVR, L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL]:
            print('Cross Validation Mean squared error = %g' % MSE)
            print('Cross Validation Squared correlation coefficient = %g' % SCC)
            return MSE
        else:
            print('Cross Validation Accuracy = %g%%' % ACC)
            return ACC
    else:
        m = liblinear.train(prob, param)
        # Wrap the ctypes pointer in a Python-owned model object.
        m = toPyModel(m)
        return m
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
    """Register Python-binding methods for ns3::Hash::Function::Fnv1a.

    Generated-style PyBindGen registration code; `param` comes from the
    surrounding bindings module.
    """
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
    cls.add_constructor([])
    # Virtual 32/64-bit hash methods over a raw byte buffer.
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True)
    # Reset internal hash state.
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
class TrivialTriangleFactory():
    """Factory that represents triangles as plain vertex lists.

    Normals (`da`, `db`, `dc`) and `color` are accepted for interface
    compatibility but ignored.
    """

    def triangle(self, a, b, c, color=None):
        """Return the flat triangle [a, b, c]; `color` is ignored."""
        return list((a, b, c))

    def smooth_triangle(self, a, b, c, da, db, dc, color=None):
        """Return the triangle [a, b, c]; normals and `color` are ignored."""
        return list((a, b, c))
def cmpe_se_3x3_resnet164(use_1x1=True, **kwargs):
    """Build the CMPE-SE 3x3 ResNet-164 model (version 3, 164 layers).

    NOTE(review): `use_1x1` is accepted but never forwarded to
    get_cmpe_se_resnet — confirm whether it should be passed through.
    """
    return get_cmpe_se_resnet(version=3, num_layers=164, **kwargs)
class CscTrainingModel(BaseTrainingEngine, ABC):
    """Training engine for Chinese Spelling Correction (CSC).

    The total loss interpolates a detection loss and a correction loss:
    ``w * outputs[1] + (1 - w) * outputs[0]`` with ``w`` taken from
    ``cfg.MODEL.HYPER_PARAMS[0]``.
    """

    def __init__(self, cfg, *args, **kwargs):
        super().__init__(cfg, *args, **kwargs)
        # Interpolation weight between the two loss terms.
        self.w = cfg.MODEL.HYPER_PARAMS[0]

    def training_step(self, batch, batch_idx):
        # batch: (original texts, corrected texts, detection labels)
        (ori_text, cor_text, det_labels) = batch
        outputs = self.forward(ori_text, cor_text, det_labels)
        # outputs[0]/outputs[1] are the two loss terms — confirm ordering against forward()
        loss = ((self.w * outputs[1]) + ((1 - self.w) * outputs[0]))
        self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True, batch_size=len(ori_text))
        return loss

    def validation_step(self, batch, batch_idx):
        """Compute loss plus per-sentence detection/correction accuracy labels."""
        (ori_text, cor_text, det_labels) = batch
        outputs = self.forward(ori_text, cor_text, det_labels)
        loss = ((self.w * outputs[1]) + ((1 - self.w) * outputs[0]))
        # outputs[2]: detection scores (thresholded at 0.5); outputs[3]: correction logits.
        det_y_hat = (outputs[2] > 0.5).long()
        cor_y_hat = torch.argmax(outputs[3], dim=(- 1))
        encoded_x = self.tokenizer(cor_text, padding=True, return_tensors='pt')
        encoded_x.to(self._device)
        cor_y = encoded_x['input_ids']
        # Zero out predictions on padding positions.
        cor_y_hat *= encoded_x['attention_mask']
        results = []
        det_acc_labels = []
        cor_acc_labels = []
        for (src, tgt, predict, det_predict, det_label) in zip(ori_text, cor_y, cor_y_hat, det_y_hat, det_labels):
            _src = self.tokenizer(src, add_special_tokens=False)['input_ids']
            # Slice [1 : len(_src)+1] strips the leading special token and padding.
            _tgt = tgt[1:(len(_src) + 1)].cpu().numpy().tolist()
            _predict = predict[1:(len(_src) + 1)].cpu().numpy().tolist()
            # 1 when the whole corrected sentence matches the target exactly.
            cor_acc_labels.append((1 if operator.eq(_tgt, _predict) else 0))
            det_acc_labels.append(det_predict[1:(len(_src) + 1)].equal(det_label[1:(len(_src) + 1)]))
            results.append((_src, _tgt, _predict))
        return (loss.cpu().item(), det_acc_labels, cor_acc_labels, results)

    def validation_epoch_end(self, outputs) -> None:
        """Aggregate per-step outputs and log epoch-level metrics."""
        det_acc_labels = []
        cor_acc_labels = []
        results = []
        for out in outputs:
            det_acc_labels += out[1]
            cor_acc_labels += out[2]
            results += out[3]
        loss = np.mean([out[0] for out in outputs])
        self.log('val_loss', loss)
        logger.info(f'loss: {loss}')
        logger.info(f'''Detection:
acc: {np.mean(det_acc_labels):.4f}''')
        logger.info(f'''Correction:
acc: {np.mean(cor_acc_labels):.4f}''')
        compute_corrector_prf(results, logger)
        compute_sentence_level_prf(results, logger)

    def test_step(self, batch, batch_idx):
        # Testing reuses the validation logic.
        return self.validation_step(batch, batch_idx)

    def test_epoch_end(self, outputs) -> None:
        logger.info('Test.')
        self.validation_epoch_end(outputs)

    def predict(self, texts):
        """Correct `texts` and return the decoded corrected strings."""
        inputs = self.tokenizer(texts, padding=True, return_tensors='pt')
        inputs.to(self.cfg.MODEL.DEVICE)
        with torch.no_grad():
            outputs = self.forward(texts)
            y_hat = torch.argmax(outputs[1], dim=(- 1))
            # Number of real tokens per text (attention_mask sum minus one special token).
            expand_text_lens = (torch.sum(inputs['attention_mask'], dim=(- 1)) - 1)
        rst = []
        for (t_len, _y_hat) in zip(expand_text_lens, y_hat):
            # Decode without the leading special token; drop tokenizer spaces.
            rst.append(self.tokenizer.decode(_y_hat[1:t_len]).replace(' ', ''))
        return rst
class PAPIUtils(object):
    """Helper routines for PAPI performance-counter instrumentation.

    NOTE(review): none of these methods take ``self`` — they are used as
    static utilities (called as ``PAPIUtils.<name>(...)``); the original
    ``@staticmethod`` decorators appear to have been stripped from this copy.
    """

    def available_counters() -> Dict[(str, int)]:
        """Return the PAPI counters available on this host.

        Parses ``papi_avail`` output; best-effort: returns {} on Windows or
        when the subprocess fails or times out.
        """
        # PAPI is not available on Windows.
        if (os.name == 'nt'):
            return {}
        try:
            p = subprocess.Popen("papi_avail -d -a | grep -E '^PAPI_'", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
            (stdout, _) = p.communicate(timeout=60)
        except (subprocess.TimeoutExpired, subprocess.CalledProcessError):
            return {}
        # Each line: counter name, <unused>, numeric code, ... (tab-separated).
        counters = [line.split('\t') for line in stdout.split('\n')]
        result = {}
        for counter in counters:
            if (len(counter) >= 3):
                result[counter[0]] = int(counter[2])
        return result

    def is_papi_used(sdfg: dace.SDFG) -> bool:
        """Return True if any node (or map) in `sdfg` requests PAPI instrumentation."""
        for (node, _) in sdfg.all_nodes_recursive():
            if (isinstance(node, nodes.EntryNode) and (node.map.instrument == dace.InstrumentationType.PAPI_Counters)):
                return True
            if (hasattr(node, 'instrument') and (node.instrument == dace.InstrumentationType.PAPI_Counters)):
                return True
        return False

    def reduce_iteration_count(begin, end, step, rparams: dict):
        """Replace dynamic symbols in a range with expected-value expressions.

        `rparams` maps symbol names to replacement expressions; dynamic step
        symbols are not supported.
        """
        # Collect the free symbols of each range component (none for literals).
        if isinstance(begin, int):
            start_syms = []
        else:
            start_syms = symbolic.symlist(begin).keys()
        if isinstance(end, int):
            end_syms = []
        else:
            end_syms = symbolic.symlist(end).keys()
        if isinstance(step, int):
            step_syms = []
        else:
            step_syms = symbolic.symlist(step).keys()

        def intersection(lista, listb):
            # Symbols appearing in both iterables.
            return [x for x in lista if (x in listb)]

        # Symbols of each component that are dynamically parameterized.
        start_dyn_syms = intersection(start_syms, rparams.keys())
        end_dyn_syms = intersection(end_syms, rparams.keys())
        step_dyn_syms = intersection(step_syms, rparams.keys())

        def replace_func(element, dyn_syms, retparams):
            # Substitute each dynamic symbol by Min(x*(x-1)/2, 0) of its
            # replacement, and propagate the substitution into retparams.
            for x in dyn_syms:
                target = sp.functions.Min(((retparams[x] * (retparams[x] - 1)) / 2), 0)
                bstr = str(element)
                element = symbolic.pystr_to_symbolic(bstr)
                element = element.subs(x, target)
                for (k, v) in retparams.items():
                    newv = symbolic.pystr_to_symbolic(str(v))
                    tarsyms = symbolic.symlist(target).keys()
                    # Avoid self-referential substitution.
                    if (x in tarsyms):
                        continue
                    tmp = newv.subs(x, target)
                    if (tmp != v):
                        retparams[k] = tmp
            return element

        if (len(start_dyn_syms) > 0):
            pass
            begin = replace_func(begin, start_dyn_syms, rparams)
        if (len(end_dyn_syms) > 0):
            pass
            end = replace_func(end, end_dyn_syms, rparams)
        if (len(step_dyn_syms) > 0):
            # Dynamic step symbols are not supported.
            pass
            print(('Dynamic step symbols %s!' % str(step)))
            raise NotImplementedError
        return (begin, end, step)

    def get_iteration_count(map_entry: MapEntry, mapvars: dict):
        """Compute per-parameter iteration counts of a map, given known symbol values."""
        _map = map_entry.map
        _it = _map.params
        retparams = dict(**mapvars)
        for (i, r) in enumerate(_map.range):
            (begin, end, step) = r
            # DaCe ranges are inclusive; make `end` exclusive for counting.
            end = (end + 1)
            if isinstance(begin, symbolic.SymExpr):
                begin = begin.expr
            if isinstance(end, symbolic.SymExpr):
                end = end.expr
            if isinstance(step, symbolic.SymExpr):
                step = step.expr
            (begin, end, step) = PAPIUtils.reduce_iteration_count(begin, end, step, retparams)
            num = ((end - begin) / step)
            retparams[_it[i]] = num
        return retparams

    def all_maps(map_entry: EntryNode, dfg: SubgraphView) -> List[EntryNode]:
        """Return all map entry nodes nested inside `map_entry`."""
        state: dace.SDFGState = dfg.graph
        subgraph = state.scope_subgraph(map_entry, include_entry=False)
        return [n for n in subgraph.nodes() if isinstance(n, EntryNode)]

    def get_memlet_byte_size(sdfg: dace.SDFG, memlet: Memlet):
        """Bytes moved by `memlet` (0 for dynamic memlets, whose volume is unknown)."""
        if memlet.dynamic:
            return 0
        memdata = sdfg.arrays[memlet.data]
        return (memlet.volume * memdata.dtype.bytes)

    def get_out_memlet_costs(sdfg: dace.SDFG, state_id: int, node: nodes.Node, dfg: DataflowGraphView):
        """Byte cost of `node`'s outgoing memlets into access nodes of the same scope."""
        scope_dict = sdfg.node(state_id).scope_dict()
        out_costs = 0
        for edge in dfg.out_edges(node):
            (_, uconn, v, _, memlet) = edge
            dst_node = dfg.memlet_path(edge)[(- 1)].dst
            if (isinstance(node, nodes.CodeNode) and isinstance(dst_node, nodes.AccessNode)):
                # Writes into an inner scope are accounted for there.
                if ((scope_dict[node] != scope_dict[dst_node]) and scope_contains_scope(scope_dict, node, dst_node)):
                    continue
                if (not uconn):
                    # No output connector: cost cannot be attributed.
                    return 0
                if (memlet.subset.data_dims() == 0):
                    if (memlet.wcr is not None):
                        # Write-conflict resolution: read + compute + write (3x).
                        out_costs += (3 * PAPIUtils.get_memlet_byte_size(sdfg, memlet))
                    else:
                        out_costs += PAPIUtils.get_memlet_byte_size(sdfg, memlet)
        return out_costs

    def get_tasklet_byte_accesses(tasklet: nodes.CodeNode, dfg: DataflowGraphView, sdfg: dace.SDFG, state_id: int) -> str:
        """C++ expression string for the total bytes a tasklet reads and writes."""
        in_accum = []
        out_accum = []
        in_edges = dfg.in_edges(tasklet)
        for ie in in_edges:
            in_accum.append(PAPIUtils.get_memlet_byte_size(sdfg, ie.data))
        out_accum.append(PAPIUtils.get_out_memlet_costs(sdfg, state_id, tasklet, dfg))
        full = in_accum
        full.extend(out_accum)
        return (('(' + sym2cpp(sum(full))) + ')')

    def get_parents(outermost_node: nodes.Node, node: nodes.Node, sdfg: dace.SDFG, state_id: int) -> List[nodes.Node]:
        """Scope chain from `outermost_node` down to `node` (exclusive of `node`)."""
        parent = None
        # Find the state that contains `node` and read its immediate scope.
        for state in sdfg.nodes():
            s_d = state.scope_dict()
            try:
                scope = s_d[node]
            except KeyError:
                continue
            if (scope is not None):
                parent = scope
                break
        if (parent is None):
            return []
        if (parent == outermost_node):
            return [parent]
        # Recurse upward until the outermost node is reached.
        return (PAPIUtils.get_parents(outermost_node, parent, sdfg, state_id) + [parent])

    def get_memory_input_size(node, sdfg, state_id) -> str:
        """C++ expression string for the bytes entering `node` in the given state."""
        curr_state = sdfg.nodes()[state_id]
        input_size = 0
        for edge in curr_state.in_edges(node):
            num_accesses = edge.data.num_accesses
            bytes_per_element = sdfg.arrays[edge.data.data].dtype.bytes
            input_size = (input_size + (bytes_per_element * num_accesses))
        return sym2cpp(input_size)

    def accumulate_byte_movement(outermost_node, node, dfg: DataflowGraphView, sdfg, state_id):
        """Recursively accumulate the bytes moved under `node`, scaled by the
        iteration counts of all enclosing maps."""
        itvars = dict()
        if isinstance(node, MapEntry):
            children = dfg.scope_children()[node]
        else:
            children = []
        assert (not (node in children))
        if (len(children) > 0):
            # Interior node: sum over the scope's children.
            size = 0
            for x in children:
                size = (size + PAPIUtils.accumulate_byte_movement(outermost_node, x, dfg, sdfg, state_id))
            return size
        else:
            if isinstance(node, MapExit):
                # Map exits carry no cost of their own.
                return 0
            # Multiply iteration counts of all maps enclosing this leaf.
            parent_list = PAPIUtils.get_parents(outermost_node, node, sdfg, state_id)
            if isinstance(node, MapEntry):
                map_list = (parent_list + [node])
            else:
                map_list = parent_list
            for x in map_list:
                itvars = PAPIUtils.get_iteration_count(x, itvars)
            itcount = 1
            for x in itvars.values():
                itcount = (itcount * x)
            if isinstance(node, MapEntry):
                # A MapEntry with no children should not occur here.
                raise ValueError('Unexpected node')
            elif isinstance(node, MapExit):
                return 0
            elif isinstance(node, Tasklet):
                return (itcount * symbolic.pystr_to_symbolic(PAPIUtils.get_tasklet_byte_accesses(node, dfg, sdfg, state_id)))
            elif isinstance(node, nodes.AccessNode):
                return 0
            else:
                raise NotImplementedError
def can_move(movable):
    """Return True if `movable` can move along at least one of the x/y/z axes."""
    return any(check(movable) for check in (can_move_x, can_move_y, can_move_z))
class MetricOptions():
    """Bag of options shared by metric computations (models, datasets, devices).

    NOTE(review): `G_ema`, `M` and `M_kwargs` are accepted but never stored —
    confirm against callers whether that is intentional.
    """

    def __init__(self, G_ema=None, G=None, D=None, M=None, G_kwargs=None, D_kwargs=None, M_kwargs=None, dataset_kwargs=None, testset_kwargs=None, num_gpus=1, rank=0, device=None, progress=None, cache=True, txt_recon=True, img_recon=False, metric_only_test=False, use_fmri=False, fmri_vec=None, fmri_vec2=None, structure=2):
        # Fixed: the kwargs dicts used mutable default arguments ({});
        # they now default to None and are materialized per call.
        assert (0 <= rank < num_gpus)
        self.G = G
        self.D = D
        self.G_kwargs = dnnlib.EasyDict(G_kwargs or {})
        self.D_kwargs = dnnlib.EasyDict(D_kwargs or {})
        self.dataset_kwargs = dnnlib.EasyDict(dataset_kwargs or {})
        self.testset_kwargs = dnnlib.EasyDict(testset_kwargs or {})
        self.num_gpus = num_gpus
        self.rank = rank
        # Default to the CUDA device matching this rank.
        self.device = device if (device is not None) else torch.device('cuda', rank)
        # Only rank 0 reports progress; other ranks get a silent monitor.
        self.progress = progress.sub() if ((progress is not None) and (rank == 0)) else ProgressMonitor()
        self.cache = cache
        self.txt_recon = txt_recon
        self.img_recon = img_recon
        self.metric_only_test = metric_only_test
        self.use_fmri = use_fmri
        if use_fmri:
            assert (fmri_vec is not None)
            self.fmri_vec = fmri_vec
        self.structure = structure
        # Structure 4 additionally requires a second fMRI vector.
        if (use_fmri and (structure == 4)):
            assert (fmri_vec2 is not None)
            self.fmri_vec2 = fmri_vec2
def action_android():
    """Set up the build environment and Android NDK, then build for Android.

    The trailing ``sccache -s`` (compiler-cache statistics dump) is
    best-effort: a CommandFailed from it is ignored.
    """
    (sccache, python, pip) = setup_basic_build_env()
    setup_android_ndk()
    handle_alternate_actions()
    build_android(python, pip)
    try:
        sccache('-s')
    except CommandFailed:
        # Stats are informational only; ignore failures.
        pass
# NOTE(review): the decorator line was garbled to ".torch" in this copy;
# restored as the pytest marker used for torch-dependent tests — confirm
# against the original test module.
@pytest.mark.torch
def test_item_embedder_weights(tensor_schema):
    """Item embedder must return a (num_ids, hidden_size) weight matrix."""
    item_embedder = SasRecModel(tensor_schema.subset(['item_id', 'timestamp']), hidden_size=64, max_len=5, ti_modification=True).item_embedder
    assert (item_embedder.get_item_weights(torch.tensor([0, 1, 2, 3])).size() == (4, 64))
def multiple_samples_collate(batch, fold=False):
    """Collate batches in which every sample contributes a list of clips.

    Each batch element is (inputs, labels, video_idx, extra_data) where the
    first three are per-clip lists; these are flattened across samples before
    the default collation. `extra_data` is collated as-is.

    When `fold` is True, the collated inputs are wrapped in a single-element list.
    """
    inputs, labels, video_idx, extra_data = zip(*batch)

    def _flatten(groups):
        # Concatenate the per-sample clip lists into one flat list.
        return [item for group in groups for item in group]

    inputs = default_collate(_flatten(inputs))
    labels = default_collate(_flatten(labels))
    video_idx = default_collate(_flatten(video_idx))
    extra_data = default_collate(extra_data)
    if fold:
        return [inputs], labels, video_idx, extra_data
    return inputs, labels, video_idx, extra_data
# NOTE(review): the decorator line was garbled to ".parametrize(...)" in this
# copy; restored as the standard pytest parametrization — confirm against the
# original test module.
@pytest.mark.parametrize('channel_axis', [0, 1, 2, (- 1), (- 2), (- 3)])
def test_laplacian_pyramid_max_layers(channel_axis):
    """The Laplacian pyramid must reach a 1x1 (spatial) top layer for any downscale."""
    for downscale in [2, 3, 5, 7]:
        if (channel_axis is None):
            # Dead branch under the current parametrization (no None value);
            # kept for parity with the channel-free variant of this test.
            shape = (32, 8)
            shape_without_channels = shape
        else:
            shape_without_channels = (32, 8)
            ndim = (len(shape_without_channels) + 1)
            n_channels = 5
            # Insert the channel dimension at the requested axis.
            shape = list(shape_without_channels)
            shape.insert((channel_axis % ndim), n_channels)
            shape = tuple(shape)
        img = np.ones(shape)
        pyramid = pyramids.pyramid_laplacian(img, downscale=downscale, channel_axis=channel_axis)
        # Expected number of layers until the largest spatial dim collapses to 1.
        max_layer = math.ceil(math.log(max(shape_without_channels), downscale))
        for (layer, out) in enumerate(pyramid):
            if (channel_axis is None):
                out_shape_without_channels = out.shape
            else:
                # The channel count must be preserved at every layer.
                assert (out.shape[channel_axis] == n_channels)
                out_shape_without_channels = list(out.shape)
                out_shape_without_channels.pop(channel_axis)
                out_shape_without_channels = tuple(out_shape_without_channels)
            if (layer < max_layer):
                # No fully-collapsed layer before the top.
                assert (max(out_shape_without_channels) > 1)
        assert_equal(max_layer, layer)
        assert (out_shape_without_channels == (1, 1))
_utils.polymorphic_model()
class GdsMesh(Mesh):
    """Mesh whose in-plane geometry is taken from a GDS layer."""
    # NOTE(review): the "_utils.polymorphic_model()" line above looks like a
    # "@schema_utils.polymorphic_model()" decorator mangled by extraction --
    # confirm against the original source.
    # Schema discriminator used for polymorphic (de)serialization.
    type = schema_utils.polymorphic_model_type('mesh.gds_mesh')
    # Material the mesh region is filled with.
    material = types.ModelType(Material)
    # 2-vector of extents -- presumably the span along the extrusion
    # direction; confirm against consumers of this schema.
    extents = optplan.vec2d()
    # List of ints identifying the GDS layer (layer/datatype pair, likely).
    gds_layer = types.ListType(types.IntType())
def save_graph_to_file(sess, graph, graph_file_name):
    """Freeze the graph's variables into constants and serialize the
    resulting GraphDef protobuf to `graph_file_name`."""
    frozen_graph_def = graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
    with gfile.FastGFile(graph_file_name, 'wb') as out_file:
        out_file.write(frozen_graph_def.SerializeToString())
def conv(x, channels, kernel=4, stride=2, pad=0, pad_type='zero', use_bias=True, scope='conv'):
    """2-D convolution with optional manual padding (TF1 style).

    Discriminator scopes use an N(0, 0.02) initializer (DCGAN convention);
    everything else uses a variance-scaling (He) initializer. `pad_type`
    is 'zero' or 'reflect'; any other value silently skips padding
    (NOTE(review): confirm that is intended).
    """
    with tf.variable_scope(scope):
        # Idiom fix: membership test instead of calling
        # scope.__contains__('discriminator') directly.
        if ('discriminator' in scope):
            weight_init = tf.random_normal_initializer(mean=0.0, stddev=0.02)
        else:
            weight_init = tf_contrib.layers.variance_scaling_initializer()
        if (pad > 0):
            h = x.get_shape().as_list()[1]
            # "SAME"-style total padding: depends on whether the input
            # height divides the stride evenly.
            if ((h % stride) == 0):
                pad = (pad * 2)
            else:
                pad = max((kernel - (h % stride)), 0)
            # Split the total padding (extra pixel goes to bottom/right).
            pad_top = (pad // 2)
            pad_bottom = (pad - pad_top)
            pad_left = (pad // 2)
            pad_right = (pad - pad_left)
            if (pad_type == 'zero'):
                x = tf.pad(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]])
            if (pad_type == 'reflect'):
                x = tf.pad(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]], mode='REFLECT')
        # `weight_regularizer` is a module-level global in this file.
        x = tf.layers.conv2d(inputs=x, filters=channels, kernel_size=kernel, kernel_initializer=weight_init, kernel_regularizer=weight_regularizer, strides=stride, use_bias=use_bias)
        return x
def perform_tests_with(clf, cv_test, stopwords=True):
    """Exercise a trained classifier end to end.

    Covers predict / predict_proba / classify* APIs, multilabel output,
    default-category handling, incremental learning of a new category,
    learned stopwords, and block (paragraph/sentence/word) delimiters.
    Relies on module-level fixtures: x_test, y_test, doc_unknown,
    doc_blocks0/1, STOPWORDS, STR_* constants and *_DELTR delimiters.
    """
    # Fixture documents/labels for the multilabel checks.
    multilabel_doc = (x_test[0] + x_test[1])
    multilabel_labels = [y_test[0], y_test[1]]
    multilabel_idxs = [clf.get_category_index(y_test[0]), clf.get_category_index(y_test[1])]
    new_cat = 'bla'
    def_cat = 'music'
    def_cat_idx = clf.get_category_index(def_cat)
    most_prob_cat = clf.get_most_probable_category()
    most_prob_cat_idx = clf.__get_most_probable_category__()
    # Unknown category names map to -1 (lookup is case-sensitive here).
    assert (clf.get_category_index('SpOrTs') == (- 1))
    # Basic prediction, including after changing the 'a' hyperparameter.
    y_pred = clf.predict(x_test)
    assert (y_pred == y_test)
    clf.set_a(0.1)
    y_pred = clf.predict(x_test)
    assert (y_pred == y_test)
    clf.set_a(0)
    # Multilabel and index-based (labels=False) prediction variants.
    y_pred = clf.predict(x_test, multilabel=True)
    assert (y_pred == [[y] for y in y_test])
    y_pred = clf.predict(x_test, multilabel=True, labels=False)
    assert (y_pred == [[clf.get_category_index(y)] for y in y_test])
    y_pred = clf.predict(x_test, labels=False)
    y_pred = [clf.get_category_name(ic) for ic in y_pred]
    assert (y_pred == y_test)
    # Default-category behavior for a document the model cannot place.
    y_pred = clf.predict([doc_unknown], def_cat=STR_UNKNOWN)
    assert (y_pred[0] == STR_UNKNOWN_CATEGORY)
    y_pred = clf.predict([doc_unknown], multilabel=True)
    assert (y_pred[0] == [])
    y_pred = clf.predict([doc_unknown], def_cat=STR_MOST_PROBABLE, multilabel=True, labels=False)
    assert (y_pred[0] == [most_prob_cat_idx])
    y_pred = clf.predict([doc_unknown], def_cat=STR_MOST_PROBABLE)
    assert (y_pred[0] == most_prob_cat)
    assert (y_pred[0] == 'science&technology')
    assert (clf.predict([doc_unknown], def_cat=def_cat)[0] == def_cat)
    # predict_proba must agree with predict via argmax, and match cv_test.
    y_pred = clf.predict_proba(x_test)
    assert (y_test == [clf.get_category_name(argmax(cv)) for cv in y_pred])
    assert ([round(p, 5) for p in y_pred[0]] == cv_test)
    y_pred = clf.predict_proba([doc_unknown])
    assert (y_pred[0] == ([0] * len(clf.get_categories())))
    # classify on empty/None input yields all-zero confidence values.
    assert (clf.classify(None)[0][1] == 0)
    assert (clf.classify('')[0][1] == 0)
    pred = clf.classify(doc_unknown, sort=False, prep=False)
    assert (pred == ([0] * len(clf.get_categories())))
    pred = clf.classify(doc_unknown, sort=False)
    assert (pred == ([0] * len(clf.get_categories())))
    # sort=True must put the argmax category first with the same value.
    pred0 = clf.classify(x_test[0], sort=False)
    assert (argmax(pred0) == clf.get_category_index(y_test[0]))
    pred1 = clf.classify(x_test[0], sort=True)
    assert (pred1[0][0] == clf.get_category_index(y_test[0]))
    assert ((argmax(pred0) == pred1[0][0]) and (pred0[argmax(pred0)] == pred1[0][1]))
    # classify_label: name vs index output and default-category fallbacks.
    assert (clf.classify_label(x_test[0]) == y_test[0])
    assert (clf.classify_label(x_test[0], labels=False) == clf.get_category_index(y_test[0]))
    assert (clf.classify_label('') == most_prob_cat)
    assert (clf.classify_label('', def_cat=STR_UNKNOWN) == STR_UNKNOWN_CATEGORY)
    assert (clf.classify_label('', def_cat=def_cat) == def_cat)
    assert (clf.classify_label(doc_unknown) == most_prob_cat)
    assert (clf.classify_label(doc_unknown, def_cat=STR_UNKNOWN) == STR_UNKNOWN_CATEGORY)
    assert (clf.classify_label(doc_unknown, def_cat=def_cat) == def_cat)
    assert (clf.classify_label(doc_unknown, labels=False) == most_prob_cat_idx)
    assert (clf.classify_label(doc_unknown, def_cat=STR_UNKNOWN, labels=False) == (- 1))
    assert (clf.classify_label(doc_unknown, def_cat=def_cat, labels=False) == def_cat_idx)
    # classify_multilabel must return both composed labels (any order).
    r = clf.classify_multilabel(multilabel_doc)
    assert (len(multilabel_labels) == len(r))
    assert ((r[0] in multilabel_labels) and (r[1] in multilabel_labels))
    r = clf.classify_multilabel(multilabel_doc, labels=False)
    assert (len(multilabel_labels) == len(r))
    assert ((r[0] in multilabel_idxs) and (r[1] in multilabel_idxs))
    assert (clf.classify_multilabel('') == [])
    assert (clf.classify_multilabel('', def_cat=STR_MOST_PROBABLE) == [most_prob_cat])
    assert (clf.classify_multilabel('', def_cat=def_cat) == [def_cat])
    assert (clf.classify_multilabel(doc_unknown) == [])
    assert (clf.classify_multilabel(doc_unknown, def_cat=STR_MOST_PROBABLE) == [most_prob_cat])
    assert (clf.classify_multilabel(doc_unknown, def_cat=def_cat) == [def_cat])
    assert (clf.classify_multilabel(doc_unknown, labels=False) == [])
    assert (clf.classify_multilabel(doc_unknown, def_cat=STR_MOST_PROBABLE, labels=False) == [most_prob_cat_idx])
    assert (clf.classify_multilabel(doc_unknown, def_cat=def_cat, labels=False) == [def_cat_idx])
    # Online learning: after learning, the unknown doc maps to the new cat.
    clf.learn((doc_unknown * 2), new_cat, update=True)
    assert (new_cat in clf.get_categories())
    y_pred = clf.predict([doc_unknown])
    assert (y_pred[0] == new_cat)
    if stopwords:
        # All known stopwords should be recovered at this threshold.
        learned_stopwords = clf.get_stopwords(0.01)
        assert ([sw for sw in STOPWORDS if (sw in learned_stopwords)] == STOPWORDS)
    # Block classification: default delimiters, then custom ones.
    pred = clf.classify(doc_blocks0, json=True)
    assert ((len(pred['pars']) == 1) and (len(pred['pars'][0]['sents']) == 1))
    assert (len(pred['pars'][0]['sents'][0]['words']) == 15)
    clf.set_block_delimiters(parag='!', sent='\\?')
    pred = clf.classify(doc_blocks0, json=True)
    assert (len(pred['pars']) == (2 + 1))
    assert (len(pred['pars'][0]['sents']) == 4)
    clf.set_block_delimiters(sent='(\\?)')
    assert (len(pred['pars'][0]['sents']) == 4)
    clf.set_block_delimiters(word='-')
    pred = clf.classify(doc_blocks1, json=True)
    assert (len(pred['pars'][0]['sents'][0]['words']) == 5)
    # Restore the module-default delimiters for subsequent tests.
    clf.set_block_delimiters(parag=PARA_DELTR, sent=SENT_DELTR, word=WORD_DELTR)
def sentnet_color_2d(width, height, frame_count, lr, output=9, model_name='sentnet_color.model'):
    """Build an AlexNet-flavored tflearn DNN over RGB frames.

    Input is (batch, width, height, 3); `output` is the number of softmax
    classes. `frame_count` and `model_name` are accepted for signature
    compatibility but unused here.
    """
    net = input_data(shape=[None, width, height, 3], name='input')
    # First conv stage.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Stacked 3x3 convolutions.
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Four fully-connected tanh blocks with dropout.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum', loss='categorical_crossentropy', learning_rate=lr, name='targets')
    return tflearn.DNN(net, max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')
class KLLoss(loss._Loss):
    """Cross entropy against soft labels (distillation-style training).

    In eval mode this falls back to standard hard-label cross entropy.
    In train mode `output` must be a `(model_output, soft_labels)` pair of
    equally sized (batch, classes) tensors with detached soft labels;
    returns `(loss, log_probs)` so callers can reuse the log-probabilities.
    """

    def forward(self, output, target):
        if (not self.training):
            # Plain hard-label cross entropy for evaluation.
            return F.cross_entropy(output, target)
        # isinstance() is the correct type check (was: type(output) == tuple).
        assert (isinstance(output, tuple) and (len(output) == 2) and (output[0].size() == output[1].size())), 'output must a pair of tensors of same size.'
        (model_output, soft_labels) = output
        if soft_labels.requires_grad:
            raise ValueError('soft labels should not require gradients.')
        model_output_log_prob = F.log_softmax(model_output, dim=1)
        del model_output  # only the log-probs are needed from here on
        # Batched dot product: (B,1,C) x (B,C,1) -> per-sample cross entropy.
        soft_labels = soft_labels.unsqueeze(1)
        model_output_log_prob = model_output_log_prob.unsqueeze(2)
        cross_entropy_loss = (- torch.bmm(soft_labels, model_output_log_prob))
        cross_entropy_loss = cross_entropy_loss.mean()
        # Return log-probs in their original (B, C) shape.
        model_output_log_prob = model_output_log_prob.squeeze(2)
        return (cross_entropy_loss, model_output_log_prob)
def get_chunks_by_qa(qa_pair, article_seg_json):
    """Select the article segments relevant to a QA pair.

    For Slack-style segments (segment id contains 'slack'), ids match on
    their first two dash-separated components; otherwise they match on
    everything before the final dash component. Returns the matching
    {segment_key: segment} dict.
    """
    selected = {}
    slack_style = ('slack' in qa_pair['article_segment_id'])
    for seg_key, segment in article_seg_json.items():
        if slack_style:
            sent_ids = {'-'.join(sent['id'].split('-')[:2]) for sent in segment['seg_dialog']}
            lookup_keys = ('-'.join(full_id.split('-')[:2]) for full_id in qa_pair['article_full_id'])
        else:
            sent_ids = {sent['id'].rsplit('-', 1)[0] for sent in segment['seg_dialog']}
            lookup_keys = iter(qa_pair['article_full_id'])
        if any((key in sent_ids) for key in lookup_keys):
            selected[seg_key] = segment
    return selected
def debug_code_agents(agent_test_config, memory_json_file, workspace: Workspace):
    """Build three Debug Code agents whose goal phrasing gets progressively
    less explicit (detailed commands -> terse steps -> outcome only).

    Returns the list of configured Agent instances, all sharing the same
    memory and workspace root.
    """
    agents = []
    goals = [['1- Run test.py using the execute_python_file command.', '2- Read code.py using the read_file command.', '3- Modify code.py using the write_to_file command.Repeat step 1, 2 and 3 until test.py runs without errors.'], ['1- Run test.py.', '2- Read code.py.', '3- Modify code.py.Repeat step 1, 2 and 3 until test.py runs without errors.'], ['1- Make test.py run without errors.']]
    for goal in goals:
        # Same role/name for every agent; only the goal wording differs.
        ai_config = AIConfig(ai_name='Debug Code Agent', ai_role='an autonomous agent that specializes in debugging python code', ai_goals=goal)
        command_registry = get_command_registry(agent_test_config)
        ai_config.command_registry = command_registry
        system_prompt = ai_config.construct_full_prompt()
        # Force non-continuous mode so each step awaits authorization.
        Config().set_continuous_mode(False)
        agents.append(Agent(ai_name='Debug Code Agent', memory=memory_json_file, command_registry=command_registry, ai_config=ai_config, config=agent_test_config, next_action_count=0, system_prompt=system_prompt, triggering_prompt=DEFAULT_TRIGGERING_PROMPT, workspace_directory=workspace.root))
    return agents
def get_args():
    """Parse the two positional CLI arguments: `from_path` and `to_path`."""
    arg_parser = argparse.ArgumentParser()
    for positional in ('from_path', 'to_path'):
        arg_parser.add_argument(positional)
    return arg_parser.parse_args()
def ignore_in_to_list(getitem_function):
    """Decorator: mark a getitem-style accessor so that to-list conversion
    skips it. Returns the function unchanged apart from the marker."""
    setattr(getitem_function, 'ignore_in_to_list', True)
    return getitem_function
class semanticModule(nn.Module):
    """Two-level encoder/decoder ("semantic") module.

    forward() returns both the flattened bottleneck features and the
    decoded map so callers can attach losses to either.
    """

    def __init__(self, in_dim):
        super(semanticModule, self).__init__()
        self.chanel_in = in_dim  # (sic) name kept for checkpoint compatibility
        # Channel width doubles per encoder stage and halves when decoding.
        self.enc1 = _EncoderBlock(in_dim, (in_dim * 2))
        self.enc2 = _EncoderBlock((in_dim * 2), (in_dim * 4))
        self.dec2 = _DecoderBlock((in_dim * 4), (in_dim * 2), (in_dim * 2))
        self.dec1 = _DecoderBlock((in_dim * 2), in_dim, in_dim)

    def forward(self, x):
        enc1 = self.enc1(x)
        enc2 = self.enc2(enc1)
        dec2 = self.dec2(enc2)
        # F.upsample is deprecated; F.interpolate is its drop-in replacement
        # (identical semantics/arguments).
        dec1 = self.dec1(F.interpolate(dec2, enc1.size()[2:], mode='bilinear'))
        # NOTE(review): enc2.view(-1) flattens across the batch dimension --
        # presumably intended for a vector-style auxiliary loss; confirm.
        return (enc2.view((- 1)), dec1)
def create_project(project_id: str, base_path: str='', meta: Dict[(str, Any)]=None):
    """Instantiate a Project rooted at `base_path` and attach the given
    metadata mapping as its YAML payload."""
    new_project = Project(base_path, project_id)
    new_project._YAML = meta
    return new_project
class TimmMixup(Mixup):
    """Mixup variant that returns the (mixed) batch together with its soft
    targets instead of relying on the base class's calling convention."""

    def __call__(self, x, target):
        # Pick the mixing granularity configured on the base Mixup class.
        if (self.mode == 'elem'):
            lam = self._mix_elem(x)
        elif (self.mode == 'pair'):
            # Pair mode mixes samples two-by-two, hence the even batch size.
            assert ((len(x) % 2) == 0), 'Batch size should be even when using this'
            lam = self._mix_pair(x)
        else:
            lam = self._mix_batch(x)
        # Convert hard labels into lam-weighted, smoothed soft targets.
        target = mixup_target(target, self.num_classes, lam, self.label_smoothing, x.device)
        return (x, target)
class ResNet(Classifier):
    """ResNet-style classifier built from ResNetBlock stages.

    `blocks[i]` gives the number of residual blocks in stage i; channel
    width doubles per stage while spatial resolution halves (stride 2)
    after the first stage.
    """

    def __init__(self, N_class, resolution=(1, 32, 32), blocks=[3, 3, 3], normalization=True, channels=64, **kwargs):
        # NOTE(review): `blocks=[3, 3, 3]` is a mutable default; it is only
        # read here, but callers must not mutate the returned attribute.
        super(ResNet, self).__init__(N_class, resolution, **kwargs)
        self.blocks = blocks
        self.channels = channels
        self.normalization = normalization
        # Stem: 3x3 conv (+ optional BN) + ReLU at full resolution.
        conv1 = torch.nn.Conv2d(self.resolution[0], self.channels, kernel_size=3, stride=1, padding=1, bias=False)
        torch.nn.init.kaiming_normal_(conv1.weight, mode='fan_out', nonlinearity='relu')
        self.append_layer('conv1', conv1)
        if self.normalization:
            norm1 = torch.nn.BatchNorm2d(self.channels)
            torch.nn.init.constant_(norm1.weight, 1)
            torch.nn.init.constant_(norm1.bias, 0)
            self.append_layer('norm1', norm1)
        relu = torch.nn.ReLU(inplace=True)
        self.append_layer('relu1', relu)
        # Track the cumulative spatial downsampling for the final pool.
        downsampled = 1
        for i in range(len(self.blocks)):
            in_planes = ((2 ** max(0, (i - 1))) * self.channels)
            out_planes = ((2 ** i) * self.channels)
            layers = self.blocks[i]
            # Stage 0 keeps resolution; later stages halve it.
            stride = (2 if (i > 0) else 1)
            downsample = None
            # 1x1 projection shortcut when shape or width changes.
            if ((stride != 1) or (in_planes != out_planes)):
                conv = torch.nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
                torch.nn.init.kaiming_normal_(conv.weight, mode='fan_out', nonlinearity='relu')
                if self.normalization:
                    bn = torch.nn.BatchNorm2d(out_planes)
                    torch.nn.init.constant_(bn.weight, 1)
                    torch.nn.init.constant_(bn.bias, 0)
                    downsample = torch.nn.Sequential(*[conv, bn])
                else:
                    downsample = torch.nn.Sequential(*[conv])
            # First block of the stage changes width/stride; the rest don't.
            sequence = []
            sequence.append(ResNetBlock(in_planes, out_planes, stride=stride, downsample=downsample, normalization=self.normalization))
            for _ in range(1, layers):
                sequence.append(ResNetBlock(out_planes, out_planes, stride=1, downsample=None, normalization=self.normalization))
            self.append_layer(('block%d' % i), torch.nn.Sequential(*sequence))
            downsampled *= stride
            representation = out_planes
        # Global average pool over whatever spatial extent remains.
        pool = torch.nn.AvgPool2d(((self.resolution[1] // downsampled), (self.resolution[2] // downsampled)), stride=1)
        self.append_layer('avgpool', pool)
        view = common.torch.View((- 1), representation)
        self.append_layer('view', view)
        gain = torch.nn.init.calculate_gain('relu')
        logits = torch.nn.Linear(representation, self._N_output)
        # NOTE(review): `gain` is passed as kaiming_normal_'s `a` (negative
        # slope) parameter here, not as a gain -- confirm this is intended.
        torch.nn.init.kaiming_normal_(logits.weight, gain)
        torch.nn.init.constant_(logits.bias, 0)
        self.append_layer('logits', logits)
class Context(object):
    """Per-invocation state for a command (click-style context).

    Holds the parsed parameters, parent linkage and resolved display /
    parsing options; most options are inherited from `parent` when not
    supplied explicitly.
    """

    def __init__(self, command, parent=None, info_name=None, obj=None, auto_envvar_prefix=None, default_map=None, terminal_width=None, max_content_width=None, resilient_parsing=False, allow_extra_args=None, allow_interspersed_args=None, ignore_unknown_options=None, help_option_names=None, token_normalize_func=None, color=None, show_default=None):
        self.parent = parent
        self.command = command
        self.info_name = info_name
        self.params = {}
        self.args = []
        self.protected_args = []
        # Fall back to the parent's user object when none is supplied.
        if ((obj is None) and (parent is not None)):
            obj = parent.obj
        self.obj = obj
        # Share the parent's metadata dict down the context chain.
        self._meta = getattr(parent, 'meta', {})
        if ((default_map is None) and (parent is not None) and (parent.default_map is not None)):
            default_map = parent.default_map.get(info_name)
        self.default_map = default_map
        self.invoked_subcommand = None
        if ((terminal_width is None) and (parent is not None)):
            terminal_width = parent.terminal_width
        self.terminal_width = terminal_width
        if ((max_content_width is None) and (parent is not None)):
            max_content_width = parent.max_content_width
        self.max_content_width = max_content_width
        # Parsing-behavior defaults come from the command itself.
        if (allow_extra_args is None):
            allow_extra_args = command.allow_extra_args
        self.allow_extra_args = allow_extra_args
        if (allow_interspersed_args is None):
            allow_interspersed_args = command.allow_interspersed_args
        self.allow_interspersed_args = allow_interspersed_args
        if (ignore_unknown_options is None):
            ignore_unknown_options = command.ignore_unknown_options
        self.ignore_unknown_options = ignore_unknown_options
        if (help_option_names is None):
            if (parent is not None):
                help_option_names = parent.help_option_names
            else:
                help_option_names = ['--help']
        self.help_option_names = help_option_names
        if ((token_normalize_func is None) and (parent is not None)):
            token_normalize_func = parent.token_normalize_func
        self.token_normalize_func = token_normalize_func
        self.resilient_parsing = resilient_parsing
        # BUG FIX: the `else` below must pair with the *outer* `is None`
        # test. Previously it paired with the inner parent check, so a
        # missing prefix crashed on None.upper() and an explicitly passed
        # prefix was never upper-cased.
        if (auto_envvar_prefix is None):
            if ((parent is not None) and (parent.auto_envvar_prefix is not None) and (self.info_name is not None)):
                auto_envvar_prefix = '{}_{}'.format(parent.auto_envvar_prefix, self.info_name.upper())
        else:
            auto_envvar_prefix = auto_envvar_prefix.upper()
        if (auto_envvar_prefix is not None):
            auto_envvar_prefix = auto_envvar_prefix.replace('-', '_')
        self.auto_envvar_prefix = auto_envvar_prefix
        if ((color is None) and (parent is not None)):
            color = parent.color
        self.color = color
        self.show_default = show_default
        self._close_callbacks = []
        self._depth = 0

    def __enter__(self):
        self._depth += 1
        push_context(self)
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self._depth -= 1
        # Only the outermost exit runs close callbacks.
        if (self._depth == 0):
            self.close()
        pop_context()

    def scope(self, cleanup=True):
        # NOTE(review): a generator used via `with ctx.scope():` needs
        # @contextlib.contextmanager; the decorator appears to have been
        # stripped during extraction -- restore from the original source.
        if (not cleanup):
            self._depth += 1
        try:
            with self as rv:
                (yield rv)
        finally:
            if (not cleanup):
                self._depth -= 1

    @property
    def meta(self):
        # Property (restored): __init__ reads `parent.meta` as an attribute.
        return self._meta

    def make_formatter(self):
        return HelpFormatter(width=self.terminal_width, max_width=self.max_content_width)

    def call_on_close(self, f):
        # Register f to run when this context is torn down.
        self._close_callbacks.append(f)
        return f

    def close(self):
        for cb in self._close_callbacks:
            cb()
        self._close_callbacks = []

    @property
    def command_path(self):
        # Property (restored): the recursion below formats
        # `self.parent.command_path` as a string, not a bound method.
        rv = ''
        if (self.info_name is not None):
            rv = self.info_name
        if (self.parent is not None):
            rv = '{} {}'.format(self.parent.command_path, rv)
        return rv.lstrip()

    def find_root(self):
        node = self
        while (node.parent is not None):
            node = node.parent
        return node

    def find_object(self, object_type):
        # Walk up the chain for the closest obj of the given type (or None).
        node = self
        while (node is not None):
            if isinstance(node.obj, object_type):
                return node.obj
            node = node.parent

    def ensure_object(self, object_type):
        rv = self.find_object(object_type)
        if (rv is None):
            self.obj = rv = object_type()
        return rv

    def lookup_default(self, name):
        if (self.default_map is not None):
            rv = self.default_map.get(name)
            if callable(rv):
                rv = rv()
            return rv

    def fail(self, message):
        raise UsageError(message, self)

    def abort(self):
        raise Abort()

    def exit(self, code=0):
        raise Exit(code)

    def get_usage(self):
        return self.command.get_usage(self)

    def get_help(self):
        return self.command.get_help(self)

    def invoke(*args, **kwargs):
        # Invoke a callback or another Command with this context active;
        # a Command's missing params are filled from their defaults.
        (self, callback) = args[:2]
        ctx = self
        if isinstance(callback, Command):
            other_cmd = callback
            callback = other_cmd.callback
            ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
            if (callback is None):
                raise TypeError('The given command does not have a callback that can be invoked.')
            for param in other_cmd.params:
                if ((param.name not in kwargs) and param.expose_value):
                    kwargs[param.name] = param.get_default(ctx)
        args = args[2:]
        with augment_usage_errors(self):
            with ctx:
                return callback(*args, **kwargs)

    def forward(*args, **kwargs):
        # Re-invoke `cmd`, forwarding this context's already-parsed params.
        (self, cmd) = args[:2]
        if (not isinstance(cmd, Command)):
            raise TypeError('Callback is not a command.')
        for param in self.params:
            if (param not in kwargs):
                kwargs[param] = self.params[param]
        return self.invoke(cmd, **kwargs)
def test_callbacks():
    """Check that the pybind11 module `m` can call back into Python
    functions, partials and lambdas, and can return callables."""
    from functools import partial

    def func1():
        return 'func1'

    def func2(a, b, c, d):
        return ('func2', a, b, c, d)

    def func3(a):
        return 'func3({})'.format(a)
    # Zero- and multi-argument callbacks invoked from C++.
    assert (m.test_callback1(func1) == 'func1')
    assert (m.test_callback2(func2) == ('func2', 'Hello', 'x', True, 5))
    # functools.partial objects must also be callable from C++.
    assert (m.test_callback1(partial(func2, 1, 2, 3, 4)) == ('func2', 1, 2, 3, 4))
    assert (m.test_callback1(partial(func3, 'partial')) == 'func3(partial)')
    assert (m.test_callback3((lambda i: (i + 1))) == 'func(43) = 44')
    # C++ can hand back callables, by positional and by keyword argument.
    f = m.test_callback4()
    assert (f(43) == 44)
    f = m.test_callback5()
    assert (f(number=43) == 44)
('/get_transaction/<txhash>', methods=('GET',))
def get_transaction(txhash):
web3 = connect_to_geth(app.web3_url, app.consensus)
try:
tx = dict(web3.eth.get_transaction(txhash))
except:
tx = {'status': 'No such transaction'}
resp = Response(json.dumps(tx, cls=HexJsonEncoder, indent=5))
resp.headers['Content-Type'] = 'application/json'
return resp |
def findtask(description):
    """Return True iff an Earth Engine task with this description is
    currently queued (READY) or RUNNING."""
    return any(((task['description'] == description) and (task['state'] in ('READY', 'RUNNING'))) for task in ee.data.getTaskList())
def cumulative_distribution(image, nbins=256):
    """Return (cdf, bin_centers): the normalized cumulative histogram of
    `image`, cast to the float dtype matching the image's precision."""
    counts, bin_centers = histogram(image, nbins)
    cdf = counts.cumsum()
    cdf = cdf / float(cdf[-1])  # normalize so the final value is 1.0
    float_dtype = utils._supported_float_type(image.dtype)
    return (cdf.astype(float_dtype, copy=False), bin_centers)
def haar_init_(A):
    """In-place Haar-like init: draw a random orthogonal matrix, force
    det > 0, then replace A with the skew-symmetric part of its matrix
    logarithm. Returns A."""
    torch.nn.init.orthogonal_(A)
    with torch.no_grad():
        if (A.det() < 0.0):
            # Flip one random row to move into the det > 0 component.
            row = np.random.randint(0, A.size(0))
            A[row] *= (- 1.0)
        log_A = la.logm(A.data.cpu().numpy()).real
        skew = (0.5 * (log_A - log_A.T))  # project onto skew-symmetric part
        A.copy_(torch.tensor(skew))
    return A
def is_word_exist(new_word, words_in_db):
    """Return True iff any record in the `words_in_db` mapping has its
    'word' field equal to `new_word`."""
    return any((entry['word'] == new_word) for entry in words_in_db.values())
def text2tensor(text, size=256):
    """Encode `text` as a fixed-size uint8 tensor of character ordinals,
    zero-padded on the right; `text` must be strictly shorter than `size`.

    NOTE(review): np.uint8 silently wraps code points > 255, so this
    presumably expects ASCII/Latin-1 input -- confirm with callers.
    """
    codes = [ord(ch) for ch in text]
    assert (len(codes) < size)
    padded = codes + ([0] * (size - len(codes)))
    return torch.from_numpy(np.array(padded, dtype=np.uint8))
class MultitaskDatasetWrapper(BaseWrapperDataset):
    """Dataset wrapper that tags collated batches with a target language id
    and dataset name, and optionally subsamples the epoch's indices."""

    def __init__(self, dataset, target_language_id, sample=1.0, name=''):
        super().__init__(dataset)
        # Language id injected into every collated batch's net_input.
        self.target_language_id = target_language_id
        # Fraction of the dataset's ordered indices to keep per epoch.
        self.sample = sample
        self.name = name

    def collater(self, *args, **kwargs):
        ans = self.dataset.collater(*args, **kwargs)
        # Some collaters return empty/partial batches without net_input.
        if ('net_input' in ans):
            ans['net_input']['target_language_id'] = self.target_language_id
            ans['net_input']['dataset_name'] = self.name
        return ans

    def num_tokens(self, *args, **kwargs):
        return self.dataset.num_tokens(*args, **kwargs)

    def ordered_indices(self, *args, **kwargs):
        indices = self.dataset.ordered_indices(*args, **kwargs)
        # Keep a random `sample` fraction, preserving the original order.
        # NOTE(review): uses the global numpy RNG, so results depend on
        # external seeding.
        size = int((self.sample * indices.shape[0]))
        return indices.take(np.sort(np.random.permutation(indices.shape[0])[:size]))

    def size(self, index: int):
        return self.dataset.size(index)

    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        return self.dataset.prefetch(indices)
def train_speaker_dependent(config: Config, data: DataLoader, model_name: str) -> None:
    """Train/evaluate an SVM per stratified fold and dump all fold results
    to RESULT_FILE (formatted with `model_name`) as JSON."""
    results = []
    for (fold, (train_index, test_index)) in enumerate(data.get_stratified_k_fold()):
        # Folds are reported 1-based on the config.
        config.fold = (fold + 1)
        print('Present Fold:', config.fold)
        (train_input, train_output, test_input, test_output) = train_io(config=config, data=data, train_index=train_index, test_index=test_index)
        clf = svm_train(config=config, train_input=train_input, train_output=train_output)
        (result_dict, result_str) = svm_test(clf, test_input, test_output)
        results.append(result_dict)
    # NOTE(review): the directory check uses the unformatted RESULT_FILE
    # template while the write uses the formatted path -- fine only if the
    # placeholder is in the basename; confirm.
    if (not os.path.exists(os.path.dirname(RESULT_FILE))):
        os.makedirs(os.path.dirname(RESULT_FILE))
    with open(RESULT_FILE.format(model_name), 'w') as file:
        json.dump(results, file)
class RecorderWrapper(gym.Wrapper):
    """Gym wrapper that periodically records episodes to mp4.

    A recording starts at an episode boundary once `record_every` seconds
    have passed since the previous one, and is closed at the end of the
    episode in which it was started.
    """

    def __init__(self, env, fps, save_dir, label, record_every):
        super().__init__(env)
        self.record_every = record_every  # seconds between recordings
        self.save_dir = save_dir
        self.label = label
        assert (self.label in ('emulator', 'preproc'))
        self.fps = fps
        self.recordings = 0      # number of finished recordings so far
        self.writer = None       # active imageio writer, if any
        self.frames_written = 0
        self.last_recording = 0
        self.scale_factor = None  # chosen lazily from the observation size

    def step(self, action):
        (observation, rew, done, info) = self.env.step(action)
        # Close the active recording at episode end and report its path.
        if (done and self.is_recording):
            self.writer.close()
            self.writer = None
            self.frames_written = 0
            self.last_recording = time.time()
            info[(self.label + '_recording')] = (self.save_dir + f'/{self.label}_{self.recordings}.mp4')
            self.recordings += 1
        # Start a new recording at an episode boundary when enough
        # wall-clock time has elapsed since the last one.
        if (((time.time() - self.last_recording) > self.record_every) and (not self.is_recording) and done):
            self.frames_written = 0
            self.writer = imageio.get_writer((self.save_dir + f'/{self.label}_{self.recordings}.mp4'), fps=self.fps, macro_block_size=1)
            self.last_recording = time.time()
        # Append the frame, capped at 60*60*16 frames for preproc and
        # 60*60*9 for emulator (16/9 minutes assuming 60 fps).
        if ((self.writer is not None) and (self.frames_written < (((60 * 60) * 16) if (self.label == 'preproc') else ((60 * 60) * 9)))):
            if (self.scale_factor is None):
                self.scale_factor = (EMULATOR_REC_SCALE if (self.label == 'emulator') else PREPROC_REC_SCALE)
                # Double the upscale for small (<= 64x64) observations.
                if ((observation.shape[0] <= 64) and (observation.shape[1] <= 64)):
                    self.scale_factor *= 2
            rec_observation = cv2.resize(observation, ((observation.shape[1] * self.scale_factor), (observation.shape[0] * self.scale_factor)), interpolation=cv2.INTER_NEAREST)
            self.frames_written += 1
            self.writer.append_data(rec_observation.squeeze())
        return (observation, rew, done, info)

    @property
    def is_recording(self):
        # Property (restored): `step` reads this as an attribute
        # (`self.is_recording`). As a plain method the always-truthy bound
        # method made the close branch always fire and the start branch
        # never fire.
        return (self.writer is not None)
def get_arguments(traj_data):
    """Extract the language goal and PDDL task arguments from a trajectory
    record.

    Returns (goal_instruction, task_type, mrecep_target, object_target,
    parent_target, sliced).
    """
    task_type = traj_data['task_type']
    try:
        r_idx = traj_data['repeat_idx']
    except KeyError:
        # Narrowed from a bare `except:`: only a missing key should fall
        # back to the first annotation.
        r_idx = 0
    language_goal_instr = traj_data['turk_annotations']['anns'][r_idx]['task_desc']
    sliced = exist_or_no(traj_data['pddl_params']['object_sliced'])
    mrecep_target = none_or_str(traj_data['pddl_params']['mrecep_target'])
    object_target = none_or_str(traj_data['pddl_params']['object_target'])
    parent_target = none_or_str(traj_data['pddl_params']['parent_target'])
    return (language_goal_instr, task_type, mrecep_target, object_target, parent_target, sliced)
class ViTMSNPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder for the real ViTMSN model when its backend
    is unavailable; presumably raises a helpful error on use (behavior
    lives in DummyObject / requires_backends)."""
    # Backends that must be installed for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def pc_loss(f, K, labels):
    """Pairwise-comparison (PC) loss.

    f: (batch, K) class scores; labels: (batch,) class indices.
    Returns a scalar tensor.
    """
    # Score of each sample's labelled class, broadcast across all K classes.
    labelled_scores = f.gather(1, labels.long().view((- 1), 1)).repeat(1, K)
    pairwise = torch.sigmoid((- 1.0) * (f - labelled_scores))
    m1 = (K * (K - 1)) / 2
    m2 = K - 1
    total = torch.sum(pairwise)
    return (((total * (K - 1)) / len(labels)) - m1) + m2
class ConfigurationTestCase(unittest.TestCase):
    """Tests for parsing the SAGE_BOOTSTRAP-style configuration string
    (case-insensitive keys/values, comma-separated settings)."""

    def test_default(self):
        # Defaults with no bootstrap string at all.
        config = Configuration()
        self.assertEqual(config.log, 'info')
        self.assertTrue(config.interactive)

    def test_example(self):
        # Mixed case and surrounding whitespace must be normalized.
        SAGE_BOOTSTRAP = ' loG:CrItIcAl, interactive:TRUE'
        result = run_config_with(SAGE_BOOTSTRAP)
        self.assertEqual(len(result), 4)
        self.assertEqual(result['log'], u'critical')
        self.assertTrue(result['interactive'])
        self.assertEqual(result['stdout'], 'default stdout')
        self.assertEqual(result['stderr'], 'default stderr')

    def test_logging(self):
        # Every supported level round-trips regardless of input case.
        for level in LOG_LEVELS:
            self.assertEqual(run_config_with('LOG:{0}'.format(level.upper()))['log'], level)

    def test_overriding(self):
        interactive = run_config_with('interactive:true')
        self.assertTrue(interactive['interactive'])
        self.assertEqual(interactive['stdout'], 'default stdout')
        self.assertEqual(interactive['stderr'], 'default stderr')
        # Non-interactive mode swaps stdout for the unbuffered stream.
        in_pipe = run_config_with('interactive:false')
        self.assertFalse(in_pipe['interactive'])
        self.assertEqual(in_pipe['stdout'], u"<class 'sage_bootstrap.stdio.UnbufferedStream'>")
        self.assertEqual(in_pipe['stderr'], 'default stderr')
class TestSnapshotter():
    """Tests for Snapshotter: saving iteration params under each snapshot
    mode and rejecting invalid modes."""

    def setup_method(self):
        self.temp_dir = tempfile.TemporaryDirectory()

    def teardown_method(self):
        self.temp_dir.cleanup()

    .parametrize('mode, files', [*configurations])
    def test_snapshotter(self, mode, files):
        # NOTE(review): the ".parametrize(...)" line above is almost
        # certainly a "@pytest.mark.parametrize" decorator mangled by
        # extraction -- restore from the original test file.
        snapshotter = Snapshotter(self.temp_dir.name, mode, 2)
        assert (snapshotter.snapshot_dir == self.temp_dir.name)
        assert (snapshotter.snapshot_mode == mode)
        assert (snapshotter.snapshot_gap == 2)
        snapshot_data = [{'testparam': 1}, {'testparam': 4}]
        snapshotter.save_itr_params(1, snapshot_data[0])
        snapshotter.save_itr_params(2, snapshot_data[1])
        # `files` maps expected pickle filename -> index into snapshot_data.
        for (f, num) in files.items():
            filename = osp.join(self.temp_dir.name, f)
            assert osp.exists(filename)
            with open(filename, 'rb') as pkl_file:
                data = pickle.load(pkl_file)
                assert (data == snapshot_data[num])

    def test_invalid_snapshot_mode(self):
        with pytest.raises(ValueError):
            snapshotter = Snapshotter(snapshot_dir=self.temp_dir.name, snapshot_mode='invalid')
            snapshotter.save_itr_params(2, {'testparam': 'invalid'})
def profile_likelihood(ln, lk, n, k, ww, plot=False):
    """Compute the posterior over the intrinsic dimension d via the
    binomial log-likelihood, returning (mean, std, d_range, P).

    Uses D_MIN/D_MAX module globals as the initial search interval and
    iteratively shrinks/refines it until the posterior has support on at
    least 3 grid points.
    """
    def p_d(d):
        # Negative log-likelihood of dimension d under the binomial model.
        return _compute_binomial_logl(d, lk, k, ln, n, w=ww)
    dx = 10.0
    d_left = D_MIN
    d_right = ((D_MAX + dx) + d_left)
    elements = 0
    counter = 0
    # Refine the grid (dx -> dx/10) until >= 3 points carry probability
    # mass above the 1e-20 cutoff, narrowing [d_left, d_right] each pass.
    while (elements < 3):
        dx /= 10.0
        counter += 1
        d_range = np.arange(d_left, d_right, dx)
        P = np.array([p_d(di) for di in d_range])
        P = P.reshape(P.shape[0])
        # Shift by the minimum before exponentiating for numerical safety.
        P -= P.min()
        P = np.exp((- P))
        mask = (P > 1e-20)
        elements = mask.sum()
        ind = np.where(mask)[0]
        d_left = ((d_range[ind[0]] - (0.5 * dx)) if ((d_range[ind[0]] - dx) > 0) else D_MIN)
        d_right = (d_range[ind[(- 1)]] + (0.5 * dx))
    # Final fine evaluation on a 1000-point grid over the support.
    d_range = np.linspace(d_left, d_right, 1000)
    dx = (d_range[1] - d_range[0])
    P = np.array([p_d(di) for di in d_range])
    P = P.reshape(P.shape[0])
    P -= P.min()
    P = np.exp((- P))
    P /= P.sum()
    if plot:
        plt.figure()
        plt.plot(d_range, P)
        plt.xlabel('d')
        plt.ylabel('P(d)')
        plt.title('Posterior of d')
    # Posterior mean and standard deviation of d.
    E_d_emp = np.dot(d_range, P)
    S_d_emp = np.sqrt((((d_range * d_range) * P).sum() - (E_d_emp * E_d_emp)))
    if plot:
        print('empirical average:\t', E_d_emp, '\nempirical std:\t\t', S_d_emp)
    return (E_d_emp, S_d_emp, d_range, P)
class BaseGenerator(metaclass=abc.ABCMeta):
    """Abstract interface for text generators."""

    def generate(self, query: str, stop_tokens=None, max_output_len=None):
        """Produce a completion for `query`; the base implementation is a
        no-op returning None.

        NOTE(review): likely intended to carry @abc.abstractmethod --
        possibly stripped during extraction; confirm.
        """
        return None
class MetaModule(nn.Module):
    """nn.Module variant whose "meta" parameters are those owned directly
    by MetaModule submodules; non-meta submodules contribute nothing."""

    def meta_named_parameters(self, prefix='', recurse=True):
        def members_of(module):
            # Only MetaModule instances expose their own parameters.
            if isinstance(module, MetaModule):
                return module._parameters.items()
            return []
        yield from self._named_members(members_of, prefix=prefix, recurse=recurse)

    def meta_parameters(self, recurse=True):
        for _name, param in self.meta_named_parameters(recurse=recurse):
            yield param
class TimeEstimator():
    """Tracks per-iteration wall-clock time (windowed average plus an EMA)
    and estimates the time remaining for a fixed-length run."""

    def __init__(self, total_iter, step_size):
        self.avg_time_window = []   # samples since the last reset
        self.exp_avg_time = None    # exponential moving average
        self.alpha = 0.7            # EMA smoothing factor
        self.last_time = time.time()
        self.total_iter = total_iter
        self.step_size = step_size
        # While buffering, the EMA is still warming up (first two samples).
        self.buffering_exp = True

    def update(self):
        """Record the time elapsed since the previous update."""
        now = time.time()
        sample = now - self.last_time
        self.last_time = now
        self.avg_time_window.append(sample)
        if self.buffering_exp:
            # On the second sample, leave buffering; until then the EMA is
            # simply overwritten by the latest sample.
            if self.exp_avg_time is not None:
                self.buffering_exp = False
            self.exp_avg_time = sample
        else:
            self.exp_avg_time = (self.alpha * self.exp_avg_time) + ((1 - self.alpha) * sample)

    def get_est_remaining(self, it):
        """Estimated seconds remaining at iteration `it` (0 while warming up)."""
        if self.exp_avg_time is None:
            return 0
        return ((self.total_iter - it) * self.exp_avg_time) / self.step_size

    def get_and_reset_avg_time(self):
        """Average per-step time over the window, then clear the window."""
        mean_per_step = (sum(self.avg_time_window) / len(self.avg_time_window)) / self.step_size
        self.avg_time_window = []
        return mean_per_step
def load_image(file_path, input_height=128, input_width=None, output_height=128, output_width=None, crop_height=None, crop_width=None, is_random_crop=True, is_mirror=True, is_gray=False):
    """Load an image; optionally convert (RGB/grayscale), randomly mirror,
    resize, random/center crop, and finally resize to
    (output_width, output_height). Width arguments default to their
    height counterparts when None.
    """
    if (input_width is None):
        input_width = input_height
    if (output_width is None):
        output_width = output_height
    if (crop_width is None):
        crop_width = crop_height
    img = Image.open(file_path)
    # BUG FIX: these checks used `is` / `is not` against literals, which
    # tests identity rather than equality (and only worked by accident via
    # string interning; SyntaxWarning on modern Python). Use ==/!=.
    if ((is_gray is False) and (img.mode != 'RGB')):
        img = img.convert('RGB')
    if (is_gray and (img.mode != 'L')):
        img = img.convert('L')
    # Mirror horizontally with probability 0.5.
    if (is_mirror and (random.randint(0, 1) == 0)):
        img = ImageOps.mirror(img)
    if (input_height is not None):
        img = img.resize((input_width, input_height), Image.BICUBIC)
    if (crop_height is not None):
        [w, h] = img.size
        if is_random_crop:
            # Random crop: split the slack between opposite borders.
            cx1 = random.randint(0, (w - crop_width))
            cx2 = ((w - crop_width) - cx1)
            cy1 = random.randint(0, (h - crop_height))
            cy2 = ((h - crop_height) - cy1)
        else:
            # Center crop.
            cx2 = cx1 = int(round(((w - crop_width) / 2.0)))
            cy2 = cy1 = int(round(((h - crop_height) / 2.0)))
        img = ImageOps.crop(img, (cx1, cy1, cx2, cy2))
    img = img.resize((output_width, output_height), Image.BICUBIC)
    return img
class PhaseShiftTest(tf.test.TestCase):
    """Unitarity tests for the phase-shift components.

    Each test checks that matrix @ inverse_matrix is the identity, and that
    the inverse equals the conjugate (both of the same component and of the
    component built with the negated phase).

    Bug fix: the products were garbled to ``(ps.matrix ps.inverse_matrix)``
    (a syntax error) — the ``@`` matrix-multiply operator was missing and
    has been restored.
    """

    def test_upper(self):
        ps = PhaseShiftUpper(RANDOM_PHASE_SHIFT)
        ps_inv = PhaseShiftUpper(-RANDOM_PHASE_SHIFT)
        self.assertAllClose(ps.matrix @ ps.inverse_matrix, IDENTITY)
        self.assertAllClose(ps.matrix.conj(), ps.inverse_matrix)
        self.assertAllClose(ps.matrix.conj(), ps_inv.matrix)

    def test_lower(self):
        ps = PhaseShiftLower(RANDOM_PHASE_SHIFT)
        ps_inv = PhaseShiftLower(-RANDOM_PHASE_SHIFT)
        self.assertAllClose(ps.matrix @ ps.inverse_matrix, IDENTITY)
        self.assertAllClose(ps.matrix.conj(), ps.inverse_matrix)
        self.assertAllClose(ps.matrix.conj(), ps_inv.matrix)

    def test_common_mode(self):
        ps = PhaseShiftCommonMode(RANDOM_PHASE_SHIFT)
        ps_inv = PhaseShiftCommonMode(-RANDOM_PHASE_SHIFT)
        self.assertAllClose(ps.matrix @ ps.inverse_matrix, IDENTITY)
        # Common mode is a global phase: e^{i*phi} * I.
        self.assertAllClose(ps.matrix, (np.exp((1j * RANDOM_PHASE_SHIFT)) * IDENTITY))
        self.assertAllClose(ps.matrix.conj(), ps.inverse_matrix)
        self.assertAllClose(ps.matrix.conj(), ps_inv.matrix)

    def test_differential_mode(self):
        ps = PhaseShiftDifferentialMode(RANDOM_PHASE_SHIFT)
        ps_inv = PhaseShiftDifferentialMode(-RANDOM_PHASE_SHIFT)
        self.assertAllClose(ps.matrix @ ps.inverse_matrix, IDENTITY)
        self.assertAllClose(ps.matrix.conj(), ps.inverse_matrix)
        self.assertAllClose(ps.matrix.conj(), ps_inv.matrix)
class MeasureStatistics(metaclass=Singleton):
    """Singleton collector of per-id squared-distance statistics.

    Use as a context manager: measurements recorded via save_measure()
    while the session is active are dumped to
    ``<base_dir>/distance/<folder>/distance.csv`` on exit.
    """

    def __init__(self, folder):
        # Collection is off until __enter__ is called.
        self.enabled = False
        self.folder = os.path.join(base_dir, 'distance', folder)
        # Maps measurement id -> 1-D numpy array of accumulated values.
        self.stats = {}
        self.stats_names = ['dist']

    def save_measure(self, tensor, id):
        """Append the squared L2 norm of each sample (row) of *tensor* to the series *id*."""
        if (id not in self.stats):
            self.stats[id] = np.array([])
        # Flatten all but the batch dimension, then sum of squares per sample.
        t = tensor.view(tensor.shape[0], (- 1))
        d = torch.sum((t ** 2), dim=(- 1))
        stat_arr = d.cpu().numpy()
        s = np.concatenate([self.stats[id], stat_arr])
        self.stats[id] = s

    def __enter__(self):
        # Start a fresh measurement session.
        self.enabled = True
        self.stats.clear()
        return self

    def __exit__(self, *args):
        # Dump collected statistics to CSV, replacing any previous folder.
        if (self.enabled and (len(self.stats) > 0)):
            self.enabled = False
            if os.path.exists(self.folder):
                shutil.rmtree(self.folder)
            if (not os.path.exists(self.folder)):
                os.makedirs(self.folder)
            path = os.path.join(self.folder, 'distance.csv')
            # NOTE(review): np.array over the per-id series assumes every id
            # accumulated the same number of samples — confirm, otherwise
            # this builds a ragged array.
            pairs = [i for i in self.stats.items()]
            cols = [i[0] for i in pairs]
            data = np.array([i[1] for i in pairs]).transpose()
            df = pd.DataFrame(data=data, columns=cols)
            df.to_csv(path, index=False)
class Sphinx(PythonModule):
    """Feature check for the 'sphinx' Python module (standard type, spkg 'sphinx')."""

    def __init__(self):
        # Delegate to the base class with the Sphinx-specific metadata.
        super().__init__('sphinx', spkg='sphinx', type='standard')
def audio_to_sequence_example(filename, labels, sample_rate, n_samples):
    """Convert an audio file into a labeled SequenceExample.

    The audio is first split into fixed-size segments, which are then packed
    together with *labels* into a single SequenceExample.
    """
    segments = _audio_to_segments(
        filename, sample_rate=sample_rate, n_samples=n_samples)
    return _segments_to_sequence_example(segments, labels)
class WHUBuildingDataset(Dataset):
    """WHU building segmentation dataset with optional 4-image mosaic augmentation.

    Images and masks are read from ``<data_root>/<img_dir>`` and
    ``<data_root>/<mask_dir>``; sample ids are derived from the mask
    filenames.  Outside 'val'/'test' mode, with probability ``mosaic_ratio``
    a sample is built by stitching random crops of four images into a 2x2
    mosaic of size ``img_size``.
    """

    def __init__(self, data_root='data/whubuilding/train', mode='train', img_dir='image', mask_dir='label', img_suffix='.tif', mask_suffix='.tif', transform=None, mosaic_ratio=0.25, img_size=ORIGIN_IMG_SIZE):
        self.data_root = data_root
        self.img_dir = img_dir
        self.mask_dir = mask_dir
        self.img_suffix = img_suffix
        self.mask_suffix = mask_suffix
        self.transform = transform
        self.mode = mode
        self.mosaic_ratio = mosaic_ratio
        self.img_size = img_size  # (height, width)
        self.img_ids = self.get_img_ids(self.data_root, self.img_dir, self.mask_dir)

    def __getitem__(self, index):
        """Return dict(img_id, img: CHW float tensor, gt_semantic_seg: long tensor)."""
        p_ratio = random.random()
        # Mosaic only with probability mosaic_ratio, and never for val/test.
        if ((p_ratio > self.mosaic_ratio) or (self.mode == 'val') or (self.mode == 'test')):
            (img, mask) = self.load_img_and_mask(index)
            if self.transform:
                (img, mask) = self.transform(img, mask)
        else:
            (img, mask) = self.load_mosaic_img_and_mask(index)
            if self.transform:
                (img, mask) = self.transform(img, mask)
        # NOTE(review): torch.from_numpy assumes the transform yields numpy
        # HWC arrays (PIL images would fail here) — confirm.
        img = torch.from_numpy(img).permute(2, 0, 1).float()
        mask = torch.from_numpy(mask).long()
        img_id = self.img_ids[index]
        results = dict(img_id=img_id, img=img, gt_semantic_seg=mask)
        return results

    def __len__(self):
        return len(self.img_ids)

    def get_img_ids(self, data_root, img_dir, mask_dir):
        """List sample ids (filename stems) from the mask folder; image and
        mask folders must contain the same number of files."""
        img_filename_list = os.listdir(osp.join(data_root, img_dir))
        mask_filename_list = os.listdir(osp.join(data_root, mask_dir))
        assert (len(img_filename_list) == len(mask_filename_list))
        img_ids = [str(id.split('.')[0]) for id in mask_filename_list]
        return img_ids

    def load_img_and_mask(self, index):
        """Load one RGB image and its single-channel ('L') mask as PIL images."""
        img_id = self.img_ids[index]
        img_name = osp.join(self.data_root, self.img_dir, (img_id + self.img_suffix))
        mask_name = osp.join(self.data_root, self.mask_dir, (img_id + self.mask_suffix))
        img = Image.open(img_name).convert('RGB')
        mask = Image.open(mask_name).convert('L')
        return (img, mask)

    def load_mosaic_img_and_mask(self, index):
        """Build one mosaic sample: random crops of four images tiled 2x2."""
        # This sample plus three random partners.
        indexes = ([index] + [random.randint(0, (len(self.img_ids) - 1)) for _ in range(3)])
        (img_a, mask_a) = self.load_img_and_mask(indexes[0])
        (img_b, mask_b) = self.load_img_and_mask(indexes[1])
        (img_c, mask_c) = self.load_img_and_mask(indexes[2])
        (img_d, mask_d) = self.load_img_and_mask(indexes[3])
        (img_a, mask_a) = (np.array(img_a), np.array(mask_a))
        (img_b, mask_b) = (np.array(img_b), np.array(mask_b))
        (img_c, mask_c) = (np.array(img_c), np.array(mask_c))
        (img_d, mask_d) = (np.array(img_d), np.array(mask_d))
        w = self.img_size[1]
        h = self.img_size[0]
        # Mosaic center, sampled uniformly within the central half.
        start_x = (w // 4)
        strat_y = (h // 4)
        offset_x = random.randint(start_x, (w - start_x))
        offset_y = random.randint(strat_y, (h - strat_y))
        # Quadrant crop sizes (width, height) around the sampled center.
        crop_size_a = (offset_x, offset_y)
        crop_size_b = ((w - offset_x), offset_y)
        crop_size_c = (offset_x, (h - offset_y))
        crop_size_d = ((w - offset_x), (h - offset_y))
        random_crop_a = albu.RandomCrop(width=crop_size_a[0], height=crop_size_a[1])
        random_crop_b = albu.RandomCrop(width=crop_size_b[0], height=crop_size_b[1])
        random_crop_c = albu.RandomCrop(width=crop_size_c[0], height=crop_size_c[1])
        random_crop_d = albu.RandomCrop(width=crop_size_d[0], height=crop_size_d[1])
        croped_a = random_crop_a(image=img_a.copy(), mask=mask_a.copy())
        croped_b = random_crop_b(image=img_b.copy(), mask=mask_b.copy())
        croped_c = random_crop_c(image=img_c.copy(), mask=mask_c.copy())
        croped_d = random_crop_d(image=img_d.copy(), mask=mask_d.copy())
        (img_crop_a, mask_crop_a) = (croped_a['image'], croped_a['mask'])
        (img_crop_b, mask_crop_b) = (croped_b['image'], croped_b['mask'])
        (img_crop_c, mask_crop_c) = (croped_c['image'], croped_c['mask'])
        (img_crop_d, mask_crop_d) = (croped_d['image'], croped_d['mask'])
        # Stitch: a|b on top, c|d on the bottom.
        top = np.concatenate((img_crop_a, img_crop_b), axis=1)
        bottom = np.concatenate((img_crop_c, img_crop_d), axis=1)
        img = np.concatenate((top, bottom), axis=0)
        top_mask = np.concatenate((mask_crop_a, mask_crop_b), axis=1)
        bottom_mask = np.concatenate((mask_crop_c, mask_crop_d), axis=1)
        mask = np.concatenate((top_mask, bottom_mask), axis=0)
        mask = np.ascontiguousarray(mask)
        img = np.ascontiguousarray(img)
        img = Image.fromarray(img)
        mask = Image.fromarray(mask)
        return (img, mask)
def add_data_arguments(parser):
    """Register dataset-related CLI arguments and resolve dataset-dependent defaults.

    After registering the raw options, the current values are parsed with
    parse_known_args and used to fill in defaults that depend on them:
    entity-embedding paths, the in-house split flag and qids path, and the
    {data_dir}/{dataset} placeholders in the statement paths.
    """
    parser.add_argument('--ent_emb', default=['tzw'], nargs='+', help='sources for entity embeddings')
    parser.add_argument('-ds', '--dataset', default='csqa', choices=DATASET_LIST, help='dataset name')
    parser.add_argument('--data_dir', default='data', type=str, help='Path to the data directory')
    parser.add_argument('-ih', '--inhouse', type=utils.bool_flag, nargs='?', const=True, help='run in-house setting')
    parser.add_argument('--inhouse_train_qids', default='data/{dataset}/inhouse_split_qids.txt', help='qids of the in-house training set')
    parser.add_argument('--train_statements', default='{data_dir}/{dataset}/statement/train.statement.jsonl')
    parser.add_argument('--dev_statements', default='{data_dir}/{dataset}/statement/dev.statement.jsonl')
    parser.add_argument('--test_statements', default='{data_dir}/{dataset}/statement/test.statement.jsonl')
    parser.add_argument('-sl', '--max_seq_len', default=100, type=int)
    # Peek at the current values so the defaults below can depend on them.
    (args, _) = parser.parse_known_args()
    parser.set_defaults(ent_emb_paths=[EMB_PATHS.get(s) for s in args.ent_emb], inhouse=(DATASET_SETTING[args.dataset] == 'inhouse'), inhouse_train_qids=args.inhouse_train_qids.format(dataset=args.dataset))
    # Datasets without a test split only get train/dev statement paths.
    data_splits = (('train', 'dev') if (args.dataset in DATASET_NO_TEST) else ('train', 'dev', 'test'))
    for split in data_splits:
        for attribute in ('statements',):
            attr_name = f'{split}_{attribute}'
            # Substitute {dataset}/{data_dir} placeholders in the default path.
            parser.set_defaults(**{attr_name: getattr(args, attr_name).format(dataset=args.dataset, data_dir=args.data_dir)})
    if ('test' not in data_splits):
        parser.set_defaults(test_statements=None)
class ActionTokenTester(TokenTester):
    """Token tester for action tokens.

    NOTE(review): the consistency check is deliberately unimplemented;
    concrete subclasses must override test_consistent.
    """

    def test_consistent(self, env, dom, dom_elem):
        # No default implementation — must be provided by a subclass.
        raise NotImplementedError()
def download_wiki2():
    """Download and extract the WikiText-2 corpus into DATA_DIR.

    Bug fix: the URL string literal was truncated to ``URL = '`` in the
    original (a syntax error).  Restored to the canonical WikiText-2
    (word-level, v1) archive location — confirm this matches the mirror the
    project intended.
    """
    URL = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip'
    path_to_zip_file = download_file(URL)
    print(f'-I- Downloaded wikitext2 to {path_to_zip_file}. Extracting...')
    with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
        zip_ref.extractall(DATA_DIR)
    print('-I- Done')
def helper_prod_test_service(request: Request, expected_text: str):
    """Issue *request* against the production service and validate the response.

    Checks, per completion: the token texts concatenate exactly to the
    completion text; echo_prompt semantics; the max_tokens bound; the
    completion logprob equals the sum of token logprobs; top-k logprob
    consistency (with temperature == 0 the chosen token must be the argmax).
    Finally, at least one completion must equal *expected_text*.
    """
    service = get_prod_service()
    auth = get_authentication()
    result = service.make_request(auth, request)
    print(result)
    assert result.success
    assert (len(result.completions) == request.num_completions)
    for completion in result.completions:
        # Tokens must reassemble exactly into the completion text.
        assert (''.join((token.text for token in completion.tokens)) == completion.text)
        if request.echo_prompt:
            assert completion.text.startswith(request.prompt)
        if (not request.echo_prompt):
            assert (len(completion.tokens) <= request.max_tokens)
        assert (completion.logprob == sum((token.logprob for token in completion.tokens)))
        # The first token is skipped — presumably it may lack top_logprobs;
        # confirm against the service contract.
        for token in completion.tokens[1:]:
            assert (len(token.top_logprobs) == request.top_k_per_token)
            if (token.text in token.top_logprobs):
                assert (token.logprob == token.top_logprobs[token.text])
            if (request.temperature == 0):
                # Greedy decoding: the chosen token must be the best one.
                assert (token.text in token.top_logprobs)
                assert (token.logprob == max(token.top_logprobs.values()))
    assert any(((completion.text == expected_text) for completion in result.completions))
def test_case14():
    """POSTing the malformed payload subdata14 to the entities endpoint must
    be rejected with HTTP 400 Bad Request."""
    endpoint = brokerIp + '/ngsi-ld/v1/entities/'
    response = requests.post(endpoint, data=json.dumps(ld_data.subdata14))
    # Log the broker's reply to ease debugging of failed runs.
    print(response.content)
    print(response.status_code)
    assert response.status_code == 400
class input_file(cmd_arg):
    """Command-line argument referring to an input file resolved in the source tree."""

    def find_node(self, base_path):
        """Resolve self.name relative to *base_path*; raise WafError if absent.

        Bug fix: the original format string had a single %s but two
        arguments, so a missing file raised TypeError instead of the
        intended WafError.
        """
        assert isinstance(base_path, Node.Node)
        self.node = base_path.find_resource(self.name)
        if self.node is None:
            raise Errors.WafError('Input file %s not found in %s' % (self.name, base_path))

    def get_path(self, env, absolute):
        """Return the templated path of the node, absolute or source-relative."""
        if absolute:
            return self.template % self.node.abspath()
        return self.template % self.node.srcpath()
def match_declarations_with_differentiability_info(declarations, differentiability_infos):
    """Annotate each declaration in place with its differentiability info.

    Matching tries the declaration's exact signature first, then the base
    variant; unmatched declarations get None / empty defaults.
    """
    by_signature = {info['signature']: info for info in differentiability_infos}

    def lookup(decl):
        # Exact signature first, then fall back to the base variant.
        exact = get_signature(decl)
        if exact in by_signature:
            return by_signature[exact]
        return by_signature.get(get_signature(decl, use_base_variant=True))

    for decl in declarations:
        info = lookup(decl)
        decl['derivative'] = info['autograd_fn'] if info else None
        decl['non_differentiable_arg_names'] = info['non_differentiable_arg_names'] if info else []
        decl['output_differentiability'] = info['output_differentiability'] if info else None
def get_root():
    """Parse resources/config.xml with a SAX parser and return the parser.

    Security fix: external general entities are now disabled.  The original
    enabled ``feature_external_ges``, which makes the parser resolve
    external entities — a classic XXE vulnerability if the XML is ever
    attacker-influenced.  If external entity expansion was genuinely
    required, re-enable it deliberately with a comment explaining why.
    """
    parser = xml.sax.make_parser()
    handler = MyHandler()
    parser.setContentHandler(handler)
    parser.setFeature(feature_external_ges, False)
    parser.parse('resources/config.xml')
    return parser
class EngineType(Enum):
    """Identifiers for the hardware engine kinds; UNKNOWN marks unrecognized ones."""

    BD = 0
    GDMA = 1
    GDE = 2
    SORT = 3
    NMS = 4
    CDMA = 5
    UNKNOWN = -1
def issigned_long_longarray(var):
    """Return True when *var* is an array of kind-8 (64-bit) integer or logical type."""
    if not isarray(var):
        return False
    return var.get('typespec') in ('integer', 'logical') and get_kind(var) == '8'
def param2pystr(p):
    """Map a parameter to its ctypes type string.

    Array and output parameters are passed by reference and so become
    ``ctypes.POINTER(<type>)``; plain input scalars map to the bare type.

    Fix: the original condition tested ``IN_ARRAY`` twice; the membership
    test below keeps the same semantics without the duplication.
    """
    if param_kind(p) in (IN_ARRAY, OUT_ARRAY, INOUT_ARRAY, OUT):
        return 'ctypes.POINTER(%s)' % type2pystr(param_type(p))
    return type2pystr(param_type(p))
@pytest.mark.core
@pytest.mark.usefixtures('interactions_full_pandas_dataset')
def test_feature_schema_schema_copy(interactions_full_pandas_dataset):
    """Copying a feature schema preserves length, truthiness and subsets,
    while dropping computed cardinality on categorical features.

    NOTE(review): the decorators were garbled to ``.core`` /
    ``.usefixtures(...)`` in the original source; they are restored here as
    pytest marks — confirm against the project's test conventions.
    """
    feature_list = get_features(interactions_full_pandas_dataset)
    feature_list_copy = feature_list.copy()
    for feature in feature_list_copy.values():
        if (feature.feature_type == FeatureType.CATEGORICAL):
            # Copies are expected to reset the computed cardinality.
            assert (feature.cardinality is None)
    assert (bool(feature_list_copy) is True)
    assert (len(feature_list_copy) == len(feature_list))
    assert (len(feature_list_copy.subset(['user_id'])) == len(feature_list.subset(['user_id'])))
def select_child(state_dict, string):
    """Return the sub-state-dict of entries under the *string* prefix, with
    the prefix stripped from the keys.

    Bug fix: the original used ``str.replace``, which removes *every*
    occurrence of the prefix inside a key (e.g. ``'enc.enc.w'`` collapsed
    to ``'w'``); slicing strips only the leading prefix.
    """
    if not string.endswith('.'):
        string = string + '.'
    n = len(string)
    return {k[n:]: v for (k, v) in state_dict.items() if k.startswith(string)}
class TestFairseqEncoderBase(unittest.TestCase):
    """Base test case for FairseqEncoder implementations.

    Subclasses register an encoder and a forward input via setUpEncoder()/
    setUpInput() and inherit test_forward.  The base class itself is
    skipped.
    """

    @classmethod
    def setUpClass(cls):
        # Bug fix: setUpClass must be a classmethod; without the decorator
        # unittest invokes it without `cls` and raises TypeError.
        if cls is TestFairseqEncoderBase:
            raise unittest.SkipTest('Skipping test case in base')
        super().setUpClass()

    def setUpEncoder(self, encoder):
        """Register the encoder under test (must be a FairseqEncoder)."""
        self.assertTrue(isinstance(encoder, FairseqEncoder), msg='This class is only used for test FairseqEncoder')
        self.encoder = encoder

    def setUpInput(self, input=None):
        """Register the forward input; encoders take no prev_output_tokens."""
        self.forward_input = (get_dummy_input() if (input is None) else input)
        self.forward_input.pop('prev_output_tokens', None)

    def setUp(self):
        self.encoder = None
        self.forward_input = None

    def test_forward(self):
        """Run encoder.forward and validate the output structure."""
        if (self.encoder and self.forward_input):
            bsz = self.forward_input['src_tokens'].size(0)
            forward_output = self.encoder.forward(**self.forward_input)
            (succ, msg) = check_encoder_output(forward_output, batch_size=bsz)
            if (not succ):
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output
def load(filename, batch_size=None, exclude_parameter=False, parameter_only=False, extension='.nntxt', parameter_scope=None, rng=None):
    """Load an NNabla network description (and/or parameters) into a ProtoGraph.

    Args:
        filename: file path(s) accepted by load_files.
        batch_size: batch size to instantiate the graph with (None keeps the
            file's value).
        exclude_parameter: skip loading parameter values.
        parameter_only: load only parameters, not the network.
        extension: default extension when the filename has none.
        parameter_scope: existing parameter dict to populate (a fresh
            OrderedDict is created when None).
        rng: numpy RandomState for initialization; seeded with 0 by default
            for reproducibility.

    Returns:
        ProtoGraph built from the parsed protobuf.
    """
    # Imports are local to avoid a hard dependency at module import time.
    from nnabla.utils import nnabla_pb2
    from nnabla.utils.get_file_handle import get_initial_file_loader, load_files, FileHandlerContext
    ctx = FileHandlerContext()
    ctx.exclude_parameter = exclude_parameter
    ctx.parameter_only = parameter_only
    ctx.proto = nnabla_pb2.NNablaProtoBuf()
    if (parameter_scope is None):
        ctx.parameter_scope = OrderedDict()
    else:
        ctx.parameter_scope = parameter_scope
    file_loaders = get_initial_file_loader()
    if (rng is None):
        rng = np.random.RandomState(0)
    # Populate ctx.proto / ctx.parameter_scope from the given file(s).
    load_files(ctx, file_loaders, filename, extension)
    g = ProtoGraph.from_proto(ctx.proto, batch_size=batch_size, param_scope=ctx.parameter_scope, rng=rng)
    return g
def text_query(clip_model, scene_pcd, scene_graph, device='cuda:0'):
    """Interactive loop: match text queries against scene-graph node features
    and highlight the best match in an Open3D viewer.

    For each query, the CLIP text feature is compared (cosine distance) to
    every node's stored 'feature'; the closest node's points are colored
    cyan.  Inside the viewer, J/K step forward/backward through matches in
    ascending-distance order.  Entering 'q' ends the loop.
    """
    query = input("Please query an object: (input 'q' to quit)\n")
    while (query != 'q'):
        text_feature = get_clip_feature(clip_model, query, normalize=True, device=device)
        text_feature = text_feature.cpu().detach().numpy().flatten()
        # Cosine distance between the query and every node feature.
        distances = []
        for node_id in scene_graph.nodes:
            instance_feature = scene_graph.nodes[node_id]['feature']
            feature_distance = distance.cosine(text_feature, instance_feature)
            distances.append(feature_distance)
        distances = np.array(distances)
        ascending_indices = distances.argsort()
        # Cursor into the sorted matches; a list so the closure can mutate it.
        ptr = [0]
        pcd_vis = copy.deepcopy(scene_pcd)
        matched_id = ascending_indices[0]
        indices_3d = scene_graph.nodes[matched_id]['pt_indices']
        pcd_colors = np.asarray(pcd_vis.colors)
        # Highlight the best match in cyan.
        pcd_colors[indices_3d] = (0, 1, 1)
        print(f'matched_id = {matched_id!r}, feature cosine distance: {distances[matched_id]}')

        def show_next_instance_callback(visualizer, action, mod, pressed_key):
            # action == 0 — presumably key release; confirm against the
            # Open3D key-action callback contract.
            if (action == 0):
                if (pressed_key == 'J'):
                    ptr[0] += 1
                elif (pressed_key == 'K'):
                    ptr[0] -= 1
                idx = (ptr[0] % len(scene_graph.nodes))
                instance_id = ascending_indices[idx]
                print(f'idx = {idx!r}, feature cosine distance: {distances[instance_id]}')
                pt_indices = scene_graph.nodes[instance_id]['pt_indices']
                colors = np.asarray(pcd_vis.colors)
                # Reset to the original colors, then highlight the new match.
                colors[:] = np.asarray(scene_pcd.colors)
                colors[pt_indices] = (0, 1, 1)
            return True

        vis = o3d.visualization.VisualizerWithKeyCallback()
        for key in ['J', 'K']:
            vis.register_key_action_callback(ord(key), partial(show_next_instance_callback, pressed_key=key))
        vis.create_window()
        vis.add_geometry(pcd_vis)
        vis.run()
        query = input("Please query an object: (input 'q' to quit)\n")
@dace.library.expansion
class ExpandGemvMKL(ExpandTransformation):
    """GEMV expansion for the Intel MKL environment.

    MKL exposes the same cblas interface as OpenBLAS, so the actual SDFG
    expansion is delegated to ExpandGemvOpenBLAS.

    NOTE(review): the class decorator was garbled to ``.expansion`` in the
    original; restored as ``@dace.library.expansion`` (the DaCe convention
    for registered expansions) — confirm against the original source.
    """

    environments = [environments.intel_mkl.IntelMKL]

    @staticmethod
    def expansion(*args, **kwargs):
        # Delegate to the OpenBLAS expansion (cblas-compatible).
        return ExpandGemvOpenBLAS.expansion(*args, **kwargs)
class Checkpointer(object):
    """Save and load model/auxiliary state dicts under cfg.OUTPUT_DIR.

    All paths are rooted at the hard-coded '/deep/group/sharonz/ALAE'
    prefix; a 'last_checkpoint' file tracks the most recent save.

    NOTE(review): the save path is joined onto the hard-coded prefix — this
    probably belongs in configuration rather than code.
    """

    def __init__(self, cfg, models, auxiliary=None, save=True):
        # models: dict name -> model; auxiliary: dict name -> objects with
        # state_dict()/load_state_dict() (optimizers, schedulers, ...).
        self.models = models
        self.auxiliary = auxiliary
        self.cfg = cfg
        self._save = save  # master switch: when False, save() is a no-op

    def save(self, _name, **kwargs):
        """Serialize model/auxiliary state plus **kwargs to <OUTPUT_DIR>/<_name>.pth."""
        if (not self._save):
            return
        data = dict()
        data['models'] = dict()
        data['auxiliary'] = dict()
        for (name, model) in self.models.items():
            data['models'][name] = get_model_dict(model)
        if (self.auxiliary is not None):
            for (name, item) in self.auxiliary.items():
                data['auxiliary'][name] = item.state_dict()
        data.update(kwargs)

        # Fix: the original had a bare `_func` statement here — the tail of
        # a stripped `@utils.async_func` decorator (the ALAE convention for
        # background saves).  Restored; confirm against the original source.
        @utils.async_func
        def save_data():
            save_file = os.path.join(self.cfg.OUTPUT_DIR, ('%s.pth' % _name))
            save_file = os.path.join('/deep/group/sharonz/ALAE', save_file)
            print(('Saving checkpoint to %s' % save_file))
            torch.save(data, save_file)
            self.tag_last_checkpoint(save_file)
        return save_data()

    def load(self, ignore_last_checkpoint=False, file_name=None):
        """Load the last (or a named) checkpoint into the registered models.

        Returns the remaining checkpoint dict (extra keys saved via
        **kwargs), or {} when nothing is loaded.
        """
        save_file = os.path.join(self.cfg.OUTPUT_DIR, 'last_checkpoint')
        save_file = os.path.join('/deep/group/sharonz/ALAE', save_file)
        try:
            with open(save_file, 'r') as last_checkpoint:
                f = last_checkpoint.read().strip()
        except IOError:
            print('No checkpoint found. Initializing model from scratch')
            if (file_name is None):
                return {}
        if ignore_last_checkpoint:
            print('Forced to Initialize model from scratch')
            return {}
        if (file_name is not None):
            # An explicit file name overrides the last_checkpoint pointer.
            f = file_name
        print('Loading checkpoint from {}'.format(f))
        if ('sharonz' not in f):
            f = ('/deep/group/sharonz/ALAE/' + f)
        checkpoint = torch.load(f, map_location=torch.device('cuda:0'))
        for (name, model) in self.models.items():
            if (name in checkpoint['models']):
                try:
                    model_dict = checkpoint['models'].pop(name)
                    if (model_dict is not None):
                        # strict=False: tolerate missing/unexpected keys.
                        self.models[name].load_state_dict(model_dict, strict=False)
                    else:
                        print(('WARNING:' + ('State dict for model "%s" is None ' % name)))
                except RuntimeError as e:
                    print(('WARNING:' + ('%s\nFailed to load: %s\n%s' % (('!' * 160), name, ('!' * 160)))))
                    print(('WARNING:' + ('\nFailed to load: %s' % str(e))))
            else:
                print(('WARNING:' + ('No state dict for model: %s' % name)))
        checkpoint.pop('models')
        if (('auxiliary' in checkpoint) and self.auxiliary):
            print('Loading auxiliary from {}'.format(f))
            # Try the current layout first, then two legacy layouts
            # ('optimizers' sub-dict, and top-level entries).
            for (name, item) in self.auxiliary.items():
                try:
                    if (name in checkpoint['auxiliary']):
                        self.auxiliary[name].load_state_dict(checkpoint['auxiliary'].pop(name))
                    if (('optimizers' in checkpoint) and (name in checkpoint['optimizers'])):
                        self.auxiliary[name].load_state_dict(checkpoint['optimizers'].pop(name))
                    if (name in checkpoint):
                        self.auxiliary[name].load_state_dict(checkpoint.pop(name))
                except IndexError:
                    print(('WARNING:' + ('%s\nFailed to load: %s\n%s' % (('!' * 160), name, ('!' * 160)))))
            checkpoint.pop('auxiliary')
        return checkpoint

    def tag_last_checkpoint(self, last_filename):
        """Record *last_filename* as the most recent checkpoint."""
        save_file = os.path.join(self.cfg.OUTPUT_DIR, 'last_checkpoint')
        save_file = os.path.join('/deep/group/sharonz/ALAE', save_file)
        with open(save_file, 'w') as f:
            f.write(last_filename)
class FakeProtocol():
    """Minimal protocol stand-in for tests: records whether peers were set
    and whether the protocol was started, and registers itself on a Rule."""

    def __init__(self, name, memories=None):
        # Bug fix: the original used a mutable default (memories=[]), which
        # is shared across all instances created without the argument.
        self.name = name
        self.other_is_setted = False
        self.is_started = False
        self.rule = Rule(None, None, None, None, None)
        self.rule.protocols.append(self)
        self.memories = memories if memories is not None else []
        self.own = None

    def is_ready(self):
        """Ready once set_others() has been called."""
        return self.other_is_setted

    def set_others(self, other, arg2, arg3):
        # Arguments are ignored; only the fact of the call is recorded.
        self.other_is_setted = True

    def start(self):
        self.is_started = True
def multi_dimensional_attention(rep_tensor, rep_mask, scope=None, keep_prob=1.0, is_train=None, wd=0.0, activation='elu', tensor_dict=None, name=None):
    """Multi-dimensional (per-feature) attention pooling over a sequence.

    A two-layer MLP scores each timestep, padded positions are masked out,
    a softmax is taken over the sequence axis independently per feature
    dimension, and the weighted sum of timesteps is returned.

    Args:
        rep_tensor: float tensor [batch, seq_len, ivec].
        rep_mask: mask over valid sequence positions.
        scope: variable scope name (defaults to 'multi_dimensional_attention').
        keep_prob, is_train, wd, activation: forwarded to bn_dense_layer.
        tensor_dict, name: when both given, the attention weights are
            recorded under tensor_dict[name].

    Returns:
        [batch, ivec] attention-pooled representation.
    """
    (bs, sl, vec) = (tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2])
    ivec = rep_tensor.get_shape()[2]
    with tf.variable_scope((scope or 'multi_dimensional_attention')):
        map1 = bn_dense_layer(rep_tensor, ivec, True, 0.0, 'bn_dense_map1', activation, False, wd, keep_prob, is_train)
        map2 = bn_dense_layer(map1, ivec, True, 0.0, 'bn_dense_map2', 'linear', False, wd, keep_prob, is_train)
        # Push masked positions toward -inf so the softmax ignores them.
        map2_masked = exp_mask_for_high_rank(map2, rep_mask)
        # Softmax over the sequence axis (axis=1), separately per feature.
        soft = tf.nn.softmax(map2_masked, 1)
        attn_output = tf.reduce_sum((soft * rep_tensor), 1)
        if ((tensor_dict is not None) and (name is not None)):
            tensor_dict[name] = soft
        return attn_output
class TestKerasBaseWeightsQuantizer(BaseKerasTrainableInfrastructureTest):
    """Tests validation and (de)serialization of the base weights quantizer config."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_weights_quantization_config(self):
        """Return a UNIFORM 8-bit per-channel weights config — deliberately
        mismatched with ZeroWeightsQuantizer's allowed quantization methods."""
        return TrainableQuantizerWeightsConfig(weights_quantization_method=QuantizationMethod.UNIFORM, weights_n_bits=8, weights_quantization_params={}, enable_weights_quantization=True, weights_channels_axis=3, weights_per_channel_threshold=True, min_threshold=0)

    def run_test(self):
        # A quantizer restricted to POWER_OF_TWO/SYMMETRIC must reject UNIFORM.
        with self.unit_test.assertRaises(Exception) as e:
            ZeroWeightsQuantizer(self.get_weights_quantization_config())
        self.unit_test.assertEqual(f'Quantization method mismatch expected: [<QuantizationMethod.POWER_OF_TWO: 0>, <QuantizationMethod.SYMMETRIC: 3>] and got QuantizationMethod.UNIFORM', str(e.exception))
        # Passing an activation config where a weights config is expected fails.
        with self.unit_test.assertRaises(Exception) as e:
            ZeroWeightsQuantizer(self.get_activation_quantization_config())
        self.unit_test.assertEqual(f'Expect weight quantization got activation', str(e.exception))
        # A valid weights config (from the base class) is accepted and stored.
        weight_quantization_config = super(TestKerasBaseWeightsQuantizer, self).get_weights_quantization_config()
        quantizer = ZeroWeightsQuantizer(weight_quantization_config)
        self.unit_test.assertTrue((quantizer.quantization_config == weight_quantization_config))
        # The config survives a serialization round trip.
        config_data = config_serialization(weight_quantization_config)
        self.unit_test.assertTrue(config_data['enable_weights_quantization'])
        deserialized_config = config_deserialization(config_data)
        self.unit_test.assertTrue((weight_quantization_config.__dict__ == deserialized_config.__dict__))
# NOTE(review): the bare "()" below looks like a stripped decorator — most
# likely "@pytest.fixture()" — confirm against the original source.
()
def sample_report() -> CoverageReport:
    """Build a fixed CoverageReport for a small 13-line demo module, with
    hand-written per-line coverage entries (branch coverage 0.375, line
    coverage 0.25)."""
    return CoverageReport(module='cov_demo', source=['def foo():\n', '    pass\n', '\n', '\n', 'def baz():\n', '    assert 3 == 5 and 3 == -3\n', '\n', '\n', 'def bar(x: int):\n', '    if x:\n', '        return 5\n', '    else:\n', '        return 6\n'], branches=CoverageEntry(covered=2, existing=6), branchless_code_objects=CoverageEntry(covered=1, existing=2), lines=CoverageEntry(covered=2, existing=8), line_annotations=[LineAnnotation(line_no=1, total=CoverageEntry(covered=2, existing=3), branches=CoverageEntry(covered=0, existing=0), branchless_code_objects=CoverageEntry(covered=1, existing=2), lines=CoverageEntry(covered=1, existing=1)), LineAnnotation(line_no=2, total=CoverageEntry(covered=0, existing=1), branches=CoverageEntry(covered=0, existing=0), branchless_code_objects=CoverageEntry(covered=0, existing=0), lines=CoverageEntry(covered=0, existing=1)), LineAnnotation(line_no=3, total=CoverageEntry(covered=0, existing=0), branches=CoverageEntry(covered=0, existing=0), branchless_code_objects=CoverageEntry(covered=0, existing=0), lines=CoverageEntry(covered=0, existing=0)), LineAnnotation(line_no=4, total=CoverageEntry(covered=0, existing=0), branches=CoverageEntry(covered=0, existing=0), branchless_code_objects=CoverageEntry(covered=0, existing=0), lines=CoverageEntry(covered=0, existing=0)), LineAnnotation(line_no=5, total=CoverageEntry(covered=1, existing=1), branches=CoverageEntry(covered=0, existing=0), branchless_code_objects=CoverageEntry(covered=0, existing=0), lines=CoverageEntry(covered=1, existing=1)), LineAnnotation(line_no=6, total=CoverageEntry(covered=2, existing=5), branches=CoverageEntry(covered=2, existing=4), branchless_code_objects=CoverageEntry(covered=0, existing=0), lines=CoverageEntry(covered=0, existing=1)), LineAnnotation(line_no=7, total=CoverageEntry(covered=0, existing=0), branches=CoverageEntry(covered=0, existing=0), branchless_code_objects=CoverageEntry(covered=0, existing=0), lines=CoverageEntry(covered=0, existing=0)), LineAnnotation(line_no=8, 
total=CoverageEntry(covered=0, existing=0), branches=CoverageEntry(covered=0, existing=0), branchless_code_objects=CoverageEntry(covered=0, existing=0), lines=CoverageEntry(covered=0, existing=0)), LineAnnotation(line_no=9, total=CoverageEntry(covered=0, existing=1), branches=CoverageEntry(covered=0, existing=0), branchless_code_objects=CoverageEntry(covered=0, existing=0), lines=CoverageEntry(covered=0, existing=1)), LineAnnotation(line_no=10, total=CoverageEntry(covered=0, existing=3), branches=CoverageEntry(covered=0, existing=2), branchless_code_objects=CoverageEntry(covered=0, existing=0), lines=CoverageEntry(covered=0, existing=1)), LineAnnotation(line_no=11, total=CoverageEntry(covered=0, existing=1), branches=CoverageEntry(covered=0, existing=0), branchless_code_objects=CoverageEntry(covered=0, existing=0), lines=CoverageEntry(covered=0, existing=1)), LineAnnotation(line_no=12, total=CoverageEntry(covered=0, existing=0), branches=CoverageEntry(covered=0, existing=0), branchless_code_objects=CoverageEntry(covered=0, existing=0), lines=CoverageEntry(covered=0, existing=0)), LineAnnotation(line_no=13, total=CoverageEntry(covered=0, existing=1), branches=CoverageEntry(covered=0, existing=0), branchless_code_objects=CoverageEntry(covered=0, existing=0), lines=CoverageEntry(covered=0, existing=1))], branch_coverage=0.375, line_coverage=0.25)
class ColorizationModel(Pix2PixModel):
    """Pix2Pix variant for colorization: L channel in, ab channels out.

    Visuals are converted from Lab back to RGB for display.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Apply pix2pix defaults, then force the colorization dataset mode.

        Fix: restored the @staticmethod decorator — the method takes no
        self/cls, matching the upstream pix2pix model convention.
        """
        Pix2PixModel.modify_commandline_options(parser, is_train)
        parser.set_defaults(dataset_mode='colorization')
        return parser

    def __init__(self, opt):
        Pix2PixModel.__init__(self, opt)
        # Show real/fake outputs in RGB space rather than Lab channels.
        self.visual_names = ['real_A', 'real_B_rgb', 'fake_B_rgb']

    def lab2rgb(self, L, AB):
        """Convert an (L, ab) tensor pair to an RGB array.

        L in [-1, 1] is mapped to [0, 100]; AB in [-1, 1] is mapped to
        [-110, 110]; only the first batch element is converted.
        """
        AB2 = (AB * 110.0)
        L2 = ((L + 1.0) * 50.0)
        Lab = torch.cat([L2, AB2], dim=1)
        Lab = Lab[0].data.cpu().float().numpy()
        Lab = np.transpose(Lab.astype(np.float64), (1, 2, 0))
        rgb = (color.lab2rgb(Lab) * 255)
        return rgb

    def compute_visuals(self):
        """Populate the RGB visuals from the Lab inputs/outputs."""
        self.real_B_rgb = self.lab2rgb(self.real_A, self.real_B)
        self.fake_B_rgb = self.lab2rgb(self.real_A, self.fake_B)
def plot_fig(test_img, scores, gts, threshold, save_dir, class_name):
    """Save a 2x2 diagnostic figure (image / GT / heat map / segmentation)
    for every test sample.

    Args:
        test_img: normalized images; denormalization() restores display RGB.
        scores: per-pixel anomaly score maps, one per image.
        gts: ground-truth masks of shape (1, H, W) per image.
        threshold: score cutoff producing the binary segmentation mask.
        save_dir: output directory for the figures.
        class_name: filename prefix ('<class_name>_<i>').
    """
    num = len(scores)
    for i in range(num):
        img = test_img[i]
        img = denormalization(img)
        gt = gts[i].transpose(1, 2, 0).squeeze()
        heat_map = (scores[i] * 255)
        # NOTE(review): `mask` aliases scores[i]; the thresholding below
        # mutates the caller's scores array in place — confirm callers do
        # not reuse it afterwards.
        mask = scores[i]
        mask[(mask > threshold)] = 1
        mask[(mask <= threshold)] = 0
        # Morphological opening with a radius-4 disk removes small speckles.
        kernel = morphology.disk(4)
        mask = morphology.opening(mask, kernel)
        mask *= 255
        vis_img = mark_boundaries(img, mask, color=(1, 0, 0), mode='thick')
        fig = plt.figure()
        ax0 = fig.add_subplot(221)
        ax0.axis('off')
        ax0.imshow(img)
        ax0.title.set_text('Image')
        ax1 = fig.add_subplot(222)
        ax1.axis('off')
        ax1.imshow(gt, cmap='gray')
        ax1.title.set_text('GroundTruth')
        ax2 = fig.add_subplot(223)
        ax2.axis('off')
        # Heat map overlaid on the grayscale image at 50% alpha.
        ax2.imshow(img, cmap='gray', interpolation='none')
        ax2.imshow(heat_map, cmap='jet', alpha=0.5, interpolation='none')
        ax2.title.set_text('Predicted heat map')
        ax3 = fig.add_subplot(224)
        ax3.axis('off')
        ax3.imshow(vis_img)
        ax3.title.set_text('Segmentation result')
        fig.tight_layout()
        fig.savefig(os.path.join(save_dir, (class_name + '_{}'.format(i))), dpi=100)
        plt.close()
class PeriodicCheckpointer():
    """Wraps a checkpointer to save every `period` epochs and track the best metric.

    NOTE(review): `max_epoch` defaults to None, but step() computes
    `self.max_epoch - 1`, which raises TypeError when it is never set —
    callers apparently always pass it.  step() also assumes a 'metric'
    entry in **kwargs.  Confirm both against the call sites.
    """

    def __init__(self, checkpointer: Any, period: int, max_epoch: int=None):
        self.checkpointer = checkpointer
        self.period = int(period)
        self.max_epoch = max_epoch
        # Best metric seen so far; -1 so any real metric wins initially.
        self.best_metric = (- 1)

    def step(self, epoch: int, **kwargs: Any):
        """Possibly save periodic / best / final checkpoints for this epoch."""
        epoch = int(epoch)
        additional_state = {'epoch': epoch}
        additional_state.update(kwargs)
        # Periodic save (the final epoch is handled separately below).
        if ((((epoch + 1) % self.period) == 0) and (epoch < (self.max_epoch - 1))):
            if (additional_state['metric'] > self.best_metric):
                self.checkpointer.save('model_best', **additional_state)
                self.best_metric = additional_state['metric']
            self.checkpointer.save('model_{:04d}'.format(epoch), **additional_state)
        if (epoch >= (self.max_epoch - 1)):
            # Final epoch: refresh the best model if needed, then model_final.
            if (additional_state['metric'] > self.best_metric):
                self.checkpointer.save('model_best', **additional_state)
            self.checkpointer.save('model_final', **additional_state)

    def save(self, name: str, **kwargs: Any):
        """Directly save a named checkpoint via the wrapped checkpointer."""
        self.checkpointer.save(name, **kwargs)
def test_bilevel():
    """A 1-bit checkerboard PNG decodes with even rows True, odd rows False."""
    # Alternate full rows of True/False, 10x10 overall.
    expected = np.tile(np.array([[True], [False]]), (5, 10))
    img = imread(fetch('data/checker_bilevel.png'))
    assert_array_equal(img.astype(bool), expected)
def init_weight(dim_in, dim_out, name=None, stddev=1.0):
    """Create a trainable [dim_in, dim_out] weight matrix, initialized with
    truncated-normal noise whose stddev is scaled by 1/sqrt(dim_in)."""
    scale = stddev / math.sqrt(float(dim_in))
    initial = tf.truncated_normal([dim_in, dim_out], stddev=scale)
    return tf.Variable(initial, name=name)
class DebertaTokenizerFast(GPT2TokenizerFast):
    """Fast DeBERTa tokenizer (byte-level BPE, GPT-2 based) with BERT-style
    special tokens ([CLS]/[SEP]/[UNK]/[PAD]/[MASK]).

    Fix: the decorators on the mask_token accessors were stripped in the
    original (`def mask_token` with no @property, and `_token.setter`);
    restored to @property / @mask_token.setter, matching the upstream
    transformers implementation.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask', 'token_type_ids']
    slow_tokenizer_class = DebertaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='[CLS]', eos_token='[SEP]', sep_token='[SEP]', cls_token='[CLS]', unk_token='[UNK]', pad_token='[PAD]', mask_token='[MASK]', add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)

    @property
    def mask_token(self) -> str:
        """The mask token string, or None (with an error log) when unset."""
        if ((self._mask_token is None) and self.verbose):
            logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask tokens absorb the preceding space (lstrip=True) so that
        # '[MASK]' matches both ' [MASK]' and '[MASK]'.
        value = (AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value)
        self._mask_token = value

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """[CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """All-zero token type ids — DeBERTa does not use segment embeddings."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len(((((cls + token_ids_0) + sep) + token_ids_1) + sep)) * [0])
class TestLoopBlockingSolver(TestLoopBlockingFixture):
    """Tests for the gbuf-reside loop-blocking solver.

    The solver enumerates blocking schemes in which exactly one data
    category (dce) resides in the global buffer while the others bypass it.
    These tests check that property, optimality against exhaustive search,
    and that the per-category solution sets partition the unconstrained set.
    """

    def setUp(self):
        super(TestLoopBlockingSolver, self).setUp()
        # One option set per data category, forcing all *other* categories
        # to bypass gbuf so only `reside_dce` may reside.
        self.optkeys_bypsol = ['BYPSOL_{}'.format(dce) for dce in range(de.NUM)]
        for reside_dce in range(de.NUM):
            opt_dict = self.options['BYPSOL']._asdict()
            byp = ([True] * de.NUM)
            byp[reside_dce] = False
            opt_dict['sw_gbuf_bypass'] = tuple(byp)
            self.options[self.optkeys_bypsol[reside_dce]] = Option(**opt_dict)

    def test_reside_sol(self):
        """Solver solutions keep only the designated category in gbuf."""
        for reside_dce in range(de.NUM):
            optkey = self.optkeys_bypsol[reside_dce]
            for (bl_ts, bl_ords) in loop_blocking_solver.gen_loopblocking_gbuf_reside(self.nld['BASE'], self.resource['BASE'], self.options[optkey]):
                lbs = self._lbs(bl_ts, bl_ords, optkey=optkey)
                self.assertTrue(lbs.stored_in_gbuf[reside_dce])
                self.assertFalse(any((lbs.stored_in_gbuf[dce] for dce in range(de.NUM) if (dce != reside_dce))))

    def test_reside_sol_opt(self, rsrckey='BASE', wlkey='BASE'):
        """Solver solutions cover the optimal schemes found by brute force."""
        def _cost(lbs):
            # Cost = [DRAM accesses, GBUF accesses], compared lexicographically.
            access = lbs.get_access()
            return [int(sum(access[me.DRAM])), int(sum(access[me.GBUF]))]
        min_sch_dict = {}
        sol_sch_dict = {}
        # Exhaustive search: best cost per reside category (None = nothing resides).
        for (bl_ts, bl_ords) in self._gen_loopblocking_all(wlkey=wlkey):
            lbs = self._lbs(bl_ts, bl_ords, wlkey=wlkey, rsrckey=rsrckey, optkey='BYP')
            if (not lbs.is_valid()):
                continue
            all_reside_dce = [dce for dce in range(de.NUM) if lbs.stored_in_gbuf[dce]]
            if (not all_reside_dce):
                min_sch = min_sch_dict.get(None, None)
                if ((not min_sch) or (_cost(lbs) < min_sch)):
                    min_sch_dict[None] = _cost(lbs)
            elif (len(all_reside_dce) == 1):
                (dce,) = all_reside_dce
                min_sch = min_sch_dict.get(dce, None)
                if ((not min_sch) or (_cost(lbs) < min_sch)):
                    min_sch_dict[dce] = _cost(lbs)
        # Solver search: best cost per actual reside category.
        for reside_dce in range(de.NUM):
            optkey = self.optkeys_bypsol[reside_dce]
            for (bl_ts, bl_ords) in loop_blocking_solver.gen_loopblocking_gbuf_reside(self.nld[wlkey], self.resource[rsrckey], self.options[optkey]):
                lbs = self._lbs(bl_ts, bl_ords, wlkey=wlkey, rsrckey=rsrckey, optkey='BYP')
                self.assertTrue(lbs.is_valid())
                self.assertFalse(any((lbs.stored_in_gbuf[dce] for dce in range(de.NUM) if (dce != reside_dce))))
                # The designated category may still end up bypassing gbuf.
                true_reside_dce = (reside_dce if lbs.stored_in_gbuf[reside_dce] else None)
                sol_sch = sol_sch_dict.get(true_reside_dce, None)
                if ((not sol_sch) or (_cost(lbs) < sol_sch)):
                    sol_sch_dict[true_reside_dce] = _cost(lbs)
        self.assertTrue((sol_sch_dict.items() <= min_sch_dict.items()), 'test_reside_sol_opt: wlkey {} rsrckey {}: solutions do not cover all optimal ones. sol {} opt {}.'.format(wlkey, rsrckey, sol_sch_dict, min_sch_dict))
        self.assertListEqual(min(sol_sch_dict.values()), min(min_sch_dict.values()), 'test_reside_sol_opt: wlkey {} rsrckey {}: solutions do not cover the optimal one. sol {} opt {}.'.format(wlkey, rsrckey, sol_sch_dict, min_sch_dict))

    def test_reside_sol_opt_resource(self):
        """Optimality holds for larger and smaller resource configurations."""
        for rsrckey in ['LG', 'SM']:
            self.test_reside_sol_opt(rsrckey=rsrckey)

    def test_reside_sol_opt_pool(self):
        """Pooling workloads are rejected by the solver."""
        with self.assertRaisesRegex(ValueError, 'loop_blocking_solver: .*'):
            self.test_reside_sol_opt(wlkey='POOL')

    def test_reside_sol_opt_zero(self):
        """Optimality holds for workloads with zero-size fil/ifm data."""
        for wlkey in ['ZERO_FIL', 'ZERO_IFM']:
            self.test_reside_sol_opt(wlkey=wlkey)

    def test_reside_sol_cnt(self):
        """Per-category solution sets are disjoint and union to the full set."""
        all_set = set(loop_blocking_solver.gen_loopblocking_gbuf_reside(self.nld['BASE'], self.resource['BASE'], self.options['BYPSOL']))
        union_set = set()
        reside_set_list = []
        for reside_dce in range(de.NUM):
            optkey = self.optkeys_bypsol[reside_dce]
            s = set(loop_blocking_solver.gen_loopblocking_gbuf_reside(self.nld['BASE'], self.resource['BASE'], self.options[optkey]))
            reside_set_list.append(s)
            union_set |= s
        self.assertSetEqual(all_set, union_set)
        # Equal sizes imply the per-category sets are pairwise disjoint.
        self.assertEqual(len(union_set), sum((len(s) for s in reside_set_list)))
def preprocess_rl_variant(variant):
    """Fill in default observation/goal dict keys on *variant*, in place.

    State-based experiments (do_state_exp truthy) get 'state_*' keys;
    otherwise latent-space keys are used.  Existing entries are kept.

    NOTE(review): 'acheived' is misspelled in the original key strings;
    the spelling is preserved because other code may depend on it.
    """
    if variant.get('do_state_exp', False):
        prefix = 'state'
    else:
        prefix = 'latent'
    variant.setdefault('observation_key', f'{prefix}_observation')
    variant.setdefault('desired_goal_key', f'{prefix}_desired_goal')
    variant.setdefault('achieved_goal_key', f'{prefix}_acheived_goal')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.