code stringlengths 101 5.91M |
|---|
def create_json(foldername, trainingcsv):
    """Write a D3M-style ``datasetDoc.json`` describing *trainingcsv*.

    Every column is typed ``real``; the column named ``class_`` gets the
    ``suggestedTarget`` role, all others the ``attribute`` role.

    Args:
        foldername: Human-readable dataset name to record in the document.
        trainingcsv: Path to the training CSV whose header supplies columns.

    Returns:
        Tuple ``(dataset_id, filename)`` — a fresh UUID string and the name
        of the JSON file written to the current working directory.
    """
    dataset_id = str(uuid.uuid4())
    # Iterating a DataFrame yields its column names.
    colnames = list(pd.read_csv(trainingcsv))
    columns = []
    for i, colname in enumerate(colnames):
        role = ['suggestedTarget'] if colname == 'class_' else ['attribute']
        columns.append({'colIndex': i, 'colName': colname,
                        'colType': 'real', 'role': role})
    data = {
        'about': {
            'datasetID': dataset_id,
            'datasetName': foldername,
            'humanSubjectsResearch': False,
            'license': 'CC',
            'datasetSchemaVersion': '3.0',
            'redacted': False,
        },
        'dataResources': [{
            'resID': '0',
            # os.path.join instead of manual '/' concatenation.
            'resPath': os.path.join(os.getcwd(), trainingcsv),
            'resType': 'table',
            'resFormat': ['text/csv'],
            'isCollection': False,
            'columns': columns,
        }],
    }
    filename = 'datasetDoc.json'
    # Context manager guarantees the handle is closed even if dump raises.
    with open(filename, 'w') as jsonfile:
        json.dump(data, jsonfile)
    return (dataset_id, filename)
def torch_default_param_init_fn_(module: nn.Module, verbose: int=0, **kwargs):
    """Re-initialize *module* in place via its own ``reset_parameters`` hook.

    Extra keyword arguments are accepted for signature compatibility with
    other init functions and deliberately ignored.
    """
    del kwargs
    if verbose > 1:
        warnings.warn(f"Initializing network using module's reset_parameters attribute")
    # Modules without a reset_parameters hook are left untouched.
    if not hasattr(module, 'reset_parameters'):
        return
    module.reset_parameters()
def test_psp_head():
    """Unit test for PSPHead: config validation, norm handling, output shape."""
    # pool_scales must be a tuple/list; a bare int should be rejected.
    with pytest.raises(AssertionError):
        PSPHead(in_channels=4, channels=2, num_classes=19, pool_scales=1)
    # Without norm_cfg the conv blocks carry no norm layer.
    head = PSPHead(in_channels=4, channels=2, num_classes=19)
    assert (not _conv_has_norm(head, sync_bn=False))
    # With SyncBN requested, the conv blocks must contain a sync norm layer.
    head = PSPHead(in_channels=4, channels=2, num_classes=19, norm_cfg=dict(type='SyncBN'))
    assert _conv_has_norm(head, sync_bn=True)
    inputs = [torch.randn(1, 4, 23, 23)]
    head = PSPHead(in_channels=4, channels=2, num_classes=19, pool_scales=(1, 2, 3))
    if torch.cuda.is_available():
        (head, inputs) = to_cuda(head, inputs)
    # Each PSP module's adaptive pooling must match the requested scale.
    assert (head.psp_modules[0][0].output_size == 1)
    assert (head.psp_modules[1][0].output_size == 2)
    assert (head.psp_modules[2][0].output_size == 3)
    outputs = head(inputs)
    # Spatial size is preserved; channel dim equals num_classes.
    assert (outputs.shape == (1, head.num_classes, 23, 23))
class Bottleneck(nn.Module):
    """ResNet-style bottleneck residual block: 1x1 reduce, 3x3, 1x1 expand.

    The output channel count is ``planes * expansion``.  When ``NL`` is
    False the trailing ReLU after the residual addition is skipped.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, NL=True):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the middle 3x3 conv scales with base_width and groups.
        width = int(planes * (base_width / 64.0)) * groups
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.nolinear = NL

    def forward(self, x):
        # Identity (or projected) shortcut around the three conv stages.
        shortcut = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out) if self.nolinear else out
# NOTE(review): the two lines below look like decorator remnants whose '@'
# prefixes were lost in extraction — presumably a hypothesis-style
# "given(events=subsets(_ALL_EVENTS_WITH_HANDLERS))" plus an
# "@_events_with_registered_handlers_to_subset" decorator; confirm against
# the original source before running.
(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
_events_with_registered_handlers_to_subset
def test_function_call(events):
    """Trace a function definition and a call, checking the exact event stream.

    First runs a cell defining ``foo`` and checks only module/statement
    events fire; then calls ``foo([42])`` and checks the full load/call/
    return event sequence, filtered down to the sampled ``events`` subset.
    """
    # Recorder must start clean for the diffs below to be meaningful.
    assert (_RECORDED_EVENTS == [])
    run_cell('\n def foo(x):\n return [x]\n ')
    throw_and_print_diff_if_recorded_not_equal_to(filter_events_to_subset([TraceEvent.init_module, TraceEvent.before_stmt, TraceEvent.after_stmt, TraceEvent.after_module_stmt], events))
    run_cell('foo([42])')
    throw_and_print_diff_if_recorded_not_equal_to(filter_events_to_subset([TraceEvent.init_module, TraceEvent.before_stmt, TraceEvent.before_load_complex_symbol, TraceEvent.load_name, TraceEvent.before_call, TraceEvent.before_list_literal, TraceEvent.list_elt, TraceEvent.after_list_literal, TraceEvent.after_argument, TraceEvent.call, TraceEvent.before_function_body, TraceEvent.before_stmt, TraceEvent.before_return, TraceEvent.before_list_literal, TraceEvent.load_name, TraceEvent.list_elt, TraceEvent.after_list_literal, TraceEvent.after_return, TraceEvent.after_function_execution, TraceEvent.return_, TraceEvent.after_call, TraceEvent.after_load_complex_symbol, TraceEvent.after_stmt, TraceEvent.after_module_stmt], events))
def edge_density_and_new_pairs(pairs, cycle):
    """Return (edge density over the cycle, list of not-yet-connected pairs).

    ``pairs`` maps each element to the collection of elements it is already
    paired with; ``cycle`` is an ordered sequence.  Pairs are formed between
    each element and all later elements of the cycle except the final entry,
    and each pair is recorded in sorted order.
    """
    new_pairs = []
    all_pairs = []
    for idx, first in enumerate(cycle):
        # Note: the final cycle entry is excluded as a partner.
        for second in cycle[idx + 1:-1]:
            all_pairs.append(sorted([first, second]))
            if second not in pairs[first] and first not in pairs[second]:
                new_pairs.append(sorted([first, second]))
    total = len(all_pairs)
    density = 1 - float(len(new_pairs)) / (total * (total - 1) / 2)
    return (density, new_pairs)
def make_data_loader(args, yaml_file, tokenizer, is_distributed=True, is_train=True, start_iter=0, is_pretrain=False, transform=None):
    """Build a torch DataLoader over the dataset described by ``yaml_file``.

    For training, the total number of iterations is derived from the dataset
    size, the per-GPU batch size, the world size and ``args.num_train_epochs``;
    for evaluation the sampler makes a single unshuffled pass.

    Args:
        args: Namespace providing batch sizes, shuffle flag, worker count, etc.
        yaml_file: Dataset description consumed by ``build_dataset``.
        tokenizer: Tokenizer forwarded to the dataset builder.
        is_distributed: Whether to build a distributed sampler.
        is_train: Training vs. evaluation mode.
        start_iter: Iteration to resume from (training only).
        is_pretrain: Pretraining mode; requires ``is_train``.
        transform: Optional transform forwarded to the dataset builder.

    Returns:
        A ``torch.utils.data.DataLoader`` using a batch sampler and the
        project's ``collate_fn``.
    """
    if is_pretrain:
        assert is_train
        dataset = build_dataset(yaml_file, tokenizer, args, is_train, transform)
    else:
        # Under SCST fine-tuning (args.scst) the dataset is built in eval mode.
        dataset = build_dataset(yaml_file, tokenizer, args, is_train=(is_train and (not args.scst)), transform=transform)
    logger = logging.getLogger(__name__)
    if is_train:
        shuffle = args.train_shuffle
        images_per_gpu = args.per_gpu_train_batch_size
        images_per_batch = (images_per_gpu * get_world_size())
        # Floor division drops the last partial batch of each epoch.
        iters_per_batch = (len(dataset) // images_per_batch)
        num_iters = (iters_per_batch * args.num_train_epochs)
        logger.info('Train with {} images per GPU.'.format(images_per_gpu))
        logger.info('Total batch size {}'.format(images_per_batch))
        logger.info('Total training steps {}'.format(num_iters))
        logging.info('shuffle = {}'.format(shuffle))
    else:
        # Evaluation: fixed order, run until the sampler is exhausted.
        shuffle = False
        images_per_gpu = args.per_gpu_eval_batch_size
        num_iters = None
        start_iter = 0
    sampler = make_data_sampler(dataset, shuffle, is_distributed, images_per_gpu)
    batch_sampler = make_batch_data_sampler(sampler, images_per_gpu, num_iters, start_iter)
    # Deferred import; NOTE(review): presumably avoids a circular dependency
    # with the builder module — confirm.
    from src.data_layer.builder import collate_fn
    data_loader = torch.utils.data.DataLoader(dataset, num_workers=args.num_workers, batch_sampler=batch_sampler, pin_memory=True, collate_fn=collate_fn)
    return data_loader
def test_elliptical_cold_vt():
    """Mean rotational velocity of a cold disk under an elliptical perturbation.

    Evolves a Dehnen disk DF in a logarithmic halo plus an
    EllipticalDiskPotential and compares ``meanvT`` against the analytic
    expectation: unperturbed (vT ~ 1) at phi = -pi/4, shifted to vT ~ 1 - cp
    at phi = 0.
    """
    idf = dehnendf(beta=0.0, profileParams=((1.0 / 3.0), 1.0, 0.0125))
    # Ellipticity amplitude of the perturbing potential.
    cp = 0.05
    pot = [LogarithmicHaloPotential(normalize=1.0), EllipticalDiskPotential(cp=cp, sp=0.0, p=0.0, tform=(- 150.0), tsteady=125.0)]
    edf = evolveddiskdf(idf, pot=pot, to=(- 150.0))
    # At phi = -pi/4 the prediction is an unchanged mean vT (~1).
    (mvt, grid) = edf.meanvT(0.9, phi=((- numpy.pi) / 4.0), integrate_method='rk6_c', grid=True, returnGrid=True, gridpoints=_GRIDPOINTS)
    assert (numpy.fabs((mvt - 1.0)) < (10.0 ** (- 3.0))), 'Cold elliptical disk does not agree with analytical calculation for vt'
    # At phi = 0 the prediction is vT ~ 1 - cp.
    (mvt, grid) = edf.meanvT(0.9, phi=0.0, integrate_method='rk6_c', grid=True, nsigma=7.0, returnGrid=True, gridpoints=_GRIDPOINTS)
    assert (numpy.fabs(((mvt - 1.0) + cp)) < (10.0 ** (- 3.0))), 'Cold elliptical disk does not agree with analytical calculation for vt'
    return None
def _create_dummy_loader():
loader = dict(type='HardDiskLoader', repeat=1, parser=dict(type='LineJsonParser', keys=['file_name', 'height', 'width', 'annotations']))
return loader |
def test_ChandrasekharDynamicalFrictionForce_constLambda():
    """Orbit decay under dynamical friction with a constant Coulomb logarithm.

    Integrates a circular orbit in a logarithmic halo with a Chandrasekhar
    friction force and checks the final radius against the analytic
    prediction r(t) = sqrt(r0^2 - 0.604 * lnLambda * GM * sqrt(2) * t).
    """
    from galpy.orbit import Orbit
    from galpy.util import conversion
    (ro, vo) = (8.0, 220.0)
    # Satellite mass of 1e9 Msun in internal units.
    GMs = ((10.0 ** 9.0) / conversion.mass_in_msol(vo, ro))
    const_lnLambda = 7.0
    r_init = 2.0
    # Integrate for 2 Gyr (in internal time units).
    dt = (2.0 / conversion.time_in_Gyr(vo, ro))
    lp = potential.LogarithmicHaloPotential(normalize=1.0, q=1.0)
    cdfc = potential.ChandrasekharDynamicalFrictionForce(GMs=GMs, const_lnLambda=const_lnLambda, dens=lp)
    # Circular orbit at r_init in the x-y plane.
    o = Orbit([r_init, 0.0, 1.0, 0.0, 0.0, 0.0])
    ts = numpy.linspace(0.0, dt, 1001)
    o.integrate(ts, [lp, cdfc], method='odeint')
    r_pred = numpy.sqrt(((o.r() ** 2.0) - ((((0.604 * const_lnLambda) * GMs) * numpy.sqrt(2.0)) * dt)))
    assert (numpy.fabs((r_pred - o.r(ts[(- 1)]))) < 0.01), 'ChandrasekharDynamicalFrictionForce with constant lnLambda for circular orbits does not agree with analytical prediction'
    return None
class MaCowUnit(Flow):
    """A MaCow flow unit: two ActNorms and four masked convolutions (orders A-D).

    Orders A/B use the kernel as given, orders C/D the transposed kernel
    shape, so the four masked convolutions jointly cover every spatial
    direction.  All passes accumulate the per-step log-determinants.
    """

    def __init__(self, in_channels, kernel_size, s_channels, scale=True, inverse=False):
        super(MaCowUnit, self).__init__(inverse)
        self.actnorm1 = ActNorm2dFlow(in_channels, inverse=inverse)
        self.actnorm2 = ActNorm2dFlow(in_channels, inverse=inverse)
        self.conv1 = MaskedConvFlow(in_channels, (kernel_size[0], kernel_size[1]), s_channels=s_channels, order='A', scale=scale, inverse=inverse)
        self.conv2 = MaskedConvFlow(in_channels, (kernel_size[0], kernel_size[1]), s_channels=s_channels, order='B', scale=scale, inverse=inverse)
        # Orders C/D use the transposed kernel shape (w, h).
        self.conv3 = MaskedConvFlow(in_channels, (kernel_size[1], kernel_size[0]), s_channels=s_channels, order='C', scale=scale, inverse=inverse)
        self.conv4 = MaskedConvFlow(in_channels, (kernel_size[1], kernel_size[0]), s_channels=s_channels, order='D', scale=scale, inverse=inverse)

    def forward(self, input: torch.Tensor, s=None) -> Tuple[torch.Tensor, torch.Tensor]:
        """Apply actnorm1 -> conv1 -> conv2 -> actnorm2 -> conv3 -> conv4.

        Returns the transformed tensor and the accumulated log-determinant.
        """
        (out, logdet_accum) = self.actnorm1.forward(input)
        (out, logdet) = self.conv1.forward(out, s=s)
        logdet_accum = logdet_accum + logdet
        (out, logdet) = self.conv2.forward(out, s=s)
        logdet_accum = logdet_accum + logdet
        (out, logdet) = self.actnorm2.forward(out)
        logdet_accum = logdet_accum + logdet
        (out, logdet) = self.conv3.forward(out, s=s)
        logdet_accum = logdet_accum + logdet
        (out, logdet) = self.conv4.forward(out, s=s)
        logdet_accum = logdet_accum + logdet
        return (out, logdet_accum)

    def backward(self, input: torch.Tensor, s=None) -> Tuple[torch.Tensor, torch.Tensor]:
        """Invert ``forward`` by applying the sub-flows' backwards in reverse order."""
        (out, logdet_accum) = self.conv4.backward(input, s=s)
        (out, logdet) = self.conv3.backward(out, s=s)
        logdet_accum = logdet_accum + logdet
        (out, logdet) = self.actnorm2.backward(out)
        logdet_accum = logdet_accum + logdet
        (out, logdet) = self.conv2.backward(out, s=s)
        logdet_accum = logdet_accum + logdet
        (out, logdet) = self.conv1.backward(out, s=s)
        logdet_accum = logdet_accum + logdet
        (out, logdet) = self.actnorm1.backward(out)
        logdet_accum = logdet_accum + logdet
        return (out, logdet_accum)

    def init(self, data, s=None, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
        """Data-dependent initialization pass mirroring ``forward``'s order."""
        (out, logdet_accum) = self.actnorm1.init(data, init_scale=init_scale)
        (out, logdet) = self.conv1.init(out, s=s, init_scale=init_scale)
        logdet_accum = logdet_accum + logdet
        (out, logdet) = self.conv2.init(out, s=s, init_scale=init_scale)
        logdet_accum = logdet_accum + logdet
        (out, logdet) = self.actnorm2.init(out, init_scale=init_scale)
        logdet_accum = logdet_accum + logdet
        (out, logdet) = self.conv3.init(out, s=s, init_scale=init_scale)
        logdet_accum = logdet_accum + logdet
        (out, logdet) = self.conv4.init(out, s=s, init_scale=init_scale)
        logdet_accum = logdet_accum + logdet
        return (out, logdet_accum)

    # Fixed: the signature takes `cls`, so this must be a classmethod;
    # without the decorator, calling it on an instance would pass the
    # instance as `cls` and on the class it would fail entirely.
    @classmethod
    def from_params(cls, params: Dict) -> 'MaCowUnit':
        """Construct a MaCowUnit from a keyword-parameter dict."""
        return MaCowUnit(**params)
def shortest_path_length(length_by_edge, startnode, goalnode):
    """Dijkstra's algorithm over a graph given as an edge-length mapping.

    Args:
        length_by_edge: dict mapping ``(node, successor)`` tuples to lengths.
        startnode: Start node; nodes expose a ``successors`` iterable.
        goalnode: Target node, compared by identity.

    Returns:
        Length of the shortest path, or ``float('inf')`` when unreachable.
    """
    # Min-heap of (distance-so-far, node) candidates.
    unvisited_nodes = []
    heappush(unvisited_nodes, (0, startnode))
    visited_nodes = set()
    while (len(unvisited_nodes) > 0):
        (distance, node) = heappop(unvisited_nodes)
        # Identity check: nodes are assumed to be unique objects.
        if (node is goalnode):
            return distance
        visited_nodes.add(node)
        for nextnode in node.successors:
            if (nextnode in visited_nodes):
                continue
            # Relax the edge: keep the smaller of the currently queued distance
            # (inf when nextnode is not queued yet) and the new candidate.
            # NOTE(review): `get`/`insert_or_update` are helpers defined
            # elsewhere; presumably they read/replace a heap entry — confirm.
            insert_or_update(unvisited_nodes, (min((get(unvisited_nodes, nextnode) or float('inf')), (distance + length_by_edge[(node, nextnode)])), nextnode))
    return float('inf')
def interpolate_3d(vec1, vec2, n_points):
    """Return ``n_points`` evenly spaced vectors strictly between vec1 and vec2.

    The endpoints themselves are excluded; the result stacks the
    intermediate vectors into a single numpy array.
    """
    step = (vec2 - vec1) / (n_points + 1)
    interior = [vec1 + step * k for k in range(1, n_points + 1)]
    return np.array(interior)
def get_parser(desc, default_task='translation'):
    """Build the common fairseq argument parser.

    First pre-parses ``--user-dir`` with a throwaway parser so user modules
    can register tasks/architectures before the registries are read, then
    assembles the real parser with the shared options.

    Args:
        desc: Unused here; kept for interface compatibility with callers.
        default_task: Default value for ``--task``.

    Returns:
        The configured ``argparse.ArgumentParser``.
    """
    # Pre-parse only --user-dir so custom extensions get imported before
    # REGISTRIES/TASK_REGISTRY below are consulted.
    usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    usr_parser.add_argument('--user-dir', default=None)
    (usr_args, _) = usr_parser.parse_known_args()
    utils.import_user_module(usr_args)
    parser = argparse.ArgumentParser(allow_abbrev=False)
    parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')
    parser.add_argument('--log-interval', type=int, default=1000, metavar='N', help='log progress every N batches (when progress bar is disabled)')
    parser.add_argument('--log-format', default=None, help='log format to use', choices=['json', 'none', 'simple', 'tqdm'])
    parser.add_argument('--tensorboard-logdir', metavar='DIR', default='', help='path to save logs for tensorboard, should match --logdir of running tensorboard (default: no tensorboard logging)')
    parser.add_argument('--seed', default=1, type=int, metavar='N', help='pseudo random number generator seed')
    parser.add_argument('--cpu', action='store_true', help='use CPU instead of CUDA')
    parser.add_argument('--fp16', action='store_true', help='use FP16')
    parser.add_argument('--memory-efficient-fp16', action='store_true', help='use a memory-efficient version of FP16 training; implies --fp16')
    parser.add_argument('--fp16-no-flatten-grads', action='store_true', help="don't flatten FP16 grads tensor")
    parser.add_argument('--fp16-init-scale', default=(2 ** 7), type=int, help='default FP16 loss scale')
    parser.add_argument('--fp16-scale-window', type=int, help='number of updates before increasing loss scale')
    parser.add_argument('--fp16-scale-tolerance', default=0.0, type=float, help='pct of updates that can overflow before decreasing the loss scale')
    parser.add_argument('--min-loss-scale', default=0.0001, type=float, metavar='D', help='minimum FP16 loss scale, after which training is stopped')
    parser.add_argument('--threshold-loss-scale', type=float, help='threshold FP16 loss scale from below')
    # --user-dir is re-declared here so it shows up in --help of this parser.
    parser.add_argument('--user-dir', default=None, help='path to a python module containing custom extensions (tasks and/or architectures)')
    parser.add_argument('--empty-cache-freq', default=0, type=int, help='how often to clear the PyTorch CUDA cache (0 to disable)')
    parser.add_argument('--all-gather-list-size', default=16384, type=int, help='number of bytes reserved for gathering stats from workers')
    # One option per registered component registry (criterion, optimizer, ...).
    from fairseq.registry import REGISTRIES
    for (registry_name, REGISTRY) in REGISTRIES.items():
        parser.add_argument(('--' + registry_name.replace('_', '-')), default=REGISTRY['default'], choices=REGISTRY['registry'].keys())
    from fairseq.tasks import TASK_REGISTRY
    parser.add_argument('--task', metavar='TASK', default=default_task, choices=TASK_REGISTRY.keys(), help='task')
    return parser
class ExpansionNet_v2(CaptioningModel):
    """Encoder/decoder captioning model with expansion layers.

    Image features are linearly projected to ``d_model``, run through
    ``N_enc`` encoder layers, and decoded with ``N_dec`` decoder layers;
    the outputs of all encoder (resp. decoder) layers are concatenated and
    reduced back to ``d_model`` with a linear + layer-norm "reduce group".
    """

    def __init__(self, d_model, N_enc, N_dec, ff, num_heads, num_exp_enc_list, num_exp_dec, output_word2idx, output_idx2word, max_seq_len, drop_args, img_feature_dim=2048, rank=0):
        """Build the model.

        Args:
            d_model: Shared hidden size of encoder/decoder layers.
            N_enc / N_dec: Number of encoder / decoder layers.
            ff: Feed-forward size inside the layers.
            num_heads: Attention heads in the decoder layers.
            num_exp_enc_list: Per-encoder expansion counts; their sum sets the
                encoder positional index range.
            num_exp_dec: Decoder expansion count.
            output_word2idx / output_idx2word: Vocabulary mappings; the
                vocabulary size is ``len(output_word2idx)``.
            max_seq_len: Maximum caption length (positional embedding size).
            drop_args: Namespace of dropout rates (enc, dec, enc_input,
                dec_input, other).
            img_feature_dim: Incoming image-feature dimensionality.
            rank: Device/rank that index tensors are moved to.
        """
        super().__init__()
        self.output_word2idx = output_word2idx
        self.output_idx2word = output_idx2word
        self.max_seq_len = max_seq_len
        self.num_exp_dec = num_exp_dec
        self.num_exp_enc_list = num_exp_enc_list
        self.N_enc = N_enc
        self.N_dec = N_dec
        self.d_model = d_model
        self.encoders = nn.ModuleList([EncoderLayer(d_model, ff, num_exp_enc_list, drop_args.enc) for _ in range(N_enc)])
        self.decoders = nn.ModuleList([DecoderLayer(d_model, num_heads, ff, num_exp_dec, drop_args.dec) for _ in range(N_dec)])
        self.input_embedder_dropout = nn.Dropout(drop_args.enc_input)
        self.input_linear = torch.nn.Linear(img_feature_dim, d_model)
        self.vocab_linear = torch.nn.Linear(d_model, len(output_word2idx))
        self.log_softmax = nn.LogSoftmax(dim=(- 1))
        self.out_enc_dropout = nn.Dropout(drop_args.other)
        self.out_dec_dropout = nn.Dropout(drop_args.other)
        self.out_embedder = EmbeddingLayer(len(output_word2idx), d_model, drop_args.dec_input)
        self.pos_encoder = nn.Embedding(max_seq_len, d_model)
        # Reduce the concatenation of all layer outputs back to d_model.
        self.enc_reduce_group = nn.Linear((d_model * self.N_enc), d_model)
        self.enc_reduce_norm = nn.LayerNorm(d_model)
        self.dec_reduce_group = nn.Linear((d_model * self.N_dec), d_model)
        self.dec_reduce_norm = nn.LayerNorm(d_model)
        # Xavier init for all matrices (dim > 1); vectors keep their defaults.
        for p in self.parameters():
            if (p.dim() > 1):
                nn.init.xavier_uniform_(p)
        self.trained_steps = 0
        self.rank = rank

    def forward_enc(self, enc_input, enc_input_num_pads):
        """Encode projected image features through all encoder layers.

        Returns the normalized sum of the last layer output and the reduced
        concatenation of every layer's output.
        """
        x = self.input_embedder_dropout(self.input_linear(enc_input))
        sum_num_enc = sum(self.num_exp_enc_list)
        # Expansion indices, one row per batch element.
        pos_x = torch.arange(sum_num_enc).unsqueeze(0).expand(enc_input.size(0), sum_num_enc).to(self.rank)
        pad_mask = create_pad_mask(mask_size=(enc_input.size(0), sum_num_enc, enc_input.size(1)), pad_row=([0] * enc_input.size(0)), pad_column=enc_input_num_pads, rank=self.rank)
        x_list = []
        for i in range(self.N_enc):
            x = self.encoders[i](x=x, n_indexes=pos_x, mask=pad_mask)
            x_list.append(x)
        x_list = torch.cat(x_list, dim=(- 1))
        # Residual connection around the reduce group.
        x = (x + self.out_enc_dropout(self.enc_reduce_group(x_list)))
        x = self.enc_reduce_norm(x)
        return x

    def forward_dec(self, cross_input, enc_input_num_pads, dec_input, dec_input_num_pads, apply_log_softmax=False):
        """Decode against encoder output, producing per-token vocab scores.

        ``apply_log_softmax`` switches the output from raw logits to
        log-probabilities.
        """
        # Causal mask combined with target-side padding.
        no_peak_and_pad_mask = create_no_peak_and_pad_mask(mask_size=(dec_input.size(0), dec_input.size(1), dec_input.size(1)), num_pads=torch.tensor(dec_input_num_pads), rank=self.rank)
        # Source-side padding mask for cross attention.
        pad_mask = create_pad_mask(mask_size=(dec_input.size(0), dec_input.size(1), cross_input.size(1)), pad_row=torch.tensor(dec_input_num_pads), pad_column=torch.tensor(enc_input_num_pads), rank=self.rank)
        y = self.out_embedder(dec_input)
        pos_x = torch.arange(self.num_exp_dec).unsqueeze(0).expand(dec_input.size(0), self.num_exp_dec).to(self.rank)
        pos_y = torch.arange(dec_input.size(1)).unsqueeze(0).expand(dec_input.size(0), dec_input.size(1)).to(self.rank)
        # Learned positional embeddings are added to the token embeddings.
        y = (y + self.pos_encoder(pos_y))
        y_list = []
        for i in range(self.N_dec):
            y = self.decoders[i](x=y, n_indexes=pos_x, cross_connection_x=cross_input, input_attention_mask=no_peak_and_pad_mask, cross_attention_mask=pad_mask)
            y_list.append(y)
        y_list = torch.cat(y_list, dim=(- 1))
        # Residual connection around the reduce group, then project to vocab.
        y = (y + self.out_dec_dropout(self.dec_reduce_group(y_list)))
        y = self.dec_reduce_norm(y)
        y = self.vocab_linear(y)
        if apply_log_softmax:
            y = self.log_softmax(y)
        return y
class BatchSampler(BaseSampler):
    """Deprecated sampler that delegates to the global ``parallel_sampler``."""

    def __init__(self, algo, env):
        super().__init__(algo, env)
        warnings.warn(DeprecationWarning('BatchSampler is deprecated, and will be removed in the next release. Please use one of the samplers which implements garage.sampler.Sampler, such as LocalSampler.'))

    def start_worker(self):
        """Register the environment and policy with the sampler workers."""
        parallel_sampler.populate_task(self.env, self.algo.policy, scope=self.algo.scope)

    def shutdown_worker(self):
        """Tear down the sampler workers for this algorithm's scope."""
        parallel_sampler.terminate_task(scope=self.algo.scope)

    def obtain_samples(self, itr, batch_size=None, whole_paths=True):
        """Collect rollout paths with the current policy parameters.

        Falls back to ``algo.max_path_length`` when ``batch_size`` is falsy;
        when ``whole_paths`` is False, the paths are truncated to at most
        ``batch_size`` samples.
        """
        if (not batch_size):
            batch_size = self.algo.max_path_length
        cur_params = self.algo.policy.get_param_values()
        paths = parallel_sampler.sample_paths(policy_params=cur_params, max_samples=batch_size, max_path_length=self.algo.max_path_length, scope=self.algo.scope)
        return (paths if whole_paths else truncate_paths(paths, batch_size))
class Critic(nn.Module):
    """Twin Q-network critic over encoded observations concatenated with actions."""

    def __init__(self, encoder_cfg, action_shape, hidden_dim, hidden_depth):
        super().__init__()
        self.encoder = Encoder(**encoder_cfg)
        q_input_dim = self.encoder.feature_dim + action_shape[0]
        self.Q1 = utils.mlp(q_input_dim, hidden_dim, 1, hidden_depth)
        self.Q2 = utils.mlp(q_input_dim, hidden_dim, 1, hidden_depth)
        self.outputs = dict()
        self.apply(utils.weight_init)

    def forward(self, obs, action, detach_encoder=False):
        """Return the two Q-value estimates for (obs, action)."""
        assert obs.size(0) == action.size(0)
        features = self.encoder(obs, detach=detach_encoder)
        joint = torch.cat([features, action], dim=-1)
        q1 = self.Q1(joint)
        q2 = self.Q2(joint)
        # Cache latest outputs so `log` can emit histograms.
        self.outputs['q1'] = q1
        self.outputs['q2'] = q2
        return (q1, q2)

    def log(self, logger, step):
        """Log encoder stats, cached Q histograms, and linear-layer params."""
        self.encoder.log(logger, step)
        for name, value in self.outputs.items():
            logger.log_histogram(f'train_critic/{name}_hist', value, step)
        assert len(self.Q1) == len(self.Q2)
        for layer_idx, (m1, m2) in enumerate(zip(self.Q1, self.Q2)):
            assert type(m1) == type(m2)
            if type(m1) is nn.Linear:
                logger.log_param(f'train_critic/q1_fc{layer_idx}', m1, step)
                logger.log_param(f'train_critic/q2_fc{layer_idx}', m2, step)
class InceptionV3(nn.Module):
    """Feature extractor built from the stages of a pretrained Inception-v3."""

    def __init__(self):
        super().__init__()
        net = models.inception_v3(pretrained=True)
        # Stem convolutions up to the first max-pool.
        self.block1 = nn.Sequential(
            net.Conv2d_1a_3x3,
            net.Conv2d_2a_3x3,
            net.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.block2 = nn.Sequential(
            net.Conv2d_3b_1x1,
            net.Conv2d_4a_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.block3 = nn.Sequential(
            net.Mixed_5b, net.Mixed_5c, net.Mixed_5d,
            net.Mixed_6a, net.Mixed_6b, net.Mixed_6c,
            net.Mixed_6d, net.Mixed_6e,
        )
        # Final mixed blocks followed by global average pooling to 1x1.
        self.block4 = nn.Sequential(
            net.Mixed_7a, net.Mixed_7b, net.Mixed_7c,
            nn.AdaptiveAvgPool2d(output_size=(1, 1)),
        )

    def forward(self, x):
        """Run all four stages and flatten to (batch, features)."""
        features = x
        for stage in (self.block1, self.block2, self.block3, self.block4):
            features = stage(features)
        return features.view(features.size(0), -1)
class McLoader(object):
    """Image loader backed by a memcached client.

    Fetches raw image bytes for a file name from memcached and decodes them
    with ``pil_loader``; returns ``None`` when the read or decode fails.
    """

    def __init__(self, mclient_path):
        """Create the shared memcached client from config files under *mclient_path*."""
        assert (mclient_path is not None), "Please specify 'data_mclient_path' in the config."
        self.mclient_path = mclient_path
        server_list_config_file = '{}/server_list.conf'.format(self.mclient_path)
        client_config_file = '{}/client.conf'.format(self.mclient_path)
        self.mclient = mc.MemcachedClient.GetInstance(server_list_config_file, client_config_file)

    def __call__(self, fn):
        """Return the decoded PIL image for key *fn*, or None on failure."""
        try:
            img_value = mc.pyvector()
            self.mclient.Get(fn, img_value)
            img_value_str = mc.ConvertBuffer(img_value)
            img = pil_loader(img_value_str)
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; failures stay best-effort (None).
        except Exception:
            print('Read image failed ({})'.format(fn))
            return None
        else:
            return img
def test_clone():
    """sklearn.base.clone must preserve estimator hyper-parameters."""
    from sklearn.base import clone
    for estimator in (sgd.FMRegression(), sgd.FMClassification()):
        cloned = clone(estimator)
        assert estimator.get_params() == cloned.get_params()
class AdapterOutput():
    """Container for the intermediate outputs of an adapter module.

    NOTE(review): all members are annotated class attributes defaulting to
    None, which is the usual shape of a ``@dataclass`` — the decorator may
    have been lost in extraction; confirm against the original source.
    """
    # Outputs of the adapter's up/down projection samplers.
    up: SamplerOutput = None
    down: SamplerOutput = None
    # Layer-norm outputs before/after the adapter transformation.
    pre_norm: LayerNormOutput = None
    post_norm: LayerNormOutput = None
class Instances():
    """Per-image container of equal-length instance fields (boxes, scores, ...).

    Fields are stored in ``_fields`` and exposed as attributes; every field
    must have the same length (the number of instances).  Indexing with an
    int, slice, or boolean tensor selects instances across all fields.

    Fixed: ``image_size`` is restored as a ``@property`` (``cat`` below reads
    it as an attribute, and ``__str__`` indexes ``self._image_size``) and
    ``cat`` as a ``@staticmethod`` (its signature takes no ``self``); the
    decorators appear to have been lost in extraction.
    """

    def __init__(self, image_size: Tuple[(int, int)], **kwargs: Any):
        """Create an Instances for an image of ``image_size`` with optional fields."""
        self._image_size = image_size
        self._fields: Dict[(str, Any)] = {}
        for (k, v) in kwargs.items():
            self.set(k, v)

    @property
    def image_size(self) -> Tuple[(int, int)]:
        """The image size this set of instances belongs to."""
        return self._image_size

    def __setattr__(self, name: str, val: Any) -> None:
        # Private names go to the instance dict; everything else is a field.
        if name.startswith('_'):
            super().__setattr__(name, val)
        else:
            self.set(name, val)

    def __getattr__(self, name: str) -> Any:
        # The '_fields' guard prevents infinite recursion before __init__ runs.
        if ((name == '_fields') or (name not in self._fields)):
            raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
        return self._fields[name]

    def set(self, name: str, value: Any) -> None:
        """Set field ``name``; its length must match any existing fields."""
        # Suppress warnings some containers may raise during len().
        with warnings.catch_warnings(record=True):
            data_len = len(value)
        if len(self._fields):
            assert (len(self) == data_len), 'Adding a field of length {} to a Instances of length {}'.format(data_len, len(self))
        self._fields[name] = value

    def has(self, name: str) -> bool:
        """Return whether field ``name`` exists."""
        return (name in self._fields)

    def remove(self, name: str) -> None:
        """Delete field ``name``."""
        del self._fields[name]

    def get(self, name: str) -> Any:
        """Return field ``name``."""
        return self._fields[name]

    def get_fields(self) -> Dict[(str, Any)]:
        """Return the underlying name -> value field dict."""
        return self._fields

    def to(self, *args: Any, **kwargs: Any) -> 'Instances':
        """Return a new Instances with every field moved via its ``.to`` (if any)."""
        ret = Instances(self._image_size)
        for (k, v) in self._fields.items():
            if hasattr(v, 'to'):
                v = v.to(*args, **kwargs)
            ret.set(k, v)
        return ret

    def __getitem__(self, item: Union[(int, slice, torch.BoolTensor)]) -> 'Instances':
        """Index/slice/mask every field; the 'boxes_pre' field is skipped."""
        if (type(item) == int):
            if ((item >= len(self)) or (item < (- len(self)))):
                raise IndexError('Instances index out of range!')
            else:
                # Normalize an int index to a length-1 slice.
                item = slice(item, None, len(self))
        ret = Instances(self._image_size)
        for (k, v) in self._fields.items():
            if (k == 'boxes_pre'):
                pass
            else:
                ret.set(k, v[item])
        return ret

    def __len__(self) -> int:
        # All fields share one length; use the first available.
        for v in self._fields.values():
            return v.__len__()
        raise NotImplementedError('Empty Instances does not support __len__!')

    def __iter__(self):
        raise NotImplementedError('`Instances` object is not iterable!')

    @staticmethod
    def cat(instance_lists: List['Instances']) -> 'Instances':
        """Concatenate multiple Instances with the same image size, field-wise."""
        assert all((isinstance(i, Instances) for i in instance_lists))
        assert (len(instance_lists) > 0)
        if (len(instance_lists) == 1):
            return instance_lists[0]
        image_size = instance_lists[0].image_size
        if (not isinstance(image_size, torch.Tensor)):
            for i in instance_lists[1:]:
                assert (i.image_size == image_size)
        ret = Instances(image_size)
        for k in instance_lists[0]._fields.keys():
            values = [i.get(k) for i in instance_lists]
            v0 = values[0]
            if isinstance(v0, torch.Tensor):
                values = torch.cat(values, dim=0)
            elif isinstance(v0, list):
                values = list(itertools.chain(*values))
            elif hasattr(type(v0), 'cat'):
                # Project types (e.g. Boxes) provide their own `cat`.
                values = type(v0).cat(values)
            else:
                raise ValueError('Unsupported type {} for concatenation'.format(type(v0)))
            ret.set(k, values)
        return ret

    def __str__(self) -> str:
        s = (self.__class__.__name__ + '(')
        s += 'num_instances={}, '.format(len(self))
        s += 'image_height={}, '.format(self._image_size[0])
        s += 'image_width={}, '.format(self._image_size[1])
        s += 'fields=[{}])'.format(', '.join((f'{k}: {v}' for (k, v) in self._fields.items())))
        return s

    __repr__ = __str__
def twomassPath(dr='tgas'):
    """Return the local path of the TGAS x 2MASS cross-match FITS file.

    NOTE(review): the ``dr`` argument is currently unused — every value
    yields the same tgas path; confirm whether other data releases are
    meant to be supported.
    """
    return os.path.join(_GAIA_TOOLS_DATA, 'Gaia', 'gdr1', 'dstn_match', 'tgas-matched-2mass.fits.gz')
def is_module_wrapper(module: nn.Module) -> bool:
    """Return True if ``module`` is an instance of a registered module wrapper.

    Wrapper classes are collected from the ``MODULE_WRAPPERS`` registry and,
    recursively, from its child registries.
    """
    def is_module_in_wrapper(module, module_wrapper):
        # Wrapper classes registered directly in this registry.
        module_wrappers = tuple(module_wrapper.module_dict.values())
        if isinstance(module, module_wrappers):
            return True
        # Recurse into child (scoped) registries.
        for child in module_wrapper.children.values():
            if is_module_in_wrapper(module, child):
                return True
        # Fixed: without this explicit return the function yielded None for
        # non-wrapper modules, violating the declared `-> bool` contract.
        return False
    return is_module_in_wrapper(module, MODULE_WRAPPERS)
# NOTE(review): the bare `_tf` line below looks like a decorator whose '@'
# prefix (e.g. `@require_tf`) was lost in extraction — confirm against the
# original source.
_tf
class UtilsFunctionsTest(unittest.TestCase):
    """Tests for TF generation utilities."""

    def test_top_k_top_p_filtering(self):
        """Combined top-k/top-p filtering must keep exactly the expected logits.

        Checks both which positions survive (everything else becomes -inf)
        and the surviving values themselves.
        """
        logits = tf.convert_to_tensor([[8.2220991, (- 0.5620044), 5., 4.0386393, (- 6.8798378), (- 0.), (- 3.2012153), 2., 1., 7., 8., (- 9.), (- 5.), (- 1.), (- 7.1115294), (- 0.8369633), (- 5.3186408), 7., 0., (- 0.), (- 5.9179796), 0., (- 6.), 4., (- 0.), 7., 9., 2., (- 9.), 2.], [0., 4., (- 5.), (- 6.), (- 7.), (- 4.), 1., (- 6.), 1., (- 9.643119), 0., 0., (- 8.), 6., 2., 4., 4., 8.8275313, 5., (- 4.4735794), 7., (- 2.), 2., (- 2.5674762), (- 9.), (- 4.), (- 1.), 9., (- 5.), 1.]], dtype=tf.float32)
        # (row, column) indices expected to remain finite after filtering.
        non_inf_expected_idx = tf.convert_to_tensor([[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.int32)
        # The surviving logit values, in index order.
        non_inf_expected_output = tf.convert_to_tensor([8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023], dtype=tf.float32)
        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[(output != (- float('inf')))]
        non_inf_idx = tf.cast(tf.where(tf.not_equal(output, tf.constant((- float('inf')), dtype=tf.float32))), dtype=tf.int32)
        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
class APCModel(nn.Module):
    """Autoregressive Predictive Coding model over mel spectrogram frames.

    Optionally applies a Prenet, then a stack of GRU layers (with dropout
    between layers and optional residual connections), and finally a Postnet
    that maps hidden states back to mel dimensionality.
    """

    def __init__(self, mel_dim, prenet_config, rnn_config):
        """Build the model.

        Args:
            mel_dim: Input/output mel feature dimensionality.
            prenet_config: Optional prenet config; when given, its input must
                match ``mel_dim`` and its hidden size the RNN input size.
            rnn_config: RNN config providing input_size, hidden_size,
                num_layers, dropout and the ``residual`` flag.
        """
        super(APCModel, self).__init__()
        self.mel_dim = mel_dim
        if (prenet_config is not None):
            # Prenet output feeds the RNN, so the sizes must line up.
            assert (prenet_config.input_size == mel_dim)
            assert (prenet_config.hidden_size == rnn_config.input_size)
            assert (rnn_config.input_size == rnn_config.hidden_size)
            self.prenet = Prenet(input_size=prenet_config.input_size, num_layers=prenet_config.num_layers, hidden_size=prenet_config.hidden_size, dropout=prenet_config.dropout)
        else:
            # Without a prenet the raw mel frames feed the RNN directly.
            assert (rnn_config.input_size == mel_dim)
            self.prenet = None
        # First layer consumes input_size; subsequent layers hidden_size.
        in_sizes = ([rnn_config.input_size] + ([rnn_config.hidden_size] * (rnn_config.num_layers - 1)))
        out_sizes = ([rnn_config.hidden_size] * rnn_config.num_layers)
        self.rnns = nn.ModuleList([nn.GRU(input_size=in_size, hidden_size=out_size, batch_first=True) for (in_size, out_size) in zip(in_sizes, out_sizes)])
        self.rnn_dropout = nn.Dropout(rnn_config.dropout)
        self.rnn_residual = rnn_config.residual
        self.postnet = Postnet(input_size=rnn_config.hidden_size, output_size=self.mel_dim)

    def forward(self, inputs, lengths):
        """Run the stack over padded sequences.

        Args:
            inputs: (batch, seq_len, mel_dim) padded mel frames.
            lengths: Per-sequence valid lengths for packing.

        Returns:
            Tuple of the Postnet's predicted mel frames and a stacked tensor
            of each layer's (and, with a prenet, the prenet's) outputs.
        """
        seq_len = inputs.size(1)
        if (self.prenet is not None):
            rnn_inputs = self.prenet(inputs)
            internal_reps = [rnn_inputs]
        else:
            rnn_inputs = inputs
            internal_reps = []
        packed_rnn_inputs = pack_padded_sequence(rnn_inputs, lengths, True)
        for (i, layer) in enumerate(self.rnns):
            (packed_rnn_outputs, _) = layer(packed_rnn_inputs)
            # Unpack to apply dropout / residual in padded form.
            (rnn_outputs, _) = pad_packed_sequence(packed_rnn_outputs, True, total_length=seq_len)
            if ((i + 1) < len(self.rnns)):
                # Inter-layer dropout only (not after the last layer).
                rnn_outputs = self.rnn_dropout(rnn_outputs)
            (rnn_inputs, _) = pad_packed_sequence(packed_rnn_inputs, True, total_length=seq_len)
            # Residual connection only when the feature sizes match.
            if (self.rnn_residual and (rnn_inputs.size((- 1)) == rnn_outputs.size((- 1)))):
                rnn_outputs = (rnn_outputs + rnn_inputs)
            internal_reps.append(rnn_outputs)
            # Re-pack for the next layer.
            packed_rnn_inputs = pack_padded_sequence(rnn_outputs, lengths, True)
        predicted_mel = self.postnet(rnn_outputs)
        internal_reps = torch.stack(internal_reps)
        return (predicted_mel, internal_reps)
class _ParseType():
def __name__(self) -> str:
name = self.__class__.__name__
assert isinstance(name, str)
return name
def __str__(self) -> str:
return self.__name__ |
def tag_json_files(json_file):
    """Annotate each sentence dict in ``json_file`` with a CoNLL-U string.

    Runs the module-level ``nlp`` pipeline on ``sentence['text']`` and
    serializes one token per line into ``sentence['conllu']`` (mutates the
    dicts in place).

    NOTE(review): ``iter_tokens()`` suggests a stanza document while the
    ``.lemma_``/``.pos_``/``.tag_``/``.dep_``/``head.i`` attributes are
    spaCy-style — confirm which backend ``nlp`` actually is.
    """
    for sentence in json_file:
        tagged_sent = nlp(sentence['text'])
        conllu = ''
        for (i, token) in enumerate(tagged_sent.iter_tokens()):
            head = token.head
            # Columns: ID FORM LEMMA UPOS XPOS _ HEAD DEPREL _ _
            conllu += '{}\t{}\t{}\t{}\t{}\t_\t{}\t{}\t_\t_\n'.format((i + 1), token, token.lemma_, token.pos_, token.tag_, head.i, token.dep_)
        sentence['conllu'] = conllu
# NOTE(review): the bare `_model` line below looks like a decorator whose '@'
# prefix (e.g. a model-registration decorator) was lost in extraction —
# confirm against the original source.
_model
def SoT_Base(pretrained=False, **kwargs):
    """Build the SoT-Base model variant.

    NOTE(review): this mutates the shared module-level ViTConfig and
    representationConfig dicts in place before construction, so the settings
    leak to any later reader of those globals — confirm this is intended.
    """
    ViTConfig['embed_dim'] = 528
    ViTConfig['depth'] = 24
    ViTConfig['num_heads'] = 8
    ViTConfig['mlp_ratio'] = 3
    representationConfig['args']['dim'] = 528
    representationConfig['args']['num_heads'] = 6
    representationConfig['args']['wr_dim'] = 38
    representationConfig['args']['normalization']['input_dim'] = 38
    representationConfig['args']['normalization']['regular'] = nn.Dropout(0.7)
    if pretrained:
        # Only set qk_scale when the caller did not provide one.
        kwargs.setdefault('qk_scale', (256 ** (- 0.5)))
    model = SoT(visualTokenConfig=visualTokenConfig, ViTConfig=ViTConfig, representationConfig=representationConfig, **kwargs)
    if pretrained:
        load_pretrained(model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
    return model
def using_backend(test_backend):
    """Check whether the active compute backend matches ``test_backend``.

    ``test_backend`` may be a backend name (string, compared against
    ``backend.BACKEND_NAME``) or a backend class (checked via isinstance).
    """
    require_set_backend()
    if not isinstance(test_backend, str):
        return isinstance(backend, test_backend)
    return backend.BACKEND_NAME == test_backend
def test_constructor_path(waveform):
sound = waveform.waveform
assert isinstance(sound, np.ndarray) |
def set_random_seed(seed):
    """Seed all RNGs used in training for reproducibility.

    Covers Python's ``random``, numpy, torch (CPU), and — via the ``mpu``
    helper — the model-parallel CUDA seeds.
    """
    random.seed(seed)
    numpy.random.seed(seed)
    torch.manual_seed(seed)
    # Model-parallel helper derives the per-partition CUDA seeds.
    mpu.model_parallel_cuda_manual_seed(seed)
class ISeg2017SemiInterface(MedicalDatasetSemiInterface):
    """Semi-supervised interface for the iSeg2017 infant brain MRI dataset."""

    def __init__(self, root_dir=DATA_PATH, labeled_data_ratio: float=0.2, unlabeled_data_ratio: float=0.8, seed: int=0, verbose: bool=True) -> None:
        super().__init__(ISeg2017Dataset, root_dir, labeled_data_ratio, unlabeled_data_ratio, seed, verbose)

    def _create_semi_supervised_datasets(self, labeled_transform: SequentialWrapper=None, unlabeled_transform: SequentialWrapper=None, val_transform: SequentialWrapper=None) -> Tuple[(MedicalImageSegmentationDataset, MedicalImageSegmentationDataset, MedicalImageSegmentationDataset)]:
        """Split the train set into labeled/unlabeled subsets and build val.

        The split is patient-wise (via ``get_group_list``) and seeded with
        ``self.seed`` for reproducibility.  Returns
        ``(labeled_set, unlabeled_set, val_set)``.
        """
        train_set = self.DataClass(root_dir=self.root_dir, mode='train', subfolders=['T1', 'T2', 'Labels'], transforms=None, verbose=self.verbose)
        val_set = self.DataClass(root_dir=self.root_dir, mode='val', subfolders=['T1', 'T2', 'Labels'], transforms=None, verbose=self.verbose)
        if (self.labeled_ratio == 1):
            # Degenerate case: the full train set plays both roles.
            labeled_set = dcopy(train_set)
            unlabeled_set = dcopy(train_set)
            print('labeled_ratio==1, return train_set as both the labeled and unlabeled datasets.')
        else:
            # Patient-level split so no patient appears in both subsets.
            (labeled_patients, unlabeled_patients) = train_test_split(train_set.get_group_list(), test_size=self.unlabeled_ratio, train_size=self.labeled_ratio, random_state=self.seed)
            labeled_set = SubMedicalDatasetBasedOnIndex(train_set, labeled_patients)
            unlabeled_set = SubMedicalDatasetBasedOnIndex(train_set, unlabeled_patients)
            assert ((len(labeled_set) + len(unlabeled_set)) == len(train_set)), 'wrong on labeled/unlabeled split.'
            del train_set
        if self.verbose:
            print(f'labeled_dataset:{labeled_set.get_group_list().__len__()} Patients')
            print(f'unlabeled_dataset:{unlabeled_set.get_group_list().__len__()} Patients')
        # Transforms are optional and applied per split.
        if labeled_transform:
            labeled_set.set_transform(labeled_transform)
        if unlabeled_transform:
            unlabeled_set.set_transform(unlabeled_transform)
        if val_transform:
            val_set.set_transform(val_transform)
        return (labeled_set, unlabeled_set, val_set)
def _update_avg_gradients(avg_gradients, gradients, step):
if (avg_gradients is None):
avg_gradients = [np.zeros_like(gradient) for gradient in gradients]
for i in range(len(gradients)):
avg_gradients[i] = ((avg_gradients[i] * (1.0 - (1.0 / (step + 1)))) + (gradients[i] / (step + 1)))
return avg_gradients |
def train(config):
    """Train the variational dual-direction translation model (TF1 graph mode).

    Flow: build the model graph; restore the latest checkpoint if one exists,
    otherwise initialize the four embedding tables from pretrained word
    vectors; then loop over bucketed batches training the two autoencoding
    directions (aa/bb) on locally-shuffled inputs and the two cross
    directions (ab/ba) on each other's generated samples. Periodically logs
    metrics and saves checkpoints.
    """
    logger = logging.getLogger('')
    du = DataUtil(config=config)
    # NOTE(review): dst_vocab_size is taken from config.src_vocab_size_b —
    # presumably intentional for the dual (a<->b) setup; confirm against
    # DataUtil.load_vocab.
    du.load_vocab(src_vocab=config.src_vocab, dst_vocab=config.dst_vocab, src_vocab_size=config.src_vocab_size_a, dst_vocab_size=config.src_vocab_size_b)
    model = Model(config=config)
    model.build_variational_train_model()
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess_config.allow_soft_placement = True
    with model.graph.as_default():
        saver = tf.train.Saver(var_list=tf.global_variables())
        summary_writer = tf.summary.FileWriter(config.train.logdir, graph=model.graph)
        with tf.Session(config=sess_config) as sess:
            sess.run(tf.global_variables_initializer())
            reload_pretrain_embedding = False
            try:
                saver.restore(sess, tf.train.latest_checkpoint(config.train.logdir))
            # Bare except is used as control flow: restore fails on a fresh
            # logdir, in which case we fall back to pretrained embeddings.
            except:
                logger.info('Failed to reload model.')
                reload_pretrain_embedding = True
            if reload_pretrain_embedding:
                logger.info('reload the pretrained embeddings for the encoders')
                src_pretrained_embedding = {}
                dst_pretrained_embedding = {}
                try:
                    for l in codecs.open(config.train.src_pretrain_wordemb_path, 'r', 'utf-8'):
                        word_emb = l.strip().split()
                        # Keep only lines with exactly hidden_units floats after the word.
                        if (len(word_emb) == (config.hidden_units + 1)):
                            # NOTE(review): np.array(map(...)) only produces a
                            # numeric array on Python 2; on Python 3 it wraps
                            # the map iterator in a 0-d object array — confirm
                            # the target runtime (use list(map(...)) on Py3).
                            (word, emb) = (word_emb[0], np.array(map(float, word_emb[1:])))
                            src_pretrained_embedding[word] = emb
                    for l in codecs.open(config.train.dst_pretrain_wordemb_path, 'r', 'utf-8'):
                        word_emb = l.strip().split()
                        if (len(word_emb) == (config.hidden_units + 1)):
                            (word, emb) = (word_emb[0], np.array(map(float, word_emb[1:])))
                            dst_pretrained_embedding[word] = emb
                    logger.info('reload the word embedding done')
                    tf.get_variable_scope().reuse_variables()
                    # Fetch the four embedding kernels by variable name.
                    src_embed_a = tf.get_variable('enc_aembedding/src_embedding/kernel')
                    src_embed_b = tf.get_variable('enc_bembedding/src_embedding/kernel')
                    dst_embed_a = tf.get_variable('dec_aembedding/dst_embedding/kernel')
                    dst_embed_b = tf.get_variable('dec_bembedding/dst_embedding/kernel')
                    count_a = 0
                    src_value_a = sess.run(src_embed_a)
                    dst_value_a = sess.run(dst_embed_a)
                    # Overwrite rows for vocabulary words present in the pretrained table.
                    for word in src_pretrained_embedding:
                        if (word in du.src2idx):
                            id = du.src2idx[word]
                            src_value_a[id] = src_pretrained_embedding[word]
                            dst_value_a[id] = src_pretrained_embedding[word]
                            count_a += 1
                    sess.run(src_embed_a.assign(src_value_a))
                    sess.run(dst_embed_a.assign(dst_value_a))
                    count_b = 0
                    src_value_b = sess.run(src_embed_b)
                    dst_value_b = sess.run(dst_embed_b)
                    for word in dst_pretrained_embedding:
                        if (word in du.dst2idx):
                            id = du.dst2idx[word]
                            src_value_b[id] = dst_pretrained_embedding[word]
                            dst_value_b[id] = dst_pretrained_embedding[word]
                            count_b += 1
                    sess.run(src_embed_b.assign(src_value_b))
                    sess.run(dst_embed_b.assign(dst_value_b))
                    logger.info(('restore %d src_embedding and %d dst_embedding done' % (count_a, count_b)))
                # Best-effort: missing/corrupt embedding files leave the random init.
                except:
                    logger.info('Failed to load the pretriaed embeddings')
            for epoch in range(1, (config.train.num_epochs + 1)):
                for batch in du.get_training_batches_with_buckets():
                    def get_shuffle_k_indices(length, shuffle_k):
                        # Permute positions only within windows of size
                        # shuffle_k (plus a random-length leading window), so
                        # tokens move at most ~shuffle_k positions: a noisy
                        # input for the denoising autoencoder objective.
                        shuffle_k_indices = []
                        rand_start = np.random.randint(shuffle_k)
                        indices_list_start = list(np.random.permutation(np.arange(0, rand_start)))
                        shuffle_k_indices.extend(indices_list_start)
                        for i in range(rand_start, length, shuffle_k):
                            if ((i + shuffle_k) > length):
                                indices_list_i = list(np.random.permutation(np.arange(i, length)))
                            else:
                                indices_list_i = list(np.random.permutation(np.arange(i, (i + shuffle_k))))
                            shuffle_k_indices.extend(indices_list_i)
                        return np.array(shuffle_k_indices)
                    batch_shuffle = []
                    shuffle_0_indices = get_shuffle_k_indices(batch[0].shape[1], config.train.shuffle_k)
                    shuffle_1_indices = get_shuffle_k_indices(batch[1].shape[1], config.train.shuffle_k)
                    # Transpose so indexing permutes the time axis, then restore layout.
                    batch_shuffle.append(batch[0].transpose()[shuffle_0_indices].transpose())
                    batch_shuffle.append(batch[1].transpose()[shuffle_1_indices].transpose())
                    start_time = time.time()
                    step = sess.run(model.global_step)
                    # Autoencoding steps: reconstruct clean batch from shuffled input.
                    (step, lr, gnorm_aa, loss_aa, acc_aa, _) = sess.run([model.global_step, model.learning_rate, model.grads_norm_aa, model.loss_aa, model.acc_aa, model.train_op_aa], feed_dict={model.src_a_pl: batch_shuffle[0], model.dst_a_pl: batch[0]})
                    (step, lr, gnorm_bb, loss_bb, acc_bb, _) = sess.run([model.global_step, model.learning_rate, model.grads_norm_bb, model.loss_bb, model.acc_bb, model.train_op_bb], feed_dict={model.src_b_pl: batch_shuffle[1], model.dst_b_pl: batch[1]})
                    # Back-translation steps: train each direction on the other's samples.
                    (generate_ab, generate_ba) = sess.run([model.generate_ab, model.generate_ba], feed_dict={model.src_a_pl: batch[0], model.src_b_pl: batch[1]})
                    (generate_ab_dealed, _) = deal_generated_samples(generate_ab, du.dst2idx)
                    (generate_ba_dealed, _) = deal_generated_samples(generate_ba, du.src2idx)
                    (step, acc_ab, loss_ab, _) = sess.run([model.global_step, model.acc_ab, model.loss_ab, model.train_op_ab], feed_dict={model.src_a_pl: generate_ba_dealed, model.dst_b_pl: batch[1]})
                    (step, acc_ba, loss_ba, _) = sess.run([model.global_step, model.acc_ba, model.loss_ba, model.train_op_ba], feed_dict={model.src_b_pl: generate_ab_dealed, model.dst_a_pl: batch[0]})
                    if ((step % config.train.disp_freq) == 0):
                        logger.info('epoch: {0}\tstep: {1}\tlr: {2:.6f}\tgnorm: {3:.4f}\tloss: {4:.4f}\tacc: {5:.4f}\tcross_loss: {6:.4f}\tcross_acc: {7:.4f}\ttime: {8:.4f}'.format(epoch, step, lr, gnorm_aa, loss_aa, acc_aa, loss_ab, acc_ab, (time.time() - start_time)))
                    if ((step % config.train.save_freq) == 0):
                        mp = (config.train.logdir + ('/model_epoch_%d_step_%d' % (epoch, step)))
                        saver.save(sess, mp)
                        logger.info(('Save model in %s.' % mp))
            logger.info('Finish training.')
def get_parser(**parser_kwargs):
    """Build the command-line parser for training/fine-tuning runs.

    Three options are mandatory: --actual_resume, --data_root and
    --reg_data_root. Boolean flags accept yes/no-style strings via str2bool.
    """
    def str2bool(value):
        # Accept real bools (argparse defaults) and common textual spellings.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('yes', 'true', 't', 'y', '1'):
            return True
        if lowered in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')

    parser = argparse.ArgumentParser(**parser_kwargs)
    parser.add_argument('-n', '--name', nargs='?', type=str, const=True, default='', help='postfix for logdir')
    parser.add_argument('-r', '--resume', nargs='?', type=str, const=True, default='', help='resume from logdir or checkpoint in logdir')
    parser.add_argument('-b', '--base', metavar='base_config.yaml', nargs='*', default=list(), help='paths to base configs. Loaded from left-to-right. Parameters can be overwritten or added with command-line options of the form `--key value`.')
    parser.add_argument('-t', '--train', nargs='?', type=str2bool, const=True, default=False, help='train')
    parser.add_argument('--no-test', nargs='?', type=str2bool, const=True, default=False, help='disable test')
    parser.add_argument('-p', '--project', help='name of new or path to existing project')
    parser.add_argument('-d', '--debug', nargs='?', type=str2bool, const=True, default=False, help='enable post-mortem debugging')
    parser.add_argument('-s', '--seed', type=int, default=23, help='seed for seed_everything')
    parser.add_argument('-f', '--postfix', type=str, default='', help='post-postfix for default name')
    parser.add_argument('-l', '--logdir', type=str, default='logs', help='directory for logging dat shit')
    parser.add_argument('--scale_lr', nargs='?', type=str2bool, const=False, default=False, help='scale base-lr by ngpu * batch_size * n_accumulate')
    parser.add_argument('--datadir_in_name', nargs='?', type=str2bool, const=True, default=True, help='Prepend the final directory in the data_root to the output directory name')
    # Paths that every run must supply explicitly.
    parser.add_argument('--actual_resume', required=True, type=str, help='Path to model to actually resume from')
    parser.add_argument('--data_root', required=True, type=str, help='Path to directory with training images')
    parser.add_argument('--reg_data_root', required=True, type=str, help='Path to directory with regularization images')
    parser.add_argument('--embedding_manager_ckpt', type=str, default='', help='Initialize embedding manager from a checkpoint')
    parser.add_argument('--class_word', type=str, default='dog', help='Placeholder token which will be used to denote the concept in future prompts')
    parser.add_argument('--init_word', type=str, help='Word to use as source for initial token embedding')
    return parser
def mkdir_p(dirname):
    """Create ``dirname`` (with parents) like ``mkdir -p``; no-op if it exists.

    An empty string means "current directory" and is ignored. ``None`` is a
    programming error.
    """
    assert (dirname is not None)
    if dirname == '':
        return
    try:
        # exist_ok=True is the race-free replacement for the old
        # isdir() pre-check + errno.EEXIST dance.
        os.makedirs(dirname, exist_ok=True)
    except FileExistsError:
        # Path exists but is not a directory; the original code swallowed
        # EEXIST too, so keep the best-effort behavior.
        pass
class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE TSV layout)."""

    def get_train_examples(self, data_dir):
        """Examples from train.tsv."""
        rows = self._read_tsv(os.path.join(data_dir, 'train.tsv'))
        return self._create_examples(rows, 'train')

    def get_dev_examples(self, data_dir):
        """Examples from the matched dev split."""
        rows = self._read_tsv(os.path.join(data_dir, 'dev_matched.tsv'))
        return self._create_examples(rows, 'dev_matched')

    def get_test_examples(self, data_dir, matched=True):
        """Examples from the matched test split.

        ``matched`` is accepted for interface compatibility but not used.
        """
        rows = self._read_tsv(os.path.join(data_dir, 'test_matched.tsv'))
        return self._create_examples(rows, 'test')

    def get_labels(self):
        """The three NLI labels."""
        return ['contradiction', 'entailment', 'neutral']

    def _create_examples(self, lines, set_type):
        """Turn TSV rows into InputExamples; test rows get a placeholder label."""
        examples = []
        for row_idx, row in enumerate(lines):
            if row_idx == 0:
                # first row is the TSV header
                continue
            guid = ('%s-%s' % (set_type, row[0]))
            if set_type != 'test':
                label = row[-1]
            else:
                label = self.get_labels()[0]
            examples.append(InputExample(guid=guid, text_a=row[8], text_b=row[9], label=label))
        return examples
class DIV2K(srdata.SRData):
    """DIV2K high-resolution dataset wrapper for super-resolution training."""

    def __init__(self, args, train=True):
        super(DIV2K, self).__init__(args, train)
        # Repeat the (short) train list so one "epoch" spans test_every batches.
        self.repeat = args.test_every // (args.n_train // args.batch_size)

    def _scan(self):
        """Return the HR image paths for the current split (1-based filenames)."""
        if self.train:
            first, last = 0, self.args.n_train
        else:
            first, last = self.args.n_train, self.args.offset_val + self.args.n_val
        return [os.path.join(self.dir_hr, '{:0>4}'.format(idx) + self.ext)
                for idx in range(first + 1, last + 1)]

    def _set_filesystem(self, dir_data):
        """Record dataset root, HR directory and image extension."""
        self.apath = dir_data + '/DIV2K'
        self.dir_hr = os.path.join(self.apath, 'DIV2K_train_Color_HQ')
        self.ext = '.png'

    def _name_hrbin(self):
        """Path of the cached binary HR file for this split."""
        return os.path.join(self.apath, 'bin', '{}_bin_HR.npy'.format(self.split))

    def __len__(self):
        # Training length is inflated by `repeat`; evaluation uses true length.
        if self.train:
            return len(self.images_hr) * self.repeat
        return len(self.images_hr)

    def _get_index(self, idx):
        # Map an inflated training index back onto the real image list.
        return idx % len(self.images_hr) if self.train else idx
class TestHerReplayBuffer():
    """Unit tests for HERReplayBuffer built on a dummy dict-observation env."""

    def setup_method(self):
        # Fresh env and buffer per test; replay_k is the number of HER goals
        # relabeled per real transition.
        self.env = GarageEnv(DummyDictEnv())
        self.obs = self.env.reset()
        self._replay_k = 4
        self.replay_buffer = HERReplayBuffer(env_spec=self.env.spec, capacity_in_transitions=10, replay_k=self._replay_k, reward_fn=self.env.compute_reward)

    def test_replay_k(self):
        # replay_k=0 must be accepted; a non-integer replay_k must raise.
        self.replay_buffer = HERReplayBuffer(env_spec=self.env.spec, capacity_in_transitions=10, replay_k=0, reward_fn=self.env.compute_reward)
        with pytest.raises(ValueError):
            self.replay_buffer = HERReplayBuffer(env_spec=self.env.spec, capacity_in_transitions=10, replay_k=0.2, reward_fn=self.env.compute_reward)

    def _add_one_path(self):
        # Minimal two-transition path of identical observations.
        path = dict(observations=np.asarray([self.obs, self.obs]), actions=np.asarray([self.env.action_space.sample(), self.env.action_space.sample()]), rewards=np.asarray([[1], [1]]), terminals=np.asarray([[False], [False]]), next_observations=np.asarray([self.obs, self.obs]))
        self.replay_buffer.add_path(path)

    def test_add_path(self):
        self._add_one_path()
        path_len = 2
        # Expected: (replay_k + 1) stored transitions per non-final step plus
        # the final transition itself.
        total_expected_transitions = (sum([(self._replay_k + 1) for _ in range((path_len - 1))]) + 1)
        assert (self.replay_buffer.n_transitions_stored == total_expected_transitions)
        assert (len(self.replay_buffer._path_segments) == (total_expected_transitions - 1))
        assert ({'observations', 'next_observations', 'actions', 'rewards', 'terminals'} <= set(self.replay_buffer._buffer))
        # Dict observations are stored flattened to flat_dim vectors.
        obs = self.replay_buffer._buffer['observations'][0]
        next_obs = self.replay_buffer._buffer['next_observations'][0]
        assert (obs.shape == self.env.spec.observation_space.flat_dim)
        assert (next_obs.shape == self.env.spec.observation_space.flat_dim)

    def test_pickleable(self):
        # A pickle round-trip must preserve buffer keys, array shapes and
        # the shapes of sampled transitions.
        self._add_one_path()
        replay_buffer_pickled = pickle.loads(pickle.dumps(self.replay_buffer))
        assert (replay_buffer_pickled._buffer.keys() == self.replay_buffer._buffer.keys())
        for k in replay_buffer_pickled._buffer:
            assert (replay_buffer_pickled._buffer[k].shape == self.replay_buffer._buffer[k].shape)
        sample = self.replay_buffer.sample_transitions(1)
        sample2 = replay_buffer_pickled.sample_transitions(1)
        for k in sample.keys():
            assert (sample[k].shape == sample2[k].shape)
        assert (len(sample) == len(sample2))
def get_a2j_conf_parser():
    """Build the argparse parser for A2J training/evaluation configuration.

    All options have defaults, so ``parse_args([])`` yields a usable config.
    """
    parser = argparse.ArgumentParser()
    # run control
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--phase', default='train')
    parser.add_argument('--dataset', default='nyu')
    parser.add_argument('--num_epoch', type=int, default=20)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--learning_rate', type=float, default=0.001)
    parser.add_argument('--split', type=int, default=1, help='Divide the train dataset into s parts, each as an epoch.')
    parser.add_argument('--gpus', type=int, nargs='+')
    # paths / checkpoints
    parser.add_argument('--log_dir', default='./logs/base')
    parser.add_argument('--model_saved_path', default='./checkpoint/base')
    parser.add_argument('--pre_a2j', default='./checkpoint/model.pth')
    parser.add_argument('--pre_conf', default=None)
    parser.add_argument('--resume_training', type=bool, default=False, help='If resume_training is False, log dir will not be remove.')
    # optimization
    parser.add_argument('--reg_weight', type=float, default=0.001, help='Regularization weight.')
    parser.add_argument('--learning_decay_rate', default=0.8, help='Learning decay rate each epoch.')
    parser.add_argument('--level', type=int, default=5, help='Specify the number of virtual views. Levels 1, 2, 3, 4, 5 have 3, 9, 15, 25, 81 views, respectively.')
    parser.add_argument('--random_sample', type=bool, default=False)
    # attention module hyper-parameters
    parser.add_argument('--n_head', type=int, default=1)
    parser.add_argument('--d_attn', type=int, default=256)
    parser.add_argument('--d_k', type=int, default=64)
    parser.add_argument('--d_v', type=int, default=64)
    parser.add_argument('--d_inner', type=int, default=256)
    parser.add_argument('--dropout_rate', type=float, default=0.5)
    parser.add_argument('--num_worker', type=int, default=4, help='Number worker of Dataloader.')
    # augmentation
    parser.add_argument('--max_jitter', type=float, default=10.0, help='Sigma of jittering center of mass.')
    parser.add_argument('--depth_sigma', type=float, default=1.0, help='Sigma of jittering depth of pixel.')
    parser.add_argument('--cube_len', type=float, default=None)
    parser.add_argument('--min_scale', type=float, default=1.0)
    parser.add_argument('--max_scale', type=float, default=1.0)
    parser.add_argument('--offset', type=float, default=20.0, help='Offset of bounding box.')
    parser.add_argument('--hand_thickness', type=float, default=20.0)
    parser.add_argument('--random_flip', type=bool, default=False, help='Whether flip image randomly.')
    parser.add_argument('--random_rotate', type=bool, default=False, help='Whether rotate image randomly.')
    parser.add_argument('--use_joint', type=bool, default=False)
    parser.add_argument('--save_result', type=bool, default=False)
    parser.add_argument('--config', default='./config/nyu/train_a2j_conf.yaml')
    return parser
def _make_group(N, ni, nf, block, stride, drop_p):
return [block((ni if (i == 0) else nf), nf, (stride if (i == 0) else 1), drop_p) for i in range(N)] |
class MultiprocessLoader(object):
    """Iterate a dataloader on a background daemon thread feeding a bounded queue."""

    def __init__(self, dataloader, num_workers=2):
        self.dl = dataloader
        # Bounded queue (2 slots per worker) decouples producer from consumer.
        self.queue_size = (2 * num_workers)

    def __iter__(self):
        output_queue = queue.Queue(self.queue_size)
        # _multiproc_iter presumably drains self.dl into output_queue — confirm.
        output_thread = threading.Thread(target=_multiproc_iter, args=(self.dl, output_queue))
        output_thread.daemon = True
        output_thread.start()
        while output_thread.is_alive():
            (yield output_queue.get(block=True))
        else:
            # NOTE(review): a while/else branch runs whenever the loop exits
            # without break — i.e. also on ordinary thread completion — and it
            # prints (not raises) the error. There is also a race window:
            # the thread can finish between is_alive() and get(), leaving
            # queued items unread or blocking on get(); confirm the intended
            # shutdown protocol before restructuring.
            print(RuntimeError('TF record data loader thread exited unexpectedly'))
class TestDrawAverageWithSTD(TestCase):
    """Exercise DrawCSV2 plotting of an AveragewithStd meter with torch,
    numpy and plain-list inputs (the three supported input kinds)."""

    def setUp(self) -> None:
        config = {'avg': AveragewithStd()}
        self.METER = MeterInterface(config)
        columns_to_draw = [['avg_mean', 'avg_lstd', 'avg_hstd']]
        from pathlib import Path
        self.drawer = DrawCSV2(columns_to_draw=columns_to_draw, save_dir=Path(__file__).parent)

    def _train_loop(self, data, epoch):
        """Feed every sample of `data` into the meter.

        `epoch` is unused but kept for signature compatibility.
        """
        for i in data:
            self.METER['avg'].add(i)
            time.sleep(0.1)

    def _run_and_draw(self, make_batch):
        """Shared driver: 100 shrinking batches -> meter -> CSV drawing.

        Factored out of the three tests below, whose bodies were identical
        except for how the batch is constructed.
        """
        for i in range(100):
            data = make_batch(i)
            self._train_loop(data, i)
            self.METER.step()
            summary = self.METER.summary()
            self.drawer.draw(summary)

    def test_torch(self):
        self._run_and_draw(lambda i: torch.randn(10, 1) / (i + 1))

    def test_numpy(self):
        self._run_and_draw(lambda i: np.random.randn(10, 1) / (i + 1))

    def test_list(self):
        self._run_and_draw(lambda i: (np.random.randn(10, 1) / (i + 1)).squeeze().tolist())
class Args(object):
    """Experiment bundle for KPConv ShapeNetPart training: paths, data loaders,
    model, optimizer and schedule, all derived from `config`."""

    def __init__(self, config):
        # Hard-coded switch; only affects the experiment-id suffix.
        is_test = False
        if is_test:
            self.experiment_id = (('KPConvNet' + time.strftime('%m%d%H%M')) + 'Test')
        else:
            self.experiment_id = ('KPConvNet' + time.strftime('%m%d%H%M'))
        self.device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
        self.verbose = True
        self.config = config
        self.snapshot_interval = 5
        snapshot_root = f'snapshot/{config.dataset}_{self.experiment_id}'
        tensorboard_root = f'tensorboard/{config.dataset}_{self.experiment_id}'
        os.makedirs(snapshot_root, exist_ok=True)
        os.makedirs(tensorboard_root, exist_ok=True)
        # Archive the exact scripts used for this run for reproducibility.
        shutil.copy2(os.path.join('.', 'training_ShapeNetPart.py'), os.path.join(snapshot_root, 'train.py'))
        shutil.copy2(os.path.join('datasets', 'ShapeNet.py'), os.path.join(snapshot_root, 'dataset.py'))
        shutil.copy2(os.path.join('datasets', 'dataloader.py'), os.path.join(snapshot_root, 'dataloader.py'))
        self.save_dir = os.path.join(snapshot_root, 'models/')
        self.result_dir = os.path.join(snapshot_root, 'results/')
        self.tboard_dir = tensorboard_root
        # Only the 'Chair' category is used for both splits.
        self.train_set = ShapeNetDataset(root=config.data_train_dir, split='train', first_subsampling_dl=config.first_subsampling_dl, classification=False, class_choice=['Chair'], config=config)
        self.test_set = ShapeNetDataset(root=config.data_test_dir, split='test', first_subsampling_dl=config.first_subsampling_dl, classification=False, class_choice=['Chair'], config=config)
        # NOTE(review): num_workers is set to the batch size — a worker count
        # is unrelated to batch size, so this looks like a copy-paste slip;
        # confirm before changing.
        self.train_loader = get_dataloader(dataset=self.train_set, batch_size=config.train_batch_size, shuffle=True, num_workers=config.train_batch_size)
        self.test_loader = get_dataloader(dataset=self.test_set, batch_size=config.test_batch_size, shuffle=False, num_workers=config.test_batch_size)
        print('Training set size:', self.train_loader.dataset.__len__())
        print('Test set size:', self.test_loader.dataset.__len__())
        self.model = KPFCNN(config)
        self.resume = config.resume
        self.start_epoch = 0
        self.epoch = config.max_epoch
        self.optimizer = optim.SGD(self.model.parameters(), lr=config.learning_rate, momentum=config.momentum, weight_decay=1e-06)
        self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=config.exp_gamma)
        self.scheduler_interval = config.exp_interval
        self.evaluate_interval = 1
        self.evaluate_metric = nn.CrossEntropyLoss(reduction='mean')
        self.check_args()

    def check_args(self):
        """Ensure output directories exist; returns self for chaining."""
        if (not os.path.exists(self.save_dir)):
            os.makedirs(self.save_dir)
        if (not os.path.exists(self.result_dir)):
            os.makedirs(self.result_dir)
        if (not os.path.exists(self.tboard_dir)):
            os.makedirs(self.tboard_dir)
        return self
def _update_jit(rng: PRNGKey, actor: Model, critic: Model, target_critic: Model, temp: Model, batch: Batch, discount: float, tau: float, target_entropy: float, backup_entropy: bool, update_target: bool) -> Tuple[(PRNGKey, Model, Model, Model, Model, InfoDict)]:
    """One SAC update step: critic -> (optional) target sync -> actor -> temperature.

    Returns the advanced RNG key, the four updated models and a merged info
    dict of critic/actor/temperature diagnostics.
    """
    (rng, key) = jax.random.split(rng)
    (new_critic, critic_info) = update_critic(key, actor, critic, target_critic, temp, batch, discount, backup_entropy=backup_entropy)
    if update_target:
        # Polyak-average the online critic into the target critic.
        new_target_critic = target_update(new_critic, target_critic, tau)
    else:
        new_target_critic = target_critic
    (rng, key) = jax.random.split(rng)
    (new_actor, actor_info) = update_actor(key, actor, new_critic, temp, batch)
    (new_temp, alpha_info) = temperature.update(temp, actor_info['entropy'], target_entropy)
    return (rng, new_actor, new_critic, new_target_critic, new_temp, {**critic_info, **actor_info, **alpha_info})


# BUG FIX: the original had a mangled decorator line
# `(jax.jit, static_argnames=(...))` (not valid Python), i.e. the
# `@functools.partial(...)` prefix was lost and the function was never jitted.
# Wrap explicitly instead; the two bools are static because they select
# Python-level branches during tracing.
_update_jit = jax.jit(_update_jit, static_argnames=('backup_entropy', 'update_target'))
def article_recommendation(json):
    """Validate a recommendation payload.

    Returns an ``(error_message, 400)`` tuple on the first failed check, or
    ``None`` when the payload is valid.

    NOTE: the parameter shadows the stdlib ``json`` module name; kept for
    interface compatibility with existing callers.
    """
    json = json.get('recommendations')
    if (not json):
        return ('No recommendations submitted.', 400)
    if (len(json) > app.config['max_users_per_recommendation']):
        return (('Requests must not contain more than %s users.' % app.config['max_users_per_recommendation']), 400)
    # BUG FIX: this was a set literal, so the checks ran in arbitrary
    # (hash-dependent) order and the same invalid payload could yield
    # different error messages across runs. A tuple fixes the priority.
    check_funcs = (nonexistent_users, too_many_recommendations, contains_ineligible_articles, score_is_not_float, missing_explanation, too_long_explanation)
    for check_func in check_funcs:
        err = check_func(json)
        if err:
            return err
    return None
def dump(obj, file=None, file_format=None, **kwargs):
    """Serialize `obj` to a string, a path or a writable file object.

    When `file` is None the serialized string is returned; `file_format` is
    then mandatory. Otherwise the format defaults to the file extension.
    Raises TypeError for unsupported formats or file arguments.
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None:
        if is_str(file):
            # infer the handler from the extension
            file_format = file.split('.')[-1]
        elif file is None:
            raise ValueError('file_format must be specified since file is None')
    if file_format not in file_handlers:
        raise TypeError('Unsupported format: {}'.format(file_format))
    handler = file_handlers[file_format]
    if file is None:
        return handler.dump_to_str(obj, **kwargs)
    if is_str(file):
        handler.dump_to_path(obj, file, **kwargs)
    elif hasattr(file, 'write'):
        handler.dump_to_fileobj(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
class ConvGroupBlock(nn.Module):
    """A channelwise convolution followed by a stack of simple group blocks."""

    def __init__(self, channels, multi_blocks, groups, dropout_rate):
        super(ConvGroupBlock, self).__init__()
        self.conv = ChannelwiseConv2d(groups=groups, dropout_rate=dropout_rate)
        self.block = SimpleGroupBlock(channels=channels, multi_blocks=multi_blocks, groups=groups, dropout_rate=dropout_rate)

    def forward(self, x):
        # conv first, then the group-block stack
        return self.block(self.conv(x))
class EncoderModel(nn.Module):
    """Transformer encoder wrapper: either load a pretrained AutoModel (when
    the config names a `proto` checkpoint) or build a fresh
    BertGenerationEncoder, with an optional BertPooler head."""

    def __init__(self, encoder, **kwargs):
        # `encoder` is presumably an attribute-style config (supports `.pop`,
        # attribute access and ** expansion, e.g. OmegaConf) — TODO confirm.
        super().__init__()
        if (encoder.proto is not None):
            # Reuse pretrained weights identified by `proto`; note pop()
            # mutates the config so `proto` is absent afterwards.
            path = encoder.pop('proto')
            enc_config = AutoConfig.from_pretrained(path)
            self.encoder = AutoModel.from_pretrained(path, config=enc_config)
        else:
            self.encoder = BertGenerationEncoder(BertGenerationConfig(**encoder, is_decoder=False, add_cross_attention=False)) if False else None
            enc_config = BertGenerationConfig(**encoder, is_decoder=False, add_cross_attention=False)
            self.encoder = BertGenerationEncoder(enc_config)
        if encoder.add_pooling_layer:
            # The pooler is only created on demand; forward() checks hasattr.
            self.pooler = BertPooler(encoder)

    def forward(self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, **kwargs):
        """Run the wrapped encoder; attach `pooler_output` when a pooler exists."""
        out = self.encoder(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
        if hasattr(self, 'pooler'):
            pooled_output = self.pooler(hidden_states=out.last_hidden_state)
            setattr(out, 'pooler_output', pooled_output)
        return out

    def __repr__(self):
        s = (((str(type(self.encoder).__name__) + '(') + str(self.encoder.config)) + ')\n')
        return s
def build_VAE(cfg, device='cpu'):
    """Construct a VAE from a configparser-style `cfg` and move it to `device`.

    Reads dims/activation/dropout from [Network] and beta from [Training];
    an empty 'dense_x_z' entry means no intermediate dense layers.
    """
    dense_spec = cfg.get('Network', 'dense_x_z')
    dense_x_z = [int(width) for width in dense_spec.split(',')] if dense_spec != '' else []
    model = VAE(x_dim=cfg.getint('Network', 'x_dim'),
                z_dim=cfg.getint('Network', 'z_dim'),
                dense_x_z=dense_x_z,
                activation=cfg.get('Network', 'activation'),
                dropout_p=cfg.getfloat('Network', 'dropout_p'),
                beta=cfg.getfloat('Training', 'beta'),
                device=device).to(device)
    return model
class ConvPool2D():
    """A conv block followed by a 2D pooling layer (Keras-style callable).

    SELU activations get the lecun_normal initializer and never batchnorm
    (batchnorm would break self-normalization); pooling is 'max' unless
    'average' is requested.
    """

    def __init__(self, n_layers, filters, kernel_size, activation, pooling='max', initializer='glorot_uniform', batchnorm=False, use_bias=True, name=None):
        self.n_layers = n_layers
        self.filters = filters
        self.kernel_size = kernel_size
        self.activation = activation
        self.initializer = initializer
        self.use_bias = use_bias
        self.pooling = pooling
        # BUG FIX: the original compared strings with `is`/`is not`, which
        # tests object identity, not equality. `activation.lower()` returns a
        # fresh string object, so `... is 'selu'` was effectively always
        # False and the SELU-specific settings were silently skipped.
        if (activation.lower() != 'selu') and batchnorm:
            self.batchnorm = True
        else:
            self.batchnorm = False
        if activation.lower() == 'selu':
            # SELU requires lecun_normal init to stay self-normalizing.
            self.initializer = 'lecun_normal'
        if pooling == 'average':
            self.Pooling2D = layers.AveragePooling2D
        else:
            self.Pooling2D = layers.MaxPooling2D
        self.name = name

    def __call__(self, inputs):
        """Apply the conv block, then the pooling layer."""
        outputs = ConvBlock2D(self.n_layers, self.filters, self.kernel_size, self.activation, self.initializer, self.batchnorm, self.use_bias)(inputs)
        outputs = self.Pooling2D()(outputs)
        return outputs
class ReformerTokenizer():
    """Dummy placeholder for the real ReformerTokenizer.

    Follows the transformers "dummy object" pattern: every entry point calls
    requires_sentencepiece, which presumably raises an informative error when
    the sentencepiece backend is not installed — confirm against the
    project's requires_sentencepiece implementation.
    """

    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)
def bernoulli_nll(x, p):
    """Elementwise Bernoulli negative log-likelihood of `x` under probabilities `p`.

    The two tensors are first broadcast dimension-by-dimension (each size-
    mismatched axis of the smaller tensor is expanded to the larger size),
    then unreduced binary cross-entropy is returned.
    """
    x_shape = []
    p_shape = []
    for dim_x, dim_p in zip(x.size(), p.size()):
        if dim_x > dim_p:
            # p must grow along this axis; x keeps its size (-1 = unchanged)
            x_shape.append(-1)
            p_shape.append(dim_x)
        elif dim_x < dim_p:
            x_shape.append(dim_p)
            p_shape.append(-1)
        else:
            x_shape.append(-1)
            p_shape.append(-1)
    return binary_cross_entropy(p.expand(*p_shape), x.expand(*x_shape), reduction='none')
def cyl_vol_func(X, Y, Z, xymin=0.0, xymax=0.15, zmin=0.05, zmax=0.15):
    """Indicator of a cylindrical shell: 1 where xymin <= r < xymax and
    zmin <= Z < zmax (r is the radial distance in the XY plane), else 0."""
    radius = numpy.sqrt(((X ** 2.0) + (Y ** 2.0)))
    inside = (radius >= xymin) & (radius < xymax) & (Z >= zmin) & (Z < zmax)
    out = numpy.zeros_like(X)
    out[inside] = 1.0
    return out
def print_available_pretrained_models():
    """Print each available pretrained model's name and description."""
    print('The following pretrained models are available:\n')
    for model_name, info in get_available_models().items():
        print('')
        print(model_name)
        print(info['description'])
def store_json(fpath, obj, pretty=False):
    """Write `obj` as JSON to `fpath`.

    pretty=True uses a 2-space indent with sorted keys; otherwise compact
    default formatting.
    """
    dump_kwargs = {'indent': 2, 'sort_keys': True} if pretty else {}
    with open(fpath, 'w') as fp:
        json.dump(obj, fp, **dump_kwargs)
class RandomFPHook(Hook):
    """After every training epoch, ask the dataset to regenerate its
    annotations with random false positives injected (if it supports it)."""

    def after_train_epoch(self, runner):
        dataset = runner.data_loader.dataset
        if not hasattr(dataset, 'add_random_fp'):
            # dataset does not support FP injection; nothing to do
            return
        new_infos = dataset.add_random_fp()
        # the regenerated annotation list must stay the same length
        assert len(new_infos) == len(dataset.data_infos)
        dataset.data_infos = new_infos
def biattention_layer(is_train, h, u, h_mask=None, u_mask=None, scope=None, tensor_dict=None):
    """Bi-attention between context `h` and query `u` (BiDAF-style fusion).

    NOTE(review): the expand_dims/axis=3 usage assumes `h` and `u` are rank-3
    [batch, len, dim] tensors that become rank-4 after the expansion — confirm
    against bi_attention's expected shapes.
    """
    with tf.variable_scope((scope or 'attention_layer')):
        # add a singleton axis so h broadcasts against u inside bi_attention
        h = tf.expand_dims(h, 1)
        h_mask = tf.expand_dims(h_mask, 1)
        (u_a, h_a) = bi_attention(is_train, h, u, h_mask=h_mask, u_mask=u_mask, tensor_dict=tensor_dict)
        # fuse: [h; attended-query; h*attended-query; h*attended-context]
        p0 = tf.concat(axis=3, values=[h, u_a, (h * u_a), (h * h_a)])
        return p0
def fdmobilenet_w1(**kwargs):
    """FD-MobileNet 1.0x: factory wrapper fixing version/width for get_mobilenet."""
    return get_mobilenet(version='fd', width_scale=1.0, model_name='fdmobilenet_w1', **kwargs)
def fake_transition() -> chex.ArrayTree:
    """Build a minimal fake transition pytree for tests.

    NOTE(review): ``jnp.array((5, 4))`` is the 1-D array [5, 4], not a (5, 4)
    tensor — presumably intentional for a fixture, but confirm it was not
    meant to be ``jnp.zeros((5, 4))``.
    """
    return {'obs': jnp.array((5, 4)), 'reward': jnp.zeros((3,))}
def init_dist(rank, world_size):
    """Initialize atorch NCCL distributed state for this process.

    Sets the rank/world-size environment variables, initializes the NCCL
    backend, binds the process to its local GPU and creates a model-parallel
    group spanning all ranks.
    """
    os.environ['LOCAL_RANK'] = str(rank)
    os.environ['RANK'] = str(rank)
    os.environ['WORLD_SIZE'] = str(world_size)
    os.environ['NPROC_PER_NODE'] = str(world_size)
    atorch.init_distributed('nccl')
    # BUG FIX: torch.cuda.device(...) only constructs a context manager that
    # was never entered, so the default device was never changed.
    # set_device actually binds this process to its local GPU.
    torch.cuda.set_device(atorch.local_rank())
    parallel_config = ([('model', world_size)], None)
    create_parallel_group(parallel_config)
def character_metric_detect(preds, targs):
    """Character-level detection F1 between predictions and targets.

    Each item of `preds`/`targs` is a sequence whose first element is a
    sample id and whose remaining elements are (index, char) pairs. A
    predicted pair counts as a true positive when its index appears among
    the target indices of the same sample.

    Returns {'char-detect-f1': f1 * 100}. Empty prediction or target sets
    yield 0.0 instead of raising ZeroDivisionError (which the original did).
    """
    assert (len(preds) == len(targs)), f'{len(preds)},{len(targs)}'
    (tp, targ_p, pred_p) = (0, 0, 0)
    for (pred_item, targ_item) in zip(preds, targs):
        # the sample ids must line up pairwise
        assert (pred_item[0] == targ_item[0])
        (pred, targ) = (sorted(pred_item[1:]), sorted(targ_item[1:]))
        targ_p += len(targ)
        pred_p += len(pred)
        candidate_index = [i for (i, _) in targ]
        for (index, _) in pred:
            if (index in candidate_index):
                tp += 1
    # BUG FIX: guard empty denominators (no predictions / no targets at all).
    p = (tp / pred_p) if (pred_p > 0) else 0.0
    r = (tp / targ_p) if (targ_p > 0) else 0.0
    f1 = ((((2 * p) * r) / (p + r)) if ((p + r) > 0) else 0.0)
    return {'char-detect-f1': (f1 * 100)}
def build_fake_yaml_disable_first_quantization():
    """Write fake_yaml_disable_first_quantization.yaml for the tests.

    Round-trips the embedded template through yaml so the file on disk is
    normalized yaml with first_conv_or_matmul_quantization disabled.
    """
    fake_yaml = '\n model:\n name: fake_yaml\n framework: tensorflow\n inputs: input\n outputs: op_to_store\n device: cpu\n quantization:\n recipes:\n first_conv_or_matmul_quantization: False\n model_wise:\n weight:\n granularity: per_tensor\n scheme: sym\n dtype: int8\n algorithm: minmax\n evaluation:\n accuracy:\n metric:\n topk: 1\n tuning:\n strategy:\n name: basic\n accuracy_criterion:\n relative: 0.1\n exit_policy:\n performance_only: True\n workspace:\n path: saved\n '
    y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
    with open('fake_yaml_disable_first_quantization.yaml', 'w', encoding='utf-8') as f:
        yaml.dump(y, f)
    # (the redundant f.close() was removed: the with-block already closes f)
class Encoder(nn.Module):
    """Downsampling encoder: two downsampler stages interleaved with
    non-bottleneck-1d residual blocks (the second stage uses increasing
    dilations), plus an optional 1x1 classifier conv for standalone use."""

    def __init__(self, num_classes):
        super().__init__()
        self.initial_block = DownsamplerBlock(3, 16)
        self.layers = nn.ModuleList()
        # stage 1: 16 -> 64 channels, five plain residual blocks
        self.layers.append(DownsamplerBlock(16, 64))
        for _ in range(5):
            self.layers.append(non_bottleneck_1d(64, 0.03, 1))
        # stage 2: 64 -> 128 channels, two rounds of dilated residual blocks
        self.layers.append(DownsamplerBlock(64, 128))
        for _ in range(2):
            for dilation in (2, 4, 8, 16):
                self.layers.append(non_bottleneck_1d(128, 0.3, dilation))
        self.output_conv = nn.Conv2d(128, num_classes, 1, stride=1, padding=0, bias=True)

    def forward(self, input, predict=False):
        """Encode `input`; with predict=True also apply the classifier conv."""
        output = self.initial_block(input)
        for layer in self.layers:
            output = layer(output)
        if predict:
            output = self.output_conv(output)
        return output
def cross_entropy(pred, label, weight=None, class_weight=None, reduction='mean', avg_factor=None, ignore_index=(- 100)):
    """Cross-entropy loss with per-sample weighting and configurable reduction.

    `class_weight` is passed to F.cross_entropy; the elementwise loss is then
    combined with the optional per-sample `weight` and reduced by
    weight_reduce_loss (supporting 'none'/'mean'/'sum' and avg_factor).
    """
    # compute the unreduced elementwise loss first; reduction happens below
    raw_loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none', ignore_index=ignore_index)
    if weight is not None:
        weight = weight.float()
    return weight_reduce_loss(raw_loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
class PreActResNet(nn.Module):
    """Pre-activation ResNet whose first conv is exposed separately.

    The first convolution is kept as ``self.layer_one`` and its activation is
    retained with gradients in ``self.layer_one_out`` so callers can read
    ``self.layer_one_out.grad`` after backward; every subsequent module is
    also collected (in execution order) in ``self.other_layers``.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(PreActResNet, self).__init__()
        self.in_planes = 64
        # every module after the first conv, in forward execution order
        self.other_layers = nn.ModuleList()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.layer_one = self.conv1
        self.other_layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.other_layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.other_layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.other_layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = GlobalpoolFC((512 * block.expansion), num_classes)
        self.other_layers.append(self.linear)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first one downsamples with `stride`."""
        strides = ([stride] + ([1] * (num_blocks - 1)))
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            # also register each block flat in other_layers for the forward loop
            self.other_layers.append(layers[(- 1)])
            self.in_planes = (planes * block.expansion)
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.layer_one(x)
        # Keep the first-layer activation with a retained gradient so callers
        # can inspect self.layer_one_out.grad (e.g. for "free" adversarial
        # training schemes).
        self.layer_one_out = x
        self.layer_one_out.requires_grad_()
        self.layer_one_out.retain_grad()
        x = self.layer_one_out
        for layer in self.other_layers:
            x = layer(x)
        # (a dead commented-out-code string literal was removed here)
        return x
def unpad_input(padded: torch.Tensor, attention_mask: torch.Tensor) -> tuple[(torch.Tensor, Callable, torch.Tensor, int)]:
    """Strip padding tokens via bert_padding and return a re-padding closure.

    Returns (unpadded, pad_back, cu_seqlens, max_seqlen) where pad_back
    scatters a packed tensor back to the original (batch, seq, ...) layout.
    ``padded`` is assumed to be at least rank 2 with (batch, seq) leading
    dims — implied by the shape[:2] unpacking.
    """
    (batch_size, padded_seqlen) = padded.shape[:2]
    (unpadded, indices, cu_seqlens, max_seqlen) = bert_padding.unpad_input(padded, attention_mask)
    def pad_back(unpadded: torch.Tensor):
        # closure captures indices/batch/seqlen so callers need not track them
        return bert_padding.pad_input(unpadded, indices, batch_size, padded_seqlen)
    return (unpadded, pad_back, cu_seqlens, max_seqlen)
# BUG FIX: the decorator line was mangled to a bare `.parametrize(...)`
# (a syntax error); restore the `@pytest.mark` prefix. The parameter tuple
# itself is unchanged.
@pytest.mark.parametrize(['space', 'lower_x_hyperplane', 'upper_x_hyperplane', 'lower_y_hyperplane', 'upper_y_hyperplane', 'lower_z_hyperplane', 'upper_z_hyperplane'], [(Space(x1=0, x2=1, y1=0, y2=1, z1=0, z2=1), Space(x1=(- jnp.inf), x2=0, y1=(- jnp.inf), y2=jnp.inf, z1=(- jnp.inf), z2=jnp.inf), Space(x1=1, x2=jnp.inf, y1=(- jnp.inf), y2=jnp.inf, z1=(- jnp.inf), z2=jnp.inf), Space(x1=(- jnp.inf), x2=jnp.inf, y1=(- jnp.inf), y2=0, z1=(- jnp.inf), z2=jnp.inf), Space(x1=(- jnp.inf), x2=jnp.inf, y1=1, y2=jnp.inf, z1=(- jnp.inf), z2=jnp.inf), Space(x1=(- jnp.inf), x2=jnp.inf, y1=(- jnp.inf), y2=jnp.inf, z1=(- jnp.inf), z2=0), Space(x1=(- jnp.inf), x2=jnp.inf, y1=(- jnp.inf), y2=jnp.inf, z1=1, z2=jnp.inf))])
def test_space__hyperplane(space: Space, lower_x_hyperplane: Space, upper_x_hyperplane: Space, lower_y_hyperplane: Space, upper_y_hyperplane: Space, lower_z_hyperplane: Space, upper_z_hyperplane: Space) -> None:
    """Each axis/side hyperplane of the unit cube must be the half-space
    bounded at the corresponding face, unbounded in the other axes."""
    assert (space.hyperplane('x', 'lower') == lower_x_hyperplane)
    assert (space.hyperplane('x', 'upper') == upper_x_hyperplane)
    assert (space.hyperplane('y', 'lower') == lower_y_hyperplane)
    assert (space.hyperplane('y', 'upper') == upper_y_hyperplane)
    assert (space.hyperplane('z', 'lower') == lower_z_hyperplane)
    assert (space.hyperplane('z', 'upper') == upper_z_hyperplane)
def main():
    """Denoise a test set with the pretrained model, optionally saving
    .png/.mat outputs and a bundled submission.

    Relies on module-level ``args`` (CLI options), ``utils`` helpers and
    ``Net``. Requires CUDA.

    Fix: the final cleanup used ``os.system('rm ...')``, which is
    non-portable and unsafe for paths containing shell metacharacters; it
    now removes the intermediate .mat files with os-level calls.
    """
    if args.save_images:
        result_dir_img = os.path.join(args.result_dir, 'png')
        result_dir_mat = os.path.join(args.result_dir, 'mat')
        utils.mkdir(result_dir_img)
        utils.mkdir(result_dir_mat)
    test_dataset = get_test_data(args.input_dir)
    test_loader = DataLoader(dataset=test_dataset, batch_size=args.bs, shuffle=False, num_workers=8, drop_last=False)
    model_restoration = Net()
    utils.load_checkpoint(model_restoration, args.weights)
    print('===>Testing using weights: ', args.weights)
    model_restoration.cuda()
    model_restoration = nn.DataParallel(model_restoration)
    model_restoration.eval()
    with torch.no_grad():
        for (ii, data_test) in enumerate(tqdm(test_loader), 0):
            rgb_noisy = data_test[0].cuda()
            filenames = data_test[1]
            # args.ensemble presumably toggles self-ensembling inside
            # forward_chop (default on) — confirm against utils.forward_chop.
            if args.ensemble:
                rgb_restored = torch.clamp(utils.forward_chop(x=rgb_noisy, nn_model=model_restoration), 0.0, 1.0)
            else:
                rgb_restored = torch.clamp(utils.forward_chop(x=rgb_noisy, nn_model=model_restoration, ensemble=False), 0.0, 1.0)
            # NCHW -> NHWC numpy arrays for image saving.
            rgb_noisy = rgb_noisy.permute(0, 2, 3, 1).cpu().detach().numpy()
            rgb_restored = rgb_restored.permute(0, 2, 3, 1).cpu().detach().numpy()
            if args.save_images:
                for batch in range(len(rgb_noisy)):
                    denoised_img = img_as_ubyte(rgb_restored[batch])
                    # filenames[...][: -4] strips the original extension.
                    utils.save_img(os.path.join(result_dir_img, (filenames[batch][:(- 4)] + '.png')), denoised_img)
                    sio.savemat(os.path.join(result_dir_mat, (filenames[batch][:(- 4)] + '.mat')), {'Idenoised_crop': np.float32(rgb_restored[batch])})
    if args.save_images:
        bundle_submissions_srgb_v1(result_dir_mat, 'bundled')
        # Remove the intermediate .mat files now that they are bundled.
        for fname in os.listdir(result_dir_mat):
            if fname.endswith('.mat'):
                os.remove(os.path.join(result_dir_mat, fname))
def hf_bucket_url(model_id: str, filename: str, subfolder: Optional[str]=None, revision: Optional[str]=None, mirror=None) -> str:
    """Resolve the download URL for a file hosted on the Hugging Face hub.

    Args:
        model_id: repo id, either ``"name"`` (legacy) or ``"org/name"``.
        filename: file inside the repo.
        subfolder: optional sub-directory prepended to ``filename``.
        revision: git revision; defaults to ``"main"`` (ignored by mirrors).
        mirror: optional mirror base URL; the retired ``'tuna'``/``'bfsu'``
            shortcuts raise ``ValueError``.

    Returns:
        The fully-qualified URL string.

    Raises:
        ValueError: if a retired mirror shortcut is requested.

    Fix: the f-strings contained a literal ``(unknown)`` placeholder where
    ``{filename}`` belongs, so every returned URL dropped the filename.
    """
    if (subfolder is not None):
        filename = f'{subfolder}/{filename}'
    if mirror:
        if (mirror in ['tuna', 'bfsu']):
            raise ValueError('The Tuna and BFSU mirrors are no longer available. Try removing the mirror argument.')
        # Legacy (un-namespaced) repo ids join the id and filename with '-'.
        legacy_format = ('/' not in model_id)
        if legacy_format:
            return f'{mirror}/{model_id}-{filename}'
        else:
            return f'{mirror}/{model_id}/{filename}'
    if (revision is None):
        revision = 'main'
    return HUGGINGFACE_CO_PREFIX.format(model_id=model_id, revision=revision, filename=filename)
def dtype_to_name(dtype_mapping, dtype):
    """Reverse-lookup: return the key of ``dtype_mapping`` whose value is
    ``dtype``.

    The first matching key (insertion order) wins; a missing value raises
    ``ValueError`` from ``list.index``.
    """
    names = list(dtype_mapping.keys())
    dtypes = list(dtype_mapping.values())
    return names[dtypes.index(dtype)]
class _ROIPool(Function):
    """Autograd wrapper around the compiled ROI-pooling kernels.

    ``forward`` saves the argmax indices so ``backward`` can route gradients
    back to the pooled input locations via the C extension.

    Fixes: restored the ``@staticmethod`` decorators required by
    ``torch.autograd.Function``, and removed a stray bare ``_differentiable``
    expression (apparently a garbled ``@once_differentiable`` decorator —
    upstream implementations decorate ``backward`` with it; confirm before
    relying on double-backward behavior).
    """

    @staticmethod
    def forward(ctx, input, rois, output_size, spatial_scale):
        # Normalize output_size to an (h, w) pair and stash everything the
        # backward pass needs on the context.
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.input_shape = input.size()
        (output, argmax) = C_ROIPooling.roi_pool_forward(input, rois, spatial_scale, output_size[0], output_size[1])
        ctx.save_for_backward(input, rois, argmax)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        (input, rois, argmax) = ctx.saved_tensors
        output_size = ctx.output_size
        spatial_scale = ctx.spatial_scale
        (bs, ch, h, w) = ctx.input_shape
        grad_input = C_ROIPooling.roi_pool_backward(grad_output, input, rois, argmax, spatial_scale, output_size[0], output_size[1], bs, ch, h, w)
        # Gradients flow only to `input`; rois/output_size/spatial_scale get None.
        return (grad_input, None, None, None)
def build_model(model, device, channel=1):
    """Instantiate a segmentation network by name and move it to ``device``.

    Imports are deferred per branch so only the selected model's module is
    loaded. ``channel`` sets the input channel count; CE-Net ignores it
    (fixed 3-channel input).

    Raises:
        NotImplementedError: for an unknown ``model`` name.
    """
    if (model == 'unet'):
        from other_models import U_Net
        return U_Net(img_ch=channel, output_ch=1).to(device)
    if (model == 'cenet'):
        print('input channel of CE-Net must be 3, param channel no used')
        from imed_models import CE_Net
        return CE_Net(num_classes=1).to(device)
    if (model == 'resunet'):
        from other_models import ResUNet
        return ResUNet(img_ch=channel, output_ch=1).to(device)
    if (model == 'csnet'):
        from imed_models import CS_Net
        return CS_Net(in_channels=channel, out_channels=1).to(device)
    if (model == 'srfunet'):
        from other_models import SRF_UNet
        return SRF_UNet(img_ch=channel, output_ch=1).to(device)
    raise NotImplementedError(('model [%s] is not implemented' % model))
def shuffle_data(inputs):
    """All-to-all exchange of the concatenated ``inputs`` across ranks.

    Concatenates the local tensors, swaps equal-sized chunks with every
    rank via ``dist.all_to_all_single``, and returns the result reshaped to
    ``(my_size, -1)`` (one row per rank).
    """
    flat = torch.cat(inputs)
    exchanged = flat.new_empty(flat.size())
    # Synchronous collective; the return value is kept for parity with the
    # original (it is None unless async_op is used).
    req = dist.all_to_all_single(exchanged, flat)
    return exchanged.reshape(my_size, (- 1))
class MobileViTLayer(nn.Module):
    """MobileViT block combining local convolutions with global attention.

    Pipeline (see ``forward``): optional strided inverted-residual
    downsample -> kxk conv -> 1x1 conv to ``hidden_size`` -> transformer
    over unfolded patches -> layernorm -> fold back -> 1x1 projection ->
    fusion conv over the concatenation with the residual.
    """

    def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, hidden_size: int, num_stages: int, dilation: int=1) -> None:
        super().__init__()
        # Square patches of side config.patch_size.
        self.patch_width = config.patch_size
        self.patch_height = config.patch_size
        # With stride 2 the block downsamples first; when dilation > 1 the
        # stride is dropped and the dilation halved instead.
        if (stride == 2):
            self.downsampling_layer = MobileViTInvertedResidual(config, in_channels=in_channels, out_channels=out_channels, stride=(stride if (dilation == 1) else 1), dilation=((dilation // 2) if (dilation > 1) else 1))
            in_channels = out_channels
        else:
            self.downsampling_layer = None
        # Local feature extraction, then channel projection to the
        # transformer width (no norm/activation on the 1x1).
        self.conv_kxk = MobileViTConvLayer(config, in_channels=in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size)
        self.conv_1x1 = MobileViTConvLayer(config, in_channels=in_channels, out_channels=hidden_size, kernel_size=1, use_normalization=False, use_activation=False)
        self.transformer = MobileViTTransformer(config, hidden_size=hidden_size, num_stages=num_stages)
        self.layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
        # Back to conv-channel width, then fuse with the residual branch.
        self.conv_projection = MobileViTConvLayer(config, in_channels=hidden_size, out_channels=in_channels, kernel_size=1)
        self.fusion = MobileViTConvLayer(config, in_channels=(2 * in_channels), out_channels=in_channels, kernel_size=config.conv_kernel_size)

    def unfolding(self, features: torch.Tensor) -> Tuple[(torch.Tensor, Dict)]:
        """Rearrange (B, C, H, W) features into per-patch token sequences.

        Returns ``(patches, info_dict)`` where ``patches`` has shape
        ``(B * patch_area, num_patches, C)`` and ``info_dict`` carries
        everything ``folding`` needs to invert the transform.
        """
        (patch_width, patch_height) = (self.patch_width, self.patch_height)
        patch_area = int((patch_width * patch_height))
        (batch_size, channels, orig_height, orig_width) = features.shape
        # Round H/W up to multiples of the patch size, resizing if needed.
        new_height = int((math.ceil((orig_height / patch_height)) * patch_height))
        new_width = int((math.ceil((orig_width / patch_width)) * patch_width))
        interpolate = False
        if ((new_width != orig_width) or (new_height != orig_height)):
            features = nn.functional.interpolate(features, size=(new_height, new_width), mode='bilinear', align_corners=False)
            interpolate = True
        num_patch_width = (new_width // patch_width)
        num_patch_height = (new_height // patch_height)
        num_patches = (num_patch_height * num_patch_width)
        # Reshape/transpose sequence that groups pixels by their position
        # within a patch; folding() applies the exact inverse.
        patches = features.reshape(((batch_size * channels) * num_patch_height), patch_height, num_patch_width, patch_width)
        patches = patches.transpose(1, 2)
        patches = patches.reshape(batch_size, channels, num_patches, patch_area)
        patches = patches.transpose(1, 3)
        patches = patches.reshape((batch_size * patch_area), num_patches, (- 1))
        info_dict = {'orig_size': (orig_height, orig_width), 'batch_size': batch_size, 'channels': channels, 'interpolate': interpolate, 'num_patches': num_patches, 'num_patches_width': num_patch_width, 'num_patches_height': num_patch_height}
        return (patches, info_dict)

    def folding(self, patches: torch.Tensor, info_dict: Dict) -> torch.Tensor:
        """Inverse of ``unfolding``: tokens back to (B, C, H, W) features,
        resized to the original spatial size if unfolding interpolated."""
        (patch_width, patch_height) = (self.patch_width, self.patch_height)
        patch_area = int((patch_width * patch_height))
        batch_size = info_dict['batch_size']
        channels = info_dict['channels']
        num_patches = info_dict['num_patches']
        num_patch_height = info_dict['num_patches_height']
        num_patch_width = info_dict['num_patches_width']
        features = patches.contiguous().view(batch_size, patch_area, num_patches, (- 1))
        features = features.transpose(1, 3)
        features = features.reshape(((batch_size * channels) * num_patch_height), num_patch_width, patch_height, patch_width)
        features = features.transpose(1, 2)
        features = features.reshape(batch_size, channels, (num_patch_height * patch_height), (num_patch_width * patch_width))
        if info_dict['interpolate']:
            features = nn.functional.interpolate(features, size=info_dict['orig_size'], mode='bilinear', align_corners=False)
        return features

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """Apply the full MobileViT block; see class docstring for the
        stage order."""
        if self.downsampling_layer:
            features = self.downsampling_layer(features)
        # Residual is taken after downsampling so shapes match at fusion.
        residual = features
        features = self.conv_kxk(features)
        features = self.conv_1x1(features)
        (patches, info_dict) = self.unfolding(features)
        patches = self.transformer(patches)
        patches = self.layernorm(patches)
        features = self.folding(patches, info_dict)
        features = self.conv_projection(features)
        features = self.fusion(torch.cat((residual, features), dim=1))
        return features
class TestAdaptorONNXRT(unittest.TestCase):
    """Quantizer unit tests for the ONNX Runtime adaptor.

    Each test builds a small in-memory ONNX graph, quantizes it through the
    QLinearOps / QDQ / IntegerOps (dynamic) backends, and asserts on the
    resulting node-type counts.
    """
    # Backend identifiers handed to Quantizer.
    qlinear_backend = QuantizationMode.QLinearOps
    qdq_backend = 'qdq'
    integer_backend = QuantizationMode.IntegerOps
    # Per-op configs; dtype codes 3/2 presumably map to int8 weights and
    # uint8 activations — confirm against the framework's dtype table.
    static_q_config = {'weight': {'dtype': 3, 'algorithm': 'minmax', 'scheme': 'sym', 'granularity': 'per_tensor'}, 'activation': {'dtype': 2, 'algorithm': 'minmax', 'scheme': 'asym', 'granularity': 'per_tensor', 'quant_mode': 'static'}}
    dynamic_q_config = {'weight': {'dtype': 3, 'algorithm': 'minmax', 'scheme': 'sym', 'granularity': 'per_tensor'}, 'activation': {'dtype': 2, 'algorithm': 'minmax', 'scheme': 'asym', 'granularity': 'per_tensor', 'quant_mode': 'dynamic'}}
def setUpClass(cls):
os.makedirs('./onnxrt_test')
def tearDownClass(cls):
shutil.rmtree('./onnxrt_test', ignore_errors=True)
def qlinear_test(self, model, q_config, quantize_params, quantizable_op_types, **kwargs):
quantizer = Quantizer(copy.deepcopy(model), q_config, self.qlinear_backend, True, quantize_params, quantizable_op_types, **kwargs)
quantizer.quantize_model()
assert quantizer.model.model
return quantizer.model
def qdq_test(self, model, q_config, quantize_params, quantizable_op_types, **kwargs):
quantizer = Quantizer(copy.deepcopy(model), q_config, self.qdq_backend, True, quantize_params, quantizable_op_types, **kwargs)
quantizer.quantize_model()
assert quantizer.model.model
return quantizer.model
def dynamic_test(self, model, q_config, quantize_params, quantizable_op_types):
quantizer = Quantizer(copy.deepcopy(model), q_config, self.integer_backend, False, quantize_params, quantizable_op_types)
quantizer.quantize_model()
assert quantizer.model.model
return quantizer.model
    def test_resize(self):
        """Quantize a Conv -> Resize graph and count Q/DQ nodes.

        Exercises both QLinear and QDQ paths at opset 13, then repeats at
        opset 10 (different Resize signature; per the expected counts the
        QDQ path inserts fewer Q/DQ pairs there).
        """
        input_tensor = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 2, 26, 42])
        conv_weight_arr = np.random.randint((- 1), 2, [3, 2, 3, 3]).astype(np.float32)
        conv_weight_initializer = onnx.numpy_helper.from_array(conv_weight_arr, name='conv1_weight')
        conv_node = onnx.helper.make_node('Conv', ['input', 'conv1_weight'], ['conv_output'], name='conv_node')
        initializers = [conv_weight_initializer]
        output_tensor = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 48, 80])
        resize_inputs = ['conv_output']
        resize_attrs = {'coordinate_transformation_mode': 'asymmetric', 'mode': 'nearest', 'nearest_mode': 'floor'}
        resize_node = helper.make_node('Resize', resize_inputs, ['output'], name='resize_node', **resize_attrs)
        # Resize's optional roi/scales inputs are supplied as initializers
        # so the resize is fully static (2x upscale on H and W).
        resize_roi = [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]
        resize_roi_name = 'resize_roi'
        resize_roi_initializer = helper.make_tensor(resize_roi_name, TensorProto.FLOAT, [len(resize_roi)], resize_roi)
        initializers.extend([resize_roi_initializer])
        resize_node.input.extend([resize_roi_name])
        resize_scales = [1.0, 1.0, 2.0, 2.0]
        resize_scales_name = 'resize_scales'
        resize_scales_initializer = helper.make_tensor(resize_scales_name, TensorProto.FLOAT, [len(resize_scales)], resize_scales)
        initializers.extend([resize_scales_initializer])
        resize_node.input.extend([resize_scales_name])
        graph = helper.make_graph([conv_node, resize_node], 'TestOpQuantizerResize_test_model', [input_tensor], [output_tensor], initializer=initializers)
        model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
        model.ir_version = 7
        q_config = {'conv_node': self.static_q_config, 'resize_node': self.static_q_config}
        quantize_params = {'input': [np.uint8(0), np.float32(10.0)], 'conv1_weight': [np.uint8(0), np.float32(10.0)], 'conv_output': [np.uint8(0), np.float32(10.0)], 'output': [np.uint8(0), np.float32(10.0)]}
        # QLinear fuses everything into quantized ops: one Q at the input,
        # one DQ at the output.
        q_model = self.qlinear_test(model, q_config, quantize_params, ['Resize', 'Conv'])
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 1)
        q_model = self.qdq_test(model, q_config, quantize_params, ['Resize', 'Conv'])
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 4)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 3)
        # Same graph at opset 10.
        model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 10)])
        model.ir_version = 7
        q_model = self.qlinear_test(model, q_config, quantize_params, ['Resize', 'Conv'])
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 1)
        q_model = self.qdq_test(model, q_config, quantize_params, ['Resize', 'Conv'])
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 3)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 2)
def test_argmax(self):
input_name = 'input'
output_name = 'output'
input_shape = [1, 256, 128, 128]
output_shape = [1, 32, 128]
initializers = []
conv_weight_name = 'conv_weight'
conv_weight_arr = np.random.randint((- 1), 2, [32, 256, 1, 1]).astype(np.float32)
conv_weight_initializer = onnx.numpy_helper.from_array(conv_weight_arr, name=conv_weight_name)
conv_output_name = 'conv_output'
conv_inputs = [input_name, conv_weight_name]
conv_outputs = [conv_output_name]
conv_name = 'conv_node'
conv_node = onnx.helper.make_node('Conv', conv_inputs, conv_outputs, dilations=[1, 1], kernel_shape=[1, 1], pads=[0, 0, 0, 0], strides=[1, 1], name=conv_name)
argmax_inputs = [conv_output_name]
argmax_outputs = [output_name]
argmax_name = 'argmax_node'
argmax_node = onnx.helper.make_node('ArgMax', argmax_inputs, argmax_outputs, axis=3, keepdims=0, name=argmax_name)
initializers = [conv_weight_initializer]
input_tensor = helper.make_tensor_value_info(input_name, TensorProto.FLOAT, input_shape)
output_tensor = helper.make_tensor_value_info(output_name, TensorProto.INT64, output_shape)
graph_name = 'ArgMax_Quant_Test'
graph = helper.make_graph([conv_node, argmax_node], graph_name, [input_tensor], [output_tensor], initializer=initializers)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
model.ir_version = 7
q_config = {'conv_node': self.static_q_config, 'argmax_node': self.static_q_config}
quantize_params = {'input': [np.uint8(0), np.float32(10.0)], 'conv_weight': [np.uint8(0), np.float32(10.0)], 'conv_output': [np.uint8(0), np.float32(10.0)], 'output': [np.uint8(0), np.float32(10.0)]}
q_model = self.qlinear_test(model, q_config, quantize_params, ['Conv', 'ArgMax'])
self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 1)
def test_gemm(self):
input_name = 'input'
output_name = 'output'
initializers = []
weight_shape = [100, 10]
weight_name = 'linear1.weight'
bias_shape = [100]
bias_name = 'linear1.bias'
node_name = 'gemm'
weight_data = np.random.normal(0, 0.1, weight_shape).astype(np.float32)
initializers.append(onnx.numpy_helper.from_array(weight_data, name=weight_name))
bias_data = np.random.normal(0, 0.1, bias_shape).astype(np.float32)
initializers.append(onnx.numpy_helper.from_array(bias_data, name=bias_name))
gemm1_node = onnx.helper.make_node('Gemm', [input_name, weight_name, bias_name], [output_name], alpha=1.0, beta=1.0, transB=1, name=node_name)
gemm1_output_name = 'gemm1_output'
input_tensor = helper.make_tensor_value_info(input_name, TensorProto.FLOAT, [(- 1), 10])
output_tensor = helper.make_tensor_value_info(output_name, TensorProto.FLOAT, [(- 1), 100])
graph_name = 'gemm_test'
graph = helper.make_graph([gemm1_node], graph_name, [input_tensor], [output_tensor], initializer=initializers)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
model.ir_version = 7
q_config = {'gemm': self.static_q_config}
quantize_params = {'input': [np.uint8(0), np.float32(10.0)], 'linear1.weight': [np.uint8(0), np.float32(10.0)], 'linear1.bias': [np.uint8(0), np.float32(10.0)], 'output': [np.uint8(0), np.float32(10.0)]}
q_model = self.qlinear_test(model, q_config, quantize_params, ['Gemm'])
self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 1)
q_model = self.qdq_test(model, q_config, quantize_params, ['Gemm'])
self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 4)
self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 2)
bias_tensor = helper.make_tensor_value_info(bias_name, TensorProto.FLOAT, [100])
gemm2_node = onnx.helper.make_node('Gemm', [input_name, weight_name, bias_name], [output_name], alpha=1.0, beta=1.0, transB=1, name=node_name)
initializers = []
initializers.append(onnx.numpy_helper.from_array(weight_data, name=weight_name))
graph_name = 'gemm_test'
graph = helper.make_graph([gemm2_node], graph_name, [input_tensor, bias_tensor], [output_tensor], initializer=initializers)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
model.ir_version = 7
q_model = self.qlinear_test(model, q_config, quantize_params, ['Gemm'])
self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 0)
self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 0)
q_model = self.qdq_test(model, q_config, quantize_params, ['Gemm'])
self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 3)
self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 2)
    def test_embed(self):
        """Quantize a standalone com.microsoft EmbedLayerNormalization node.

        The QLinear path must fuse it into QEmbedLayerNormalization (and the
        QOPERATORS converter must turn it back into the float op); the QDQ
        path keeps the float op surrounded by DequantizeLinear nodes.
        """
        input_ids_shape = [1, 4]
        input_ids_tensor = helper.make_tensor_value_info('input_ids', TensorProto.INT32, input_ids_shape)
        segment_ids_shape = [1, 4]
        segment_ids_tensor = helper.make_tensor_value_info('segment_ids', TensorProto.INT32, segment_ids_shape)
        # Embedding tables: word, position and segment, all width 4.
        word_embed_shape = [32, 4]
        word_embed_weights = np.random.random_sample(word_embed_shape).astype(dtype='float32')
        word_embed_initializer = onnx.numpy_helper.from_array(word_embed_weights, name='word_embed')
        pos_embed_shape = [16, 4]
        pos_embed_weights = np.random.random_sample(pos_embed_shape).astype(dtype='float32')
        pos_embed_initializer = onnx.numpy_helper.from_array(pos_embed_weights, name='pos_embed')
        seg_embed_shape = [2, 4]
        seg_embed_weights = np.random.random_sample(seg_embed_shape).astype(dtype='float32')
        seg_embed_initializer = onnx.numpy_helper.from_array(seg_embed_weights, name='seg_embed')
        # LayerNorm affine parameters.
        gamma_shape = [4]
        gamma = np.random.random_sample(gamma_shape).astype(dtype='float32')
        gamma_initializer = onnx.numpy_helper.from_array(gamma, name='gamma')
        beta_shape = [4]
        beta = np.random.random_sample(beta_shape).astype(dtype='float32')
        beta_initializer = onnx.numpy_helper.from_array(beta, name='beta')
        layernorm_out_shape = [1, 4, 4]
        layernorm_out_tensor = helper.make_tensor_value_info('layernorm_out', TensorProto.FLOAT, layernorm_out_shape)
        mask_index_out_shape = [1]
        mask_index_out_tensor = helper.make_tensor_value_info('mask_index_out', TensorProto.INT32, mask_index_out_shape)
        embed_layer_norm_inputs = ['input_ids', 'segment_ids', 'word_embed', 'pos_embed', 'seg_embed', 'gamma', 'beta']
        embed_layer_norm_outputs = ['layernorm_out', 'mask_index_out']
        embed_layer_norm_node = helper.make_node('EmbedLayerNormalization', embed_layer_norm_inputs, embed_layer_norm_outputs, domain='com.microsoft', name='Embed')
        nodes = [embed_layer_norm_node]
        graph_name = 'embed_layernorm_graph'
        inputs = [input_ids_tensor, segment_ids_tensor]
        outputs = [layernorm_out_tensor, mask_index_out_tensor]
        initializers = [word_embed_initializer, pos_embed_initializer, seg_embed_initializer, gamma_initializer, beta_initializer]
        graph = helper.make_graph(nodes, graph_name, inputs, outputs, initializer=initializers)
        model = helper.make_model(graph, opset_imports=[helper.make_opsetid('com.microsoft', 14), helper.make_opsetid('ai.onnx', 14)])
        model.ir_version = 7
        q_config = {'Embed': self.static_q_config}
        # NOTE(review): zero-point/scale look swapped here (uint8(10.0),
        # float32(0)) relative to e.g. test_resize — harmless for these
        # node-count assertions, but confirm against the quantizer's API.
        quantize_params = {'word_embed': [np.uint8(10.0), np.float32(0)], 'pos_embed': [np.uint8(10.0), np.float32(0)], 'seg_embed': [np.uint8(10.0), np.float32(0)], 'gamma': [np.uint8(10.0), np.float32(0)], 'beta': [np.uint8(10.0), np.float32(0)], 'layernorm_out': [np.uint8(10.0), np.float32(0)], 'mask_index_out': [np.uint8(10.0), np.float32(0)], 'input_ids': [np.uint8(10.0), np.float32(0)]}
        q_model = self.qlinear_test(model, q_config, quantize_params, ['EmbedLayerNormalization'])
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QEmbedLayerNormalization'], 1)
        # Round-trip: the QOperator converter must produce the float op back.
        converter = QOPERATORS['QEmbedLayerNormalization']([i for i in q_model.nodes() if (i.op_type == 'QEmbedLayerNormalization')][0], None, q_model.initializer())
        (done, add_node, init) = converter.convert()
        self.assertTrue(('EmbedLayerNormalization' in [i.op_type for i in add_node]))
        q_model = self.qdq_test(model, q_config, quantize_params, ['EmbedLayerNormalization'])
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 5)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['EmbedLayerNormalization'], 1)
    def test_LSTM(self):
        """Dynamic quantization of a bidirectional LSTM must produce exactly
        one DynamicQuantizeLSTM node."""
        input_shape = [1, 1, 200]
        input_tensor = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)
        # Weight shapes follow the ONNX LSTM spec: W [dirs, 4*hidden, input],
        # R [dirs, 4*hidden, hidden], B [dirs, 8*hidden].
        w_shape = [2, 400, 200]
        w_weights = np.random.random_sample(w_shape).astype(dtype='float32')
        w_init = onnx.numpy_helper.from_array(w_weights, name='w')
        r_shape = [2, 400, 100]
        r_weights = np.random.random_sample(r_shape).astype(dtype='float32')
        r_init = onnx.numpy_helper.from_array(r_weights, name='r')
        b_shape = [2, 800]
        b_weights = np.random.random_sample(b_shape).astype(dtype='float32')
        b_init = onnx.numpy_helper.from_array(b_weights, name='b')
        out_shape = [1, 2, 1, 100]
        out_tensor = helper.make_tensor_value_info('out', TensorProto.FLOAT, out_shape)
        kwargs = {}
        kwargs['direction'] = 'bidirectional'
        kwargs['activations'] = ['Sigmoid', 'Tanh', 'Tanh', 'Sigmoid', 'Tanh', 'Tanh']
        kwargs['hidden_size'] = 100
        kwargs['input_forget'] = 0
        lstm_node = helper.make_node('LSTM', ['input', 'w', 'r', 'b'], ['out'], name='lstm', domain='', **kwargs)
        graph = helper.make_graph([lstm_node], 'test', [input_tensor], [out_tensor], initializer=[w_init, r_init, b_init])
        model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 11)])
        model.ir_version = 7
        q_config = {'lstm': self.dynamic_q_config}
        q_model = self.dynamic_test(model, q_config, None, ['LSTM'])
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DynamicQuantizeLSTM'], 1)
    def test_concat_reshape_pooling(self):
        """Quantize a mixed Conv/Concat/Reshape/AveragePool/Add graph built
        by the module-level ``build_model`` helper, varying which ops stay
        in fp32, and count the resulting Q/DQ nodes."""
        model = build_model()
        q_config = {'Reshape': self.static_q_config, 'conv1': self.static_q_config, 'conv2': self.static_q_config, 'Concat': self.static_q_config, 'AveragePool': self.static_q_config, 'add': self.static_q_config}
        quantize_params = {'input': [np.uint8(10.0), np.float32(0)], 'conv1_weight': [np.uint8(10.0), np.float32(0)], 'conv1_output': [np.uint8(10.0), np.float32(0)], 'conv2_weight': [np.uint8(10.0), np.float32(0)], 'conv2_output': [np.uint8(10.0), np.float32(0)], 'concat_output': [np.uint8(10.0), np.float32(0)], 'avg_output': [np.uint8(10.0), np.float32(0)], 'add_out': [np.uint8(10.0), np.float32(0)], 'add_init': [np.uint8(10.0), np.float32(0)], 'shape': [np.uint8(10.0), np.float32(0)], 'reshape_output': [np.uint8(10.0), np.float32(0)], 'add_init_2': [np.uint8(10.0), np.float32(0)], 'add_out_2': [np.uint8(10.0), np.float32(0)]}
        quantizable_op_types = ['Reshape', 'Conv', 'Concat', 'AveragePool', 'Add']
        # Fully-quantized QLinear path with dedicated Q/DQ pairs.
        q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types, **{'dedicated_qdq_pair': True})
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 1)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
        # Export QLinear model to QDQ format.
        # NOTE(review): the assertions below re-count q_model (not the
        # re-loaded `export_model`) and expect different totals than the two
        # asserts just above — this only holds if export() mutates q_model
        # in place; confirm against the export implementation.
        q_model.export('test.onnx', ONNXQlinear2QDQConfig())
        export_model = onnx.load('test.onnx')
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 5)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 8)
        os.remove('test.onnx')
        q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types, **{'dedicated_qdq_pair': True})
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 7)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 9)
        # conv1 kept in fp32.
        q_config = {'Reshape': self.static_q_config, 'conv1': 'fp32', 'conv2': self.static_q_config, 'Concat': self.static_q_config, 'AveragePool': self.static_q_config}
        q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 1)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
        q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 2)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 3)
        # Both convs in fp32: nothing left to quantize.
        q_config = {'Reshape': self.static_q_config, 'conv1': 'fp32', 'conv2': 'fp32', 'Concat': self.static_q_config, 'AveragePool': self.static_q_config}
        q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 0)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 0)
        q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 0)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 0)
        # AveragePool in fp32: must remain as a float op in the graph.
        q_config = {'Reshape': self.static_q_config, 'conv1': self.static_q_config, 'conv2': self.static_q_config, 'Concat': self.static_q_config, 'AveragePool': 'fp32'}
        q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 1)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['AveragePool'], 1)
        q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 4)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 6)
        # Reduced quantize_params (missing the second Add's tensors): both
        # Add nodes must stay float on the QLinear path.
        quantize_params = {'input': [np.uint8(10.0), np.float32(0)], 'conv1_weight': [np.uint8(10.0), np.float32(0)], 'conv1_output': [np.uint8(10.0), np.float32(0)], 'conv2_weight': [np.uint8(10.0), np.float32(0)], 'conv2_output': [np.uint8(10.0), np.float32(0)], 'concat_output': [np.uint8(10.0), np.float32(0)], 'avg_output': [np.uint8(10.0), np.float32(0)], 'shape': [np.uint8(10.0), np.float32(0)], 'add_out': [np.uint8(10.0), np.float32(0)], 'add_init': [np.uint8(10.0), np.float32(0)], 'reshape_output': [np.uint8(10.0), np.float32(0)]}
        q_config = {'Reshape': self.static_q_config, 'conv1': self.static_q_config, 'conv2': self.static_q_config, 'Concat': self.static_q_config, 'AveragePool': self.static_q_config}
        q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['Add'], 2)
        q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 6)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 8)
    def test_conv(self):
        """Static quantization of Conv and of ORT's fused FusedConv variant:
        same graph, same expected Q/DQ counts for both op types."""
        for op in ['Conv', 'FusedConv']:
            A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 5, 5, 1])
            B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 3, 3, 1])
            # C (third input, bias slot) is provided as an initializer.
            C = helper.make_tensor('C', TensorProto.FLOAT, [1, 5, 5, 1], np.random.random((1, 5, 5, 1)).reshape(25).tolist())
            D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1, 1, 5, 1])
            conv_node = onnx.helper.make_node(op, ['A', 'B', 'C'], ['D'], name=op, kernel_shape=[3, 3], pads=[1, 1, 1, 1])
            initializers = [C]
            graph = helper.make_graph([conv_node], 'test_graph_1', [A, B], [D], initializer=initializers)
            model = helper.make_model(graph)
            q_config = {op: self.static_q_config}
            quantize_params = {'A': [np.uint8(10.0), np.float32(0)], 'B': [np.uint8(10.0), np.float32(0)], 'C': [np.uint8(10.0), np.float32(0)], 'D': [np.uint8(10.0), np.float32(0)]}
            quantizable_op_types = [op]
            q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
            self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
            self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 2)
            q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
            self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 4)
            self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 3)
    def test_matmul(self):
        """Quantize a single MatMul: QLinear, QDQ and dynamic paths, plus
        failure behavior when quantize_params are malformed or absent."""
        A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
        B_init = helper.make_tensor('B', TensorProto.FLOAT, [1, 1, 5, 1], np.random.random((1, 1, 5, 1)).reshape(5).tolist())
        C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [1, 1, 5, 1])
        matmul_node = onnx.helper.make_node('MatMul', ['A', 'B'], ['C'], name='Matmul')
        graph = helper.make_graph([matmul_node], 'test_graph_1', [A], [C], [B_init])
        model = helper.make_model(graph)
        q_config = {'Matmul': self.static_q_config}
        quantize_params = {'A': [np.uint8(10.0), np.float32(0)], 'B': [np.uint8(10.0), np.float32(0)], 'C': [np.uint8(10.0), np.float32(0)]}
        quantizable_op_types = ['Matmul']
        q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 1)
        q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 3)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 2)
        # Dynamic quantization path.
        q_config = {'Matmul': self.dynamic_q_config}
        q_model = self.dynamic_test(model, q_config, None, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DynamicQuantizeLinear'], 1)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['MatMulInteger'], 1)
        # Malformed params (scale-only entries) must be rejected by the
        # static paths.
        quantize_params = {'A': [np.float32(10.0)], 'B': [np.float32(10.0)], 'C': [np.float32(10.0)]}
        with self.assertRaises(ValueError):
            self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
        with self.assertRaises(ValueError):
            self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
        # Dynamic quantization needs no pre-computed params at all.
        q_config = {'Matmul': {'weight': {'dtype': 3, 'algorithm': 'minmax', 'scheme': 'sym', 'granularity': 'per_tensor'}, 'activation': {'dtype': 2, 'algorithm': 'minmax', 'scheme': 'asym', 'granularity': 'per_tensor', 'quant_mode': 'dynamic'}}}
        quantize_params = {}
        q_model = self.dynamic_test(model, q_config, quantize_params, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DynamicQuantizeLinear'], 1)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['MatMulInteger'], 1)
def test_attention(self):
    """Exercise QAttention conversion: static qlinear/qdq and dynamic modes,
    both with and without the optional extra (float/int32) Attention inputs."""

    def op_counts(m):
        # Histogram of node op types in the quantized graph.
        return Counter(n.op_type for n in m.model.graph.node)

    shape = [1, 1, 5, 5]
    A, B, C, D = (helper.make_tensor_value_info(n, TensorProto.FLOAT, shape) for n in 'ABCD')
    attention = onnx.helper.make_node('Attention', ['A', 'B', 'C'], ['D'], name='Attention')
    model = helper.make_model(helper.make_graph([attention], 'test_graph_1', [A, B, C], [D]))

    quantize_params = {name: [np.uint8(0), np.float32(0.5)] for name in 'ABCD'}
    op_types = ['Attention']

    q_model = self.qlinear_test(model, {'Attention': self.static_q_config}, quantize_params, op_types)
    counts = op_counts(q_model)
    self.assertEqual(counts['QAttention'], 1)
    self.assertEqual(counts['QuantizeLinear'], 3)
    self.assertEqual(counts['DequantizeLinear'], 1)

    # Round-trip the QAttention node back through the QOperator converter.
    qattention = next(i for i in q_model.nodes() if i.op_type == 'QAttention')
    converter = QOPERATORS['QAttention'](qattention, None, q_model.initializer())
    done, add_node, init = converter.convert()
    self.assertTrue('Attention' in [i.op_type for i in add_node])

    self.qdq_test(model, {'Attention': self.static_q_config}, quantize_params, op_types)

    q_model = self.dynamic_test(model, {'Attention': self.dynamic_q_config}, quantize_params, op_types)
    self.assertEqual(op_counts(q_model)['DynamicQuantizeLinear'], 3)

    # Variant with two additional Attention inputs (float 'F' and int32 'E').
    E = helper.make_tensor_value_info('E', TensorProto.INT32, shape)
    F = helper.make_tensor_value_info('F', TensorProto.FLOAT, shape)
    attention = onnx.helper.make_node('Attention', ['A', 'B', 'C', 'F', 'E'], ['D'], name='Attention')
    model = helper.make_model(helper.make_graph([attention], 'test_graph_1', [A, B, C, F, E], [D]))

    q_model = self.qlinear_test(model, {'Attention': self.static_q_config}, quantize_params, op_types)
    counts = op_counts(q_model)
    self.assertEqual(counts['QuantizeLinear'], 3)
    self.assertEqual(counts['DequantizeLinear'], 1)

    q_model = self.qdq_test(model, {'Attention': self.static_q_config}, quantize_params, op_types)
    counts = op_counts(q_model)
    self.assertEqual(counts['QuantizeLinear'], 3)
    self.assertEqual(counts['DequantizeLinear'], 3)

    q_model = self.dynamic_test(model, {'Attention': self.dynamic_q_config}, quantize_params, op_types)
    self.assertEqual(op_counts(q_model)['DynamicQuantizeLinear'], 3)
def test_gather(self):
    """Quantize a MatMul -> Gather chain in qlinear, qdq and dynamic modes."""

    def op_counts(m):
        # Histogram of node op types in the produced graph.
        return Counter(n.op_type for n in m.model.graph.node)

    input_tensor = helper.make_tensor_value_info('input', TensorProto.FLOAT, [3, 2])
    matmul_weight = helper.make_tensor(
        'matmul_weight', TensorProto.FLOAT, [2, 3], np.random.random((2, 3)).reshape(6).tolist())
    matmul_output = helper.make_tensor_value_info('matmul_output', TensorProto.FLOAT, [3, 3])
    matmul_node = onnx.helper.make_node('MatMul', ['input', 'matmul_weight'], ['matmul_output'], name='MatMul')

    gather_indices = helper.make_tensor('gather_indices', TensorProto.INT64, [1, 2], [0, 2])
    gather_output = helper.make_tensor_value_info('gather_output', TensorProto.FLOAT, [1, 2, 3])
    gather_node = onnx.helper.make_node('Gather', ['matmul_output', 'gather_indices'], ['gather_output'], name='Gather')

    graph = helper.make_graph(
        [matmul_node, gather_node], 'TestGather_test_model',
        [input_tensor], [gather_output], initializer=[matmul_weight, gather_indices])
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
    model.ir_version = 7

    quantize_params = {
        name: [np.uint8(10.0), np.float32(0)]
        for name in ('input', 'matmul_weight', 'matmul_output', 'gather_output')
    }
    op_types = ['Gather', 'MatMul']

    static_cfg = {'Gather': self.static_q_config, 'MatMul': self.static_q_config}
    q_model = self.qlinear_test(model, static_cfg, quantize_params, op_types)
    self.assertEqual(op_counts(q_model)['QuantizeLinear'], 1)
    self.assertEqual(op_counts(q_model)['DequantizeLinear'], 1)

    q_model = self.qdq_test(model, static_cfg, quantize_params, op_types)
    self.assertEqual(op_counts(q_model)['QuantizeLinear'], 3)
    self.assertEqual(op_counts(q_model)['DequantizeLinear'], 4)

    dynamic_cfg = {'Gather': self.dynamic_q_config, 'MatMul': self.dynamic_q_config}
    q_model = self.dynamic_test(model, dynamic_cfg, quantize_params, op_types)
    self.assertEqual(len(q_model.model.graph.node), 6)
def test_split(self):
    """Quantize MatMul -> Split (two outputs) and check QLinear-to-QDQ export."""

    def op_counts(m):
        # Histogram of node op types in the produced graph.
        return Counter(n.op_type for n in m.model.graph.node)

    def make_static_cfg():
        # Fresh per-op config dict (mirrors self.static_q_config layout).
        return {
            'weight': {'dtype': 3, 'algorithm': 'minmax', 'scheme': 'sym', 'granularity': 'per_tensor'},
            'activation': {'dtype': 2, 'algorithm': 'minmax', 'scheme': 'asym',
                           'granularity': 'per_tensor', 'quant_mode': 'static'},
        }

    D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [100, 2])
    e_value = np.random.randn(2, 2).astype(np.float32)
    E_init = helper.make_tensor('E', TensorProto.FLOAT, [2, 2], e_value.reshape(4).tolist())
    matmul_node = onnx.helper.make_node('MatMul', ['D', 'E'], ['A'], name='Matmul')

    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [50, 2])
    C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [50, 2])
    split_node = onnx.helper.make_node('Split', ['A'], ['B', 'C'], name='Split', num_outputs=2)

    model = helper.make_model(
        helper.make_graph([matmul_node, split_node], 'test_graph_1', [D], [B, C], [E_init]))

    q_config = {'Split': make_static_cfg(), 'Matmul': make_static_cfg()}
    quantize_params = {name: [np.uint8(0), np.float32(0.5)] for name in 'ABCDE'}
    op_types = ['Split', 'MatMul']

    q_model = self.qlinear_test(model, q_config, quantize_params, op_types)
    self.assertEqual(op_counts(q_model)['DequantizeLinear'], 2)
    self.assertEqual(op_counts(q_model)['QuantizeLinear'], 1)

    # Round-trip the QLinear model through the QDQ exporter and verify size.
    q_model.export('test.onnx', ONNXQlinear2QDQConfig())
    export_model = onnx.load('test.onnx')
    self.assertEqual(len(export_model.graph.node), 11)
    os.remove('test.onnx')

    q_model = self.qdq_test(model, q_config, quantize_params, op_types)
    self.assertEqual(op_counts(q_model)['DequantizeLinear'], 5)
    self.assertEqual(op_counts(q_model)['QuantizeLinear'], 4)
def test_pad(self):
    """Quantize Conv -> Pad across pad modes, plus a standalone Pad model.

    Modes 'constant_value' / 'constant_value_wo_init' exercise the optional
    third Pad input (the constant value 'D') with and without a matching
    initializer; the remaining modes use the plain two-input Pad form.
    """
    b_value = np.array([0, 1, 1, 0, 1, 1]).astype(np.int64)
    B_init = helper.make_tensor('B', TensorProto.INT64, [6], b_value.reshape(6).tolist())
    B = helper.make_tensor_value_info('B', TensorProto.INT64, [6])
    C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [1, 7, 7])
    d_value = np.random.randn(1).astype(np.float32)
    D_init = helper.make_tensor('D', TensorProto.FLOAT, [1], d_value.reshape(1).tolist())
    D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1])
    e_value = np.random.randn(1, 5, 5).astype(np.float32)
    E_init = helper.make_tensor('E', TensorProto.FLOAT, [1, 1, 5, 5], e_value.reshape(25).tolist())
    E = helper.make_tensor_value_info('E', TensorProto.FLOAT, [1, 1, 5, 5])
    f_value = np.random.randn(1, 3, 3).astype(np.float32)
    F_init = helper.make_tensor('F', TensorProto.FLOAT, [1, 1, 3, 3], f_value.reshape(9).tolist())
    F = helper.make_tensor_value_info('F', TensorProto.FLOAT, [1, 1, 3, 3])
    for mode in ['constant', 'edge', 'reflect', 'constant_value', 'constant_value_wo_init']:
        # Fix: Conv's attributes are 'kernel_shape' and 'pads' per the ONNX
        # operator spec; the previous 'kernel'/'padding' kwargs are not valid
        # Conv attributes and were inconsistent with every other Conv node
        # built in this file.
        conv_node = onnx.helper.make_node('Conv', ['E', 'F'], ['A'], name='Conv',
                                          kernel_shape=[3, 3], pads=[1, 1, 1, 1])
        if mode == 'constant_value':
            node = onnx.helper.make_node('Pad', ['A', 'B', 'D'], ['C'], name='Pad', mode='constant')
            graph = helper.make_graph([conv_node, node], 'test_graph_1', [E, F, B, D], [C],
                                      [E_init, F_init, B_init, D_init])
        elif mode == 'constant_value_wo_init':
            # Same Pad node, but 'D' is only a graph input (no initializer).
            node = onnx.helper.make_node('Pad', ['A', 'B', 'D'], ['C'], name='Pad', mode='constant')
            graph = helper.make_graph([conv_node, node], 'test_graph_1', [E, F, B, D], [C],
                                      [E_init, F_init, B_init])
        else:
            node = onnx.helper.make_node('Pad', ['A', 'B'], ['C'], name='Pad', mode=mode)
            graph = helper.make_graph([conv_node, node], 'test_graph_1', [E, F, B], [C],
                                      [E_init, F_init, B_init])
        model = helper.make_model(graph)
        pad_config = {'weight': {'dtype': 3, 'algorithm': 'minmax', 'scheme': 'sym', 'granularity': 'per_tensor'}, 'activation': {'dtype': 2, 'algorithm': 'minmax', 'scheme': 'asym', 'granularity': 'per_tensor', 'quant_mode': 'static'}}
        conv_config = {'weight': {'dtype': 3, 'algorithm': 'minmax', 'scheme': 'sym', 'granularity': 'per_channel'}, 'activation': {'dtype': 2, 'algorithm': 'minmax', 'scheme': 'asym', 'granularity': 'per_tensor', 'quant_mode': 'static'}}
        q_config = {'Conv': conv_config, 'Pad': pad_config}
        quantize_params = {'A': [np.uint8(10.0), np.float32(1)], 'C': [np.uint8(10.0), np.float32(1)], 'D': [np.uint8(10.0), np.float32(1)], 'E': [np.uint8(10.0), np.float32(1)], 'F': [np.uint8(10.0), np.float32(1)]}
        quantizable_op_types = ['Conv', 'Pad']
        q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
        q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types, **{'dedicated_qdq_pair': True})
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 4)
    # Standalone Pad (no preceding Conv) with the constant-value input.
    node = onnx.helper.make_node('Pad', ['E', 'B', 'D'], ['C'], name='Pad', mode='constant')
    graph = helper.make_graph([node], 'test_graph_1', [E, B, D], [C], [E_init, B_init, D_init])
    model = helper.make_model(graph)
    quantize_params = {'C': [np.uint8(10.0), np.float32(0)], 'E': [np.uint8(10.0), np.float32(0)]}
    quantizable_op_types = ['Pad']
    q_config = {'Pad': pad_config}
    q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
    q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 2)
def test_binary(self):
    """Mul/Add with a scalar second operand: no DequantizeLinear nodes are
    produced in any mode, with or without calibration parameters."""

    def dequant_count(m):
        return Counter(n.op_type for n in m.model.graph.node)['DequantizeLinear']

    for op in ('Mul', 'Add'):
        A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 10])
        B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1])
        C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [1, 10])
        node = onnx.helper.make_node(op, ['A', 'B'], ['C'], name=op)
        model = helper.make_model(helper.make_graph([node], 'test_graph_1', [A, B], [C]))
        q_config = {op: self.static_q_config}
        quantize_params = {name: [np.uint8(10.0), np.float32(0)] for name in 'ABC'}
        # Same expectation for qlinear and qdq, with and without params.
        for runner in (self.qlinear_test, self.qdq_test):
            for params in (quantize_params, {}):
                q_model = runner(model, q_config, params, [op])
                self.assertEqual(dequant_count(q_model), 0)
def test_relu(self):
    """Quantize Conv+Relu graphs after onnxruntime graph optimization.

    ORT is used as a preprocessor here: constructing an InferenceSession with
    `optimized_model_filepath` set writes the optimized graph to disk, which
    is then reloaded and quantized.  Node counts are compared between the
    EXTENDED and BASIC optimization levels (EXTENDED yields one fewer node —
    presumably the Conv+Relu fusion; verify against the ORT release in use).
    """
    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 3, 3])
    D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1, 1, 5, 5])
    E = helper.make_tensor_value_info('E', TensorProto.FLOAT, [1, 1, 5, 5])
    F = helper.make_tensor_value_info('F', TensorProto.FLOAT, [1, 1, 5, 5])
    conv_node = onnx.helper.make_node('Conv', ['A', 'B'], ['C'], name='Conv', kernel_shape=[3, 3], pads=[1, 1, 1, 1])
    relu_node = onnx.helper.make_node('Relu', ['C'], ['D'], name='Relu')
    add_node = onnx.helper.make_node('Add', ['D', 'E'], ['F'], name='Add')
    graph = helper.make_graph([conv_node, relu_node], 'test_graph_1', [A, B], [D])
    model = helper.make_model(graph, **{'opset_imports': [helper.make_opsetid('', 13)]})
    sess_options = ort.SessionOptions()
    sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
    sess_options.optimized_model_filepath = './onnxrt_test/optimized_model.onnx'
    # Creating the session triggers writing the optimized model file.
    session = ort.InferenceSession(model.SerializeToString(), sess_options, providers=ort.get_available_providers())
    tmp_model = onnx.load(sess_options.optimized_model_filepath)
    q_config = {'Conv': self.static_q_config, 'Relu': self.static_q_config}
    quantize_params = {'A': [np.uint8(10.0), np.float32(0)], 'B': [np.uint8(10.0), np.float32(0)], 'C': [np.uint8(10.0), np.float32(0)], 'D': [np.uint8(10.0), np.float32(0)]}
    quantizable_op_types = ['Conv', 'Relu']
    # EXTENDED-optimized graph: expect 4 nodes (qlinear) / 7 nodes (qdq).
    q_model = self.qlinear_test(tmp_model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(len(q_model.model.graph.node), 4)
    q_model = self.qdq_test(tmp_model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(len(q_model.model.graph.node), 7)
    # BASIC-optimized graph: one extra node in both modes.
    sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_BASIC
    session = ort.InferenceSession(model.SerializeToString(), sess_options, providers=ort.get_available_providers())
    tmp_model = onnx.load(sess_options.optimized_model_filepath)
    q_model = self.qlinear_test(tmp_model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(len(q_model.model.graph.node), 5)
    q_model = self.qdq_test(tmp_model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(len(q_model.model.graph.node), 8)
    # Second variant: Relu output feeds an Add, so Relu is graph-internal.
    graph = helper.make_graph([conv_node, relu_node, add_node], 'test_graph_2', [A, B, E], [F])
    model = helper.make_model(graph, **{'opset_imports': [helper.make_opsetid('', 13)]})
    sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_BASIC
    session = ort.InferenceSession(model.SerializeToString(), sess_options, providers=ort.get_available_providers())
    tmp_model = onnx.load(sess_options.optimized_model_filepath)
    q_model = self.qlinear_test(tmp_model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(len(q_model.model.graph.node), 5)
    q_model = self.qdq_test(tmp_model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(len(q_model.model.graph.node), 8)
def test_clip(self):
    """Quantize Conv -> Clip after ORT EXTENDED graph optimization.

    As in test_relu, the model is first run through an onnxruntime session
    with `optimized_model_filepath` set so the optimized graph is written to
    disk, then reloaded and quantized; Q/DQ node counts are asserted.
    """
    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 3, 3])
    D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1, 1, 5, 5])
    conv_node = onnx.helper.make_node('Conv', ['A', 'B'], ['C'], name='Conv', kernel_shape=[3, 3], pads=[1, 1, 1, 1])
    clip_node = onnx.helper.make_node('Clip', ['C'], ['D'], name='Clip')
    graph = helper.make_graph([conv_node, clip_node], 'test_graph_1', [A, B], [D])
    model = helper.make_model(graph, **{'opset_imports': [helper.make_opsetid('', 13)]})
    sess_options = ort.SessionOptions()
    sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
    sess_options.optimized_model_filepath = './onnxrt_test/optimized_model.onnx'
    # Creating the session triggers writing the optimized model file.
    session = ort.InferenceSession(model.SerializeToString(), sess_options, providers=ort.get_available_providers())
    model = onnx.load(sess_options.optimized_model_filepath)
    q_config = {'Conv': self.static_q_config, 'Clip': self.static_q_config}
    quantize_params = {'A': [np.uint8(10.0), np.float32(0)], 'B': [np.uint8(10.0), np.float32(0)], 'C': [np.uint8(10.0), np.float32(0)], 'D': [np.uint8(10.0), np.float32(0)]}
    quantizable_op_types = ['Conv', 'Clip']
    q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 2)
    q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 3)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 3)
def test_activation(self):
    """LeakyRelu/Sigmoid receive Q/DQ pairs; Relu produces none in any mode."""

    def dequant_count(m):
        return Counter(n.op_type for n in m.model.graph.node)['DequantizeLinear']

    quantize_params = {'A': [np.uint8(10.0), np.float32(0)], 'B': [np.uint8(10.0), np.float32(0)]}

    for op in ('LeakyRelu', 'Sigmoid'):
        B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 10])
        A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 10])
        node = onnx.helper.make_node(op, ['A'], ['B'], name=op)
        model = helper.make_model(helper.make_graph([node], 'test_graph_1', [A], [B]))
        q_config = {op: self.static_q_config}

        q_model = self.qlinear_test(model, q_config, quantize_params, [op])
        self.assertEqual(dequant_count(q_model), 1)
        q_model = self.qdq_test(model, q_config, quantize_params, [op])
        self.assertEqual(dequant_count(q_model), 2)

        # Same graph, but with 'A' additionally supplied as an initializer.
        a_value = np.random.randn(1, 10).astype(np.float32)
        A_init = helper.make_tensor('A', TensorProto.FLOAT, [1, 10], a_value.reshape(10).tolist())
        model = helper.make_model(helper.make_graph([node], 'test_graph_1', [A], [B], [A_init]))

        q_model = self.qlinear_test(model, q_config, quantize_params, [op])
        self.assertEqual(dequant_count(q_model), 1)
        q_model = self.qdq_test(model, q_config, quantize_params, [op])
        self.assertEqual(dequant_count(q_model), 2)
        # Without calibration parameters nothing is quantized.
        q_model = self.qlinear_test(model, q_config, {}, [op])
        self.assertEqual(dequant_count(q_model), 0)
        q_model = self.qdq_test(model, q_config, {}, [op])
        self.assertEqual(dequant_count(q_model), 0)

    for op in ('Relu',):
        B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 10])
        A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 10])
        node = onnx.helper.make_node(op, ['A'], ['B'], name=op)
        model = helper.make_model(helper.make_graph([node], 'test_graph_1', [A], [B]))
        q_config = {op: self.static_q_config}

        q_model = self.qlinear_test(model, q_config, quantize_params, [op])
        self.assertEqual(dequant_count(q_model), 0)
        q_model = self.qdq_test(model, q_config, quantize_params, [op])
        self.assertEqual(dequant_count(q_model), 0)

        a_value = np.random.randn(1, 10).astype(np.float32)
        A_init = helper.make_tensor('A', TensorProto.FLOAT, [1, 10], a_value.reshape(10).tolist())
        model = helper.make_model(helper.make_graph([node], 'test_graph_1', [A], [B], [A_init]))

        q_model = self.qlinear_test(model, q_config, quantize_params, [op])
        self.assertEqual(dequant_count(q_model), 0)
        q_model = self.qdq_test(model, q_config, quantize_params, [op])
        self.assertEqual(dequant_count(q_model), 0)
        q_model = self.qlinear_test(model, q_config, {}, [op])
        self.assertEqual(dequant_count(q_model), 0)
        q_model = self.qdq_test(model, q_config, {}, [op])
        self.assertEqual(dequant_count(q_model), 0)
def test_pooling(self):
    """Quantize MaxPool and GlobalAveragePool, standalone and after a Conv.

    Standalone pools are checked across opset 12 and 13; the Conv-fed
    variants additionally assert exact Q/DQ node counts.
    """
    op = 'MaxPool'
    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 5, 5, 1])
    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 5, 5, 1])
    node = onnx.helper.make_node(op, ['A'], ['B'], name=op, kernel_shape=[3, 3], pads=[1, 1, 1, 1])
    graph = helper.make_graph([node], 'test_graph_1', [A], [B])
    q_config = {op: self.static_q_config}
    quantize_params = {'A': [np.uint8(10.0), np.float32(0)], 'B': [np.uint8(10.0), np.float32(0)]}
    quantizable_op_types = [op]
    for opset_version in [12, 13]:
        opset = onnx.OperatorSetIdProto()
        opset.version = opset_version
        model = helper.make_model(graph, opset_imports=[opset])
        self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
        self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
    # Conv -> MaxPool: count Q/DQ nodes in both modes.
    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 3, 3])
    D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1, 1, 5, 5])
    conv_node = onnx.helper.make_node('Conv', ['A', 'B'], ['C'], name='Conv', kernel_shape=[3, 3], pads=[1, 1, 1, 1])
    pool_node = onnx.helper.make_node(op, ['C'], ['D'], name=op)
    graph = helper.make_graph([conv_node, pool_node], 'test_graph_1', [A, B], [D])
    model = helper.make_model(graph)
    q_config = {'Conv': self.static_q_config, op: self.static_q_config}
    quantize_params = {'A': [np.uint8(10.0), np.float32(0)], 'B': [np.uint8(10.0), np.float32(0)], 'C': [np.uint8(10.0), np.float32(0)], 'D': [np.uint8(10.0), np.float32(0)]}
    quantizable_op_types = ['Conv', op]
    q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 2)
    q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 4)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 4)
    op = 'GlobalAveragePool'
    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 5, 1, 1])
    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 5, 5, 1])
    # Fix: GlobalAveragePool defines no attributes in the ONNX spec; the
    # previous kernel_shape/pads kwargs were invalid and are dropped.
    node = onnx.helper.make_node(op, ['A'], ['B'], name=op)
    graph = helper.make_graph([node], 'test_graph_1', [A], [B])
    q_config = {op: self.static_q_config}
    quantize_params = {'A': [np.uint8(10.0), np.float32(0)], 'B': [np.uint8(10.0), np.float32(0)]}
    quantizable_op_types = [op]
    for opset_version in [12, 13]:
        opset = onnx.OperatorSetIdProto()
        opset.version = opset_version
        model = helper.make_model(graph, opset_imports=[opset])
        q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 1)
        q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 2)
        self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 2)
    # Conv -> GlobalAveragePool: count Q/DQ nodes in both modes.
    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 3, 3])
    D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1, 1, 1, 1])
    conv_node = onnx.helper.make_node('Conv', ['A', 'B'], ['C'], name='Conv', kernel_shape=[3, 3], pads=[1, 1, 1, 1])
    pool_node = onnx.helper.make_node(op, ['C'], ['D'], name=op)
    graph = helper.make_graph([conv_node, pool_node], 'test_graph_1', [A, B], [D])
    model = helper.make_model(graph)
    q_config = {'Conv': self.static_q_config, op: self.static_q_config}
    quantize_params = {'A': [np.uint8(10.0), np.float32(0)], 'B': [np.uint8(10.0), np.float32(0)], 'C': [np.uint8(10.0), np.float32(0)], 'D': [np.uint8(10.0), np.float32(0)]}
    quantizable_op_types = ['Conv', op]
    q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 2)
    q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 4)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 4)
def test_exclude_node(self):
    """An op configured as 'fp32' (MaxPool here) is excluded from quantization."""

    def count(m, op_type):
        return Counter(n.op_type for n in m.model.graph.node)[op_type]

    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 5, 5, 1])
    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [3, 3, 1, 1])
    D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1, 1, 3, 3])
    conv_node = onnx.helper.make_node('Conv', ['A', 'B'], ['C'], name='Conv', kernel_shape=[3, 3], pads=[1, 1, 1, 1])
    pool_node = onnx.helper.make_node('MaxPool', ['C'], ['D'], name='MaxPool')
    model = helper.make_model(helper.make_graph([conv_node, pool_node], 'test_graph_1', [A, B], [D]))

    q_config = {'Conv': self.static_q_config, 'MaxPool': 'fp32'}
    quantize_params = {name: [np.uint8(10.0), np.float32(0)] for name in 'ABCD'}
    op_types = ['Conv', 'MaxPool']

    q_model = self.qlinear_test(model, q_config, quantize_params, op_types)
    q_model.save('int8.onnx')
    self.assertEqual(count(q_model, 'QuantizeLinear'), 2)

    q_model = self.qdq_test(model, q_config, quantize_params, op_types)
    self.assertEqual(count(q_model, 'QuantizeLinear'), 3)
def test_more_direct8bit_nodes(self):
    """Direct 8-bit pass-through ops (Flatten/Abs/Sign/Shrink) between MatMuls.

    Builds MatMul -> Flatten -> Abs -> Sign -> Shrink -> MatMul, quantizes in
    qlinear and qdq modes, asserts the Q/DQ node counts, and verifies each
    quantized model loads into an onnxruntime InferenceSession.
    """
    input_tensor = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 32])
    matmul1_weight = helper.make_tensor('matmul1_weight', TensorProto.FLOAT, [32, 64], np.random.random((32, 64)).reshape(2048).tolist())
    matmul1_output = helper.make_tensor_value_info('matmul1_output', TensorProto.FLOAT, [1, 64])
    matmul1_node = onnx.helper.make_node('MatMul', ['input', 'matmul1_weight'], ['matmul1_output'], name='Matmul_0')
    flatten_output = helper.make_tensor_value_info('flatten_output', TensorProto.FLOAT, [1, 64])
    flatten_node = onnx.helper.make_node('Flatten', inputs=['matmul1_output'], outputs=['flatten_output'], axis=1, name='Flatten_1')
    abs_output = helper.make_tensor_value_info('abs_output', TensorProto.FLOAT, [1, 64])
    abs_node = onnx.helper.make_node('Abs', inputs=['flatten_output'], outputs=['abs_output'], name='Abs_2')
    sign_output = helper.make_tensor_value_info('sign_output', TensorProto.FLOAT, [1, 64])
    sign_node = onnx.helper.make_node('Sign', inputs=['abs_output'], outputs=['sign_output'], name='Sign_3')
    shrink_output = helper.make_tensor_value_info('shrink_output', TensorProto.FLOAT, [1, 64])
    shrink_node = onnx.helper.make_node('Shrink', inputs=['sign_output'], outputs=['shrink_output'], name='Shrink_4')
    matmul2_weight = helper.make_tensor('matmul2_weight', TensorProto.FLOAT, [64, 2], np.random.random((64, 2)).reshape(128).tolist())
    matmul2_output = helper.make_tensor_value_info('matmul2_output', TensorProto.FLOAT, [1, 2])
    matmul2_node = onnx.helper.make_node('MatMul', ['shrink_output', 'matmul2_weight'], ['matmul2_output'], name='Matmul_5')
    initializers = [matmul1_weight, matmul2_weight]
    graph = helper.make_graph([matmul1_node, flatten_node, abs_node, sign_node, shrink_node, matmul2_node], 'TestMoreDirect8_test_model', [input_tensor], [matmul2_output], initializer=initializers)
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
    model.ir_version = 7
    # Every node in the chain is configured for static quantization.
    q_config = {'Matmul_0': self.static_q_config, 'Flatten_1': self.static_q_config, 'Abs_2': self.static_q_config, 'Sign_3': self.static_q_config, 'Shrink_4': self.static_q_config, 'Matmul_5': self.static_q_config}
    quantize_params = {'input': [np.uint8(10.0), np.float32(0)], 'matmul1_weight': [np.uint8(10.0), np.float32(0)], 'matmul1_output': [np.uint8(10.0), np.float32(0)], 'flatten_output': [np.uint8(10.0), np.float32(0)], 'abs_output': [np.uint8(10.0), np.float32(0)], 'sign_output': [np.uint8(10.0), np.float32(0)], 'shrink_output': [np.uint8(10.0), np.float32(0)], 'matmul2_weight': [np.uint8(10.0), np.float32(0)], 'matmul2_output': [np.uint8(10.0), np.float32(0)]}
    quantizable_op_types = ['MatMul', 'Flatten', 'Abs', 'Sign', 'Shrink']
    # qlinear mode: whole chain collapses to a single Q/DQ pair.
    q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 1)
    # The quantized model must be loadable by onnxruntime.
    session = ort.InferenceSession(q_model.model.SerializeToString(), providers=['CPUExecutionProvider'])
    self.assertIsNotNone(session)
    q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
    # NOTE(review): 'qdq.onnx' is written but never removed — confirm cleanup
    # is handled elsewhere (e.g. tearDown).
    q_model.save('qdq.onnx')
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 9)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 7)
    session = ort.InferenceSession(q_model.model.SerializeToString(), providers=['CPUExecutionProvider'])
    self.assertIsNotNone(session)
def test_expand(self):
    """Expand sandwiched between two MatMuls: Q/DQ counts and ORT loadability.

    qlinear mode should leave a single Q/DQ pair; qdq mode inserts a pair per
    quantized tensor edge.  Both results must load into an onnxruntime
    InferenceSession.
    """
    input_tensor = helper.make_tensor_value_info('input', TensorProto.FLOAT, [3, 2])
    matmul1_weight = helper.make_tensor('matmul1_weight', TensorProto.FLOAT, [2, 1], np.random.random((2, 1)).reshape(2).tolist())
    matmul1_output = helper.make_tensor_value_info('matmul1_output', TensorProto.FLOAT, [3, 1])
    matmul1_node = onnx.helper.make_node('MatMul', ['input', 'matmul1_weight'], ['matmul1_output'], name='Matmul_0')
    # Expand broadcasts the [3, 1] MatMul output to [3, 4].
    expand_new_shape = helper.make_tensor('expand_new_shape', TensorProto.INT64, [2], [3, 4])
    expand_output = helper.make_tensor_value_info('expand_output', TensorProto.FLOAT, [3, 4])
    expand_node = onnx.helper.make_node('Expand', ['matmul1_output', 'expand_new_shape'], ['expand_output'], name='Expand_1')
    matmul2_weight = helper.make_tensor('matmul2_weight', TensorProto.FLOAT, [4, 2], np.random.random((4, 2)).reshape(8).tolist())
    matmul2_output = helper.make_tensor_value_info('matmul2_output', TensorProto.FLOAT, [3, 2])
    matmul2_node = onnx.helper.make_node('MatMul', ['expand_output', 'matmul2_weight'], ['matmul2_output'], name='Matmul_2')
    initializers = [matmul1_weight, matmul2_weight, expand_new_shape]
    graph = helper.make_graph([matmul1_node, expand_node, matmul2_node], 'TestExpand_test_model', [input_tensor], [matmul2_output], initializer=initializers)
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
    model.ir_version = 7
    q_config = {'Matmul_0': self.static_q_config, 'Expand_1': self.static_q_config, 'Matmul_2': self.static_q_config}
    quantize_params = {'input': [np.uint8(10.0), np.float32(0)], 'matmul1_weight': [np.uint8(10.0), np.float32(0)], 'matmul1_output': [np.uint8(10.0), np.float32(0)], 'matmul2_weight': [np.uint8(10.0), np.float32(0)], 'matmul2_output': [np.uint8(10.0), np.float32(0)], 'expand_output': [np.uint8(10.0), np.float32(0)]}
    quantizable_op_types = ['MatMul', 'Expand']
    q_model = self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 1)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 1)
    # The quantized model must be loadable by onnxruntime.
    session = ort.InferenceSession(q_model.model.SerializeToString(), providers=['CPUExecutionProvider'])
    self.assertIsNotNone(session)
    q_model = self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['DequantizeLinear'], 6)
    self.assertEqual(Counter([node.op_type for node in q_model.model.graph.node])['QuantizeLinear'], 4)
    session = ort.InferenceSession(q_model.model.SerializeToString(), providers=['CPUExecutionProvider'])
    self.assertIsNotNone(session)
def test_slice(self):
    """Check QLinear/QDQ quantization of a Slice sandwiched between two MatMuls."""
    inp = helper.make_tensor_value_info('input', TensorProto.FLOAT, [5, 4, 1])
    w1 = helper.make_tensor('matmul1_weight', TensorProto.FLOAT, [1, 3],
                            np.random.random((1, 3)).reshape(3).tolist())
    mm1_info = helper.make_tensor_value_info('matmul1_output', TensorProto.FLOAT, [5, 4, 3])
    mm1 = onnx.helper.make_node('MatMul', ['input', 'matmul1_weight'], ['matmul1_output'], name='Matmul_0')
    starts = helper.make_tensor('slice_starts', TensorProto.INT64, [2], [0, 0])
    ends = helper.make_tensor('slice_ends', TensorProto.INT64, [2], [3, 4])
    axes = helper.make_tensor('slice_axes', TensorProto.INT64, [2], [0, 1])
    steps = helper.make_tensor('slice_steps', TensorProto.INT64, [2], [1, 1])
    slice_info = helper.make_tensor_value_info('slice_output', TensorProto.FLOAT, [3, 4, 3])
    slice_node = onnx.helper.make_node(
        'Slice',
        ['matmul1_output', 'slice_starts', 'slice_ends', 'slice_axes', 'slice_steps'],
        ['slice_output'], name='Slice_1')
    w2 = helper.make_tensor('matmul2_weight', TensorProto.FLOAT, [3, 2],
                            np.random.random((3, 2)).reshape(6).tolist())
    mm2_info = helper.make_tensor_value_info('matmul2_output', TensorProto.FLOAT, [3, 4, 2])
    mm2 = onnx.helper.make_node('MatMul', ['slice_output', 'matmul2_weight'], ['matmul2_output'], name='Matmul_2')
    graph = helper.make_graph([mm1, slice_node, mm2], 'TestSlice_test_model', [inp], [mm2_info],
                              initializer=[w1, w2, starts, ends, axes, steps])
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
    model.ir_version = 7
    q_config = {name: self.static_q_config for name in ('Matmul_0', 'Slice_1', 'Matmul_2')}
    quantize_params = {t: [np.uint8(10.0), np.float32(0)]
                       for t in ('input', 'matmul1_weight', 'matmul1_output',
                                 'matmul2_weight', 'matmul2_output', 'slice_output')}
    op_types = ['MatMul', 'Slice']

    def count(qm, op):
        # Number of nodes of the given op type in the quantized graph.
        return Counter(node.op_type for node in qm.model.graph.node)[op]

    q_model = self.qlinear_test(model, q_config, quantize_params, op_types)
    self.assertEqual(count(q_model, 'DequantizeLinear'), 1)
    self.assertEqual(count(q_model, 'QuantizeLinear'), 1)
    self.assertIsNotNone(ort.InferenceSession(q_model.model.SerializeToString(),
                                              providers=['CPUExecutionProvider']))
    q_model = self.qdq_test(model, q_config, quantize_params, op_types)
    self.assertEqual(count(q_model, 'DequantizeLinear'), 6)
    self.assertEqual(count(q_model, 'QuantizeLinear'), 4)
    self.assertIsNotNone(ort.InferenceSession(q_model.model.SerializeToString(),
                                              providers=['CPUExecutionProvider']))
def test_mod(self):
    """Check QLinear/QDQ quantization of a Mod fed by two MatMuls (opset 14)."""
    inp = helper.make_tensor_value_info('input', TensorProto.FLOAT, [2, 3])
    w1 = helper.make_tensor('matmul1_weight', TensorProto.FLOAT, [3, 4],
                            np.random.random((3, 4)).reshape(12).tolist())
    mm1_info = helper.make_tensor_value_info('matmul1_output', TensorProto.FLOAT, [2, 4])
    mm1 = onnx.helper.make_node('MatMul', ['input', 'matmul1_weight'], ['matmul1_output'], name='Matmul_0')
    w2 = helper.make_tensor('matmul2_weight', TensorProto.FLOAT, [3, 4],
                            np.random.random((3, 4)).reshape(12).tolist())
    mm2_info = helper.make_tensor_value_info('matmul2_output', TensorProto.FLOAT, [2, 4])
    mm2 = onnx.helper.make_node('MatMul', ['input', 'matmul2_weight'], ['matmul2_output'], name='Matmul_1')
    mod_info = helper.make_tensor_value_info('mod_output', TensorProto.FLOAT, [2, 4])
    mod_node = onnx.helper.make_node('Mod', ['matmul1_output', 'matmul2_output'], ['mod_output'], name='Mod_2')
    w3 = helper.make_tensor('matmul3_weight', TensorProto.FLOAT, [4, 2],
                            np.random.random((4, 2)).reshape(8).tolist())
    mm3_info = helper.make_tensor_value_info('matmul3_output', TensorProto.FLOAT, [2, 2])
    mm3 = onnx.helper.make_node('MatMul', ['mod_output', 'matmul3_weight'], ['matmul3_output'], name='Matmul_3')
    graph = helper.make_graph([mm1, mm2, mod_node, mm3], 'TestMod_test_model', [inp], [mm3_info],
                              initializer=[w1, w2, w3])
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 14)])
    model.ir_version = 7
    q_config = {name: self.static_q_config
                for name in ('Matmul_0', 'Matmul_1', 'Mod_2', 'Matmul_3')}
    quantize_params = {t: [np.uint8(10.0), np.float32(0)]
                       for t in ('input', 'matmul1_weight', 'matmul1_output',
                                 'matmul2_weight', 'matmul2_output', 'mod_output',
                                 'matmul3_weight', 'matmul3_output')}
    op_types = ['MatMul', 'Mod']

    def count(qm, op):
        # Number of nodes of the given op type in the quantized graph.
        return Counter(node.op_type for node in qm.model.graph.node)[op]

    q_model = self.qlinear_test(model, q_config, quantize_params, op_types)
    self.assertEqual(count(q_model, 'DequantizeLinear'), 1)
    self.assertEqual(count(q_model, 'QuantizeLinear'), 1)
    self.assertIsNotNone(ort.InferenceSession(q_model.model.SerializeToString(),
                                              providers=['CPUExecutionProvider']))
    q_model = self.qdq_test(model, q_config, quantize_params, op_types)
    self.assertEqual(count(q_model, 'DequantizeLinear'), 8)
    self.assertEqual(count(q_model, 'QuantizeLinear'), 5)
    self.assertIsNotNone(ort.InferenceSession(q_model.model.SerializeToString(),
                                              providers=['CPUExecutionProvider']))
def test_reducemin_reducemax(self):
    """Check QLinear/QDQ quantization of ReduceMin/ReduceMax between MatMuls.

    The two halves of the original test were structurally identical, so both
    cases are driven through one loop; graphs, configs and expected counts
    are unchanged.
    """
    cases = (
        ('ReduceMin', 'Reducemin_1', 'reducemin_output', 'TestReduceMin_test_model'),
        ('ReduceMax', 'Reducemax_1', 'reducemax_output', 'TestReduceMax_test_model'),
    )
    for op_type, node_name, out_name, graph_name in cases:
        inp = helper.make_tensor_value_info('input', TensorProto.FLOAT, [3, 2, 3])
        w1 = helper.make_tensor('matmul1_weight', TensorProto.FLOAT, [3, 2],
                                np.random.random((3, 2)).reshape(6).tolist())
        mm1_info = helper.make_tensor_value_info('matmul1_output', TensorProto.FLOAT, [3, 2, 2])
        mm1 = onnx.helper.make_node('MatMul', ['input', 'matmul1_weight'], ['matmul1_output'], name='Matmul_0')
        red_info = helper.make_tensor_value_info(out_name, TensorProto.FLOAT, [3, 1, 2])
        red = onnx.helper.make_node(op_type, inputs=['matmul1_output'], outputs=[out_name],
                                    axes=[1], keepdims=1, name=node_name)
        w2 = helper.make_tensor('matmul2_weight', TensorProto.FLOAT, [2, 3],
                                np.random.random((2, 3)).reshape(6).tolist())
        mm2_info = helper.make_tensor_value_info('matmul2_output', TensorProto.FLOAT, [3, 1, 3])
        mm2 = onnx.helper.make_node('MatMul', [out_name, 'matmul2_weight'], ['matmul2_output'], name='Matmul_2')
        graph = helper.make_graph([mm1, red, mm2], graph_name, [inp], [mm2_info],
                                  initializer=[w1, w2])
        model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
        model.ir_version = 7
        q_config = {'Matmul_0': self.static_q_config,
                    node_name: self.static_q_config,
                    'Matmul_2': self.static_q_config}
        quantize_params = {t: [np.uint8(10.0), np.float32(0)]
                           for t in ('input', 'matmul1_weight', 'matmul1_output',
                                     out_name, 'matmul2_weight', 'matmul2_output')}
        op_types = ['MatMul', op_type]

        def count(qm, op):
            # Number of nodes of the given op type in the quantized graph.
            return Counter(node.op_type for node in qm.model.graph.node)[op]

        q_model = self.qlinear_test(model, q_config, quantize_params, op_types)
        self.assertEqual(count(q_model, 'DequantizeLinear'), 1)
        self.assertEqual(count(q_model, 'QuantizeLinear'), 1)
        self.assertIsNotNone(ort.InferenceSession(q_model.model.SerializeToString(),
                                                  providers=['CPUExecutionProvider']))
        q_model = self.qdq_test(model, q_config, quantize_params, op_types)
        self.assertEqual(count(q_model, 'DequantizeLinear'), 6)
        self.assertEqual(count(q_model, 'QuantizeLinear'), 4)
        self.assertIsNotNone(ort.InferenceSession(q_model.model.SerializeToString(),
                                                  providers=['CPUExecutionProvider']))
def test_tile(self):
    """Check QLinear/QDQ quantization of a Tile sandwiched between two MatMuls."""
    inp = helper.make_tensor_value_info('input', TensorProto.FLOAT, [2, 3, 4, 1])
    w1 = helper.make_tensor('matmul1_weight', TensorProto.FLOAT, [1, 5],
                            np.random.random((1, 5)).reshape(5).tolist())
    mm1_info = helper.make_tensor_value_info('matmul1_output', TensorProto.FLOAT, [2, 3, 4, 5])
    mm1 = onnx.helper.make_node('MatMul', ['input', 'matmul1_weight'], ['matmul1_output'], name='Matmul_0')
    repeats = helper.make_tensor('repeats', TensorProto.INT64, [4], [2, 2, 2, 2])
    tile_info = helper.make_tensor_value_info('tile_output', TensorProto.FLOAT, [4, 6, 8, 10])
    tile_node = onnx.helper.make_node('Tile', ['matmul1_output', 'repeats'], ['tile_output'], name='Tile_1')
    w2 = helper.make_tensor('matmul2_weight', TensorProto.FLOAT, [10, 1],
                            np.random.random((10, 1)).reshape(10).tolist())
    mm2_info = helper.make_tensor_value_info('matmul2_output', TensorProto.FLOAT, [4, 6, 8, 1])
    mm2 = onnx.helper.make_node('MatMul', ['tile_output', 'matmul2_weight'], ['matmul2_output'], name='Matmul_2')
    graph = helper.make_graph([mm1, tile_node, mm2], 'TestTile_test_model', [inp], [mm2_info],
                              initializer=[w1, w2, repeats])
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
    model.ir_version = 7
    q_config = {name: self.static_q_config for name in ('Matmul_0', 'Tile_1', 'Matmul_2')}
    quantize_params = {t: [np.uint8(10.0), np.float32(0)]
                       for t in ('input', 'matmul1_weight', 'matmul1_output',
                                 'matmul2_weight', 'matmul2_output', 'tile_output')}
    op_types = ['MatMul', 'Tile']

    def count(qm, op):
        # Number of nodes of the given op type in the quantized graph.
        return Counter(node.op_type for node in qm.model.graph.node)[op]

    q_model = self.qlinear_test(model, q_config, quantize_params, op_types)
    self.assertEqual(count(q_model, 'DequantizeLinear'), 1)
    self.assertEqual(count(q_model, 'QuantizeLinear'), 1)
    self.assertIsNotNone(ort.InferenceSession(q_model.model.SerializeToString(),
                                              providers=['CPUExecutionProvider']))
    q_model = self.qdq_test(model, q_config, quantize_params, op_types)
    self.assertEqual(count(q_model, 'DequantizeLinear'), 6)
    self.assertEqual(count(q_model, 'QuantizeLinear'), 4)
    self.assertIsNotNone(ort.InferenceSession(q_model.model.SerializeToString(),
                                              providers=['CPUExecutionProvider']))
def test_centercroppad(self):
    """Check QLinear/QDQ quantization of CenterCropPad (opset 18, ir 8)."""
    inp = helper.make_tensor_value_info('input', TensorProto.FLOAT, [20, 10, 1])
    w1 = helper.make_tensor('matmul1_weight', TensorProto.FLOAT, [1, 3],
                            np.random.random((1, 3)).reshape(3).tolist())
    mm1_info = helper.make_tensor_value_info('matmul1_output', TensorProto.FLOAT, [20, 10, 3])
    mm1 = onnx.helper.make_node('MatMul', ['input', 'matmul1_weight'], ['matmul1_output'], name='Matmul_0')
    ccp_info = helper.make_tensor_value_info('centercroppad_output', TensorProto.FLOAT, [10, 7, 3])
    target_shape = helper.make_tensor('shape', TensorProto.INT64, [3], [10, 7, 3])
    ccp = onnx.helper.make_node('CenterCropPad', ['matmul1_output', 'shape'],
                                ['centercroppad_output'], name='Centercroppad_1')
    w2 = helper.make_tensor('matmul2_weight', TensorProto.FLOAT, [3, 1],
                            np.random.random((3, 1)).reshape(3).tolist())
    mm2_info = helper.make_tensor_value_info('matmul2_output', TensorProto.FLOAT, [10, 7, 1])
    mm2 = onnx.helper.make_node('MatMul', ['centercroppad_output', 'matmul2_weight'], ['matmul2_output'], name='Matmul_2')
    graph = helper.make_graph([mm1, ccp, mm2], 'TestCenterCropPad_test_model', [inp], [mm2_info],
                              initializer=[w1, target_shape, w2])
    # CenterCropPad only exists from opset 18 onwards.
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 18)])
    model.ir_version = 8
    q_config = {name: self.static_q_config
                for name in ('Matmul_0', 'Centercroppad_1', 'Matmul_2')}
    quantize_params = {t: [np.uint8(10.0), np.float32(0)]
                       for t in ('input', 'matmul1_weight', 'matmul1_output',
                                 'matmul2_weight', 'matmul2_output', 'centercroppad_output')}
    op_types = ['MatMul', 'CenterCropPad']

    def count(qm, op):
        # Number of nodes of the given op type in the quantized graph.
        return Counter(node.op_type for node in qm.model.graph.node)[op]

    q_model = self.qlinear_test(model, q_config, quantize_params, op_types)
    self.assertEqual(count(q_model, 'DequantizeLinear'), 1)
    self.assertEqual(count(q_model, 'QuantizeLinear'), 1)
    self.assertIsNotNone(ort.InferenceSession(q_model.model.SerializeToString(),
                                              providers=['CPUExecutionProvider']))
    q_model = self.qdq_test(model, q_config, quantize_params, op_types)
    self.assertEqual(count(q_model, 'DequantizeLinear'), 6)
    self.assertEqual(count(q_model, 'QuantizeLinear'), 4)
    self.assertIsNotNone(ort.InferenceSession(q_model.model.SerializeToString(),
                                              providers=['CPUExecutionProvider']))
def test_gathernd(self):
    """Check QLinear/QDQ quantization of a GatherND between two MatMuls."""
    inp = helper.make_tensor_value_info('input', TensorProto.FLOAT, [2, 2, 1])
    w1 = helper.make_tensor('matmul1_weight', TensorProto.FLOAT, [1, 2],
                            np.random.random((1, 2)).reshape(2).tolist())
    mm1_info = helper.make_tensor_value_info('matmul1_output', TensorProto.FLOAT, [2, 2, 2])
    mm1 = onnx.helper.make_node('MatMul', ['input', 'matmul1_weight'], ['matmul1_output'], name='Matmul_0')
    gnd_info = helper.make_tensor_value_info('gathernd_output', TensorProto.FLOAT, [2, 1, 2])
    indices = helper.make_tensor('indices', TensorProto.INT64, [2, 1, 2], [0, 1, 1, 0])
    gnd = onnx.helper.make_node('GatherND', ['matmul1_output', 'indices'], ['gathernd_output'], name='Gathernd_1')
    w2 = helper.make_tensor('matmul2_weight', TensorProto.FLOAT, [2, 1],
                            np.random.random((2, 1)).reshape(2).tolist())
    mm2_info = helper.make_tensor_value_info('matmul2_output', TensorProto.FLOAT, [2, 1, 1])
    mm2 = onnx.helper.make_node('MatMul', ['gathernd_output', 'matmul2_weight'], ['matmul2_output'], name='Matmul_2')
    graph = helper.make_graph([mm1, gnd, mm2], 'TestGatherND_test_model', [inp], [mm2_info],
                              initializer=[w1, indices, w2])
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
    model.ir_version = 7
    q_config = {name: self.static_q_config
                for name in ('Matmul_0', 'Matmul_2', 'Gathernd_1')}
    quantize_params = {t: [np.uint8(10.0), np.float32(0)]
                       for t in ('input', 'matmul1_weight', 'matmul1_output',
                                 'matmul2_weight', 'matmul2_output', 'gathernd_output')}
    op_types = ['MatMul', 'GatherND']

    def count(qm, op):
        # Number of nodes of the given op type in the quantized graph.
        return Counter(node.op_type for node in qm.model.graph.node)[op]

    q_model = self.qlinear_test(model, q_config, quantize_params, op_types)
    self.assertEqual(count(q_model, 'DequantizeLinear'), 1)
    self.assertEqual(count(q_model, 'QuantizeLinear'), 1)
    self.assertIsNotNone(ort.InferenceSession(q_model.model.SerializeToString(),
                                              providers=['CPUExecutionProvider']))
    q_model = self.qdq_test(model, q_config, quantize_params, op_types)
    self.assertEqual(count(q_model, 'DequantizeLinear'), 6)
    self.assertEqual(count(q_model, 'QuantizeLinear'), 4)
    self.assertIsNotNone(ort.InferenceSession(q_model.model.SerializeToString(),
                                              providers=['CPUExecutionProvider']))
def test_gatherelements(self):
    """Check QLinear/QDQ quantization of a GatherElements between two MatMuls."""
    inp = helper.make_tensor_value_info('input', TensorProto.FLOAT, [3, 1])
    w1 = helper.make_tensor('matmul1_weight', TensorProto.FLOAT, [1, 3],
                            np.random.random((1, 3)).reshape(3).tolist())
    mm1_info = helper.make_tensor_value_info('matmul1_output', TensorProto.FLOAT, [3, 3])
    mm1 = onnx.helper.make_node('MatMul', ['input', 'matmul1_weight'], ['matmul1_output'], name='Matmul_0')
    ge_info = helper.make_tensor_value_info('gatherelements_output', TensorProto.FLOAT, [2, 3])
    # Negative indices exercise the wrap-around indexing path.
    indices = helper.make_tensor('indices', TensorProto.INT64, [2, 3], [-1, -2, 0, -2, 0, 0])
    ge = onnx.helper.make_node('GatherElements', ['matmul1_output', 'indices'],
                               ['gatherelements_output'], name='Gatherelements_1')
    w2 = helper.make_tensor('matmul2_weight', TensorProto.FLOAT, [3, 1],
                            np.random.random((3, 1)).reshape(3).tolist())
    mm2_info = helper.make_tensor_value_info('matmul2_output', TensorProto.FLOAT, [2, 1])
    mm2 = onnx.helper.make_node('MatMul', ['gatherelements_output', 'matmul2_weight'], ['matmul2_output'], name='Matmul_2')
    graph = helper.make_graph([mm1, ge, mm2], 'TestGatherElements_test_model', [inp], [mm2_info],
                              initializer=[w1, indices, w2])
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
    model.ir_version = 7
    q_config = {name: self.static_q_config
                for name in ('Matmul_0', 'Matmul_2', 'Gatherelements_1')}
    quantize_params = {t: [np.uint8(10.0), np.float32(0)]
                       for t in ('input', 'matmul1_weight', 'matmul1_output',
                                 'matmul2_weight', 'matmul2_output', 'gatherelements_output')}
    op_types = ['MatMul', 'GatherElements']

    def count(qm, op):
        # Number of nodes of the given op type in the quantized graph.
        return Counter(node.op_type for node in qm.model.graph.node)[op]

    q_model = self.qlinear_test(model, q_config, quantize_params, op_types)
    self.assertEqual(count(q_model, 'DequantizeLinear'), 1)
    self.assertEqual(count(q_model, 'QuantizeLinear'), 1)
    self.assertIsNotNone(ort.InferenceSession(q_model.model.SerializeToString(),
                                              providers=['CPUExecutionProvider']))
    q_model = self.qdq_test(model, q_config, quantize_params, op_types)
    self.assertEqual(count(q_model, 'DequantizeLinear'), 6)
    self.assertEqual(count(q_model, 'QuantizeLinear'), 4)
    self.assertIsNotNone(ort.InferenceSession(q_model.model.SerializeToString(),
                                              providers=['CPUExecutionProvider']))
class BPE(object):
    """Byte-pair-encoding segmenter driven by a learned merge-codes file.

    `codes` is an open file-like object of merge operations (one pair per
    line).  `merges` limits how many operations are loaded (-1 = all),
    `separator` is appended to non-final subword units, `vocab` restricts
    output to known subwords, and `glossaries` lists substrings that must
    never be split.
    """

    def __init__(self, codes, merges=(-1), separator='', vocab=None, glossaries=None):
        codes.seek(0)
        firstline = codes.readline()
        if firstline.startswith('#version:'):
            # Strip trailing ".0" components so e.g. "0.2.0" parses as (0, 2).
            self.version = tuple([int(x) for x in re.sub('(\\.0+)*$', '', firstline.split()[(-1)]).split('.')])
        else:
            self.version = (0, 1)
        codes.seek(0)
        # NOTE(review): when a '#version:' header is present it is re-read here
        # and lands in bpe_codes as the pair ('#version:', '<ver>'); it can never
        # match a real bigram, but it does count toward `merges` — confirm intended.
        self.bpe_codes = [tuple(item.strip().split(' ')) for (n, item) in enumerate(codes)
                          if ((n < merges) or (merges == (-1)))]
        for item in self.bpe_codes:
            if (len(item) != 2):
                sys.stderr.write('Error: invalid line in BPE codes file: {0}\n'.format(' '.join(item)))
                # Fixed: this message carried a no-op .format() call (no placeholders)
                # and a grammar slip ("exist of" -> "consist of").
                sys.stderr.write('The line should consist of exactly two subword units, separated by whitespace\n')
                sys.exit(1)
        # Reversed enumeration keeps the FIRST occurrence's (lowest) index for
        # duplicate pairs, i.e. earlier merges have higher priority.
        self.bpe_codes = {code: i for (i, code) in reversed(list(enumerate(self.bpe_codes)))}
        self.bpe_codes_reverse = {(pair[0] + pair[1]): pair for (pair, i) in self.bpe_codes.items()}
        self.separator = separator
        self.vocab = vocab
        self.glossaries = (glossaries if glossaries else [])
        # Memoizes per-word segmentations across calls.
        self.cache = {}

    def process_line(self, line):
        """Segment one line, preserving its leading/trailing whitespace."""
        out = ''
        leading_whitespace = (len(line) - len(line.lstrip()))
        if leading_whitespace:
            out += line[:leading_whitespace]
        out += self.segment(line)
        trailing_whitespace = (len(line) - len(line.rstrip()))
        if trailing_whitespace:
            out += line[(-trailing_whitespace):]
        return out

    def segment(self, sentence):
        """Segment a whitespace-tokenized sentence into subword units."""
        output = []
        for word in sentence.strip().split(' '):
            if (not word):
                # Collapsed runs of spaces produce empty tokens; skip them.
                continue
            new_word = [out for segment in self._isolate_glossaries(word)
                        for out in encode(segment, self.bpe_codes, self.bpe_codes_reverse,
                                          self.vocab, self.separator, self.version,
                                          self.cache, self.glossaries)]
            # All units except the last get the continuation separator.
            for item in new_word[:(-1)]:
                output.append((item + self.separator))
            output.append(new_word[(-1)])
        return ' '.join(output)

    def _isolate_glossaries(self, word):
        """Split `word` so glossary substrings become standalone segments."""
        word_segments = [word]
        for gloss in self.glossaries:
            word_segments = [out_segments for segment in word_segments
                             for out_segments in isolate_glossary(segment, gloss)]
        return word_segments
def encode_audio(video_path, audio_path, output_path):
    """Mux the audio stream from `audio_path` with the video at `video_path`
    and write the combined result to `output_path` (overwriting if present)."""
    video = ffmpeg.input(video_path)
    audio = ffmpeg.input(audio_path)
    combined = ffmpeg.concat(video, audio, v=1, a=1)
    # strict='-2' permits ffmpeg's experimental AAC encoder.
    combined.output(output_path, strict='-2').run(overwrite_output=True)
def sb_cnn(x, is_training, config):
    """Build the SB-CNN graph: add a trailing channel axis to `x` and delegate
    to `sb_cnn_core`.

    Fixed: the debug print accessed `x.get_shape` without calling it, which
    printed the bound-method object instead of the tensor's shape.
    """
    print('Input: ' + str(x.get_shape()))
    input_layer = tf.expand_dims(x, 3)
    return sb_cnn_core(input_layer, is_training, config)
class PrependTokenDataset(BaseWrapperDataset):
    """Dataset wrapper that prepends one token to every item.

    With `token=None` the wrapper is a pass-through; otherwise every item
    grows by one element and all reported sizes are shifted by 1.
    """

    def __init__(self, dataset, token=None):
        super().__init__(dataset)
        self.token = token
        if (token is not None):
            # Every item gains one token, so shift all sizes by 1.
            self._sizes = (np.array(dataset.sizes) + 1)
        else:
            self._sizes = dataset.sizes

    def __getitem__(self, idx):
        item = self.dataset[idx]
        if (self.token is not None):
            # item.new([...]) keeps the dtype/device of the wrapped tensor.
            item = torch.cat([item.new([self.token]), item])
        return item

    @property
    def sizes(self):
        # Fixed: exposed as a property.  __init__ reads `dataset.sizes` as an
        # attribute, so wrapping one PrependTokenDataset in another would
        # otherwise hand np.array() a bound method instead of the size array.
        return self._sizes

    def num_tokens(self, index):
        """Token count of item `index`, including the prepended token if any."""
        n = self.dataset.num_tokens(index)
        if (self.token is not None):
            n += 1
        return n

    def size(self, index):
        """Size of item `index`, including the prepended token if any."""
        n = self.dataset.size(index)
        if (self.token is not None):
            n += 1
        return n
def extend_schema_with_default(validator_class):
    """Return a validator class that also injects schema defaults while validating.

    Missing object properties that declare a 'default' in the schema are
    filled in on the instance before the stock 'properties' validation runs.
    """
    base_validate_properties = validator_class.VALIDATORS['properties']

    def set_defaults(validator, properties, instance, schema):
        # Only dict-like instances can take defaults; lists are left untouched.
        for name, subschema in properties.items():
            if ('default' in subschema) and (not isinstance(instance, list)):
                instance.setdefault(name, subschema['default'])
        # Delegate to the original validator and forward its errors.
        yield from base_validate_properties(validator, properties, instance, schema)

    return validators.extend(validator_class, {'properties': set_defaults})
def network_weight_zero_init(net: nn.Module):
    """Re-initialize `net` in place with near-zero weights and return it.

    Conv2d/Linear weights become Gaussian noise scaled by 1e-3 / sqrt(fan_in),
    norm layers (BatchNorm2d/GroupNorm) are reset to identity (weight=1,
    bias=0), and all biases are zeroed.
    """
    with torch.no_grad():
        for m in net.modules():
            if isinstance(m, nn.Conv2d):
                device = m.weight.device
                # Fixed: Conv2d weights are laid out (out_channels, in_channels,
                # kH, kW); the original unpacked them swapped, so the scale used
                # fan-out instead of fan-in.
                (out_channels, in_channels, k1, k2) = m.weight.shape
                m.weight[:] = ((torch.randn(m.weight.shape, device=device) / np.sqrt(((k1 * k2) * in_channels))) * 0.001)
                if (hasattr(m, 'bias') and (m.bias is not None)):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                device = m.weight.device
                # Fixed: Linear weights are (out_features, in_features); the
                # original unpacking was swapped here as well.
                (out_channels, in_channels) = m.weight.shape
                m.weight[:] = ((torch.randn(m.weight.shape, device=device) / np.sqrt(in_channels)) * 0.001)
                if (hasattr(m, 'bias') and (m.bias is not None)):
                    nn.init.zeros_(m.bias)
            else:
                # Other module types (containers, activations, ...) are left as-is.
                continue
    return net
class ConvBlock(nn.Module):
    """Two 3x3 conv + batch-norm + ReLU stages followed by configurable pooling."""

    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                               kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels,
                               kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.init_weight()

    def init_weight(self):
        # Apply the project's conv/batch-norm initializers to every layer.
        for conv in (self.conv1, self.conv2):
            init_layer(conv)
        for bn in (self.bn1, self.bn2):
            init_bn(bn)

    def forward(self, input, pool_size=(2, 2), pool_type='avg'):
        """Run both conv stages, then pool with 'max', 'avg' or 'avg+max'."""
        x = F.relu_(self.bn1(self.conv1(input)))
        x = F.relu_(self.bn2(self.conv2(x)))
        if pool_type == 'max':
            return F.max_pool2d(x, kernel_size=pool_size)
        if pool_type == 'avg':
            return F.avg_pool2d(x, kernel_size=pool_size)
        if pool_type == 'avg+max':
            return (F.avg_pool2d(x, kernel_size=pool_size)
                    + F.max_pool2d(x, kernel_size=pool_size))
        raise Exception('Incorrect argument!')
def add_feature_maps(feature_maps, layer_name):
    """Render a conv layer's feature maps as one tiled tf.summary image.

    The first image of the batch is resized to a fixed width, padded with a
    small border, reshaped into a fixed grid per channel count, and logged
    under the last path component of `layer_name`.
    """
    with tf.name_scope(layer_name):
        (_batch, in_h, in_w, n_maps) = np.array(feature_maps.shape).astype(np.int32)
        out_w = 300
        ratio = (out_w / in_w)
        out_h = int((in_h * ratio))
        target_size = tf.convert_to_tensor([out_h, out_w], tf.int32)
        resized = tf.image.resize_bilinear(feature_maps, target_size)
        # Keep only the first image of the batch, then drop the batch axis.
        tile = tf.slice(resized, (0, 0, 0, 0), (1, (-1), (-1), (-1)))
        tile = tf.reshape(tile, (out_h, out_w, n_maps))
        # Pad each tile with a 5px border so the mosaic cells are separated.
        out_w += 5
        out_h += 5
        tile = tf.image.resize_image_with_crop_or_pad(tile, out_h, out_w)
        # Grid layout (columns, rows) chosen by the number of channels.
        map_counts = [1, 32, 64, 128, 256, 512]
        grid_shapes = [(1, 1), (4, 8), (8, 8), (8, 16), (8, 32), (16, 32)]
        (grid_w, grid_h) = grid_shapes[map_counts.index(n_maps)]
        mosaic = tf.reshape(tile, (out_h, out_w, grid_h, grid_w))
        mosaic = tf.transpose(mosaic, (2, 0, 3, 1))
        mosaic = tf.reshape(mosaic, (1, (grid_h * out_h), (grid_w * out_w), 1))
        tf.summary.image(layer_name.split('/')[(-1)], mosaic, max_outputs=16)
def pairwise_concat(nodes):
    """Concatenate every ordered pair of node embeddings feature-wise.

    For n nodes of dimension d, returns an (n*n, 2*d) tensor whose row
    (i*n + j) is [nodes[i]; nodes[j]].
    """
    count = tf.shape(nodes)[0]
    dim = tf.shape(nodes)[1]
    # Left halves: each row repeated `count` times in a block.
    lhs = tf.reshape(tf.tile(nodes, [1, count]), [(count * count), dim])
    # Static feature width is known (40), which tf.reshape cannot infer.
    lhs.set_shape([None, 40])
    # Right halves: the whole node matrix repeated `count` times.
    rhs = tf.tile(nodes, [count, 1])
    return tf.concat([lhs, rhs], axis=(-1))
def test_unique_nodelete4a():
    """MyObject4a: the Python-side reference goes away but the object survives."""
    obj = m.MyObject4a(23)
    assert obj.value == 23
    stats = ConstructorStats.get(m.MyObject4a)
    assert stats.alive() == 1
    del obj
    # Still alive after dropping the Python reference — presumably the holder
    # intentionally never deletes the C++ object (NOTE: confirm against the
    # MyObject4a binding).
    assert stats.alive() == 1
def _transition_counts(sequences):
    """Count label -> next-label transitions over a list of label sequences.

    The final label of each sequence transitions to the sentinel 'Stop'.
    Returns {label: {next_label: count}}.
    """
    stats = {}
    for seq in sequences:
        for (i, label) in enumerate(seq):
            nxt = (seq[(i + 1)] if (i < (len(seq) - 1)) else 'Stop')
            bucket = stats.setdefault(label, {})
            bucket[nxt] = (bucket.get(nxt, 0) + 1)
    return stats

def file_process(dialogue_file='1224_ms.json'):
    """Analyze an annotated dialogue corpus.

    Builds (1) per-user-intent transition statistics, (2) QRFA-label
    transition statistics, (3) agent-act -> next-user-intent counts,
    (4) an utterance -> intent map for agent turns, and (5) a TF-IDF
    matrix over the agent utterances.

    Returns (agenda_list, agenda_stat, agenda_stat_qrfa, intent_map,
    agent_user_intent, tfidf_matrix, tfidf_fit).
    """
    def qrfa_check(item):
        # Map a turn to its QRFA label: user turns keep their own label,
        # agent turns collapse to REQUEST/ANSWER, anything else is MISSING.
        if (item[0] == USER_TAG):
            return item[2]
        if (item[0] == AGENT_TAG):
            return ('REQUEST' if (item[2] in REQUEST) else 'ANSWER')
        return 'MISSING'

    # Fixed: json.load(open(...)) leaked the file handle.
    with open(dialogue_file) as fh:
        diags = json.load(fh)
    # User-intent sequences per dialogue, and their transition statistics.
    agenda_list = [[i[2] for i in diag if (i[0] == USER_TAG)] for (_, diag) in diags.items()]
    agenda_stat = _transition_counts(agenda_list)
    # Same statistics over QRFA labels (all turns, user and agent).
    agenda_list_qrfa = [[qrfa_check(i) for i in diag] for (_, diag) in diags.items()]
    agenda_stat_qrfa = _transition_counts(agenda_list_qrfa)
    # (agent turn, following turn) pairs; the length check drops dialogue-final turns.
    agent_list_pair = [[diag[i:(i + 2)] for i in range(len(diag))
                        if ((diag[i][0] == AGENT_TAG) and (len(diag[i:(i + 2)]) == 2))]
                       for (_, diag) in diags.items()]
    agent_user_intent = {}
    for pairs in agent_list_pair:
        for (first, second) in pairs:
            bucket = agent_user_intent.setdefault(first[2], {})
            bucket[second[2]] = (bucket.get(second[2], 0) + 1)
    # Map each agent utterance text to its intent label.
    intent_map = {}
    for (_, diag) in diags.items():
        for turn in diag:
            if (turn[0] == AGENT_TAG):
                intent_map[turn[1]] = turn[2]
    docs = sorted(intent_map.keys())
    tfidf_vectorizer = TfidfVectorizer()
    tfidf_fit = tfidf_vectorizer.fit(docs)
    tfidf_matrix = tfidf_fit.transform(docs)
    return (agenda_list, agenda_stat, agenda_stat_qrfa, intent_map, agent_user_intent, tfidf_matrix, tfidf_fit)
def save_tflite(tflite_model, path, filename):
    """Write a serialized TFLite flatbuffer to `<path>/<filename>.tflite`.

    Fixed: the original left the file handle open; `with` guarantees it is
    flushed and closed even if the write raises.
    """
    with open(os.path.join(path, (filename + '.tflite')), 'wb') as f:
        f.write(tflite_model)
def load_mlp(our, oai, dst2src=False):
    """Copy the two MLP sub-layers between the OpenAI-style block and ours."""
    layer_pairs = (
        (oai.c_fc, our.dense_h_to_4h),
        (oai.c_proj, our.dense_4h_to_h),
    )
    for src, dst in layer_pairs:
        load_weights(src, dst, dst2src)
def load_folds(options=None, df=None):
    """Return a list of (train_indices, test_indices) splits.

    If `df` carries a 'fold' column, a single split is derived from it
    (rows whose fold equals 'test' form the test set).  Otherwise folds are
    (re)generated: weak folds when `options.folds == 'weak'`, default folds
    otherwise.
    """
    if ((df is not None) and ('fold' in df.columns)):
        i_train = df.query("fold != 'test'").index.to_numpy()
        i_test = df.query("fold == 'test'").index.to_numpy()
        return [(i_train, i_test)]
    print('No folds specified in CSV file')
    # Fixed: `options` defaults to None, so dereferencing options.folds
    # unconditionally raised AttributeError on this path.
    if ((options is not None) and (options.folds == 'weak')):
        return save_weak_folds(df)
    return save_folds(df)
def main(translate_args: Dict[(str, Any)], dataset_args: Dict[(str, Any)]) -> None:
    """Run the few-shot translation pipeline end to end.

    Loads the dataset and its texts, builds few-shot prompts from a separate
    few-shot split, pairs each text with a prompt, and translates the results.

    Args:
        translate_args: translation/model configuration passed to the helpers.
        dataset_args: dataset selection/loading configuration.
    """
    dataset = get_dataset(dataset_args)
    texts = get_texts(dataset, dataset_args)
    few_shot_dataset = get_few_shot_dataset(dataset_args)
    # Four in-context examples per prompt.
    prompts = get_few_shot_prompts(few_shot_dataset, dataset_args, translate_args, shots=4)
    texts_with_prompts = map_texts_with_prompts(texts, prompts, translate_args=translate_args)
    translate_texts(dataset, texts_with_prompts, translate_args, dataset_args)
def main():
    """Entry point for (optionally distributed) object-detection training.

    Parses CLI args, initializes NCCL process groups when launched under a
    distributed launcher, merges the config file and CLI overrides into the
    global ``cfg``, sets up logging, trains, and optionally runs the test step.
    """
    parser = argparse.ArgumentParser(description='PyTorch Object Detection Training')
    parser.add_argument('--config-file', default='', metavar='FILE', help='path to config file', type=str)
    # Rank injected by torch.distributed.launch.
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--skip-test', dest='skip_test', help='Do not test the final model', action='store_true')
    # Remaining positional args become key/value config overrides.
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # WORLD_SIZE is set by the distributed launcher; absent means single process.
    num_gpus = (int(os.environ['WORLD_SIZE']) if ('WORLD_SIZE' in os.environ) else 1)
    args.distributed = (num_gpus > 1)
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        synchronize()
    # Config precedence: defaults < config file < CLI opts; then frozen.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        mkdir(output_dir)
    logger = setup_logger('maskrcnn_benchmark', output_dir, get_rank())
    logger.info('Using {} GPUs'.format(num_gpus))
    logger.info(args)
    logger.info('Collecting env info (might take some time)')
    logger.info(('\n' + collect_env_info()))
    # Echo the raw config file so the run is reproducible from the log alone.
    logger.info('Loaded configuration file {}'.format(args.config_file))
    with open(args.config_file, 'r') as cf:
        config_str = ('\n' + cf.read())
    logger.info(config_str)
    logger.info('Running with config:\n{}'.format(cfg))
    model = train(cfg, args.local_rank, args.distributed)
    if (not args.skip_test):
        run_test(cfg, model, args.distributed)
def update_user(users, user, line_no):
    """Record the first and last line numbers at which *user* appears.

    Updates ``users[user]`` to a ``(min_line, max_line)`` pair, skipping names
    in the module-level ``reserved`` set and purely numeric names.
    """
    if user in reserved:
        return
    # Purely numeric tokens are not real user names; ignore them.
    # (An empty string also counts as all-digit here, matching the original.)
    if all(ch in string.digits for ch in user):
        return
    if user in users:
        first_seen, last_seen = users[user]
        users[user] = (min(first_seen, line_no), max(last_seen, line_no))
    else:
        users[user] = (line_no, line_no)
class TestPAAHead(TestCase):
    """Unit tests for PAAHead loss computation, helpers and inference."""

    def test_paa_head_loss(self):
        """Exercise loss_by_feat with zero and one GT box, the
        levels_to_images helper, and predict_by_feat inference."""

        class mock_skm():
            # Stand-in for sklearn.mixture so the GMM-based anchor
            # reassignment in PAA runs without the sklearn dependency.
            def GaussianMixture(self, *args, **kwargs):
                return self

            def fit(self, loss):
                pass

            def predict(self, loss):
                # Assign every sample to component 0.  np.long was removed in
                # NumPy 1.24 (it was a deprecated alias); use np.int64.
                components = np.zeros_like(loss, dtype=np.int64)
                return components.reshape((- 1))

            def score_samples(self, loss):
                scores = np.random.random(len(loss))
                return scores
        paa_head.skm = mock_skm()
        s = 256
        img_metas = [{'img_shape': (s, s, 3), 'pad_shape': (s, s, 3), 'scale_factor': 1}]
        train_cfg = Config(dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.1, neg_iou_thr=0.1, min_pos_iou=0, ignore_iof_thr=(- 1)), allowed_border=(- 1), pos_weight=(- 1), debug=False))
        paa = PAAHead(num_classes=4, in_channels=1, train_cfg=train_cfg, anchor_generator=dict(type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.3), loss_centerness=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
        # One single-channel feature map per FPN stride.
        feat = [torch.rand(1, 1, (s // feat_size), (s // feat_size)) for feat_size in [4, 8, 16, 32, 64]]
        paa.init_weights()
        (cls_scores, bbox_preds, iou_preds) = paa(feat)
        # Case 1: no ground-truth boxes -> only the classification loss is non-zero.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = paa.loss_by_feat(cls_scores, bbox_preds, iou_preds, [gt_instances], img_metas)
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        empty_iou_loss = empty_gt_losses['loss_iou']
        self.assertGreater(empty_cls_loss.item(), 0, 'cls loss should be non-zero')
        self.assertEqual(empty_box_loss.item(), 0, 'there should be no box loss when there are no true boxes')
        self.assertEqual(empty_iou_loss.item(), 0, 'there should be no box loss when there are no true boxes')
        # Case 2: one ground-truth box -> all three losses are non-zero.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = paa.loss_by_feat(cls_scores, bbox_preds, iou_preds, [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        onegt_iou_loss = one_gt_losses['loss_iou']
        self.assertGreater(onegt_cls_loss.item(), 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0, 'box loss should be non-zero')
        self.assertGreater(onegt_iou_loss.item(), 0, 'box loss should be non-zero')
        # levels_to_images: 5 levels of (n, c, h, w) -> n tensors of (5*h*w, c).
        (n, c, h, w) = (10, 4, 20, 20)
        mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
        results = levels_to_images(mlvl_tensor)
        self.assertEqual(len(results), n)
        self.assertEqual(results[0].size(), (((h * w) * 5), c))
        self.assertTrue(paa.with_score_voting)
        # Inference smoke test on a single-stride head.
        paa = PAAHead(num_classes=4, in_channels=1, train_cfg=train_cfg, anchor_generator=dict(type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8]), loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.3), loss_centerness=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
        cls_scores = [torch.ones(2, 4, 5, 5)]
        bbox_preds = [torch.ones(2, 4, 5, 5)]
        iou_preds = [torch.ones(2, 1, 5, 5)]
        cfg = Config(dict(nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
        rescale = False
        paa.predict_by_feat(cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
def quantize_targ_layer(graph, bit_weight=8, targ_type=None, quant_type='uniform'):
    """Quantize, in place, the weights (and biases) of every layer in *graph*
    whose type is listed in *targ_type*.

    Args:
        graph: mapping of layer index -> layer module.
        bit_weight: bit width used for weight quantization (biases use 8).
        targ_type: collection of layer classes to quantize; must not be None.
        quant_type: 'uniform' for per-channel min/max uniform quantization;
            'pws' for piecewise quantization with a searched breakpoint p;
            'pwg'/'pwl' branches raise NotImplementedError (see notes below).
    Returns:
        The same *graph* with quantized parameters copied back in.
    """
    print('Quantizing Layer parameters')
    assert (quant_type in ['uniform', 'pwg', 'pwl', 'pws']), 'quant_type not supported'
    assert (targ_type != None), 'targ_type cannot be None!'
    for layer_idx in graph:
        if (type(graph[layer_idx]) in targ_type):
            with torch.no_grad():
                if (quant_type == 'uniform'):
                    param = graph[layer_idx].weight.detach()
                    # Per-output-channel min/max, shaped for 4-D conv weights...
                    min_value = param.view(param.size(0), (- 1)).min((- 1))[0].view((- 1), 1, 1, 1)
                    max_value = param.view(param.size(0), (- 1)).max((- 1))[0].view((- 1), 1, 1, 1)
                    # ...then reshaped for 2-D linear weights.
                    if (len(param.shape) == 2):
                        min_value = min_value.view((- 1), 1)
                        max_value = max_value.view((- 1), 1)
                    tmp = quantize(param, bit_weight, min_value, max_value)
                    graph[layer_idx].weight.data.copy_(tmp.data.cpu())
                    # Biases are quantized per-tensor at a fixed 8 bits.
                    if (graph[layer_idx].bias is not None):
                        param = graph[layer_idx].bias.detach()
                        graph[layer_idx].bias.data.copy_(quantize(param, 8, param.min(), param.max()).data.cpu())
                else:
                    # Piecewise variants: find a breakpoint p in [0, m] that
                    # splits the weight magnitudes into two quantized regions.
                    param = graph[layer_idx].weight.detach()
                    m = torch.abs(param).max()
                    if (quant_type == 'pws'):
                        import numpy as np
                        # Empirical CDF of the weight values at point p.
                        def ecdf(p, x):
                            idx = np.argwhere((np.sort(x) >= p))
                            if (idx.shape[0] == 0):
                                return 1.0
                            return (np.arange(1, (len(x) + 1)) / float(len(x)))[idx[0]]
                        # Expected quantization error for breakpoint p given
                        # bit width b, max magnitude m and samples x.
                        expect_error = (lambda b, m, p, x: ((1 / (12 * (((2 ** b) - 1) ** 2))) * (((m - p) ** 2) + ((m * ((2 * p) - m)) * ((2 * ecdf(p, x)) - 1)))))
                        # Coarse-to-fine search over p (steps 0.1 -> 0.01 -> 0.001 of m).
                        # NOTE(review): e_best starts at 0.0, so only candidates
                        # with negative expected error are ever accepted — confirm
                        # whether the initial value should be +inf instead.
                        e_best = .0
                        p1 = 0
                        param_np = param.view((- 1)).cpu().numpy()
                        m_np = m.cpu().numpy()
                        for rr in np.arange(0.1, 1.0, 0.1):
                            tmp = (rr * m_np)
                            e_cur = expect_error(bit_weight, m_np, tmp, param_np)
                            if (e_cur < e_best):
                                e_best = e_cur
                                p1 = tmp
                        p2 = p1
                        for rr in np.arange(((p1 / m_np) * 0.1), ((p1 / m_np) + 0.1), 0.01):
                            tmp = (rr * m_np)
                            e_cur = expect_error(bit_weight, m_np, tmp, param_np)
                            if (e_cur < e_best):
                                e_best = e_cur
                                p2 = tmp
                        p = p2
                        for rr in np.arange(((p2 / m_np) * 0.1), ((p2 / m_np) + 0.01), 0.001):
                            tmp = (rr * m_np)
                            e_cur = expect_error(bit_weight, m_np, tmp, param_np)
                            if (e_cur < e_best):
                                e_best = e_cur
                                p = tmp
                    elif (quant_type == 'pwg'):
                        raise NotImplementedError
                        # NOTE(review): unreachable — kept from an earlier
                        # implementation (Gaussian-based breakpoint estimate).
                        m = ((m - torch.mean(param)) / torch.std(param.view((- 1))))
                        p = (torch.log(((0.8614 * m) + 0.6079)) * m)
                        p = ((p * torch.std(param.view((- 1)))) + torch.mean(param))
                    else:
                        raise NotImplementedError
                        # NOTE(review): unreachable — Laplace-based breakpoint
                        # estimate from an earlier implementation.
                        p = ((0.803 * torch.sqrt(m)) - 0.3167)
                    # Split |w| at p: r1 covers [0, p], r2 covers [p, m].
                    r1 = torch.abs(param).clamp(max=p)
                    r2 = torch.abs(param).clamp(min=p)
                    for rr in [r1, r2]:
                        min_value = rr.view(rr.size(0), (- 1)).min((- 1))[0].view((- 1), 1, 1, 1)
                        max_value = rr.view(rr.size(0), (- 1)).max((- 1))[0].view((- 1), 1, 1, 1)
                        if (len(rr.shape) == 2):
                            min_value = min_value.view((- 1), 1)
                            max_value = max_value.view((- 1), 1)
                        rr.data.copy_(quantize(param, bit_weight, min_value, max_value).data)
                    # Stitch the two regions back together, restoring signs.
                    result = torch.zeros_like(param)
                    result[(torch.abs(param) < p)] = (r1 * torch.sign(param))[(torch.abs(param) < p)]
                    result[(torch.abs(param) >= p)] = (r2 * torch.sign(param))[(torch.abs(param) >= p)]
                    graph[layer_idx].weight.data.copy_(result)
    return graph
def parse_pound(line):
    """Replace numeric amounts in *line* with spelled-out '<words> pounds' text."""
    # Matches a digit followed by digits/commas/periods (no currency symbol).
    all_pound = re.findall('[0-9][0-9.,]*', line)
    for pound in all_pound:
        # NOTE(review): pound[1:] drops the FIRST DIGIT before spelling the
        # number out; this looks like it assumed a leading currency character
        # that the regex never captures — confirm intent.
        number_text = engine.number_to_words(pound[1:].replace(',', ''))
        # Flatten hyphenated words ('twenty-one' -> 'twenty one').
        number_text = number_text.replace('-', ' ')
        pound_text = (number_text + ' pounds')
        # Replace only the first occurrence so repeated amounts map in order.
        line = line.replace(pound, pound_text, 1)
    # NOTE(review): replacing the empty string inserts a space between EVERY
    # character of the line (and at both ends) — almost certainly unintended;
    # verify against the expected output format.
    line = line.replace('', ' ')
    return line
def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, output_fn):
    """Convert SQuAD-style examples into BERT ``InputFeatures``.

    Each example's document is WordPiece-tokenized and split into overlapping
    spans (sliding window of stride ``doc_stride``) so that
    ``[CLS] query [SEP] span [SEP]`` fits in ``max_seq_length`` tokens; one
    ``InputFeatures`` per span is handed to ``output_fn``.

    Args:
        examples: iterable of SQuAD examples (question text, doc tokens,
            answer span, ``is_impossible`` flag).
        tokenizer: WordPiece tokenizer used for query and document alike.
        max_seq_length: fixed total input length (zero-padded).
        doc_stride: offset between consecutive document spans.
        max_query_length: queries longer than this are truncated.
        is_training: when True, compute per-span start/end answer positions.
        output_fn: callback invoked with every generated feature.
    """
    # Start well above any plausible example count so feature ids can never
    # collide with example indices (matches the reference BERT run_squad.py).
    # The original line here was truncated ('unique_id =') and did not parse.
    unique_id = 1000000000
    for (example_index, example) in enumerate(examples):
        query_tokens = tokenizer.tokenize(example.question_text)
        if (len(query_tokens) > max_query_length):
            query_tokens = query_tokens[0:max_query_length]
        # Maps between whitespace-token indices and WordPiece sub-token indices.
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
        tok_start_position = None
        tok_end_position = None
        if (is_training and example.is_impossible):
            tok_start_position = (- 1)
            tok_end_position = (- 1)
        if (is_training and (not example.is_impossible)):
            # Translate the answer span into sub-token coordinates, then tighten
            # it to the best-matching sub-span of the original answer text.
            tok_start_position = orig_to_tok_index[example.start_position]
            if (example.end_position < (len(example.doc_tokens) - 1)):
                tok_end_position = (orig_to_tok_index[(example.end_position + 1)] - 1)
            else:
                tok_end_position = (len(all_doc_tokens) - 1)
            (tok_start_position, tok_end_position) = _improve_answer_span(all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text)
        # [CLS], [SEP], [SEP] consume three of the max_seq_length slots.
        max_tokens_for_doc = ((max_seq_length - len(query_tokens)) - 3)
        # Slide a window over the document with the configured stride.
        _DocSpan = collections.namedtuple('DocSpan', ['start', 'length'])
        doc_spans = []
        start_offset = 0
        while (start_offset < len(all_doc_tokens)):
            length = (len(all_doc_tokens) - start_offset)
            if (length > max_tokens_for_doc):
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if ((start_offset + length) == len(all_doc_tokens)):
                break
            start_offset += min(length, doc_stride)
        for (doc_span_index, doc_span) in enumerate(doc_spans):
            # Assemble [CLS] query [SEP] doc-span [SEP] with segment ids 0/1.
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            tokens.append('[CLS]')
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append('[SEP]')
            segment_ids.append(0)
            for i in range(doc_span.length):
                split_token_index = (doc_span.start + i)
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
                # A token may appear in several overlapping spans; record whether
                # this span gives it the most surrounding context.
                is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append('[SEP]')
            segment_ids.append(1)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # 1 for real tokens, 0 for padding.
            input_mask = ([1] * len(input_ids))
            while (len(input_ids) < max_seq_length):
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            assert (len(input_ids) == max_seq_length)
            assert (len(input_mask) == max_seq_length)
            assert (len(segment_ids) == max_seq_length)
            start_position = None
            end_position = None
            if (is_training and (not example.is_impossible)):
                doc_start = doc_span.start
                doc_end = ((doc_span.start + doc_span.length) - 1)
                out_of_span = False
                if (not ((tok_start_position >= doc_start) and (tok_end_position <= doc_end))):
                    out_of_span = True
                if out_of_span:
                    # Answer not inside this span: point at [CLS] (index 0).
                    start_position = 0
                    end_position = 0
                else:
                    # Shift from document coordinates into this span's input,
                    # past the [CLS] + query + [SEP] prefix.
                    doc_offset = (len(query_tokens) + 2)
                    start_position = ((tok_start_position - doc_start) + doc_offset)
                    end_position = ((tok_end_position - doc_start) + doc_offset)
            if (is_training and example.is_impossible):
                start_position = 0
                end_position = 0
            if (example_index < 20):
                # Log the first few features verbatim for manual sanity checks.
                tf.logging.info('*** Example ***')
                tf.logging.info(('unique_id: %s' % unique_id))
                tf.logging.info(('example_index: %s' % example_index))
                tf.logging.info(('doc_span_index: %s' % doc_span_index))
                tf.logging.info(('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in tokens])))
                tf.logging.info(('token_to_orig_map: %s' % ' '.join([('%d:%d' % (x, y)) for (x, y) in six.iteritems(token_to_orig_map)])))
                tf.logging.info(('token_is_max_context: %s' % ' '.join([('%d:%s' % (x, y)) for (x, y) in six.iteritems(token_is_max_context)])))
                tf.logging.info(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
                tf.logging.info(('input_mask: %s' % ' '.join([str(x) for x in input_mask])))
                tf.logging.info(('segment_ids: %s' % ' '.join([str(x) for x in segment_ids])))
                if (is_training and example.is_impossible):
                    tf.logging.info('impossible example')
                if (is_training and (not example.is_impossible)):
                    answer_text = ' '.join(tokens[start_position:(end_position + 1)])
                    tf.logging.info(('start_position: %d' % start_position))
                    tf.logging.info(('end_position: %d' % end_position))
                    tf.logging.info(('answer: %s' % tokenization.printable_text(answer_text)))
            feature = InputFeatures(unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, start_position=start_position, end_position=end_position, is_impossible=example.is_impossible)
            output_fn(feature)
            unique_id += 1
class DummySpace(object):
    """Minimal stand-in for a gym-style space that only reports its size."""

    def __init__(self, dim):
        # Dimensionality supplied at construction, returned verbatim by shape().
        self._dim = dim

    def shape(self):
        """Return the dimensionality this space was constructed with."""
        return self._dim
class Environment():
    """Abstract multi-agent environment interface.

    Subclasses must override reset(), step() and get_agent_handles();
    the base class only provides an empty ``action_space`` placeholder.
    """

    def __init__(self):
        # Concrete environments are expected to populate this.
        self.action_space = ()

    def reset(self):
        """Start a new episode; must be implemented by subclasses."""
        raise NotImplementedError()

    def step(self, action_dict):
        """Advance one timestep given per-agent actions; must be implemented."""
        raise NotImplementedError()

    def get_agent_handles(self):
        """Return identifiers of the active agents; must be implemented."""
        raise NotImplementedError()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.