code stringlengths 101 5.91M |
|---|
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
num_no_ans = sum((1 for k in qid_to_has_ans if (not qid_to_has_ans[k])))
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=(lambda k: na_probs[k]))
for (i, qid) in enumerate(qid_list):
if (qid not in scores):
continue
if qid_to_has_ans[qid]:
diff = scores[qid]
elif preds[qid]:
diff = (- 1)
else:
diff = 0
cur_score += diff
if (cur_score > best_score):
best_score = cur_score
best_thresh = na_probs[qid]
return (((100.0 * best_score) / len(scores)), best_thresh) |
class Configs():
    """Mutable container for one benchmark/launcher run configuration.

    The set_*/get_* accessor pairs are kept for backward compatibility with
    existing callers; the attributes are also directly readable/writable.
    """

    def __init__(self, batch, instance, cores, weight_sharing, memory_allocator, memory_planning, cmds, mode):
        self.batch = batch
        self.instance = instance
        self.cores = cores
        self.weight_sharing = weight_sharing
        self.memory_allocator = memory_allocator
        self.memory_planning = memory_planning
        self.cmds = cmds
        self.mode = mode

    # --- batch size ---
    def set_batch(self, batch):
        self.batch = batch

    def get_batch(self):
        return self.batch

    # --- number of instances ---
    def set_instance(self, instance):
        self.instance = instance

    def get_instance(self):
        return self.instance

    # --- cores per instance ---
    def set_cores_per_instance(self, cores):
        self.cores = cores

    def get_cores_per_instance(self):
        return self.cores

    # --- weight sharing flag ---
    def set_weight_sharing(self, weight_sharing):
        self.weight_sharing = weight_sharing

    def get_weight_sharing(self):
        return self.weight_sharing

    # --- memory allocator choice ---
    def set_memory_allocator(self, memory_allocator):
        self.memory_allocator = memory_allocator

    def get_memory_allocator(self):
        return self.memory_allocator

    # --- memory planning strategy ---
    def set_memory_planning(self, memory_planning):
        self.memory_planning = memory_planning

    def get_memory_planning(self):
        return self.memory_planning

    # --- command list ---
    def set_cmds(self, cmds):
        self.cmds = cmds

    def get_cmds(self):
        return self.cmds

    # --- run mode ---
    def set_mode(self, mode):
        self.mode = mode

    def get_mode(self):
        return self.mode
def distort_image(image, height, width):
    """Apply training-time augmentation: random crop to (height, width),
    horizontal flip, brightness and contrast jitter."""
    cropped = tf.random_crop(image, [height, width, 3])
    flipped = tf.image.random_flip_left_right(cropped)
    brightened = tf.image.random_brightness(flipped, max_delta=63)
    return tf.image.random_contrast(brightened, lower=0.2, upper=1.8)
def idx2seqtype(idx):
    """Map a numeric sequence-type id to its dataset name.

    Args:
        idx: 1 ('mpii'), 2 ('bonn'), or 3 ('mpiinew').

    Raises:
        ValueError: if idx is not one of the known ids. The original used
            `assert False`, which is stripped under `python -O` and would
            silently return None; an explicit exception is always raised.
    """
    mapping = {1: 'mpii', 2: 'bonn', 3: 'mpiinew'}
    try:
        return mapping[idx]
    except KeyError:
        raise ValueError('unknown sequence-type index: {!r}'.format(idx)) from None
def round_filters(config: EfficientNetConfig, num_channels: int):
    """Scale a channel count by the width coefficient, rounding to the
    nearest multiple of `config.depth_divisor` (EfficientNet rule)."""
    divisor = config.depth_divisor
    scaled = num_channels * config.width_coefficient
    # Round to the nearest multiple of divisor, but never below divisor.
    rounded = max(divisor, int(scaled + divisor / 2) // divisor * divisor)
    # Never round down by more than 10% of the scaled value.
    if rounded < 0.9 * scaled:
        rounded += divisor
    return int(rounded)
def init(exp_parent_dir, run_group=None):
    """Initialize the global aim experiment session (currently disabled).

    NOTE(review): the bare `return` below makes everything after it dead
    code — the aim Session is never created. This looks like a deliberate
    kill-switch; confirm before removing it.
    """
    return
    global _g_session
    assert (_g_session is None), 'aim_wrapper.init() should be called only once.'
    _g_session = Session(repo=os.path.realpath(os.path.abspath(exp_parent_dir)), experiment=(run_group or 'default'), flush_frequency=64)
class HelenSegmentation(BaseDataset):
    """Helen face-parsing segmentation dataset (11 classes).

    Expects `root` to contain image/<split>/ and label/<split>/ directories
    plus train.txt / val.txt / test.txt listing image file names; each mask
    shares the image's stem with a '.png' extension.
    """
    NUM_CLASS = 11

    def __init__(self, root='dataset/helen/', split='train', mode=None, transform=None, target_transform=None):
        super(HelenSegmentation, self).__init__(root, split, mode, transform, target_transform, base_size=256, crop_size=256)
        _mask_dir = os.path.join(root, 'label')
        _image_dir = os.path.join(root, 'image')
        # 'val' and 'testval' read the same file list.
        if (self.mode == 'train'):
            _split_f = os.path.join(root, 'train.txt')
        elif (self.mode == 'val'):
            _split_f = os.path.join(root, 'val.txt')
        elif (self.mode == 'testval'):
            _split_f = os.path.join(root, 'val.txt')
        elif (self.mode == 'test'):
            _split_f = os.path.join(root, 'test.txt')
        else:
            raise RuntimeError('Unknown dataset split.')
        self.images = []
        self.masks = []
        self.names = []
        self.crop_size_h = self.crop_size
        self.crop_size_w = self.crop_size
        with open(os.path.join(_split_f), 'r') as lines:
            for line in tqdm(lines):
                _image = os.path.join(_image_dir, ((self.split + '/') + line.rstrip('\n')))
                assert os.path.isfile(_image)
                self.images.append(_image)
                self.names.append(line.rstrip('\n'))
                if (self.mode != 'test'):
                    # Mask path: drop the 3-char image extension and use 'png'.
                    _mask = os.path.join(_mask_dir, (((self.split + '/') + line.rstrip('\n')[:(- 3)]) + 'png'))
                    assert os.path.isfile(_mask)
                    self.masks.append(_mask)
        if (self.mode != 'test'):
            assert (len(self.images) == len(self.masks))

    def _val_sync_transform(self, img, mask):
        """Deterministic eval transform: resize so height == crop height,
        then center-crop both image and mask."""
        (w, h) = img.size
        oh = self.crop_size_h
        ow = int((((1.0 * w) * oh) / h))
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        (w, h) = img.size
        x1 = int(round(((w - self.crop_size_w) / 2.0)))
        y1 = int(round(((h - self.crop_size_h) / 2.0)))
        img = img.crop((x1, y1, (x1 + self.crop_size_w), (y1 + self.crop_size_h)))
        mask = mask.crop((x1, y1, (x1 + self.crop_size_w), (y1 + self.crop_size_h)))
        return (img, self._mask_transform(mask))

    def _sync_transform(self, img, mask):
        """Training transform applied jointly to image and mask:
        random flip, scale, rotation, pad+crop, and Gaussian blur."""
        if (random.random() < 0.5):
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
            # After a horizontal flip, swap paired left/right part labels
            # (presumably 2<->3 and 4<->5 are eye/brow pairs — confirm
            # against the Helen label map).
            mask = swap_N(mask, 2, 3)
            mask = swap_N(mask, 4, 5)
        short_size = random.randint(int((self.base_size * 0.5)), int((self.base_size * 2.0)))
        (w, h) = img.size
        oh = short_size
        ow = int((((1.0 * w) * oh) / h))
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        deg = random.uniform((- 10), 10)
        img = img.rotate(deg, resample=Image.BILINEAR)
        mask = mask.rotate(deg, resample=Image.NEAREST)
        # NOTE(review): padding is gated on height only. If ow < crop_size_w
        # while oh >= crop_size_h, the random crop below would get a negative
        # range and raise — confirm inputs can't hit that case, or gate on
        # both dimensions.
        if (oh < self.crop_size_h):
            padh = ((self.crop_size_h - oh) if (oh < self.crop_size_h) else 0)
            padw = ((self.crop_size_w - ow) if (ow < self.crop_size_w) else 0)
            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        (w, h) = img.size
        x1 = random.randint(0, (w - self.crop_size_w))
        y1 = random.randint(0, (h - self.crop_size_h))
        img = img.crop((x1, y1, (x1 + self.crop_size_w), (y1 + self.crop_size_h)))
        mask = mask.crop((x1, y1, (x1 + self.crop_size_w), (y1 + self.crop_size_h)))
        if (random.random() < 0.5):
            img = img.filter(ImageFilter.GaussianBlur(radius=random.random()))
        return (img, self._mask_transform(mask))

    def __getitem__(self, index):
        img = Image.open(self.images[index]).convert('RGB')
        if (self.mode == 'test'):
            # Test mode has no masks; return the image and its file name.
            if (self.transform is not None):
                img = self.transform(img)
            return (img, os.path.basename(self.images[index]))
        target = Image.open(self.masks[index])
        # Normalize size before the mode-specific joint transforms.
        img = img.resize((self.crop_size_w, self.crop_size_h), Image.BILINEAR)
        target = target.resize((self.crop_size_w, self.crop_size_h), Image.NEAREST)
        if (self.mode == 'train'):
            (img, target) = self._sync_transform(img, target)
        elif (self.mode == 'val'):
            (img, target) = self._val_sync_transform(img, target)
        else:
            assert (self.mode == 'testval')
            target = self._mask_transform(target)
        if (self.transform is not None):
            img = self.transform(img)
        if (self.target_transform is not None):
            target = self.target_transform(target)
        if (self.mode == 'testval'):
            # testval additionally returns the file name for result dumping.
            return (img, target, self.names[index])
        return (img, target)

    def __len__(self):
        return len(self.images)
class PatchGANDiscriminator(DiscriminatorEnsemble):
    """Ensemble of PatchGAN discriminators fed an image pyramid.

    Each discriminator sees the input downsampled by a further factor of 2.
    (Cleanup: the original's dead `pass` statements after real code were
    removed; behavior is unchanged.)
    """

    def __init__(self, cfg):
        self._parse_config(cfg)
        # One identical backbone config per discriminator.
        configs = [(3, 64, self._max_dim, self._num_layers, self._num_layers)] * self._num_discs
        super(PatchGANDiscriminator, self).__init__(make_disc_backbones(configs))
        self._log = logging.getLogger('epe.network.patchgan')
        self._log.debug(f'Discriminators: {self.discs}')

    def _parse_config(self, cfg):
        """Read ensemble hyperparameters from a dict-like config."""
        self._num_discs = int(cfg.get('num_discs', 3))
        self._max_dim = int(cfg.get('max_dim', 256))
        self._num_layers = int(cfg.get('num_layers', 5))
        self._norm = cfg.get('norm', 'group')
        assert (self._norm in ['group', 'spectral', 'inst', 'batch', 'domain', 'none', 'compare', 'compare2'])

    def prepare_input(self, img, fix_input, run_discs, **kwargs):
        """Build one (image, None) pair per discriminator by repeatedly
        halving the resolution; detach when the input is to be fixed."""
        imgs = [(img, None)]
        for i in range(1, self.__len__()):
            imgi = torch.nn.functional.interpolate(imgs[(- 1)][0], scale_factor=0.5, mode='bilinear', align_corners=False)
            imgs.append(((imgi.detach() if fix_input else imgi), None))
        return imgs
class BaseLoader():
    """Abstract interface for image/patch loaders.

    Subclasses must override every method below; the base implementations
    all raise NotImplementedError.
    """

    def __init__(self):
        pass

    def prepare(self):
        """Scan/load the underlying data source."""
        raise NotImplementedError

    def get_num_images(self):
        """Return the number of images available."""
        raise NotImplementedError

    def get_patch_batch(self, batch_size, scale, input_patch_size):
        """Return a batch of training patches."""
        raise NotImplementedError

    def get_random_image_patch_pair(self, scale, input_patch_size):
        """Return one random (input, target) patch pair."""
        raise NotImplementedError

    def get_image_patch_pair(self, image_index, scale, input_patch_size):
        """Return a patch pair from a specific image."""
        raise NotImplementedError

    def get_image_pair(self, image_index, scale):
        """Return a full (input, target) image pair."""
        raise NotImplementedError
class RougeL(Rouge):
    """Rouge metric restricted to the ROUGE-L variant.

    NOTE(review): **kwargs are accepted but silently discarded — they are
    not forwarded to Rouge.__init__. Confirm whether that is intentional.
    """
    def __init__(self, **kwargs):
        super(RougeL, self).__init__(rouges=['rougeL'])
def test_audio_datamodule_prepare_unprocessed_downloaded(fs, mocker):
    """When the raw-data dir already exists, prepare_data(use_preprocessed=False)
    must skip the download and run preprocessing exactly once."""
    download_mock = mocker.patch(f'{TESTED_MODULE}.data_utils.download_full_dataset')
    preprocess_mock = mocker.patch(f'{TESTED_MODULE}.AudioDataModule.preprocess_dataset')
    datamodule = AudioDataModule()
    # Fake filesystem: pretend the unprocessed dataset is already on disk.
    fs.create_dir(datamodule.data_dir_unprocessed)
    datamodule.prepare_data(use_preprocessed=False)
    assert (download_mock.call_args_list == [])
    preprocess_mock.assert_called_once()
def get_pip_packages(run_lambda):
    """Return (pip executable name, filtered `pip list` output).

    The listing is piped through grep/findstr to keep only numpy/torch/mypy
    entries. `run_lambda` is the command-runner passed through to
    run_and_read_all.
    """
    def run_with_pip(pip):
        # Windows has no grep; use findstr from System32 instead.
        if (get_platform() == 'win32'):
            system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
            findstr_cmd = os.path.join(system_root, 'System32', 'findstr')
            grep_cmd = '{} /R "numpy torch mypy"'.format(findstr_cmd)
        else:
            grep_cmd = 'grep "torch\\|numpy\\|mypy"'
        return run_and_read_all(run_lambda, ((pip + ' list --format=freeze | ') + grep_cmd))
    # FIX: the original indexed sys.version[0] (first character of a prose
    # string); sys.version_info is the structured, reliable major version.
    pip_version = ('pip3' if (sys.version_info[0] == 3) else 'pip')
    out = run_with_pip((sys.executable + ' -mpip'))
    return (pip_version, out)
# NOTE(review): the bare `_model` below looks like a mangled decorator
# (likely `@register_model` from a model registry) — confirm against the
# upstream source before relying on registration.
_model
def mobilenetv2_120d(pretrained=False, **kwargs):
    """MobileNetV2 variant with 1.2 channel multiplier, 1.4 depth multiplier,
    and fixed stem/head channels."""
    model = _gen_mobilenet_v2('mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs)
    return model
def main(_):
    """Adversarial (iterative gradient-sign) evaluation of a CapsNet on MNIST.

    Builds the graph, restores a checkpoint when available, then runs
    adv_validation on the test split and writes sample images.
    """
    # Per-iteration step size: total perturbation is bounded by max_epsilon
    # (expressed in [0, 255] pixel units, hence the /256).
    eps = (((1.0 * FLAGS.max_epsilon) / 256.0) / FLAGS.max_iter)
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    tf.reset_default_graph()
    caps_net = CapsNet(mnist)
    # NOTE(review): 'creat_architecture' is the (misspelled) upstream API name.
    caps_net.creat_architecture()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    train_dir = cfg.TRAIN_DIR
    ckpt = tf.train.get_checkpoint_state(train_dir)
    # One FGSM step: x + eps * sign(dL/dx), clipped back to valid pixel range.
    (dy_dx,) = tf.gradients(caps_net._loss, caps_net._x)
    x_adv = tf.stop_gradient((caps_net._x + ((1 * eps) * tf.sign(dy_dx))))
    x_adv = tf.clip_by_value(x_adv, 0.0, 1.0)
    with tf.Session(config=config) as sess:
        if (ckpt and cfg.USE_CKPT):
            print(('Reading parameters from %s' % ckpt.model_checkpoint_path))
            caps_net.saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('Created model with fresh paramters.')
            sess.run(tf.global_variables_initializer())
        print(('Num params: %d' % sum((v.get_shape().num_elements() for v in tf.trainable_variables()))))
        caps_net.train_writer.add_graph(sess.graph)
        caps_net.adv_validation(sess, 'test', x_adv, FLAGS.max_iter, (((('samples/gsm_' + str(FLAGS.max_iter)) + '_') + str(FLAGS.max_epsilon)) + '.PNG'))
def main(root: _path_type):
    """Remove result CSVs of every failed experiment found under `root`."""
    root_: Path = path2Path(root)
    # is_dir() implies existence; keep both for the clearer assert message.
    assert (root_.is_dir() and root_.exists()), root
    exp_list = find_experiment_list(root_)
    print(f'Found {len(exp_list)} experiments.')
    failed_exp_list = [x for x in exp_list if (not is_experiment_sucessed(x))]
    print(f'Found {len(failed_exp_list)} failed experiments.')
    # failed_exp_list is already filtered to failures; the original re-ran
    # is_experiment_sucessed a second time per experiment for no effect.
    for exp in failed_exp_list:
        remove_csv(exp)
def _compute_scales(A):
    """Choose the squaring scale `s` and Padé order `m` for a
    scaling-and-squaring matrix-exponential computation.

    The `ell` dicts hold norm thresholds per Padé order; the variants differ
    when gradients are required. Returns (s, m) where `s` is a per-matrix
    tensor of squaring counts and `m` the selected order.

    NOTE(review): `torch.log2_` is used as a module-level in-place function —
    confirm it exists in the targeted torch version (in-place ops are
    normally tensor methods, e.g. `t.log2_()`).
    NOTE(review): for dtypes other than float64/float32, `m` is never bound
    and the final return would raise UnboundLocalError — confirm callers
    only pass those two dtypes.
    """
    norm = matrix_1_norm(A)
    max_norm = torch.max(norm)
    s = torch.zeros_like(norm)
    if (A.dtype == torch.float64):
        # Gradient computations use tighter thresholds.
        if A.requires_grad:
            ell = {3: 0., 5: 0., 7: 0., 9: 1., 13: 4.}
        else:
            ell = {3: 0., 5: 0., 7: 0., 9: 2., 13: 5.}
        if (max_norm >= ell[9]):
            # Too large for low orders: use order 13 and scale down.
            m = 13
            magic_number = ell[m]
            s = torch.relu_(torch.ceil(torch.log2_((norm / magic_number))))
        else:
            # Pick the smallest order whose threshold covers the norm.
            for m in [3, 5, 7, 9]:
                if (max_norm < ell[m]):
                    magic_number = ell[m]
                    break
    elif (A.dtype == torch.float32):
        if A.requires_grad:
            ell = {3: 0., 5: 1., 7: 3.}
        else:
            ell = {3: 0., 5: 1., 7: 3.}
        if (max_norm >= ell[5]):
            m = 7
            magic_number = ell[m]
            s = torch.relu_(torch.ceil(torch.log2_((norm / magic_number))))
        else:
            for m in [3, 5]:
                if (max_norm < ell[m]):
                    magic_number = ell[m]
                    break
    return (s, m)
class MSRResNet(nn.Module):
    """Modified SRResNet super-resolution network.

    Supports upscale factors 2, 3, and 4 (x4 uses two x2 pixel-shuffle
    stages). The output adds a bilinearly-upsampled copy of the input as a
    global residual.

    NOTE(review): other upscale values leave self.upconv1 undefined and the
    weight initialization below would raise — confirm callers only pass
    2/3/4.
    """
    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):
        super(MSRResNet, self).__init__()
        self.upscale = upscale
        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
        basic_block = functools.partial(mutil.ResidualBlock_noBN, nf=nf)
        self.recon_trunk = mutil.make_layer(basic_block, nb)
        # Upsampling head: pixel-shuffle expands nf*r^2 channels to r x space.
        if (self.upscale == 2):
            self.upconv1 = nn.Conv2d(nf, (nf * 4), 3, 1, 1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(2)
        elif (self.upscale == 3):
            self.upconv1 = nn.Conv2d(nf, (nf * 9), 3, 1, 1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(3)
        elif (self.upscale == 4):
            self.upconv1 = nn.Conv2d(nf, (nf * 4), 3, 1, 1, bias=True)
            self.upconv2 = nn.Conv2d(nf, (nf * 4), 3, 1, 1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(2)
        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        # Scale-down initialization (0.1) for stability.
        mutil.initialize_weights([self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1)
        if (self.upscale == 4):
            mutil.initialize_weights(self.upconv2, 0.1)

    def forward(self, x):
        fea = self.lrelu(self.conv_first(x))
        out = self.recon_trunk(fea)
        if (self.upscale == 4):
            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))
            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))
        elif ((self.upscale == 3) or (self.upscale == 2)):
            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))
        out = self.conv_last(self.lrelu(self.HRconv(out)))
        # Global residual: bilinearly upsampled input.
        base = F.interpolate(x, scale_factor=self.upscale, mode='bilinear', align_corners=False)
        out += base
        return out
class MarkupLMForSequenceClassification(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed;
    instantiating it raises via requires_backends."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class TestTokenizers():
    """Exercises util.is_partial_token across tokenizer families.

    Each test tokenizes ' tokenization' and checks that a word-initial token
    is not flagged as partial while its continuation token is.

    NOTE(review): AutoTokenizer.from_pretrained may download models — these
    tests need network access or a warm cache.
    """
    def test_gpt_tokenizer(self):
        """BPE (GPT-2) tokenizer, paired with the distilgpt2 config."""
        tokenizers = ['gpt2', 'bert-base-uncased']
        model_name = 'distilgpt2'
        config = util.load_config(model_name)
        tokenizer = AutoTokenizer.from_pretrained(tokenizers[0])
        token_ids = tokenizer(' tokenization')['input_ids']
        is_partial_1 = util.is_partial_token(config, tokenizer.convert_ids_to_tokens(token_ids[0]))
        is_partial_2 = util.is_partial_token(config, tokenizer.convert_ids_to_tokens(token_ids[1]))
        assert (not is_partial_1)
        assert is_partial_2

    def test_bert_tokenizer(self):
        """WordPiece (BERT); indices shifted by one to skip the [CLS] token."""
        model_name = 'bert-base-uncased'
        config = util.load_config(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        token_ids = tokenizer(' tokenization')['input_ids']
        is_partial_1 = util.is_partial_token(config, tokenizer.convert_ids_to_tokens(token_ids[1]))
        is_partial_2 = util.is_partial_token(config, tokenizer.convert_ids_to_tokens(token_ids[2]))
        assert (not is_partial_1)
        assert is_partial_2

    def test_t5_tokenizer(self):
        """SentencePiece (T5)."""
        model_name = 't5-small'
        config = util.load_config(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        token_ids = tokenizer(' tokenization')['input_ids']
        is_partial_1 = util.is_partial_token(config, tokenizer.convert_ids_to_tokens(token_ids[0]))
        is_partial_2 = util.is_partial_token(config, tokenizer.convert_ids_to_tokens(token_ids[1]))
        assert (not is_partial_1)
        assert is_partial_2
class QMsumDataset(SummDataset):
    """Loader for the QMSum query-based meeting summarization benchmark."""

    dataset_name = 'QMsum'
    description = '\n    QMSum is a new human-annotated benchmark for query-based multi-domain meeting summarization task,\n    which consists of 1,808 query-summary pairs over 232 meetings in multiple domains.\n    '
    is_dialogue_based = True
    is_multi_document = False
    is_query_based = True
    # Built from a local (non-HuggingFace-hub) builder script.
    builder_script_path = path.join(BASE_NONHUGGINGFACE_DATASETS_PATH, (dataset_name.lower() + '.py'))

    def __init__(self, cache_dir: Optional[str]=None):
        kwargs = {'cache_dir': cache_dir, 'path': self.builder_script_path}
        super().__init__(dataset_kwargs=kwargs)

    def _process_data(self, data: Dataset) -> Generator[(SummInstance, None, None)]:
        """Yield one SummInstance per (meeting, query) pair."""
        for record in tqdm(data):
            # Each general and specific query becomes its own instance.
            for query_set in (record['general_query_list'] + record['specific_query_list']):
                transcript: List = [f"{turn['speaker']} : {turn['content']}" for turn in record['meeting_transcripts']]
                yield SummInstance(source=transcript, summary=query_set['answer'], query=query_set['query'])
class ResNet_IBN(nn.Module):
    """ResNet backbone built from IBN (Instance-Batch Norm) blocks.

    `last_stride` sets the stride of layer4 (re-ID setups commonly use 1 to
    keep spatial resolution). forward() returns the layer4 feature map; the
    avgpool/fc layers are constructed but not used in forward.
    """
    def __init__(self, last_stride, block, layers, num_classes=1000):
        scale = 64
        self.inplanes = scale
        super(ResNet_IBN, self).__init__()
        self.conv1 = nn.Conv2d(3, scale, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(scale)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, scale, layers[0])
        self.layer2 = self._make_layer(block, (scale * 2), layers[1], stride=2)
        self.layer3 = self._make_layer(block, (scale * 4), layers[2], stride=2)
        self.layer4 = self._make_layer(block, (scale * 8), layers[3], stride=last_stride)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(((scale * 8) * block.expansion), num_classes)
        # Kaiming-style init for convs; unit gain / zero bias for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.InstanceNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; IBN is disabled at planes == 512
        (the last stage, per the IBN-Net design)."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        ibn = True
        if (planes == 512):
            ibn = False
        layers.append(block(self.inplanes, planes, ibn, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, ibn))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def load_param(self, model_path):
        """Copy matching weights from a checkpoint file, skipping fc.* keys."""
        param_dict = torch.load(model_path)
        for i in param_dict:
            if ('fc' in i):
                continue
            self.state_dict()[i].copy_(param_dict[i])
def add_displace_modifier(mesh_object: bpy.types.Object, texture_name: str, vertex_group: str='', mid_level: float=0.5, strength: float=1.0) -> None:
    """Attach a Displace modifier to `mesh_object`, driven by an existing
    texture looked up by name, optionally limited to a vertex group."""
    displace = mesh_object.modifiers.new(name='Displace', type='DISPLACE')
    displace.texture = bpy.data.textures[texture_name]
    displace.vertex_group = vertex_group
    displace.mid_level = mid_level
    displace.strength = strength
class GLJudge(object):
    """Classify the tonal (ping/ze) pattern of 5- or 7-character verse lines.

    gelvJudge returns a pattern code 0-3 for a recognized metrical pattern,
    -1 when the length is supported but no pattern matches, and -2 for
    unsupported lengths. Patterns are encoded as strings of 'p' (character
    must appear in the ping dictionary) and 'z' (ze dictionary); for
    7-character lines the first character is unconstrained, exactly as in
    the original conditional chains.
    """

    # (pattern, code) pairs, checked in order; the first match wins.
    _PATTERNS_5 = [
        ('pppzz', 0), ('ppzzz', 0), ('zppzz', 0), ('zpzpz', 0), ('ppzpz', 0),
        ('zzzpp', 1), ('pzzpp', 1),
        ('pzppz', 3), ('pzzpz', 3), ('zzppz', 3), ('zzzpz', 3),
        ('ppzzp', 2), ('zppzp', 2), ('pppzp', 2),
    ]
    # Applied to characters 1..6 of a 7-character line.
    _PATTERNS_7 = [
        ('zpppzz', 0), ('zppzzz', 0), ('zzppzz', 0), ('zppzpz', 0), ('zzpzpz', 0),
        ('pzzzpp', 1), ('ppzzpp', 1),
        ('ppzppz', 3), ('ppzzpz', 3), ('pzzppz', 3), ('pzzzpz', 3),
        ('zppzzp', 2), ('zzppzp', 2), ('zpppzp', 2),
    ]

    def __init__(self):
        print('loading ping and ze dict.txt')
        # Context managers close the files even on error (the original
        # leaked file handles and had a redundant self-assignment).
        with open('data/pingsheng.txt', 'r') as f:
            self.__ping = f.read()
        with open('data/zesheng.txt', 'r') as f:
            self.__ze = f.read()

    def _matches(self, chars, pattern):
        """True if each char belongs to the dict chosen by its pattern letter.

        Note: membership is checked independently per dictionary, so a
        character present in both dictionaries can satisfy either letter —
        same semantics as the original chained conditions.
        """
        for (ch, tone) in zip(chars, pattern):
            table = (self.__ping if (tone == 'p') else self.__ze)
            if (ch not in table):
                return False
        return True

    def gelvJudge(self, sentence):
        """Return the tonal-pattern code for `sentence` (see class docstring)."""
        if (len(sentence) == 5):
            (chars, patterns) = (sentence, self._PATTERNS_5)
        elif (len(sentence) == 7):
            (chars, patterns) = (sentence[1:], self._PATTERNS_7)
        else:
            return (- 2)
        for (pattern, code) in patterns:
            if self._matches(chars, pattern):
                return code
        return (- 1)
def average_weights(w):
    """Return the element-wise average of a list of model state dicts
    (FedAvg-style). The inputs are not modified."""
    avg = copy.deepcopy(w[0])
    count = len(w)
    for key in avg.keys():
        # Accumulate the remaining clients' tensors onto the deep copy.
        for state in w[1:]:
            avg[key] += state[key]
        avg[key] = torch.div(avg[key], float(count))
    return avg
class ShakeBlock(nn.Module):
    """Shake-shake residual block: two parallel ReLU-Conv-BN branches mixed
    by ShakeShake, plus an identity or projection shortcut."""

    def __init__(self, in_ch, out_ch, stride=1):
        super(ShakeBlock, self).__init__()
        self.equal_io = (in_ch == out_ch)
        # BUG FIX: the original `(self.equal_io and None) or Shortcut(...)`
        # always evaluated to Shortcut(...) because `None` is falsy (the
        # classic `cond and a or b` pitfall) — an unused shortcut module was
        # constructed and registered even when in_ch == out_ch.
        self.shortcut = (None if self.equal_io else Shortcut(in_ch, out_ch, stride=stride))
        self.branch1 = self._make_branch(in_ch, out_ch, stride)
        self.branch2 = self._make_branch(in_ch, out_ch, stride)

    def forward(self, x):
        h1 = self.branch1(x)
        h2 = self.branch2(x)
        # Stochastic branch mixing; behavior differs between train and eval.
        h = ShakeShake.apply(h1, h2, self.training)
        h0 = (x if self.equal_io else self.shortcut(x))
        return (h + h0)

    def _make_branch(self, in_ch, out_ch, stride=1):
        # ReLU-Conv-BN twice; inplace=False because both branches share x.
        return nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(in_ch, out_ch, 3, padding=1, stride=stride, bias=False), nn.BatchNorm2d(out_ch), nn.ReLU(inplace=False), nn.Conv2d(out_ch, out_ch, 3, padding=1, stride=1, bias=False), nn.BatchNorm2d(out_ch))
def _DGStrat_makeSequence(l: Iterable[DGStrat]) -> DGStrat:
    """Binding shim: wrap a Python iterable of DGStrat into the libpymod
    vector type and delegate to the original sequence-strategy factory."""
    return _DGStrat_makeSequence_orig(_wrap(libpymod._VecDGStrat, l))
class Identical(nn.Module):
    """Identity module: forward returns its input unchanged.

    Useful as a drop-in no-op placeholder inside nn.Sequential stacks.
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, input):
        # Pass-through; no computation, no parameters.
        return input
class ConcatData(DataFlow):
    """DataFlow that yields all datapoints of each child flow in sequence."""

    def __init__(self, df_lists):
        self.df_lists = df_lists

    def reset_state(self):
        """Reset every child flow."""
        for df in self.df_lists:
            df.reset_state()

    def size(self):
        """Total number of datapoints across all child flows."""
        return sum([df.size() for df in self.df_lists])

    def get_data(self):
        """Exhaust each child flow in order."""
        for df in self.df_lists:
            yield from df.get_data()
def gen_description(rule1_cat, d1, rule2_cat, d2):
    """Compose an English noun phrase from two attribute descriptors.

    Attributes are reordered into canonical adjective order
    (size < color < material < shape); ' object' is appended unless the
    final attribute is a shape; the article is 'an' before a vowel.
    """
    cat_order = ['size', 'color', 'material', 'shape']
    if (cat_order.index(rule1_cat) > cat_order.index(rule2_cat)):
        (rule1_cat, rule2_cat) = (rule2_cat, rule1_cat)
        (d1, d2) = (d2, d1)
    d = ((d1 + ' ') + d2)
    if (rule2_cat != 'shape'):
        d += ' object'
    # BUG FIX: the original `d.startswith('aeiou')` tested for the literal
    # prefix "aeiou"; startswith with a tuple checks each vowel.
    if d.startswith(('a', 'e', 'i', 'o', 'u')):
        d = ('an ' + d)
    else:
        d = ('a ' + d)
    return d
def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss):
    """Save trainer checkpoints per `cfg`, then prune stale ones.

    Tracks the best validation metric across calls (stored on the function
    object itself), decides which checkpoint filenames apply this call
    (per-epoch, per-update, best, best-k, last), writes the first and copies
    it to the rest, and finally deletes old checkpoints according to the
    keep_* settings.
    """
    from fairseq import meters
    # Only rank 0 creates the save dir; other ranks may still save shards.
    if (trainer.data_parallel_rank == 0):
        os.makedirs(cfg.save_dir, exist_ok=True)
    prev_best = getattr(save_checkpoint, 'best', val_loss)
    if (val_loss is not None):
        best_function = (max if cfg.maximize_best_checkpoint_metric else min)
        # Persist the running best on the function object across calls.
        save_checkpoint.best = best_function(val_loss, prev_best)
    if cfg.no_save:
        return
    trainer.consolidate_optimizer()
    if (not trainer.should_save_checkpoint_on_current_rank):
        if trainer.always_call_state_dict_during_save_checkpoint:
            trainer.state_dict()
        return
    write_timer = meters.StopwatchMeter()
    write_timer.start()
    epoch = epoch_itr.epoch
    end_of_epoch = epoch_itr.end_of_epoch()
    updates = trainer.get_num_updates()
    logger.info(f'Preparing to save checkpoint for epoch {epoch} {updates} updates')

    def is_better(a, b):
        # Ties count as better so equal scores refresh checkpoint_best.
        return ((a >= b) if cfg.maximize_best_checkpoint_metric else (a <= b))
    suffix = trainer.checkpoint_suffix
    # Map candidate filename -> whether it should be written this call.
    checkpoint_conds = collections.OrderedDict()
    checkpoint_conds['checkpoint{}{}.pt'.format(epoch, suffix)] = (end_of_epoch and (not cfg.no_epoch_checkpoints) and ((epoch % cfg.save_interval) == 0))
    checkpoint_conds['checkpoint_{}_{}{}.pt'.format(epoch, updates, suffix)] = ((not end_of_epoch) and (cfg.save_interval_updates > 0) and ((updates % cfg.save_interval_updates) == 0))
    checkpoint_conds['checkpoint_best{}.pt'.format(suffix)] = ((val_loss is not None) and ((not hasattr(save_checkpoint, 'best')) or is_better(val_loss, save_checkpoint.best)))
    if ((val_loss is not None) and (cfg.keep_best_checkpoints > 0)):
        # Compare against the worst of the currently-kept best-k checkpoints.
        worst_best = getattr(save_checkpoint, 'best', None)
        chkpts = checkpoint_paths(cfg.save_dir, pattern='checkpoint\\.best_{}_(\\d+\\.?\\d*)\\.pt'.format(cfg.best_checkpoint_metric))
        if (len(chkpts) > 0):
            p = (chkpts[(- 1)] if cfg.maximize_best_checkpoint_metric else chkpts[0])
            worst_best = float(p.rsplit('_')[(- 1)].replace('.pt', ''))
        # Random suffix avoids collisions between equal-scored checkpoints.
        rand_sfx = randint(0, cfg.keep_best_checkpoints)
        checkpoint_conds['checkpoint.best_{}_{:.3f}{}.pt'.format(cfg.best_checkpoint_metric, val_loss, rand_sfx)] = ((worst_best is None) or is_better(val_loss, worst_best))
    checkpoint_conds['checkpoint_last{}.pt'.format(suffix)] = (not cfg.no_last_checkpoints)
    extra_state = {'train_iterator': epoch_itr.state_dict(), 'val_loss': val_loss}
    if hasattr(save_checkpoint, 'best'):
        extra_state.update({'best': save_checkpoint.best})
    checkpoints = [os.path.join(cfg.save_dir, fn) for (fn, cond) in checkpoint_conds.items() if cond]
    if (len(checkpoints) > 0):
        # Write once, copy to the other selected filenames.
        trainer.save_checkpoint(checkpoints[0], extra_state)
        for cp in checkpoints[1:]:
            if cfg.write_checkpoints_asynchronously:
                logger.warning(f'ioPath is not copying {checkpoints[0]} to {cp} since async write mode is on.')
            else:
                assert PathManager.copy(checkpoints[0], cp, overwrite=True), f'Failed to copy {checkpoints[0]} to {cp}'
        write_timer.stop()
        logger.info('Saved checkpoint {} (epoch {} {} updates, score {}) (writing took {} seconds)'.format(checkpoints[0], epoch, updates, val_loss, write_timer.sum))
    # --- pruning of old checkpoints below ---
    if ((not end_of_epoch) and (cfg.keep_interval_updates > 0)):
        if (cfg.keep_interval_updates_pattern == (- 1)):
            checkpoints = checkpoint_paths(cfg.save_dir, pattern='checkpoint_\\d+_(\\d+){}\\.pt'.format(suffix))
        else:
            # Keep every Nth update checkpoint regardless of age.
            checkpoints = checkpoint_paths(cfg.save_dir, pattern='checkpoint_\\d+_(\\d+){}\\.pt'.format(suffix), keep_match=True)
            checkpoints = [x[0] for x in checkpoints if ((x[1] % cfg.keep_interval_updates_pattern) != 0)]
        for old_chk in checkpoints[cfg.keep_interval_updates:]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
            elif PathManager.exists(old_chk):
                PathManager.rm(old_chk)
    if (cfg.keep_last_epochs > 0):
        checkpoints = checkpoint_paths(cfg.save_dir, pattern='checkpoint(\\d+){}\\.pt'.format(suffix))
        for old_chk in checkpoints[cfg.keep_last_epochs:]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
    if (cfg.keep_best_checkpoints > 0):
        checkpoints = checkpoint_paths(cfg.save_dir, pattern='checkpoint\\.best_{}_(\\d+\\.?\\d*){}\\.pt'.format(cfg.best_checkpoint_metric, suffix))
        if (not cfg.maximize_best_checkpoint_metric):
            checkpoints = checkpoints[::(- 1)]
        for old_chk in checkpoints[cfg.keep_best_checkpoints:]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
class SpecAugment(torch.nn.Module):
    """SpecAugment: stacked frequency/time masking applied with probability p.

    Args:
        freq_mask: max width (bins) of each frequency mask.
        time_mask: max width (frames) of each time mask.
        freq_stripes / time_stripes: number of masks of each kind.
        p: probability of applying the augmentation per forward call.
    """

    def __init__(self, freq_mask=20, time_mask=50, freq_stripes=2, time_stripes=2, p=1.0):
        super().__init__()
        self.p = p
        self.freq_mask = freq_mask
        self.time_mask = time_mask
        self.freq_stripes = freq_stripes
        self.time_stripes = time_stripes
        self.specaugment = nn.Sequential(*[T.FrequencyMasking(freq_mask_param=self.freq_mask, iid_masks=True) for _ in range(self.freq_stripes)], *[T.TimeMasking(time_mask_param=self.time_mask, iid_masks=True) for _ in range(self.time_stripes)])

    def forward(self, audio):
        # BUG FIX: the original compared p against torch.randn(1) (a standard
        # normal draw), so p was not a probability — e.g. p=1.0 still skipped
        # augmentation ~16% of the time. torch.rand(1) is uniform on [0, 1).
        if (self.p > torch.rand(1)):
            return self.specaugment(audio)
        else:
            return audio
def resnet50_k(channel_k=32):
    """Factory for a ResNet-50 variant parameterized by channel width k."""
    print('Constructing resnet50_k......')
    return ResNet_k(Bottleneck, [3, 4, 6, 3], channel_k=channel_k)
class FlowControllerLinear(FlowController):
    """Flow controller that runs its passes in plain linear order."""

    def __init__(self, passes, options):
        # Keep both public and private aliases bound to the same list,
        # matching the original chained assignment.
        self._passes = passes
        self.passes = self._passes
        self.options = options
def transform(t):
    """Strip digits from `t`, apply the module-level translation `table`,
    trim surrounding spaces, and lowercase the result."""
    without_digits = (ch for ch in t if not ch.isdigit())
    return ''.join(without_digits).translate(table).strip(' ').lower()
class PerResidueLDDTCaPredictor(nn.Module):
    """Predicts per-residue lDDT-Ca bin logits from single representations.

    LayerNorm followed by a 3-layer MLP (ReLU after the first two linears).
    """

    def __init__(self, no_bins, c_in, c_hidden):
        super(PerResidueLDDTCaPredictor, self).__init__()
        self.no_bins = no_bins
        self.c_in = c_in
        self.c_hidden = c_hidden
        self.layer_norm = LayerNorm(self.c_in)
        self.linear_1 = Linear(self.c_in, self.c_hidden, init='relu')
        self.linear_2 = Linear(self.c_hidden, self.c_hidden, init='relu')
        self.linear_3 = Linear(self.c_hidden, self.no_bins, init='final')
        self.relu = nn.ReLU()

    def forward(self, s):
        s = self.relu(self.linear_1(self.layer_norm(s)))
        s = self.relu(self.linear_2(s))
        # Final layer emits raw bin logits (no activation).
        return self.linear_3(s)
def save_data(test_data_dir, prefix, names, data_list):
    """Dump each tensor in ``data_list`` as ``<prefix>_<i>.pb`` under ``test_data_dir``."""
    # A lone tensor/Variable is treated as a one-element list.
    if isinstance(data_list, (torch.autograd.Variable, torch.Tensor)):
        data_list = [data_list]
    for idx, tensor in enumerate(data_list):
        array = tensor.data.cpu().numpy()
        out_path = os.path.join(test_data_dir, '{0}_{1}.pb'.format(prefix, idx))
        save_tensor_proto(out_path, names[idx], array)
def _get_info_from_anaconda_info(info, split=':'):
info = info.strip('\n').replace(' ', '')
info_dict = {}
latest_key = ''
for line in info.splitlines():
if (split in line):
pair = line.split(split)
info_dict[pair[0]] = pair[1]
latest_key = pair[0]
else:
if (not isinstance(info_dict[latest_key], list)):
info_dict[latest_key] = [info_dict[latest_key]]
info_dict[latest_key].append(line)
return info_dict |
class _SwapAlign2Nat(Function):
    # Custom autograd Function bridging to the compiled
    # ``_C.swap_align2nat_forward/backward`` kernels.
    # NOTE(review): ``forward``/``backward`` take ``ctx`` as first argument but
    # carry no ``@staticmethod`` decorator here, and the bare ``_differentiable``
    # statement below looks like a decorator (probably ``@once_differentiable``)
    # that lost its ``@`` during extraction — confirm against the original source.
    def forward(ctx, X, lambda_val, pad_val):
        # Stash what backward needs: the scale factor and the input shape.
        ctx.lambda_val = lambda_val
        ctx.input_shape = X.size()
        Y = _C.swap_align2nat_forward(X, lambda_val, pad_val)
        return Y
    _differentiable
    def backward(ctx, gY):
        lambda_val = ctx.lambda_val
        (bs, ch, h, w) = ctx.input_shape
        gX = _C.swap_align2nat_backward(gY, lambda_val, bs, ch, h, w)
        # Only X receives a gradient; lambda_val and pad_val are non-tensor inputs.
        return (gX, None, None)
def _close_handlers(logger: logging.Logger) -> None:
for handler in list(logger.handlers):
if isinstance(handler, (logging.FileHandler, logging.StreamHandler)):
if isinstance(handler, logging.FileHandler):
handler.close()
logger.removeHandler(handler) |
class EMAParametersFunc(torch.autograd.Function):
    # Autograd wrapper around the fused ``mega2_ops`` EMA-parameter kernels.
    # NOTE(review): ``forward``/``backward`` take ``ctx`` but carry no
    # ``@staticmethod`` decorator in this file — likely lost during extraction.
    def forward(ctx: FunctionCtx, p: torch.Tensor, q: torch.Tensor, gamma: torch.Tensor, h: Optional[torch.Tensor], length: int) -> Tuple[(torch.Tensor, Optional[torch.Tensor])]:
        with torch.no_grad():
            # The fused kernel works with log(q); gradients are handled
            # manually in backward, hence the no_grad block.
            log_q = q.log()
            (weight, bias, vander) = mega2_ops.ema_parameters_fwd(p, log_q, gamma, h, length)
            ctx.save_for_backward(p, log_q, gamma, h, vander)
        return (weight, bias)
    def backward(ctx: FunctionCtx, weight_grad: torch.Tensor, bias_grad: Optional[torch.Tensor]) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor])]:
        (p, log_q, gamma, h, vander) = ctx.saved_tensors
        (p_grad, q_grad, gamma_grad, h_grad) = mega2_ops.ema_parameters_bwd(weight_grad, bias_grad, p, log_q, gamma, h, vander)
        # Final None: ``length`` is a non-tensor input and gets no gradient.
        return (p_grad, q_grad, gamma_grad, h_grad, None)
class AttentionLayerBahdanauTest(AttentionLayerTest):
    """Runs the shared attention-layer test suite against the Bahdanau variant."""

    def _create_layer(self):
        # Build the layer under test in TRAIN mode with the shared dimension.
        params = {'num_units': self.attention_dim}
        return AttentionLayerBahdanau(params=params, mode=tf.contrib.learn.ModeKeys.TRAIN)

    def test_layer(self):
        self._test_layer()
# NOTE(review): bare call at module level — almost certainly a registry
# decorator (e.g. ``@MODELS.register_module()`` in mmcv-style code) for the
# class below whose ``@`` was lost during extraction; confirm upstream.
_module()
class MobileNetV2(BaseBackbone):
    """MobileNetV2 backbone.

    Args:
        widen_factor (float): channel-width multiplier applied to every stage.
        out_indices (tuple[int]): stages (0-7) whose outputs are returned.
        frozen_stages (int): freeze stages up to this index; -1 freezes none.
        conv_cfg / norm_cfg / act_cfg (dict | None): layer construction configs.
        norm_eval (bool): keep norm layers in eval mode during training.
        with_cp (bool): use gradient checkpointing in the residual blocks.
    """

    # Per stage: [expand_ratio, channel, num_blocks, stride].
    arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]

    def __init__(self, widen_factor=1.0, out_indices=(7,), frozen_stages=(-1), conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU6'), norm_eval=False, with_cp=False):
        # Deep-copy the mutable default configs so instances never share state.
        norm_cfg = copy.deepcopy(norm_cfg)
        act_cfg = copy.deepcopy(act_cfg)
        super().__init__()
        self.widen_factor = widen_factor
        for index in out_indices:
            if index not in range(0, 8):
                raise ValueError(f'the item in out_indices must in range(0, 8). But received {index}')
        if frozen_stages not in range(-1, 8):
            raise ValueError(f'frozen_stages must be in range(-1, 8). But received {frozen_stages}')
        # Fix: the original assigned self.out_indices twice (before and after
        # validation); a single assignment after validation suffices.
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        self.in_channels = make_divisible(32 * widen_factor, 8)
        self.conv1 = ConvModule(in_channels=3, out_channels=self.in_channels, kernel_size=3, stride=2, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        self.layers = []
        for i, layer_cfg in enumerate(self.arch_settings):
            expand_ratio, channel, num_blocks, stride = layer_cfg
            out_channels = make_divisible(channel * widen_factor, 8)
            inverted_res_layer = self.make_layer(out_channels=out_channels, num_blocks=num_blocks, stride=stride, expand_ratio=expand_ratio)
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, inverted_res_layer)
            self.layers.append(layer_name)
        # Final 1x1 conv widens beyond 1280 only when widen_factor > 1.
        if widen_factor > 1.0:
            self.out_channel = int(1280 * widen_factor)
        else:
            self.out_channel = 1280
        layer = ConvModule(in_channels=self.in_channels, out_channels=self.out_channel, kernel_size=1, stride=1, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        self.add_module('conv2', layer)
        self.layers.append('conv2')

    def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
        """Stack ``num_blocks`` InvertedResidual blocks; only the first strides."""
        layers = []
        for i in range(num_blocks):
            if i >= 1:
                stride = 1
            layers.append(InvertedResidual(self.in_channels, out_channels, stride, expand_ratio=expand_ratio, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, with_cp=self.with_cp))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def init_weights(self, pretrained=None):
        """Load a checkpoint when ``pretrained`` is a path, else Kaiming/constant init."""
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Return feature map(s) at ``out_indices`` (bare tensor if only one)."""
        x = self.conv1(x)
        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)
        if len(outs) == 1:
            return outs[0]
        return tuple(outs)

    def _freeze_stages(self):
        # Freeze the stem plus stages 1..frozen_stages (eval mode + no grads).
        if self.frozen_stages >= 0:
            for param in self.conv1.parameters():
                param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                layer = getattr(self, f'layer{i}')
                layer.eval()
                for param in layer.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        """Switch mode, re-apply stage freezing, optionally hold BN in eval."""
        super().train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
def llama_copy_state_data(ctx: llama_context_p, dst) -> int:
    """Copy the llama context state into the caller-provided ``dst`` buffer.

    Thin ctypes wrapper; the int returned comes straight from the native
    ``llama_copy_state_data``. ``dst`` must be large enough for the full
    state (see the C API's state-size query) — not checked here.
    """
    return _lib.llama_copy_state_data(ctx, dst)
class NpzDataset(object):
    """Reads and writes per-example data as ``.npz`` files in one directory."""

    def __init__(self, name):
        self.name = os.path.expanduser(name)
        try:
            # Fix: create the expanded path (the original passed the raw
            # ``name``, ignoring ``~`` expansion). Existing dirs are fine.
            os.mkdir(self.name)
        except OSError:
            pass

    def write(self, example, i, r, image_type=None):
        """Save ``example`` as ``example%06d.<success|failure>.npz``; r > 0 means success."""
        if r > 0.0:
            status = 'success'
        else:
            status = 'failure'
        filename = ('example%06d.%s.npz' % (i, status))
        filename = os.path.join(self.name, filename)
        if image_type is not None:
            example['image_type'] = image_type
        np.savez(filename, **example)

    def load(self, success_only=False):
        """Load all examples, concatenating each key across files.

        Fixes vs. the original:
        - the status component of ``example%06d.<status>.npz`` is index 1 of
          the split filename (the original tested index 0, so ``success_only``
          never filtered anything);
        - the first file's arrays were stored and then immediately
          concatenated with themselves, duplicating that file's data.
        """
        files = os.listdir(self.name)
        files.sort()
        data = {}
        i = 0
        for f in files:
            if not (f[0] == '.'):
                i += 1
                print(('%d:' % i), f)
                if success_only:
                    name = f.split('.')
                    if name[1] == 'failure':
                        continue
                fdata = np.load(os.path.join(self.name, f))
                for (key, value) in fdata.items():
                    if key not in data:
                        data[key] = value
                        continue
                    if value.shape[0] == 0:
                        continue
                    data[key] = np.concatenate([data[key], value], axis=0)
        return data

    def preprocess(self, train=0.6, val=0.2):
        """Compute the test-split fraction from train/val fractions."""
        assert ((train + val) <= 1.0) and ((train + val) > 0.0)
        # NOTE(review): ``test`` is computed but unused — the split logic
        # appears truncated in this file; confirm upstream.
        test = ((1.0 - train) - val)
class DefaultObservable(Observable):
    """Observable backed by a plain list of subscribers."""

    def __init__(self):
        self.observers = []

    def register(self, observer: Observer):
        # Ignore duplicate registrations.
        if observer in self.observers:
            return
        self.observers.append(observer)

    def deregister(self, observer: Observer):
        if observer in self.observers:
            self.observers.remove(observer)

    def deregister_all(self):
        # Clear in place so external references to the list remain valid.
        del self.observers[:]

    def notify_all(self, *args, **kwargs):
        for subscriber in self.observers:
            subscriber.update(*args, **kwargs)
def mlp(t_in, widths, final_nonlinearity=False):
    """Stack fully-connected layers of the given widths on top of ``t_in``.

    tanh follows every layer except the last unless ``final_nonlinearity``
    is set. Returns ``(output_tensor, [w0, b0, w1, b1, ...])``.
    """
    weights = []
    prev_width = t_in.get_shape()[-1]
    prev_layer = t_in
    n_layers = len(widths)
    for i_layer, width in enumerate(widths):
        w = tf.get_variable('w%d' % i_layer, shape=(prev_width, width), initializer=tf.uniform_unit_scaling_initializer(factor=RELU_SCALE))
        b = tf.get_variable('b%d' % i_layer, shape=(width,), initializer=tf.constant_initializer(0.0))
        weights.extend([w, b])
        layer_out = batch_matmul(prev_layer, w) + b
        is_last = i_layer == n_layers - 1
        if final_nonlinearity or not is_last:
            layer_out = tf.nn.tanh(layer_out)
        prev_layer = layer_out
        prev_width = width
    return (prev_layer, weights)
def encrypt_with_AES_CBC(plain_text, secret_key, salt, key_len=128, block_size=16):
    """Encrypt ``plain_text`` (str) with AES-CBC and return base64-encoded text."""
    cipher_bytes = encrypt_bytes_with_AES_CBC(plain_text.encode(), secret_key, salt, key_len, block_size)
    encoded = base64.b64encode(cipher_bytes)
    return encoded.decode()
def _update_args(objs, obj_pos):
for (obj, pos) in zip(objs, obj_pos):
(_, arg, idx) = pos
arg[idx] = obj |
def run_fn_for_gptq(model, dataloader_for_calibration, *args):
    """Feed every calibration batch through ``model`` to collect GPTQ inputs."""
    logger.info('Collecting calibration inputs...')
    for batch in tqdm(dataloader_for_calibration):
        batch = move_input_to_device(batch, device=None)
        try:
            if isinstance(batch, dict):
                model(**batch)
            elif isinstance(batch, (tuple, list)):
                model(batch[0])
            else:
                model(batch)
        except ValueError:
            # Input-collection hooks may abort the forward pass early with
            # ValueError; that is expected during calibration.
            pass
    return
def parse_cmd_options(argv, opt=None):
    """Parse PlainNet-related options from ``argv``; unknown args are ignored."""
    parser = argparse.ArgumentParser()
    # String-valued structure options share the same shape.
    str_opts = [('--plainnet_struct', 'PlainNet structure string'), ('--plainnet_struct_txt', 'PlainNet structure file name')]
    for flag, help_text in str_opts:
        parser.add_argument(flag, type=str, default=None, help=help_text)
    parser.add_argument('--num_classes', type=int, default=None, help='how to prune')
    parsed, _unknown = parser.parse_known_args(argv)
    return parsed
# NOTE(review): bare call at module level — almost certainly a registry
# decorator (e.g. ``@HOOKS.register_module()`` in mmcv-style code) for the
# class below whose ``@`` was lost during extraction; confirm upstream.
_module()
class CyclicLrUpdaterHook(LrUpdaterHook):
    """Cyclic LR schedule: each cycle ramps LR up for ``step_ratio_up`` of the
    cycle, then anneals back down; ``gamma`` shrinks the amplitude of later
    cycles. Iteration-based only (``by_epoch`` must be False).
    """
    def __init__(self, by_epoch=False, target_ratio=(10, 0.0001), cyclic_times=1, step_ratio_up=0.4, anneal_strategy='cos', gamma=1, **kwargs):
        # Normalize target_ratio to an (up, down) pair of floats.
        if isinstance(target_ratio, float):
            target_ratio = (target_ratio, (target_ratio / 100000.0))
        elif isinstance(target_ratio, tuple):
            target_ratio = ((target_ratio[0], (target_ratio[0] / 100000.0)) if (len(target_ratio) == 1) else target_ratio)
        else:
            raise ValueError(f'target_ratio should be either float or tuple, got {type(target_ratio)}')
        assert (len(target_ratio) == 2), '"target_ratio" must be list or tuple of two floats'
        assert (0 <= step_ratio_up < 1.0), '"step_ratio_up" must be in range [0,1)'
        assert (0 < gamma <= 1), '"gamma" must be in range (0, 1]'
        self.target_ratio = target_ratio
        self.cyclic_times = cyclic_times
        self.step_ratio_up = step_ratio_up
        self.gamma = gamma
        # Both are filled in before_run, once runner.max_iters is known.
        self.max_iter_per_phase = None
        self.lr_phases = []
        if (anneal_strategy not in ['cos', 'linear']):
            raise ValueError(f'anneal_strategy must be one of "cos" or "linear", instead got {anneal_strategy}')
        elif (anneal_strategy == 'cos'):
            self.anneal_func = annealing_cos
        elif (anneal_strategy == 'linear'):
            self.anneal_func = annealing_linear
        assert (not by_epoch), 'currently only support "by_epoch" = False'
        super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs)
    def before_run(self, runner):
        super(CyclicLrUpdaterHook, self).before_run(runner)
        # Split the run into cyclic_times equal cycles; each cycle has an
        # up-phase [0, iter_up_phase) and a down-phase [iter_up_phase, end).
        self.max_iter_per_phase = (runner.max_iters // self.cyclic_times)
        iter_up_phase = int((self.step_ratio_up * self.max_iter_per_phase))
        self.lr_phases.append([0, iter_up_phase, 1, self.target_ratio[0]])
        self.lr_phases.append([iter_up_phase, self.max_iter_per_phase, self.target_ratio[0], self.target_ratio[1]])
    def get_lr(self, runner, base_lr):
        # Position within the current cycle and the cycle number.
        curr_iter = (runner.iter % self.max_iter_per_phase)
        curr_cycle = (runner.iter // self.max_iter_per_phase)
        # gamma**cycle damps the ratio towards 1 on later cycles.
        scale = (self.gamma ** curr_cycle)
        for (start_iter, end_iter, start_ratio, end_ratio) in self.lr_phases:
            if (start_iter <= curr_iter < end_iter):
                if (start_iter == 0):
                    end_ratio = ((1 - scale) + (end_ratio * scale))
                else:
                    start_ratio = ((1 - scale) + (start_ratio * scale))
                progress = (curr_iter - start_iter)
                return self.anneal_func((base_lr * start_ratio), (base_lr * end_ratio), (progress / (end_iter - start_iter)))
def bias_variable(shape, pos_initial_bias):
    """Create a trainable bias variable; optionally force positive initial values."""
    init = tf.truncated_normal(shape, stddev=0.1)
    if pos_initial_bias:
        # abs() keeps the truncated-normal magnitudes but flips sign to +.
        init = tf.abs(init)
    return tf.Variable(init)
def digit_version(version_str: str, length: int=4):
    """Convert a version string into a fixed-length tuple of ints for comparison.

    Pre-releases map to negative sentinels (a=-3, b=-2, rc=-1, unknown=-4),
    post-releases append 1, plain releases append 0, so tuple comparison
    orders versions correctly.
    """
    version = parse(version_str)
    assert version.release, f'failed to parse version {version_str}'
    release = list(version.release)[:length]
    # Right-pad with zeros up to the requested length.
    release += [0] * (length - len(release))
    if version.is_prerelease:
        mapping = {'a': -3, 'b': -2, 'rc': -1}
        val = -4
        if version.pre:
            if version.pre[0] in mapping:
                val = mapping[version.pre[0]]
            else:
                warnings.warn(f'unknown prerelease version {version.pre[0]}, version checking may go wrong')
            release.extend([val, version.pre[-1]])
        else:
            release.extend([val, 0])
    elif version.is_postrelease:
        release.extend([1, version.post])
    else:
        release.extend([0, 0])
    return tuple(release)
class BertAttention(nn.Module):
    """Self-attention block (BertSelfAttention + BertSelfOutput) with head pruning."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def prune_heads(self, heads):
        """Remove the given attention heads from Q/K/V and the output projection."""
        if len(heads) == 0:
            return
        num_heads = self.self.num_attention_heads
        head_size = self.self.attention_head_size
        # Mark kept positions: zero out the rows of the pruned heads.
        keep = torch.ones(num_heads, head_size)
        for head in heads:
            keep[head] = 0
        keep = keep.view(-1).contiguous().eq(1)
        index = torch.arange(len(keep))[keep].long()
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        # The output projection shrinks along its input dimension.
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        self.self.num_attention_heads = num_heads - len(heads)
        self.self.all_head_size = head_size * self.self.num_attention_heads

    def forward(self, input_tensor, attention_mask, head_mask=None, history_state=None):
        self_outputs = self.self(input_tensor, attention_mask, head_mask, history_state)
        attention_output = self.output(self_outputs[0], input_tensor)
        # Prepend the attention output to any extras (e.g. attention probs).
        return (attention_output,) + self_outputs[1:]
class DeleteObjCommand(BaseUserCommand):
    """CLI command that deletes a remote object owned by the logged-in user."""

    def run(self):
        token = HfFolder.get_token()
        if token is None:
            print('Not logged in')
            exit(1)
        try:
            self._api.delete_obj(token, filename=self.args.filename)
        except HTTPError as err:
            # Surface the server error, then fail the command.
            print(err)
            exit(1)
        print('Done')
def get_mixins(kernel):
    """Collect per-subkernel output scales from a projection kernel.

    ScaleKernels are unwrapped recursively; any other kernel yields None.
    """
    if isinstance(kernel, gp_models.GeneralizedProjectionKernel):
        return [k.outputscale.item() for k in kernel.kernel.kernels]
    if isinstance(kernel, gpytorch.kernels.ScaleKernel):
        # Recurse into the wrapped base kernel.
        return get_mixins(kernel.base_kernel)
    return None
# NOTE(review): bare call at module level — almost certainly a registry
# decorator (e.g. ``@HEADS.register_module()`` in mmdet-style code) for the
# class below whose ``@`` was lost during extraction; confirm upstream.
_module()
class PISASSDHead(SSDHead):
    # SSD head with PISA (Prime Sample Attention): ISR-P reweights training
    # targets by sample importance, and the optional CARL term correlates
    # classification confidence with localisation quality.
    def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None):
        """Compute classification/regression losses (plus CARL if configured)."""
        featmap_sizes = [featmap.size()[(- 2):] for featmap in cls_scores]
        assert (len(featmap_sizes) == self.prior_generator.num_levels)
        device = cls_scores[0].device
        (anchor_list, valid_flag_list) = self.get_anchors(featmap_sizes, img_metas, device=device)
        cls_reg_targets = self.get_targets(anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=1, unmap_outputs=False, return_sampling_results=True)
        if (cls_reg_targets is None):
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
        num_images = len(img_metas)
        # Flatten per-level predictions/targets into per-image tensors.
        all_cls_scores = torch.cat([s.permute(0, 2, 3, 1).reshape(num_images, (- 1), self.cls_out_channels) for s in cls_scores], 1)
        all_labels = torch.cat(labels_list, (- 1)).view(num_images, (- 1))
        all_label_weights = torch.cat(label_weights_list, (- 1)).view(num_images, (- 1))
        all_bbox_preds = torch.cat([b.permute(0, 2, 3, 1).reshape(num_images, (- 1), 4) for b in bbox_preds], (- 2))
        all_bbox_targets = torch.cat(bbox_targets_list, (- 2)).view(num_images, (- 1), 4)
        all_bbox_weights = torch.cat(bbox_weights_list, (- 2)).view(num_images, (- 1), 4)
        all_anchors = []
        for i in range(num_images):
            all_anchors.append(torch.cat(anchor_list[i]))
        isr_cfg = self.train_cfg.get('isr', None)
        all_targets = (all_labels.view((- 1)), all_label_weights.view((- 1)), all_bbox_targets.view((- 1), 4), all_bbox_weights.view((- 1), 4))
        # ISR-P: importance-based reweighting of the flattened targets.
        if (isr_cfg is not None):
            all_targets = isr_p(all_cls_scores.view((- 1), all_cls_scores.size((- 1))), all_bbox_preds.view((- 1), 4), all_targets, torch.cat(all_anchors), sampling_results_list, loss_cls=CrossEntropyLoss(), bbox_coder=self.bbox_coder, **self.train_cfg.isr, num_class=self.num_classes)
            (new_labels, new_label_weights, new_bbox_targets, new_bbox_weights) = all_targets
            all_labels = new_labels.view(all_labels.shape)
            all_label_weights = new_label_weights.view(all_label_weights.shape)
            all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape)
            all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape)
        carl_loss_cfg = self.train_cfg.get('carl', None)
        # CARL: classification-aware regression loss on the same flat targets.
        if (carl_loss_cfg is not None):
            loss_carl = carl_loss(all_cls_scores.view((- 1), all_cls_scores.size((- 1))), all_targets[0], all_bbox_preds.view((- 1), 4), all_targets[2], SmoothL1Loss(beta=1.0), **self.train_cfg.carl, avg_factor=num_total_pos, num_class=self.num_classes)
        assert torch.isfinite(all_cls_scores).all().item(), 'classification scores become infinite or NaN!'
        assert torch.isfinite(all_bbox_preds).all().item(), 'bbox predications become infinite or NaN!'
        (losses_cls, losses_bbox) = multi_apply(self.loss_single, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, num_total_samples=num_total_pos)
        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
        if (carl_loss_cfg is not None):
            loss_dict.update(loss_carl)
        return loss_dict
def get_sys_writer_function(args):
    """Return a callback that forwards system ('sys') metrics to metrics_writer."""
    def writer_fn(num_round, ids, metrics, groups, num_samples):
        # Always written under the 'train' partition with a "<name>_sys" file.
        partition = 'train'
        fname = '{}_{}'.format(args.metrics_name, 'sys')
        metrics_writer.print_metrics(num_round, ids, metrics, groups, num_samples, partition, args.metrics_dir, fname)
    return writer_fn
class rigid_SURREAL_for_Ours_full_permute(torch.utils.data.Dataset):
    # SURREAL point-cloud pair dataset: each item is a cloud plus a rigidly
    # transformed, permuted copy, with the permutation matrix as the label.
    def __init__(self, args, file_name, transform=None, soft_label=False, show=False, pick_out=None, train=None, npoints=None, ratio_list=[0.02, 0.04, 0.06, 0.08, 0.1], gaussian_noise=False, partition='train', factor=4):
        # NOTE(review): mutable default for ratio_list — shared across
        # instances if ever mutated; confirm it is treated as read-only.
        self.args = args
        self.gaussian_noise = gaussian_noise
        self.partition = partition
        self.factor = factor
        self.ratio_list = ratio_list
        self.file_name = file_name
        # Lazily opened in __getitem__ so the h5py handle lives in the
        # worker process, not the parent.
        self.xyz2 = None
        with h5py.File(self.file_name, 'r') as file:
            self.len = len(file['xyz2'])
        self.pair_len = (self.len // 2)
        self.L = list(range(0, self.len))
        self.epoch = None
    def permuted_transfrom(self, xyz1, index, random_fix=True):
        # Build a random (non-identity) permutation matrix p and apply it to
        # the (N, 3) cloud; p is returned as the correspondence label.
        assert (len(xyz1.shape) == 2)
        assert (xyz1.shape[1] == 3)
        npoint = xyz1.shape[0]
        I = np.eye(npoint)
        p = I.copy()
        while np.array_equal(p, I):
            if (random_fix == True):
                # Reseeding inside the loop makes the shuffle deterministic
                # per index.
                np.random.seed(index)
            np.random.shuffle(p)
        permuted_xyz1 = np.dot(p, xyz1)
        label = p
        return (label, permuted_xyz1)
    def __getitem__(self, item):
        # NOTE(review): ``== None`` on a (possibly open) h5py dataset —
        # ``is None`` is the safer identity test; confirm before changing.
        if (self.xyz2 == None):
            self.xyz2 = h5py.File(self.file_name, 'r')['xyz2']
        pointcloud = np.array(self.xyz2[item])
        (src, target, rotation_ab, translation_ab, rotation_ba, translation_ba, euler_ab, euler_ba, corr_matrix_label) = self.rigid_transform_to_make_pair(pointcloud, item)
        corr_matrix_label = torch.from_numpy(corr_matrix_label).float()
        return (corr_matrix_label, src, target, item)
    def rigid_transform_to_make_pair(self, pointcloud, item):
        # Random rotation (bounded by pi/factor per axis) + translation in
        # [-0.5, 0.5]^3; deterministic (seeded by item) outside training.
        if self.gaussian_noise:
            pointcloud = jitter_pointcloud(pointcloud)
        if (self.partition != 'train'):
            np.random.seed(item)
        anglex = ((np.random.uniform() * np.pi) / self.factor)
        angley = ((np.random.uniform() * np.pi) / self.factor)
        anglez = ((np.random.uniform() * np.pi) / self.factor)
        cosx = np.cos(anglex)
        cosy = np.cos(angley)
        cosz = np.cos(anglez)
        sinx = np.sin(anglex)
        siny = np.sin(angley)
        sinz = np.sin(anglez)
        Rx = np.array([[1, 0, 0], [0, cosx, (- sinx)], [0, sinx, cosx]])
        Ry = np.array([[cosy, 0, siny], [0, 1, 0], [(- siny), 0, cosy]])
        Rz = np.array([[cosz, (- sinz), 0], [sinz, cosz, 0], [0, 0, 1]])
        R_ab = Rx.dot(Ry).dot(Rz)
        R_ba = R_ab.T
        translation_ab = np.array([np.random.uniform((- 0.5), 0.5), np.random.uniform((- 0.5), 0.5), np.random.uniform((- 0.5), 0.5)])
        translation_ba = (- R_ba.dot(translation_ab))
        pointcloud1 = pointcloud.T
        rotation_ab = Rotation.from_euler('zyx', [anglez, angley, anglex])
        pointcloud2 = (rotation_ab.apply(pointcloud1.T).T + np.expand_dims(translation_ab, axis=1))
        euler_ab = np.asarray([anglez, angley, anglex])
        euler_ba = (- euler_ab[::(- 1)])
        if (self.partition != 'train'):
            # Deterministic permutation for evaluation reproducibility.
            np.random.seed(item)
            pointcloud1 = np.random.permutation(pointcloud1.T)
            np.random.seed(item)
            pointcloud2 = np.random.permutation(pointcloud2.T).T
            (corr_matrix_label, permuted_pointcloud) = self.permuted_transfrom(pointcloud1, item)
            pointcloud1 = permuted_pointcloud.T
        elif (self.partition == 'train'):
            # Time-seeded so each epoch sees fresh permutations.
            now = int(time.time())
            np.random.seed(now)
            pointcloud1 = np.random.permutation(pointcloud1.T)
            np.random.seed(now)
            pointcloud2 = np.random.permutation(pointcloud2.T).T
            (corr_matrix_label, permuted_pointcloud) = self.permuted_transfrom(pointcloud1, now)
            pointcloud1 = permuted_pointcloud.T
        return (pointcloud1.astype('float32'), pointcloud2.astype('float32'), R_ab.astype('float32'), translation_ab.astype('float32'), R_ba.astype('float32'), translation_ba.astype('float32'), euler_ab.astype('float32'), euler_ba.astype('float32'), corr_matrix_label)
    def __len__(self):
        return self.len
def test_isotropic_nfw_widrow_against_improved():
    """Check that the Widrow and improved isotropic NFW f(E) agree to 1%."""
    pot = potential.NFWPotential(amp=2.3, a=1.3)
    df_improved = isotropicNFWdf(pot=pot)
    df_widrow = isotropicNFWdf(pot=pot, widrow=True)
    # Sample energies over (almost) the full bound range, excluding E = 0.
    Es = numpy.linspace(-df_improved._Etildemax * 0.999, 0, 101, endpoint=False)
    ratio = df_improved.fE(Es) / df_widrow.fE(Es)
    assert numpy.all(numpy.fabs(1.0 - ratio) < 0.01), 'isotropic NFW with widrow=True does not agree on f(E) with widrow=False'
    return None
def wrap_main(main_fn):
    """Wrap ``main_fn`` for single- or multi-process (spawn) execution.

    Under torchrun (RANK set) the current process runs the worker directly;
    otherwise worker processes are spawned locally, one per visible GPU.
    """
    world_size = torch.cuda.device_count()

    def main(**args):
        if 'RANK' in os.environ:
            mp.set_start_method('spawn')
            _torchrun_worker_fn(main_fn, args)
        else:
            os.environ['PYTHONUNBUFFERED'] = '1'
            os.environ['MASTER_ADDR'] = 'localhost'
            # Fix: valid TCP ports end at 65535 and random.randint is
            # inclusive on both ends — the original (1024, 65536) could pick
            # the invalid port 65536.
            os.environ['MASTER_PORT'] = str(random.randint(1024, 65535))
            mp.set_start_method('spawn')
            if world_size == 1:
                _worker_fn(0, world_size, main_fn, args)
            else:
                mp.spawn(_worker_fn, (world_size, main_fn, args), nprocs=world_size, join=True)
    return main
def main():
    """Run single-model inference over a directory/list of images, visualize
    detections, optionally log frames to VisualDL, and save annotated images.
    """
    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)
    check_gpu(cfg.use_gpu)
    check_version()
    main_arch = cfg.architecture
    dataset = cfg.TestReader['dataset']
    test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)
    dataset.set_images(test_images)
    place = (fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace())
    exe = fluid.Executor(place)
    model = create(main_arch)
    # Build the inference program (graph) separately from startup.
    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['TestReader']['inputs_def']
            inputs_def['iterable'] = True
            (feed_vars, loader) = model.build_inputs(**inputs_def)
            test_fetches = model.test(feed_vars)
    infer_prog = infer_prog.clone(True)
    reader = create_reader(cfg.TestReader, devices_num=1)
    loader.set_sample_list_generator(reader, place)
    exe.run(startup_prog)
    if cfg.weights:
        checkpoint.load_params(exe, infer_prog, cfg.weights)
    assert (cfg.metric in ['COCO', 'VOC', 'OID', 'WIDERFACE']), 'unknown metric type {}'.format(cfg.metric)
    extra_keys = []
    if (cfg['metric'] in ['COCO', 'OID']):
        extra_keys = ['im_info', 'im_id', 'im_shape']
    if ((cfg['metric'] == 'VOC') or (cfg['metric'] == 'WIDERFACE')):
        extra_keys = ['im_id', 'im_shape']
    (keys, values, _) = parse_fetches(test_fetches, infer_prog, extra_keys)
    # Metric-specific converters for turning fetches into bbox/mask results.
    if (cfg.metric == 'COCO'):
        from ppdet.utils.coco_eval import bbox2out, mask2out, get_category_info
    if (cfg.metric == 'OID'):
        from ppdet.utils.oid_eval import bbox2out, get_category_info
    if (cfg.metric == 'VOC'):
        from ppdet.utils.voc_eval import bbox2out, get_category_info
    if (cfg.metric == 'WIDERFACE'):
        from ppdet.utils.widerface_eval_utils import bbox2out, get_category_info
    anno_file = dataset.get_anno()
    with_background = dataset.with_background
    use_default_label = dataset.use_default_label
    (clsid2catid, catid2name) = get_category_info(anno_file, with_background, use_default_label)
    is_bbox_normalized = False
    if (hasattr(model, 'is_bbox_normalized') and callable(model.is_bbox_normalized)):
        is_bbox_normalized = model.is_bbox_normalized()
    if FLAGS.use_vdl:
        from visualdl import LogWriter
        vdl_writer = LogWriter(FLAGS.vdl_log_dir)
        vdl_image_step = 0
        vdl_image_frame = 0
    imid2path = dataset.get_imid2path()
    for (iter_id, data) in enumerate(loader()):
        outs = exe.run(infer_prog, feed=data, fetch_list=values, return_numpy=False)
        # Pair each fetched LoD tensor with its (lengths) metadata.
        res = {k: (np.array(v), ([[v.shape()[0]]] if (v.shape()[1] == 6) else v.recursive_sequence_lengths())) for (k, v) in zip(keys, outs)}
        logger.info('Infer iter {}'.format(iter_id))
        bbox_results = None
        mask_results = None
        if ('bbox' in res):
            bbox_results = bbox2out([res], clsid2catid, is_bbox_normalized)
        if ('mask' in res):
            mask_results = mask2out([res], clsid2catid, model.mask_head.resolution)
        im_ids = res['im_id'][0]
        for im_id in im_ids:
            image_path = imid2path[int(im_id)]
            image = Image.open(image_path).convert('RGB')
            if FLAGS.use_vdl:
                original_image_np = np.array(image)
                vdl_writer.add_image('original/frame_{}'.format(vdl_image_frame), original_image_np, vdl_image_step)
            image = visualize_results(image, int(im_id), catid2name, FLAGS.draw_threshold, bbox_results, mask_results)
            if FLAGS.use_vdl:
                infer_image_np = np.array(image)
                vdl_writer.add_image('bbox/frame_{}'.format(vdl_image_frame), infer_image_np, vdl_image_step)
                vdl_image_step += 1
                # Group every 10 images under one VDL "frame".
                if ((vdl_image_step % 10) == 0):
                    vdl_image_step = 0
                    vdl_image_frame += 1
            save_name = get_save_image_name(FLAGS.output_dir, image_path)
            logger.info('Detection bbox results save in {}'.format(save_name))
            image.save(save_name, quality=95)
class StateCacher(object):
    """Caches state_dicts either in memory (as deep copies) or on disk."""

    def __init__(self, in_memory, cache_dir=None):
        self.in_memory = in_memory
        self.cache_dir = cache_dir
        if self.cache_dir is None:
            # Default to the system temp directory.
            import tempfile
            self.cache_dir = tempfile.gettempdir()
        elif not os.path.isdir(self.cache_dir):
            raise ValueError('Given `cache_dir` is not a valid directory.')
        self.cached = {}

    def store(self, key, state_dict):
        """Remember ``state_dict`` under ``key`` (deep copy, or torch.save to disk)."""
        if self.in_memory:
            self.cached[key] = copy.deepcopy(state_dict)
        else:
            fn = os.path.join(self.cache_dir, 'state_{}_{}.pt'.format(key, id(self)))
            self.cached[key] = fn
            torch.save(state_dict, fn)

    def retrieve(self, key):
        """Return the cached state for ``key``; KeyError if never stored,
        RuntimeError if the on-disk file disappeared."""
        if key not in self.cached:
            raise KeyError('Target {} was not cached.'.format(key))
        if self.in_memory:
            return self.cached.get(key)
        fn = self.cached.get(key)
        if not os.path.exists(fn):
            raise RuntimeError('Failed to load state in {}. File does not exist anymore.'.format(fn))
        # Map storages back to CPU so retrieval works without the saving device.
        return torch.load(fn, map_location=(lambda storage, location: storage))

    def __del__(self):
        # Disk mode only: best-effort cleanup of files this instance wrote.
        if self.in_memory:
            return
        for key in self.cached:
            if os.path.exists(self.cached[key]):
                os.remove(self.cached[key])
def test_streamspraydf_setup_paramsAsQuantity():
    """Check that a streamspraydf built with astropy Quantities samples the
    same stream as one built with raw internal (natural) units."""
    from galpy.df import streamspraydf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion
    (ro, vo) = (8.0, 220.0)
    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    obs = Orbit([1., 0., (- 1.), 0., (- 0.), 0.])
    Mass = ((2 * (10.0 ** 4.0)) * units.Msun)
    tdisrupt = (4.5 * units.Gyr)
    # Same physical setup expressed in internal units (no ro/vo attached).
    spdf_bovy14_nou = streamspraydf((Mass.to_value(units.Msun) / conversion.mass_in_msol(vo, ro)), progenitor=obs, pot=lp, tdisrupt=(tdisrupt.to_value(units.Gyr) / conversion.time_in_Gyr(vo, ro)))
    spdf_bovy14 = streamspraydf(Mass, progenitor=obs, pot=lp, tdisrupt=tdisrupt, ro=ro, vo=vo)
    # Identical seeds so both draws use the same random stream.
    numpy.random.seed(10)
    sam = spdf_bovy14.sample(n=2)
    numpy.random.seed(10)
    sam_nou = spdf_bovy14_nou.sample(n=2)
    assert numpy.all((numpy.fabs((sam.r(use_physical=False) - sam_nou.r(use_physical=False))) < 1e-08)), 'Sample returned by streamspraydf.sample with with unit output is inconsistenty with the same sample sampled without unit output'
    assert numpy.all((numpy.fabs((sam.vr(use_physical=False) - sam_nou.vr(use_physical=False))) < 1e-08)), 'Sample returned by streamspraydf.sample with with unit output is inconsistenty with the same sample sampled without unit output'
    return None
def ether_lock_can_send(op, stack, trace, debug):
    """Flag opcodes that can move ether out of a contract.

    Returns ``(handled, found)``: ``found`` is True for SUICIDE always, and
    for CALL/CALLCODE/DELEGATECALL when ``ETHER_LOCK_GOOD_IF_CAN_CALL`` is
    set. On a hit the wider search is halted via ``MyGlobals.stop_search``.
    """
    # Fix: the original declared ``global stop_search`` (twice) but only ever
    # wrote ``MyGlobals.stop_search``; the dead global declarations — which
    # referred to a module global that is never assigned — are removed.
    if op in ['SUICIDE']:
        MyGlobals.stop_search = True
        return (True, True)
    elif MyGlobals.ETHER_LOCK_GOOD_IF_CAN_CALL and op in ['CALL', 'CALLCODE', 'DELEGATECALL']:
        MyGlobals.stop_search = True
        return (True, True)
    else:
        return (True, False)
class testLosses(unittest.TestCase):
    # Smoke tests for the stereo focal-loss evaluator on a fixed CUDA device.
    def setUp(self):
        # Hard-coded second GPU; these tests require a multi-GPU machine.
        self.device = torch.device('cuda:1')
    def testCase1(self):
        # Dense GT disparity; candidate disparities derived from
        # max_disp/start_disp/dilation inside the evaluator.
        max_disp = 5
        start_disp = (- 2)
        dilation = 2
        (h, w) = (3, 4)
        # Number of sampled disparity hypotheses given the dilation.
        d = (((max_disp + dilation) - 1) // dilation)
        variance = 2
        gtDisp = ((torch.rand(1, 1, h, w) * max_disp) + start_disp)
        gtDisp = gtDisp.to(self.device)
        cfg = Config(dict(data=dict(sparse=False), model=dict(losses=dict(focal_loss=dict(max_disp=max_disp, start_disp=start_disp, dilation=dilation, weight=1.0, weights=1.0, coefficient=5.0)))))
        # Uniform cost volume: checks the loss runs and is differentiable.
        estCost = torch.ones(1, d, h, w).to(self.device)
        estCost.requires_grad = True
        print('\n \n Test Case 1:')
        print(('*' * 60))
        print('Estimated Cost volume:')
        print(estCost)
        stereo_focal_loss_evaluator = make_focal_loss_evaluator(cfg)
        print(stereo_focal_loss_evaluator)
        print(('*' * 60))
        print(stereo_focal_loss_evaluator(estCost=estCost, gtDisp=gtDisp, variance=variance, disp_sample=None))
    def testCase2(self):
        # Same loss but with an explicit per-pixel disparity sample grid.
        max_disp = 5
        start_disp = (- 2)
        variance = 2
        (h, w) = (3, 4)
        disp_sample = torch.Tensor([(- 2), 0, 2]).repeat(1, h, w, 1).permute(0, 3, 1, 2).contiguous()
        d = disp_sample.shape[1]
        gtDisp = ((torch.rand(1, 1, h, w) * max_disp) + start_disp)
        gtDisp = gtDisp.to(self.device)
        gtDisp.requires_grad = True
        cfg = Config(dict(data=dict(sparse=False), model=dict(losses=dict(focal_loss=dict(max_disp=max_disp, start_disp=start_disp, weight=1.0, weights=1.0, coefficient=5.0)))))
        print('\n \n Test Case 2:')
        print(('*' * 60))
        print('Ground Truth Disparity:')
        print(gtDisp)
        estCost = torch.ones(1, d, h, w).to(self.device)
        estCost.requires_grad = True
        print(('*' * 60))
        print('Estimated Cost volume:')
        print(estCost)
        stereo_focal_loss_evaluator = make_focal_loss_evaluator(cfg)
        print(stereo_focal_loss_evaluator)
        print(('*' * 60))
        print(stereo_focal_loss_evaluator(estCost=estCost, gtDisp=gtDisp, variance=variance, disp_sample=disp_sample))
class RealmTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """Load the wordpiece vocabulary and set up basic/wordpiece tokenizers.

        Raises:
            ValueError: if ``vocab_file`` does not exist on disk.
        """
        super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        if (not os.path.isfile(vocab_file)):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = RealmTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping for id -> token lookups.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
    # NOTE(review): in the upstream tokenizer this is a ``@property``; the
    # decorator appears lost during extraction — confirm before relying on
    # attribute-style access.
    def do_lower_case(self):
        """Whether the underlying basic tokenizer lowercases input."""
        return self.basic_tokenizer.do_lower_case
    # NOTE(review): in the upstream tokenizer this is a ``@property``; the
    # decorator appears lost during extraction — confirm before relying on
    # attribute-style access.
    def vocab_size(self):
        """Size of the base vocabulary (excludes added tokens)."""
        return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
if (token in self.basic_tokenizer.never_split):
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
    """Join WordPiece tokens into text, removing '##' continuation markers."""
    joined = ' '.join(tokens)
    return joined.replace(' ##', '').strip()
def batch_encode_candidates(self, text, **kwargs):
    """Encode a batch of candidate sequences, each padded to max length.

    *text* is a batch (iterable) of candidate texts; an optional ``text_pair``
    kwarg supplies the paired sequence for each candidate. Per-candidate
    encodings are collected and returned as a single BatchEncoding converted
    to ``return_tensors`` at the end.
    """
    # Force max-length padding so every candidate encodes to the same width.
    kwargs['padding'] = PaddingStrategy.MAX_LENGTH
    batch_text = text
    batch_text_pair = kwargs.pop('text_pair', None)
    return_tensors = kwargs.pop('return_tensors', None)
    output_data = {'input_ids': [], 'attention_mask': [], 'token_type_ids': []}
    for (idx, candidate_text) in enumerate(batch_text):
        if (batch_text_pair is not None):
            candidate_text_pair = batch_text_pair[idx]
        else:
            candidate_text_pair = None
        # Encode as plain lists here; tensor conversion happens once below.
        encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
        encoded_input_ids = encoded_candidates.get('input_ids')
        encoded_attention_mask = encoded_candidates.get('attention_mask')
        encoded_token_type_ids = encoded_candidates.get('token_type_ids')
        if (encoded_input_ids is not None):
            output_data['input_ids'].append(encoded_input_ids)
        if (encoded_attention_mask is not None):
            output_data['attention_mask'].append(encoded_attention_mask)
        if (encoded_token_type_ids is not None):
            output_data['token_type_ids'].append(encoded_token_type_ids)
    # Drop fields the underlying tokenizer never produced.
    output_data = dict(((key, item) for (key, item) in output_data.items() if (len(item) != 0)))
    return BatchEncoding(output_data, tensor_type=return_tensors)
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Add [CLS]/[SEP] around one or two sequences (BERT-style layout):
    ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    if token_ids_1 is None:
        return cls + token_ids_0 + sep
    return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
    """Return a mask with 1 at special-token positions and 0 elsewhere,
    matching the layout produced by ``build_inputs_with_special_tokens``."""
    if already_has_special_tokens:
        # Sequences already carry their special tokens; delegate upstream.
        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
    mask = [1] + [0] * len(token_ids_0) + [1]
    if token_ids_1 is not None:
        mask += [0] * len(token_ids_1) + [1]
    return mask
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Return segment ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]`` (if any)."""
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    first_len = len(cls + token_ids_0 + sep)
    if token_ids_1 is None:
        return [0] * first_len
    return [0] * first_len + [1] * len(token_ids_1 + sep)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
    """Write the vocabulary, one token per line in id order, and return its path.

    ``save_directory`` may be an existing directory (the standard vocab
    filename is appended) or a full target file path. A warning is logged
    when vocabulary ids are not consecutive.
    """
    prefix = (filename_prefix + '-') if filename_prefix else ''
    if os.path.isdir(save_directory):
        vocab_file = os.path.join(save_directory, prefix + VOCAB_FILES_NAMES['vocab_file'])
    else:
        vocab_file = prefix + save_directory
    expected = 0
    with open(vocab_file, 'w', encoding='utf-8') as writer:
        for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
            if expected != token_index:
                logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
                expected = token_index
            writer.write(token + '\n')
            expected += 1
    return (vocab_file,)
class SGD(Optimizer):
    """SGD with momentum/Nesterov that *returns* update values instead of
    applying them in place.

    Unlike stock ``torch.optim.SGD``, ``step`` takes explicit ``params`` and
    ``grads`` and returns ``-lr * d_p`` per parameter; the caller applies them.
    Per-parameter state is keyed by the parameter's position, not its tensor.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0, weight_decay=0, nesterov=False):
        if ((lr is not required) and (lr < 0.0)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (momentum < 0.0):
            raise ValueError('Invalid momentum value: {}'.format(momentum))
        if (weight_decay < 0.0):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov)
        # Nesterov requires a positive momentum and zero dampening by definition.
        if (nesterov and ((momentum <= 0) or (dampening != 0))):
            raise ValueError('Nesterov momentum requires a momentum and zero dampening')
        super(SGD, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Older checkpoints may lack the 'nesterov' flag; default it off.
        super(SGD, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def clear_states(self):
        """Drop all per-parameter state (e.g. momentum buffers)."""
        self.state = defaultdict(dict)
    # NOTE(review): the bare '_grad()' below looks like residue of a stripped
    # decorator (presumably '@torch.no_grad()' on step); confirm upstream.
    _grad()
    def step(self, params, grads, closure=None, inner_step=None):
        """Compute SGD update values for *params* given *grads*.

        Returns:
            ``(loss, update_values)``: loss from *closure* (if given) and a
            list of ``-lr * d_p`` tensors, one per parameter whose grad is
            not None. The caller is responsible for applying the updates.
        """
        param_groups = list(params)
        if (len(param_groups) == 0):
            raise ValueError('optimizer got an empty parameter list')
        self.param_groups[0]['params'] = param_groups
        update_values = []
        loss = None
        if (closure is not None):
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            # State is keyed by positional index; params and grads must align.
            for (idx, (p, grad)) in enumerate(zip(group['params'], grads)):
                if (grad is None):
                    continue
                d_p = grad
                if (weight_decay != 0):
                    d_p = d_p.add(p, alpha=weight_decay)
                if (momentum != 0):
                    param_state = self.state[idx]
                    if ('momentum_buffer' not in param_state):
                        # First step: initialize the buffer with the raw gradient.
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=(1 - dampening))
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                update_values.append(((- group['lr']) * d_p))
        return (loss, update_values)
def checkout_commit(repo, commit_id):
    """Temporarily check out *commit_id* in *repo*, restoring HEAD on exit.

    NOTE(review): this is a generator built around a bare ``yield`` —
    presumably a stripped ``@contextlib.contextmanager`` decorator; confirm
    at the call sites.
    """
    # Remember what HEAD points at so it can be restored afterwards.
    current_head = (repo.head.commit if repo.head.is_detached else repo.head.ref)
    try:
        repo.git.checkout(commit_id)
        (yield)
    finally:
        # Restore the previous HEAD even if the body raised.
        repo.git.checkout(current_head)
def load_gguf_baichuan(loader: GGUFFileLoader, dtype: torch.dtype=torch.float):
    """Build a BaiChuan causal-LM model and tokenizer from a GGUF checkpoint.

    Maps GGUF tensor names (``blk.N.*``, ``token_embd``, ``output*``) onto the
    HuggingFace-style state dict, fusing Q/K/V into the ``W_pack`` projection,
    then reconstructs a SentencePiece tokenizer from the embedded vocab.

    Returns:
        ``(model, tokenizer)`` with the model materialized on CPU.
    """
    config = loader.config
    baichuan_config = BaiChuanConfig(vocab_size=len(config['tokenizer.ggml.tokens']), hidden_size=config['baichuan.embedding_length'], intermediate_size=config['baichuan.feed_forward_length'], num_hidden_layers=config['baichuan.block_count'], num_attention_heads=config['baichuan.attention.head_count'], num_key_value_heads=config['baichuan.attention.head_count_kv'], hidden_act='silu', max_position_embeddings=config['baichuan.context_length'], rms_norm_eps=config['baichuan.attention.layer_norm_rms_epsilon'], use_cache=True, pad_token_id=None, bos_token_id=config['tokenizer.ggml.bos_token_id'], eos_token_id=config['tokenizer.ggml.eos_token_id'], pretraining_tp=1)
    ckpt = loader.tensors(dtype)
    n_head = config['baichuan.attention.head_count']
    n_head_kv = config['baichuan.attention.head_count_kv']
    ckpt = restore_baichuan_weight(ckpt, n_head, n_head_kv)
    state_dict = {}
    state_dict['model.embed_tokens.weight'] = ckpt['token_embd.weight']
    state_dict['model.norm.weight'] = ckpt['output_norm.weight']
    state_dict['lm_head.weight'] = ckpt['output.weight']
    for i in range(config['baichuan.block_count']):
        # BaiChuan stores Q, K, V as one fused 'W_pack' projection.
        a = ckpt[f'blk.{i}.attn_q.weight']
        b = ckpt[f'blk.{i}.attn_k.weight']
        c = ckpt[f'blk.{i}.attn_v.weight']
        d = torch.cat([a, b, c], dim=0)
        state_dict[f'model.layers.{i}.self_attn.W_pack.weight'] = d
        state_dict[f'model.layers.{i}.self_attn.o_proj.weight'] = ckpt[f'blk.{i}.attn_output.weight']
        state_dict[f'model.layers.{i}.mlp.gate_proj.weight'] = ckpt[f'blk.{i}.ffn_gate.weight']
        state_dict[f'model.layers.{i}.mlp.up_proj.weight'] = ckpt[f'blk.{i}.ffn_up.weight']
        state_dict[f'model.layers.{i}.mlp.down_proj.weight'] = ckpt[f'blk.{i}.ffn_down.weight']
        state_dict[f'model.layers.{i}.input_layernorm.weight'] = ckpt[f'blk.{i}.attn_norm.weight']
        state_dict[f'model.layers.{i}.post_attention_layernorm.weight'] = ckpt[f'blk.{i}.ffn_norm.weight']
    # Instantiate on the meta device, then fill tensors one by one on CPU.
    with init_empty_weights():
        model = BaiChuanForCausalLM(baichuan_config)
    for (name, weight) in state_dict.items():
        set_module_tensor_to_device(model, name, 'cpu', weight, dtype=dtype)
    model = model.cpu()
    # Rebuild a SentencePiece BPE model from the GGUF token pieces and load it
    # through a temporary file (delete=False so it can be reopened by name).
    from transformers.convert_slow_tokenizer import import_protobuf
    spm_pb2 = import_protobuf('Failed to import protobuf')
    pieces = loader.tokenizer_pieces()
    trainer_spec = spm_pb2.TrainerSpec(byte_fallback=True, model_type=spm_pb2.TrainerSpec.ModelType.BPE)
    proto = spm_pb2.ModelProto(pieces=pieces, trainer_spec=trainer_spec)
    proto = proto.SerializeToString()
    with NamedTemporaryFile(delete=False) as f:
        f.write(proto)
        f.close()
        tokenizer = BaiChuanTokenizer(f.name)
    os.remove(f.name)
    return (model, tokenizer)
class TestPadding():
    """Tests for the Padding augmentation: dtype/shape preservation across
    modes and channel layouts, plus exact reflect-padding values.

    NOTE(review): the bare '.parametrize(...)' lines below look like residue
    of stripped '@pytest.mark.parametrize' decorators; confirm upstream.
    """
    .parametrize('mode', ['silence', 'wrap', 'reflect'])
    .parametrize('pad_section', ['start', 'end'])
    def test_padding_mono_1d(self, mode, pad_section):
        # 1-D mono input must keep dtype and shape regardless of mode/section.
        random.seed(546)
        samples = np.array([0.5, 0.6, (- 0.2), 1.0], dtype=np.float32)
        sample_rate = 16000
        input_shape = samples.shape
        augmenter = Padding(mode=mode, pad_section=pad_section, p=1.0)
        samples = augmenter(samples=samples, sample_rate=sample_rate)
        assert (samples.dtype == np.float32)
        assert (samples.shape == input_shape)

    def test_padding_mono_2d(self):
        # (1, n) single-channel 2-D layout.
        samples = np.array([[0.9, 0.5, (- 0.25), (- 0.125), 0.0]], dtype=np.float32)
        sample_rate = 16000
        input_shape = samples.shape
        augmenter = Padding(p=1.0)
        samples = augmenter(samples=samples, sample_rate=sample_rate)
        assert (samples.dtype == np.float32)
        assert (samples.shape == input_shape)
    .parametrize('mode', ['silence', 'wrap', 'reflect'])
    .parametrize('pad_section', ['start', 'end'])
    def test_padding_multichannel(self, mode, pad_section):
        # (channels, n) multichannel layout.
        samples = np.array([[0.9, 0.5, (- 0.25), (- 0.125), 0.0], [0.95, 0.5, (- 0.25), (- 0.125), 0.0], [0.95, 0.5, (- 0.25), (- 0.125), 0.0]], dtype=np.float32)
        sample_rate = 16000
        input_shape = samples.shape
        augmenter = Padding(mode=mode, pad_section=pad_section, p=1.0)
        samples = augmenter(samples=samples, sample_rate=sample_rate)
        assert (samples.dtype == np.float32)
        assert (samples.shape == input_shape)

    def test_padding_reflect_start(self):
        # Fixed 40% fraction makes the reflected prefix deterministic.
        samples = np.array([0.5, 0.6, 0.9, (- 0.2), 1.0], dtype=np.float32)
        sample_rate = 16000
        augmenter = Padding(mode='reflect', pad_section='start', min_fraction=0.4, max_fraction=0.4, p=1.0)
        samples = augmenter(samples=samples, sample_rate=sample_rate)
        assert array_equal(samples, np.array([1.0, (- 0.2), 0.9, (- 0.2), 1.0], dtype=np.float32))

    def test_padding_reflect_end(self):
        samples = np.array([0.5, 0.6, 0.9, (- 0.2), 1.0], dtype=np.float32)
        sample_rate = 16000
        augmenter = Padding(mode='reflect', pad_section='end', min_fraction=0.4, max_fraction=0.4, p=1.0)
        samples = augmenter(samples=samples, sample_rate=sample_rate)
        assert array_equal(samples, np.array([0.5, 0.6, 0.9, 0.6, 0.5], dtype=np.float32))

    def test_pad_nothing(self):
        # Zero fraction: the signal must pass through untouched.
        samples = np.array([0.5, 0.6, (- 0.2), 0.1], dtype=np.float32)
        sample_rate = 16000
        input_shape = samples.shape
        augmenter = Padding(min_fraction=0.0, max_fraction=0.0, p=1.0)
        samples = augmenter(samples=samples, sample_rate=sample_rate)
        assert array_equal(samples, np.array([0.5, 0.6, (- 0.2), 0.1], dtype=np.float32))
        assert (samples.dtype == np.float32)
        assert (samples.shape == input_shape)

    def test_pad_everything(self):
        # Full fraction: everything padded away, output is all zeros.
        samples = np.array([0.5, 0.6, (- 0.2), 0.7], dtype=np.float32)
        sample_rate = 16000
        input_shape = samples.shape
        augmenter = Padding(min_fraction=1.0, max_fraction=1.0, p=1.0)
        samples = augmenter(samples=samples, sample_rate=sample_rate)
        assert (not np.any(samples))
        assert (samples.dtype == np.float32)
        assert (samples.shape == input_shape)
class ProcessModelAction(nn.Module):
    """Bayesian (Flipout) process model: predicts the next state of each
    ensemble member from the previous state and the applied action.

    State and action are encoded by two parallel MLP branches, concatenated,
    and decoded into an additive state update.
    """

    def __init__(self, num_ensemble, dim_x, dim_a):
        super(ProcessModelAction, self).__init__()
        self.num_ensemble = num_ensemble
        self.dim_x = dim_x
        self.dim_a = dim_a
        # State branch: dim_x -> 64 -> 128 -> 64.
        self.bayes1 = LinearFlipout(in_features=self.dim_x, out_features=64)
        self.bayes2 = LinearFlipout(in_features=64, out_features=128)
        self.bayes3 = LinearFlipout(in_features=128, out_features=64)
        # Action branch: dim_a -> 64 -> 128 -> 64.
        self.bayes_a1 = LinearFlipout(in_features=self.dim_a, out_features=64)
        self.bayes_a2 = LinearFlipout(in_features=64, out_features=128)
        self.bayes_a3 = LinearFlipout(in_features=128, out_features=64)
        # Fusion head: concat(64 + 64) -> 64 -> dim_x.
        self.bayes4 = LinearFlipout(in_features=128, out_features=64)
        self.bayes5 = LinearFlipout(in_features=64, out_features=self.dim_x)

    def forward(self, last_state, action):
        """last_state/action: (batch, num_ensemble, dim) -> next state, same shape."""
        n_batch = last_state.shape[0]
        # Flatten the ensemble axis into the batch axis for the linear layers.
        last_state = rearrange(last_state, 'bs k dim -> (bs k) dim', bs=n_batch, k=self.num_ensemble)
        action = rearrange(action, 'bs k dim -> (bs k) dim', bs=n_batch, k=self.num_ensemble)
        state_feat, _ = self.bayes1(last_state)
        state_feat = F.relu(state_feat)
        state_feat, _ = self.bayes2(state_feat)
        state_feat = F.relu(state_feat)
        state_feat, _ = self.bayes3(state_feat)
        state_feat = F.relu(state_feat)
        action_feat, _ = self.bayes_a1(action)
        action_feat = F.relu(action_feat)
        action_feat, _ = self.bayes_a2(action_feat)
        action_feat = F.relu(action_feat)
        action_feat, _ = self.bayes_a3(action_feat)
        action_feat = F.relu(action_feat)
        fused = torch.cat((state_feat, action_feat), axis=1)
        fused, _ = self.bayes4(fused)
        update, _ = self.bayes5(fused)
        # Residual update on the flattened state, then restore the ensemble axis.
        state = last_state + update
        state = rearrange(state, '(bs k) dim -> bs k dim', bs=n_batch, k=self.num_ensemble)
        return state
def match_ion_state(line, all_lines):
    """Pick the best ion-state match for *line* among *all_lines*.

    Returns ``(line_match, msg)``. With several candidates, the line with the
    largest strength (wavelength * oscillator strength, ``l0 * f``) wins;
    with none, ``line_match`` is None.
    """
    candidates = match_ion_state_all(line, all_lines)
    n = len(candidates)
    if n == 0:
        return (None, 'No matches found!')
    if n == 1:
        best = candidates[0]
        return (best, 'Found 1 match: %s' % best.tag)
    strengths = [cand.l0 * cand.f for cand in candidates]
    best = candidates[np.argmax(strengths)]
    return (best, 'Found %i matches. Strongest line: %s' % (n, best.tag))
class NATSpeechToTextDatasetCreator(SpeechToTextDatasetCreator):
    """Dataset creator for non-autoregressive speech-to-text, built from TSV
    manifests, with optional multitask targets and train-time resampling.

    NOTE(review): ``_from_list`` / ``_from_tsv`` / ``from_tsv`` take ``cls``
    first — they read like '@classmethod's whose decorators were stripped;
    confirm upstream.
    """
    # Missing target text defaults to empty (NAT targets may be absent).
    DEFAULT_TGT_TEXT = ''
    def _from_list(cls, split_name: str, is_train_split, samples: List[Dict], cfg: S2TDataConfig, tgt_dict, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id, multitask: Optional[Dict]=None) -> NATSpeechToTextDataset:
        """Build a dataset from parsed manifest rows, adding multitask data when configured."""
        audio_root = Path(cfg.audio_root)
        ids = [s[cls.KEY_ID] for s in samples]
        audio_paths = [(audio_root / s[cls.KEY_AUDIO]).as_posix() for s in samples]
        n_frames = [int(s[cls.KEY_N_FRAMES]) for s in samples]
        tgt_texts = [s.get(cls.KEY_TGT_TEXT, cls.DEFAULT_TGT_TEXT) for s in samples]
        src_texts = [s.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for s in samples]
        speakers = [s.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for s in samples]
        src_langs = [s.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for s in samples]
        tgt_langs = [s.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for s in samples]
        has_multitask = ((multitask is not None) and (len(multitask.keys()) > 0))
        # Multitask configs get the multitask-aware dataset subclass.
        dataset_cls = (NATSpeechToTextMultitaskDataset if has_multitask else NATSpeechToTextDataset)
        ds = dataset_cls(split=split_name, is_train_split=is_train_split, cfg=cfg, audio_paths=audio_paths, n_frames=n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, ids=ids, tgt_dict=tgt_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, n_frames_per_step=n_frames_per_step, speaker_to_id=speaker_to_id)
        if has_multitask:
            for (task_name, task_obj) in multitask.items():
                task_data = NATTextTargetMultitaskData(task_obj.args, split_name, task_obj.target_dictionary)
                ds.add_multitask_dataset(task_name, task_data)
        return ds

    def _from_tsv(cls, root: str, cfg: S2TDataConfig, split: str, tgt_dict, is_train_split: bool, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id, multitask: Optional[Dict]=None) -> NATSpeechToTextDataset:
        """Load one split's TSV manifest and build its dataset."""
        samples = cls._load_samples_from_tsv(root, split)
        return cls._from_list(split, is_train_split, samples, cfg, tgt_dict, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id, multitask)

    def from_tsv(cls, root: str, cfg: S2TDataConfig, splits: str, tgt_dict, pre_tokenizer, bpe_tokenizer, is_train_split: bool, epoch: int, seed: int, n_frames_per_step: int=1, speaker_to_id=None, multitask: Optional[Dict]=None) -> NATSpeechToTextDataset:
        """Build one dataset per comma-separated split and concatenate them.

        During training with several splits, datasets are resampled by
        temperature ``cfg.sampling_alpha`` before concatenation.
        """
        datasets = [cls._from_tsv(root=root, cfg=cfg, split=split, tgt_dict=tgt_dict, is_train_split=is_train_split, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, n_frames_per_step=n_frames_per_step, speaker_to_id=speaker_to_id, multitask=multitask) for split in splits.split(',')]
        if (is_train_split and (len(datasets) > 1) and (cfg.sampling_alpha != 1.0)):
            size_ratios = cls.get_size_ratios(datasets, alpha=cfg.sampling_alpha)
            datasets = [ResamplingDataset(d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0)) for (r, d) in zip(size_ratios, datasets)]
        return (ConcatDataset(datasets) if (len(datasets) > 1) else datasets[0])
# NOTE(review): the bare '.unused' below looks like residue of a stripped
# decorator (e.g. '@torch.jit.unused'); confirm against upstream.
.unused
def vflip(img):
    """Vertically flip the given PIL Image.

    Raises:
        TypeError: if *img* is not a PIL Image.
    """
    if (not _is_pil_image(img)):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return img.transpose(Image.FLIP_TOP_BOTTOM)
# NOTE(review): the bare "(version='2.0')" below looks like residue of a
# stripped decorator (e.g. a deprecation/fatal-error wrapper); confirm upstream.
(version='2.0')
class Distillation(Component):
    """Knowledge-distillation workflow component.

    Drives a student model's training against a teacher model via
    train/eval hooks, tracking the best-scoring student checkpoint.

    NOTE(review): the duplicated method names below (``criterion``,
    ``optimizer``, ``teacher_model``, ``student_model``, ...) and the bare
    ``_model.setter`` / ``_distributed.setter`` lines read like '@property' /
    '@x.setter' pairs whose decorators were stripped; confirm upstream.
    As written, the later definition shadows the earlier one.
    """

    def __init__(self, conf_fname_or_obj=None):
        # Accept an existing DistillationConf, a generic Config to map from,
        # or a YAML filename/None.
        super(Distillation, self).__init__()
        if isinstance(conf_fname_or_obj, DistillationConf):
            self.conf = conf_fname_or_obj
        elif isinstance(conf_fname_or_obj, Config):
            self.conf = DistillationConf()
            self.conf.map_pyconfig_to_cfg(conf_fname_or_obj)
        else:
            self.conf = DistillationConf(conf_fname_or_obj)
        self._init_with_conf()
        self._teacher_model = None
        self._criterion = None
        self._optimizer = None
        self._epoch_ran = 0
        self.eval_frequency = 1
        self.best_score = 0
        self.best_model = None
        self._train_cfg = None

    def _on_train_begin(self, dataloader=None):
        """Hook: score teacher and initial student, seed best-model tracking."""
        assert self._model, 'student_model must be set.'
        if (self._eval_func is not None):
            if self.teacher_model:
                score = self._eval_func((self.teacher_model if getattr(self._eval_func, 'builtin', None) else self.teacher_model.model))
                logger.info('teacher model score is {}.'.format(str(score)))
            score = self._eval_func((self._model if getattr(self._eval_func, 'builtin', None) else self._model.model))
            logger.info('initial model score is {}.'.format(str(score)))
            if (self.eval_frequency > 0):
                self.best_score = score
                # deepcopy only works for PyTorch models; otherwise keep a reference.
                if (self.framework == 'pytorch'):
                    self.best_model = copy.deepcopy(self._model)
                else:
                    self.best_model = self._model

    def _on_step_begin(self, batch_id):
        """Hook: reset any per-step feature caches held by the criterion."""
        if ((self.criterion is not None) and hasattr(self.criterion, 'clear_features')):
            self.criterion.clear_features()

    def _on_after_compute_loss(self, input, student_output, student_loss, teacher_output=None):
        """Hook: combine student loss with the distillation loss.

        Runs the teacher forward pass lazily when no teacher output is given.
        """
        if (self.criterion is None):
            self.create_criterion()
        assert self.criterion, 'criterion must be set in yaml config file.'
        if (teacher_output is None):
            assert self.teacher_model, 'teacher_model must be set.'
            teacher_output = self.criterion.teacher_model_forward(input, teacher_model=self.teacher_model._model)
        return self.criterion.loss_cal_sloss(student_output, teacher_output, student_loss)

    def on_post_forward(self, input, teacher_output=None):
        # Deliberately hard-fails: this hook was replaced by on_after_compute_loss.
        assert False, 'This method is deprecated. please use `on_after_compute_loss` instead.on_after_compute_loss(input, student_output, student_loss, teacher_output=None)'

    def _on_epoch_end(self):
        """Hook: evaluate every ``eval_frequency`` epochs and keep the best model."""
        self._epoch_ran += 1
        if ((self._eval_func is not None) and (self.eval_frequency > 0) and ((self._epoch_ran % self.eval_frequency) == 0)):
            score = self._eval_func((self._model if getattr(self._eval_func, 'builtin', None) else self._model.model))
            logger.info('model score of epoch {} is {}.'.format(self._epoch_ran, str(score)))
            # Multi-metric scores must improve on every component to count.
            if ((isinstance(score, list) and all([(s > b_s) for (s, b_s) in zip(score, self.best_score)])) or (score > self.best_score)):
                self.best_score = score
                if (self.framework == 'pytorch'):
                    self.best_model = copy.deepcopy(self._model)
                else:
                    self.best_model = self._model

    def init_train_cfg(self):
        """Lazily resolve the distillation train config from the YAML config."""
        if (self._train_cfg is None):
            self._train_cfg = self.cfg.distillation.train
        assert self._train_cfg, 'train field of distillation section in yaml file must be configured for distillation if train_func is NOT set.'

    def create_criterion(self):
        """Build the distillation criterion from config unless user-supplied."""
        self.init_train_cfg()
        if (self.criterion is None):
            assert ('criterion' in self._train_cfg.keys()), 'criterion part in train field of distillation section in yaml file must be configured for distillation if criterion is NOT set.'
            if isinstance(self._train_cfg.criterion, DotDict):
                criterion_cfg = self._train_cfg.criterion
            else:
                criterion_cfg = self._train_cfg.criterion.config
            assert (len(criterion_cfg) == 1), 'There must be exactly one loss in criterion part, instead got {} loss.'.format(len(criterion_cfg))
            loss = [i for i in criterion_cfg.keys()][0]
            loss_cfg = criterion_cfg[loss]
            criterion_builder = Criterions(self.framework)[loss](loss_cfg)
            criterion_tuple = criterion_builder()
            if (self.teacher_model and self.student_model):
                # TF stores the raw graph in ._model; torch in .model.
                if (self.framework == 'tensorflow'):
                    teacher_model = self.teacher_model._model
                    student_model = self.student_model._model
                else:
                    teacher_model = self.teacher_model.model
                    student_model = self.student_model.model
                criterion_tuple[1]['student_model'] = student_model
                criterion_tuple[1]['teacher_model'] = teacher_model
            self.criterion = criterion_tuple[0](**criterion_tuple[1])
        else:
            logger.warning('Use user defined criterion, ignoring the criterion setting in yaml file.')
        self._train_cfg.criterion = self.criterion

    def create_optimizer(self):
        """Build the optimizer from config unless user-supplied."""
        self.init_train_cfg()
        if (self.optimizer is None):
            assert ('optimizer' in self._train_cfg.keys()), 'optimizer part in train field of distillation section in yaml file must be configured for distillation if optimizer is NOT set.'
            optimizer_cfg = self._train_cfg.optimizer
            assert (len(optimizer_cfg) == 1), 'There must be exactly one optimizer in optimizer part, instead got {} optimizer.'.format(len(optimizer_cfg))
            optimizer_name = list(optimizer_cfg.keys())[0]
            optimizer_cfg_ = optimizer_cfg[optimizer_name]
            optimizer_builder = Optimizers(self.framework)[optimizer_name](optimizer_cfg_)
            optimizer_tuple = optimizer_builder()
            if (self.framework == 'tensorflow'):
                self.optimizer = optimizer_tuple[0](**optimizer_tuple[1])
            elif (self.framework == 'pytorch'):
                # PyTorch optimizers need the parameter iterator up front.
                self.optimizer = optimizer_tuple[0](self.model.model.parameters(), **optimizer_tuple[1])
        else:
            logger.warning('Use user defined optimizer, ignoring the optimizer setting in yaml file.')
        self._train_cfg.optimizer = self.optimizer

    def prepare(self):
        """Register hooks and build the criterion."""
        self.generate_hooks()
        self.create_criterion()

    def pre_process(self):
        """Resolve adaptor, dataloaders, train/eval functions before execution."""
        framework_specific_info = {'device': self.cfg.device, 'random_seed': self.cfg.tuning.random_seed, 'workspace_path': self.cfg.tuning.workspace.path, 'q_dataloader': None, 'format': 'default', 'backend': 'default'}
        if (self.framework == 'tensorflow'):
            framework_specific_info.update({'inputs': self.cfg.model.inputs, 'outputs': self.cfg.model.outputs})
        self.adaptor = FRAMEWORKS[self.framework](framework_specific_info)
        self.generate_hooks()
        assert isinstance(self._model, BaseModel), 'need set neural_compressor Model for distillation....'
        if ((self._train_dataloader is None) and (self._train_func is None) and (self.cfg.distillation.train.dataloader is not None)):
            train_dataloader_cfg = self.cfg.distillation.train.dataloader
            self._train_dataloader = create_dataloader(self.framework, train_dataloader_cfg)
        if (self.cfg.evaluation and self.cfg.evaluation.accuracy and self.cfg.evaluation.accuracy.dataloader and (self._eval_dataloader is None) and (self._eval_func is None)):
            eval_dataloader_cfg = self.cfg.evaluation.accuracy.dataloader
            assert (eval_dataloader_cfg is not None), 'dataloader field of evaluation in yaml file should be configured as eval_dataloader property is NOT set!'
            self._eval_dataloader = create_dataloader(self.framework, eval_dataloader_cfg)
        if (self._train_func is None):
            if (self.criterion is None):
                self.create_criterion()
            self.create_optimizer()
            if (self._train_dataloader is not None):
                self._train_func = create_train_func(self.framework, self.train_dataloader, self.adaptor, self._train_cfg, hooks=self.hooks)
        if (self.cfg.evaluation and self.eval_dataloader and (self._eval_func is None)):
            eval_cfg = self.cfg.evaluation
            assert eval_cfg, 'eval field of distillation section in yaml file must be configured for distillation if eval_func is NOT set.'
            self._eval_func = create_eval_func(self.framework, self.eval_dataloader, self.adaptor, eval_cfg.accuracy.metric, eval_cfg.accuracy.postprocess, fp32_baseline=False)

    def execute(self):
        """Run training, restore the best checkpoint, and optionally evaluate it."""
        self._train_func((self._model if getattr(self._train_func, 'builtin', None) else self._model.model))
        if ((self.criterion is not None) and hasattr(self.criterion, 'remove_all_hooks')):
            self.criterion.remove_all_hooks()
        logger.info('Model distillation is done.')
        if (self._eval_func is not None):
            logger.info('Start to evaluate the distilled model.')
            self._model = (self.best_model if self.best_model else self._model)
            score = self._eval_func((self._model if getattr(self._eval_func, 'builtin', None) else self._model.model))
            logger.info('distilled model score is {}.'.format(str(score)))
        return self._model

    def generate_hooks(self):
        """Wire the private hook methods into the component hook registry."""
        self.register_hook('on_train_begin', self._on_train_begin)
        self.register_hook('on_step_begin', self._on_step_begin)
        self.register_hook('on_after_compute_loss', self._on_after_compute_loss)
        self.register_hook('on_epoch_end', self._on_epoch_end)

    def __call__(self):
        return super(Distillation, self).__call__()
    # Alias so callers can use the sklearn-style name.
    fit = __call__

    def criterion(self):
        # Presumed '@property' getter (decorator stripped).
        return self._criterion

    def criterion(self, user_criterion):
        # Presumed '@criterion.setter' (decorator stripped).
        self._criterion = user_criterion

    def optimizer(self):
        # Presumed '@property' getter (decorator stripped).
        return self._optimizer

    def optimizer(self, user_optimizer):
        # Presumed '@optimizer.setter' (decorator stripped).
        self._optimizer = user_optimizer

    def teacher_model(self):
        # Presumed '@property' getter (decorator stripped).
        return self._teacher_model
    # NOTE(review): bare '_model.setter' — stripped-decorator residue.
    _model.setter
    def teacher_model(self, user_model):
        # Wrap raw framework models in the neural_compressor Model type.
        if (not isinstance(user_model, BaseModel)):
            logger.warning('Force convert framework model to neural_compressor model.')
            self._teacher_model = Model(user_model)
        else:
            self._teacher_model = user_model

    def student_model(self):
        # Presumed '@property' getter (decorator stripped).
        return self._model
    # NOTE(review): bare '_model.setter' — stripped-decorator residue.
    _model.setter
    def student_model(self, user_model):
        if (not isinstance(user_model, BaseModel)):
            logger.warning('Force convert framework model to neural_compressor model.')
            self._model = Model(user_model)
        else:
            self._model = user_model

    def train_cfg(self):
        # Presumed '@property' getter (decorator stripped).
        return self._train_cfg

    def evaluation_distributed(self):
        # Presumed '@property' getter (decorator stripped).
        return self._evaluation_distributed
    # NOTE(review): bare '_distributed.setter' — stripped-decorator residue.
    _distributed.setter
    def evaluation_distributed(self, distributed):
        self._evaluation_distributed = distributed

    def train_distributed(self):
        # Presumed '@property' getter (decorator stripped).
        return self._train_distributed
    # NOTE(review): bare '_distributed.setter' — stripped-decorator residue.
    _distributed.setter
    def train_distributed(self, distributed):
        self._train_distributed = distributed

    def __repr__(self):
        return 'Distillation'
class Triples():
    """Load (head, relation, tail) triples from ``<data_dir>.txt`` and build
    id mappings plus head->(rel, tail) and tail->(rel, head) indexes."""

    def __init__(self, data_dir='../code/triples'):
        self.data = self.load_data(data_dir)
        (self.entities, self.entity2id) = self.get_entities(self.data)
        (self.attributes, self.attribute2id) = self.get_attributes(self.data)
        (self.relations, self.relation2id) = self.get_relations(self.data)
        self.triples = self.read_triple(self.data, self.entity2id, self.relation2id)
        # NOTE: these assignments shadow the methods of the same names with
        # their dict results; the methods are unreachable on instances afterwards.
        self.h2rt = self.h2rt(self.triples)
        self.t2rh = self.t2rh(self.triples)

    def load_data(self, data_dir):
        """Read whitespace-separated triples, one per line, from '<data_dir>.txt'."""
        with open('%s.txt' % data_dir, 'r') as f:
            lines = f.read().strip().split('\n')
        return [line.split() for line in lines]

    def get_relations(self, data):
        """Return sorted unique relations and a relation->id mapping."""
        rels = sorted(set(d[1] for d in data))
        return (rels, {rel: i for (i, rel) in enumerate(rels)})

    def get_entities(self, data):
        """Return sorted unique entities (heads and tails) and an entity->id mapping."""
        ents = sorted(set(d[0] for d in data) | set(d[2] for d in data))
        return (ents, {ent: i for (i, ent) in enumerate(ents)})

    def get_attributes(self, data):
        """Return sorted unique head values and a head->id mapping."""
        attrs = sorted(set(d[0] for d in data))
        return (attrs, {attr: i for (i, attr) in enumerate(attrs)})

    def read_triple(self, data, entity2id, relation2id):
        """Translate string rows into (head_id, rel_id, tail_id) tuples."""
        return [(entity2id[row[0]], relation2id[row[1]], entity2id[row[2]]) for row in data]

    def h2rt(self, triples):
        """Index triples by head: head_id -> list of (rel_id, tail_id)."""
        index = ddict(list)
        for (h, r, t) in triples:
            index[h].append((r, t))
        return index

    def t2rh(self, triples):
        """Index triples by tail: tail_id -> list of (rel_id, head_id)."""
        index = ddict(list)
        for (h, r, t) in triples:
            index[t].append((r, h))
        return index
class ShapeEncoderPC(nn.Module):
    """PointNet-style point-cloud encoder: per-point 1x1 convolutions followed
    by a global max-pool, yielding one feature vector per shape."""

    def __init__(self, feature_dim=1024):
        super(ShapeEncoderPC, self).__init__()
        # Per-point feature lifting: 3 -> 64 -> 128 -> feature_dim.
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, feature_dim, 1)
        self.bn1 = torch.nn.BatchNorm1d(64)
        self.bn2 = torch.nn.BatchNorm1d(128)
        self.bn3 = torch.nn.BatchNorm1d(feature_dim)
        self.feature_dim = feature_dim

    def forward(self, shapes):
        """shapes: (batch, 3, n_points) -> (batch, feature_dim)."""
        h = F.relu(self.bn1(self.conv1(shapes)))
        h = F.relu(self.bn2(self.conv2(h)))
        # No activation on the last layer before pooling.
        h = self.bn3(self.conv3(h))
        # Global max over the points axis makes the encoding permutation-invariant.
        pooled = torch.max(h, 2)[0]
        return pooled.view(-1, self.feature_dim)
def conv3x3(in_channels, out_channels, groups=1, stride=1):
    """3x3 (optionally grouped/strided) convolution with padding 1,
    preserving spatial size at stride 1."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        groups=groups,
        stride=stride,
        padding=1,
    )
def main(args):
    """Parse every ``.cif`` file under ``args.mmcif_dir`` in parallel and dump
    the merged results as pretty-printed JSON to ``args.output_path``.

    NOTE(review): assumes ``parse_file`` returns a dict per file; results are
    merged via ``dict.update``, so later-arriving files win on key collisions.
    """
    files = [f for f in os.listdir(args.mmcif_dir) if ('.cif' in f)]
    fn = partial(parse_file, args=args)
    data = {}
    # imap_unordered keeps all workers busy; ordering of results is irrelevant
    # since they are merged into one dict.
    with Pool(processes=args.no_workers) as p:
        with tqdm(total=len(files)) as pbar:
            for d in p.imap_unordered(fn, files, chunksize=args.chunksize):
                data.update(d)
                pbar.update()
    with open(args.output_path, 'w') as fp:
        fp.write(json.dumps(data, indent=4))
def order_sequence(tokens, start, end, variables):
    """Canonicalize the order of a SQL token sequence in place.

    Recursively chunks the token span (subqueries become single chunks), then
    normalizes: sorts SELECT column lists, orients '='/'!=' comparisons so the
    more "column-like" side is on the left, sorts FROM table lists, and sorts
    WHERE conjuncts/disjuncts (skipped when AND and OR are mixed, and careful
    not to treat BETWEEN's AND as a conjunction).

    NOTE(review): mutates *tokens* in place; relies on sibling helpers
    (subquery_range, get_matching_chunk, sort_chunk_list, tokens_for_chunk)
    defined elsewhere in this file.
    """
    # --- Phase 1: partition [start, end) into chunks; a subquery is one chunk.
    cpos = (start + 1)
    chunks = [(start, start)]
    in_quote = False
    while (cpos < end):
        # Track quote parity across the token so subquery detection can
        # ignore parentheses inside string literals.
        for part in tokens[cpos].split('"'):
            in_quote = (not in_quote)
        in_quote = (not in_quote)
        sub = subquery_range(None, cpos, tokens, in_quote)
        if (sub is None):
            chunks.append((cpos, cpos))
            cpos += 1
        else:
            chunks.append((cpos, (sub[1] - 1)))
            # Canonicalize the subquery's own interior recursively.
            order_sequence(tokens, sub[0], (sub[1] - 1), variables)
            cpos = sub[1]
    # --- Phase 2: sort each SELECT ... FROM column list.
    cur_chunk = 0
    while (cur_chunk < len(chunks)):
        next_select = get_matching_chunk(tokens, chunks, cur_chunk, 'SELECT')
        if (next_select is None):
            break
        next_distinct = get_matching_chunk(tokens, chunks, next_select, 'DISTINCT')
        next_all = get_matching_chunk(tokens, chunks, next_select, 'ALL')
        # Skip a DISTINCT/ALL qualifier immediately after SELECT.
        if ((next_distinct == (next_select + 1)) or (next_all == (next_select + 1))):
            next_select += 1
        next_from = get_matching_chunk(tokens, chunks, next_select, 'FROM', len(chunks))
        sort_chunk_list((next_select + 1), next_from, chunks, tokens)
        cur_chunk = next_from
    # --- Phase 3: orient '='/'!=' so the more column-like operand is first.
    for symbol in ['=', '!=']:
        cur_chunk = 0
        while (cur_chunk < len(chunks)):
            next_equals = get_matching_chunk(tokens, chunks, cur_chunk, symbol)
            if (next_equals is None):
                break
            left = tokens_for_chunk(tokens, chunks[(next_equals - 1)])
            right = tokens_for_chunk(tokens, chunks[(next_equals + 1)])
            left_text = ' '.join(left)
            right_text = ' '.join(right)
            # Swap when the left side is a literal/variable/expression (or
            # simply lexically larger); never when the right side is too.
            swap = ((left_text < right_text) or (left_text in variables) or (left_text[0] in string.digits) or (left_text[0] in ['"', "'", '(']) or (' ' in left_text) or ('.' not in left_text))
            if ((right_text in variables) or (right_text[0] in string.digits) or (right_text[0] in ['"', "'", '(']) or (' ' in right_text) or ('.' not in right_text)):
                swap = False
            if swap:
                # Rewrite right/symbol/left back over the original span.
                cpos = chunks[(next_equals - 1)][0]
                for token in right:
                    tokens[cpos] = token
                    cpos += 1
                tokens[cpos] = symbol
                cpos += 1
                for token in left:
                    tokens[cpos] = token
                    cpos += 1
            cur_chunk = (next_equals + 2)
    # --- Phase 4: sort the FROM table list (up to the next clause keyword).
    cur_chunk = 0
    while (cur_chunk < len(chunks)):
        next_from = get_matching_chunk(tokens, chunks, cur_chunk, 'FROM')
        if (next_from is None):
            break
        next_item = min(get_matching_chunk(tokens, chunks, next_from, 'WHERE', len(chunks)), get_matching_chunk(tokens, chunks, next_from, 'JOIN', len(chunks)), get_matching_chunk(tokens, chunks, next_from, 'GROUP', len(chunks)), get_matching_chunk(tokens, chunks, next_from, 'HAVING', len(chunks)), get_matching_chunk(tokens, chunks, next_from, 'LIMIT', len(chunks)), get_matching_chunk(tokens, chunks, next_from, 'ORDER', len(chunks)), get_matching_chunk(tokens, chunks, next_from, ';', len(chunks)))
        sort_chunk_list((next_from + 1), next_item, chunks, tokens)
        cur_chunk = next_item
    # --- Phase 5: sort WHERE conditions joined purely by AND or purely by OR.
    cur_chunk = 0
    while (cur_chunk < len(chunks)):
        next_where = get_matching_chunk(tokens, chunks, cur_chunk, 'WHERE')
        if (next_where is None):
            # A fragment that doesn't start with SELECT is treated as a bare
            # condition list starting at chunk 0.
            if ((tokens[chunks[0][0]] != 'SELECT') and (tokens[chunks[1][1]] != 'SELECT')):
                next_where = 0
            else:
                break
        next_item = min(get_matching_chunk(tokens, chunks, next_where, 'GROUP', len(chunks)), get_matching_chunk(tokens, chunks, next_where, 'HAVING', len(chunks)), get_matching_chunk(tokens, chunks, next_where, 'LIMIT', len(chunks)), get_matching_chunk(tokens, chunks, next_where, 'ORDER', len(chunks)), get_matching_chunk(tokens, chunks, next_where, ';', len(chunks)))
        has_and = False
        has_or = False
        saw_between = False
        for v in range((next_where + 1), next_item):
            chunk = chunks[v]
            if (tokens[chunk[0]] == 'BETWEEN'):
                saw_between = True
            if ((chunk[0] == chunk[1]) and (tokens[chunk[0]] == 'AND')):
                # The AND of 'BETWEEN x AND y' is not a conjunction.
                if (not saw_between):
                    has_and = True
                else:
                    saw_between = False
            if ((chunk[0] == chunk[1]) and (tokens[chunk[0]] == 'OR')):
                has_or = True
        # Mixed AND/OR: order is semantically significant, leave untouched.
        if (not (has_and and has_or)):
            min_pos = min([chunks[v][0] for v in range((next_where + 1), next_item)])
            max_pos = max([chunks[v][1] for v in range((next_where + 1), next_item)])
            ctokens = tokens[min_pos:(max_pos + 1)]
            sort_chunk_list((next_where + 1), next_item, chunks, tokens, 'AND')
            sort_chunk_list((next_where + 1), next_item, chunks, tokens, 'OR')
        cur_chunk = next_item
def xresnet34_2(pretrained=False, **kwargs):
    """Build an XResNet-34 (BasicBlock layout [3, 4, 6, 3]).

    pretrained: if True, load weights from the 'xresnet34' model-zoo URL.
    Extra keyword arguments are forwarded to the XResNet constructor.
    """
    net = XResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['xresnet34'])
        net.load_state_dict(state)
    return net
def astroNNPath(dr=None):
    """Return the local file path of the astroNN value-added catalog.

    Parameters
    ----------
    dr : str, optional
        Data release ('14', '16', or '17'); defaults to the module-wide
        default release.

    Returns
    -------
    str
        Full path to the astroNN catalog FITS file for this release.

    Raises
    ------
    ValueError
        For DR < 14 and for any release without a known astroNN catalog.
        (Fix: the original implicitly returned None for unhandled release
        strings — e.g. dr passed as an int, or dr='15' — which surfaced
        later as confusing TypeErrors in path handling.)
    """
    if dr is None:
        dr = _default_dr()
    if int(dr) < 14:
        raise ValueError('astroNN catalog for DR<14 not available')
    specReduxPath = apogeeSpectroReduxDirPath(dr=dr)
    if dr == '14':
        return os.path.join(specReduxPath, 'r8', 'stars', 'l31c',
                            _redux_dr(dr=dr),
                            'astroNN_apogee_dr14_catalog.fits')
    elif dr == '16':
        return os.path.join(_APOGEE_DATA, 'dr16', 'apogee', 'vac',
                            'apogee-astronn',
                            'apogee_astroNN-DR{}-v1.fits'.format(dr))
    elif dr == '17':
        return os.path.join(_APOGEE_DATA, 'dr17', 'apogee', 'vac',
                            'apogee-astronn',
                            'apogee_astroNN-DR{}.fits'.format(dr))
    raise ValueError('astroNN catalog not available for DR{}'.format(dr))
class GraphConvolution(nn.Module):
    """A single GCN layer: output = adj @ (input @ W) (+ bias).

    Simple graph convolution in the style of Kipf & Welling (2017); the
    adjacency matrix may be sparse (aggregation uses torch.spmm).
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Dense learnable weight; bias is optional and registered as None
        # when disabled so the parameter list stays well-defined.
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init scaled by 1/sqrt(out_features), matching the
        # reference GCN implementation.
        bound = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        # Feature transform first, then (sparse) neighborhood aggregation.
        transformed = torch.mm(input, self.weight)
        aggregated = torch.spmm(adj, transformed)
        if self.bias is None:
            return aggregated
        return aggregated + self.bias

    def __repr__(self):
        return '{} ({} -> {})'.format(self.__class__.__name__,
                                      self.in_features, self.out_features)
class NoiseResNet(nn.Module):
    """ResNet-style backbone whose residual blocks take a noise ``level``.

    The concrete noise behaviour lives in the ``block`` class supplied by
    the caller; this class only wires the stem, four stages, pooling and
    the linear classifier.
    """

    def __init__(self, block, nblocks, nfilters, nclasses, pool, level, first_filter_size=3):
        # block: residual block class (must expose ``.expansion``).
        # nblocks: 4 ints, number of blocks per stage.
        # pool: AvgPool kernel size; overridden below for the 7/3 stems.
        # level: noise level forwarded to every block.
        super(NoiseResNet, self).__init__()
        self.in_planes = nfilters
        if (first_filter_size == 7):
            # ImageNet-style stem: 7x7 stride-2 conv followed by max pool.
            pool = 1
            self.pre_layers = nn.Sequential(nn.Conv2d(3, nfilters, kernel_size=first_filter_size, stride=2, padding=3, bias=False), nn.BatchNorm2d(nfilters), nn.ReLU(True), nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        elif (first_filter_size == 3):
            # CIFAR-style stem: single 3x3 stride-1 conv.
            pool = 4
            self.pre_layers = nn.Sequential(nn.Conv2d(3, nfilters, kernel_size=first_filter_size, stride=1, padding=1, bias=False), nn.BatchNorm2d(nfilters), nn.ReLU(True))
        elif (first_filter_size == 0):
            print('\n\nThe original noiseresnet18 model does not support noise masks in the first layer, use perturb_resnet18 model, or set first_filter_size to 3 or 7\n\n')
            # NOTE(review): returning here leaves the instance half-built
            # (no layers registered); callers must not use it in this case.
            # Any other first_filter_size value raises AttributeError below.
            return
        # The stem convolution is frozen (excluded from training).
        self.pre_layers[0].weight.requires_grad = False
        self.layer1 = self._make_layer(block, (1 * nfilters), nblocks[0], stride=1, level=level)
        self.layer2 = self._make_layer(block, (2 * nfilters), nblocks[1], stride=2, level=level)
        self.layer3 = self._make_layer(block, (4 * nfilters), nblocks[2], stride=2, level=level)
        self.layer4 = self._make_layer(block, (8 * nfilters), nblocks[3], stride=2, level=level)
        self.avgpool = nn.AvgPool2d(pool, stride=1)
        self.linear = nn.Linear(((8 * nfilters) * block.expansion), nclasses)

    def _make_layer(self, block, planes, nblocks, stride=1, level=0.2, filter_size=1):
        """Build one stage; the first block may downsample via a 1x1 shortcut."""
        shortcut = None
        if ((stride != 1) or (self.in_planes != (planes * block.expansion))):
            shortcut = nn.Sequential(nn.Conv2d(self.in_planes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.in_planes, planes, stride, shortcut, level=level))
        self.in_planes = (planes * block.expansion)
        for i in range(1, nblocks):
            layers.append(block(self.in_planes, planes, level=level))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Stem -> 4 stages -> average pool -> flatten -> classifier.
        x1 = self.pre_layers(x)
        x2 = self.layer1(x1)
        x3 = self.layer2(x2)
        x4 = self.layer3(x3)
        x5 = self.layer4(x4)
        x6 = self.avgpool(x5)
        x7 = x6.view(x6.size(0), (- 1))
        x8 = self.linear(x7)
        return x8
def load_model(teacher_str, student_str, dataset, device, ensemble):
    """Construct the (teacher, student) model pair for distillation.

    ``teacher_str`` / ``student_str`` are 'arch-depth-width' strings such
    as 'wrn-28-10' or 'res-34-1'. ``teacher_str`` may be None (teacher is
    returned as None) and, for CIFAR/SVHN with ``ensemble`` set, may be a
    comma-separated list that yields a dict of teachers. Teacher weights
    are loaded from fixed checkpoint paths; students start untrained.

    NOTE(review): an unsupported ``dataset`` leaves num_classes / teacher
    / student unbound (later UnboundLocalError), and the 'imagenet'
    branch reads ``teacher_str`` before any None check — confirm callers
    only pass supported combinations.
    """
    # Number of target classes per dataset.
    if ((dataset == 'cifar10') or (dataset == 'svhn')):
        num_classes = 10
    elif (dataset == 'cifar100'):
        num_classes = 100
    elif (dataset == 'tiny-imagenet'):
        num_classes = 200
    elif (dataset == 'imagenet'):
        num_classes = 1000
    if ((dataset == 'cifar10') or (dataset == 'cifar100') or (dataset == 'svhn')):
        if (teacher_str is not None):
            # Architecture flags are encoded in the name: 'nobn' disables
            # BN affine parameters, 'demo' disables shortcuts.
            bn_aff = (False if ('nobn' in teacher_str) else True)
            shortcut = (False if ('demo' in teacher_str) else True)
            if ensemble:
                # Comma-separated list -> dict name -> loaded teacher.
                teacher_cand = teacher_str.split(',')
                teacher = {}
                for teacher_str_cand in teacher_cand:
                    if ('wrn' in teacher_str_cand):
                        teacher_depth = int(teacher_str_cand.split('-')[1])
                        teacher_widen_factor = int(teacher_str_cand.split('-')[2])
                        teacher_tmp = cifar.WideResNet(depth=teacher_depth, widen_factor=teacher_widen_factor, num_classes=num_classes, bn_aff=bn_aff, shortcut=shortcut)
                    # NOTE(review): teacher_tmp is only (re)assigned for
                    # 'wrn' entries; a non-wrn entry would reuse the
                    # previous model — confirm ensembles are wrn-only.
                    filename = './model_checkpoints/{}/None/{}/alp_0.1_T_1.0/random_0.0-1.0_random_0.0-1.0_seed9999_none_noclas.t1'.format(dataset, teacher_str_cand)
                    teacher_tmp.cpu()
                    # Checkpoints store per-epoch state dicts keyed by epoch string.
                    teacher_tmp.load_state_dict(torch.load(filename, map_location='cpu')['199'])
                    teacher_tmp = teacher_tmp.to(device)
                    teacher[teacher_str_cand] = teacher_tmp
            else:
                if ('wrn' in teacher_str):
                    teacher_depth = int(teacher_str.split('-')[1])
                    teacher_widen_factor = int(teacher_str.split('-')[2])
                    teacher = cifar.WideResNet(depth=teacher_depth, widen_factor=teacher_widen_factor, num_classes=num_classes, bn_aff=bn_aff, shortcut=shortcut)
                elif ('res' in teacher_str):
                    teacher_depth = int(teacher_str.split('-')[1])
                    teacher_widen_factor = int(teacher_str.split('-')[2])
                    teacher = cifar.ResNet(depth=teacher_depth, width=teacher_widen_factor, num_classes=num_classes, bn_aff=bn_aff, shortcut=shortcut)
                filename = './model_checkpoints/{}/None/{}/alp_0.1_T_1.0/random_0.0-1.0_random_0.0-1.0_seed9999_none_noclas.t1'.format(dataset, teacher_str)
                teacher.cpu()
                teacher.load_state_dict(torch.load(filename, map_location='cpu')['199'])
                teacher = teacher.to(device)
        else:
            teacher = None
        # Student is built fresh (no checkpoint is loaded).
        if ('wrn' in student_str):
            student_depth = int(student_str.split('-')[1])
            student_widen_factor = int(student_str.split('-')[2])
            student = cifar.WideResNet(depth=student_depth, widen_factor=student_widen_factor, num_classes=num_classes)
        elif ('res' in student_str):
            student_depth = int(student_str.split('-')[1])
            student_widen_factor = int(student_str.split('-')[2])
            # NOTE(review): passes widen_factor= here while the teacher
            # branch above uses width= for cifar.ResNet — confirm which
            # keyword cifar.ResNet actually accepts.
            student = cifar.ResNet(depth=student_depth, widen_factor=student_widen_factor, num_classes=num_classes)
    elif (dataset == 'imagenet'):
        # NOTE(review): bn_aff/shortcut are computed but never used in
        # this branch, and teacher_str is dereferenced before a None check.
        bn_aff = (False if ('nobn' in teacher_str) else True)
        shortcut = (False if ('demo' in teacher_str) else True)
        if ('res' in teacher_str):
            teacher_depth = int(teacher_str.split('-')[1])
            if (teacher_depth == 152):
                teacher = imagenet.resnet152(pretrained=True)
            elif (teacher_depth == 50):
                teacher = imagenet.resnet50(pretrained=True)
            elif (teacher_depth == 34):
                teacher = imagenet.resnet34(pretrained=True)
        else:
            teacher = None
        if ('res' in student_str):
            student_depth = int(student_str.split('-')[1])
            if (student_depth == 152):
                student = imagenet.resnet152()
            elif (student_depth == 50):
                student = imagenet.resnet50()
    elif (dataset == 'tiny-imagenet'):
        if (teacher_str is not None):
            if ('res' in teacher_str):
                teacher_depth = int(teacher_str.split('-')[1])
                if (teacher_depth == 152):
                    teacher = imagenet.resnet152(num_classes=num_classes)
                elif (teacher_depth == 50):
                    teacher = imagenet.resnet50(num_classes=num_classes)
                elif (teacher_depth == 34):
                    teacher = imagenet.resnet34(num_classes=num_classes)
            filename = './model_checkpoints/{}/None/{}/alp_0.1_T_1.0/random_highest_1.0_random_highest_1.0_seed1_none.t1'.format(dataset, teacher_str)
            teacher.cpu()
            teacher.load_state_dict(torch.load(filename, map_location='cpu')['199'])
            teacher = teacher.to(device)
        else:
            teacher = None
        if ('res' in student_str):
            student_depth = int(student_str.split('-')[1])
            if (student_depth == 152):
                student = imagenet.resnet152(num_classes=num_classes)
            elif (student_depth == 50):
                student = imagenet.resnet50(num_classes=num_classes)
            elif (student_depth == 34):
                student = imagenet.resnet34(num_classes=num_classes)
    return (teacher, student)
def call_main(cfg: FairseqConfig, main, **kwargs):
    """Entry-point dispatcher: run ``main(cfg)`` in the right distributed mode.

    Depending on the distributed config this either spawns one process per
    local GPU, joins an already-launched distributed job in-process, spawns
    TPU workers via torch_xla, or simply calls ``main`` directly.
    """
    if (cfg.distributed_training.distributed_init_method is None):
        # Try to fill in the init method (e.g. from the environment).
        infer_init_method(cfg.distributed_training)
    if (cfg.distributed_training.distributed_init_method is not None):
        if (not cfg.distributed_training.distributed_no_spawn):
            # Each spawned worker re-derives its own rank, so stash the
            # starting rank in kwargs and clear it on the shared config.
            start_rank = cfg.distributed_training.distributed_rank
            cfg.distributed_training.distributed_rank = None
            kwargs['start_rank'] = start_rank
            torch.multiprocessing.spawn(fn=distributed_main, args=(main, cfg, kwargs), nprocs=min(torch.cuda.device_count(), cfg.distributed_training.distributed_world_size), join=True)
        else:
            # Launched externally (one process per rank already exists):
            # run just this rank.
            distributed_main(cfg.distributed_training.device_id, main, cfg, kwargs)
    elif (cfg.common.tpu and (cfg.distributed_training.distributed_world_size > 1)):
        import torch_xla.distributed.xla_multiprocessing as xmp
        # NOTE(review): presumably 'file_system' sharing avoids fd limits
        # with many workers — confirm against the fairseq upstream.
        torch.multiprocessing.set_sharing_strategy('file_system')
        # nprocs is capped at 8 local TPU cores.
        xmp.spawn(fn=distributed_main, args=(main, cfg, kwargs), nprocs=min(cfg.distributed_training.distributed_world_size, 8))
    else:
        # Single-process fallback.
        main(cfg, **kwargs)
def load_jsonl(file: Union[str, Path]) -> Iterable[Any]:
    """Lazily yield one parsed JSON object per line of a JSON-Lines file.

    Parameters
    ----------
    file : str or Path
        Path to a UTF-8 encoded .jsonl file.

    Yields
    ------
    Any
        The object decoded from each line.

    On a malformed line the offending content is printed and the process
    exits with status 1 (preserving the original fail-fast behaviour).
    Fix: the original bare ``except:`` also swallowed unrelated errors
    such as KeyboardInterrupt/GeneratorExit; only JSON decode errors are
    treated as data errors now.
    """
    with open(file, 'r', encoding='utf-8') as f:
        for line in f:
            try:
                yield json.loads(line)
            except json.JSONDecodeError:
                print('Error in loading:', line)
                sys.exit(1)
class Env(object):
    """Minimal abstract RL environment interface (gym-style).

    Subclasses must implement step/reset/render; close and seed have safe
    defaults. Instances can be used as context managers and are closed on
    exit.
    """

    metadata = {'render.modes': []}
    reward_range = ((- float('inf')), float('inf'))
    spec = None
    action_space = None
    observation_space = None

    def step(self, action):
        """Advance the environment one timestep; subclass responsibility."""
        raise NotImplementedError

    def reset(self):
        """Reset to an initial state; subclass responsibility."""
        raise NotImplementedError

    def render(self, mode='human'):
        """Render the environment; subclass responsibility."""
        raise NotImplementedError

    def close(self):
        """Release resources; the base class has nothing to clean up."""
        return

    def seed(self, seed=None):
        # The base class cannot seed anything — warn and carry on.
        logger.warn('Could not seed environment %s', self)
        return

    def unwrapped(self):
        """Return the innermost environment (self at the base level)."""
        return self

    def __str__(self):
        if self.spec is None:
            return '<{} instance>'.format(type(self).__name__)
        return '<{}<{}>>'.format(type(self).__name__, self.spec.id)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
        return False
def resnet18_full(pretrained=False, **kwargs):
    """Construct a ResNet_Full-18 (BasicBlock layout [2, 2, 2, 2]).

    pretrained: if True, load weights from the 'resnet18' model-zoo URL.
    Extra keyword arguments are forwarded to the ResNet_Full constructor.
    """
    net = ResNet_Full(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return net
class ClassifierChoice(AutotabularChoice):
    """Pipeline step that selects one classifier component and delegates to it.

    Available components come from the built-in registry plus registered
    add-ons; ``self.choice`` (managed by the base class) holds the
    currently selected estimator.
    """

    def get_components(cls):
        """Return every registered classifier component, add-ons included."""
        # NOTE(review): first parameter is named ``cls`` but no
        # @classmethod decorator is visible in this chunk; calling it on
        # an instance binds the instance as ``cls``, which still works.
        # Confirm the intended decoration upstream.
        components = OrderedDict()
        components.update(_classifiers)
        components.update(_addons.components)
        return components

    def get_available_components(cls, dataset_properties=None, include=None, exclude=None):
        """Filter components by include/exclude lists and dataset properties.

        Raises ValueError if both ``include`` and ``exclude`` are given,
        or if ``include`` names an unknown component.
        """
        if (dataset_properties is None):
            dataset_properties = {}
        available_comp = cls.get_components()
        components_dict = OrderedDict()
        if ((include is not None) and (exclude is not None)):
            raise ValueError('The argument include and exclude cannot be used together.')
        if (include is not None):
            for incl in include:
                if (incl not in available_comp):
                    raise ValueError(('Trying to include unknown component: %s' % incl))
        for name in available_comp:
            if ((include is not None) and (name not in include)):
                continue
            elif ((exclude is not None) and (name in exclude)):
                continue
            entry = available_comp[name]
            # Never offer the choice wrapper itself as a candidate.
            if (entry == ClassifierChoice):
                continue
            if (entry.get_properties()['handles_classification'] is False):
                continue
            # Drop components that cannot handle the dataset's target type.
            if ((dataset_properties.get('multiclass') is True) and (entry.get_properties()['handles_multiclass'] is False)):
                continue
            if ((dataset_properties.get('multilabel') is True) and (available_comp[name].get_properties()['handles_multilabel'] is False)):
                continue
            components_dict[name] = entry
        return components_dict

    def get_hyperparameter_search_space(self, dataset_properties=None, default=None, include=None, exclude=None):
        """Build a ConfigurationSpace with a ``__choice__`` over estimators.

        Each estimator's own search space is attached as a conditional
        sub-space, active only when ``__choice__`` selects that estimator.
        """
        if (dataset_properties is None):
            dataset_properties = {}
        if ((include is not None) and (exclude is not None)):
            raise ValueError('The arguments include_estimators and exclude_estimators cannot be used together.')
        cs = ConfigurationSpace()
        available_estimators = self.get_available_components(dataset_properties=dataset_properties, include=include, exclude=exclude)
        if (len(available_estimators) == 0):
            raise ValueError('No classifiers found')
        if (default is None):
            # Prefer a few well-known strong defaults; fall back to the
            # first available estimator that is not excluded.
            defaults = (['random_forest', 'liblinear_svc', 'sgd', 'libsvm_svc'] + list(available_estimators.keys()))
            for default_ in defaults:
                if (default_ in available_estimators):
                    if ((include is not None) and (default_ not in include)):
                        continue
                    if ((exclude is not None) and (default_ in exclude)):
                        continue
                    default = default_
                    break
        estimator = CategoricalHyperparameter('__choice__', list(available_estimators.keys()), default_value=default)
        cs.add_hyperparameter(estimator)
        for estimator_name in available_estimators.keys():
            estimator_configuration_space = available_estimators[estimator_name].get_hyperparameter_search_space(dataset_properties)
            parent_hyperparameter = {'parent': estimator, 'value': estimator_name}
            cs.add_configuration_space(estimator_name, estimator_configuration_space, parent_hyperparameter=parent_hyperparameter)
        self.configuration_space = cs
        self.dataset_properties = dataset_properties
        return cs

    def predict_proba(self, X):
        """Delegate probability prediction to the chosen estimator."""
        return self.choice.predict_proba(X)

    def estimator_supports_iterative_fit(self):
        """True if the chosen estimator can be fit incrementally."""
        return hasattr(self.choice, 'iterative_fit')

    def get_max_iter(self):
        """Maximum iteration count of the chosen iterative estimator."""
        if self.estimator_supports_iterative_fit():
            return self.choice.get_max_iter()
        else:
            raise NotImplementedError()

    def get_current_iter(self):
        """Current iteration count of the chosen iterative estimator."""
        if self.estimator_supports_iterative_fit():
            return self.choice.get_current_iter()
        else:
            raise NotImplementedError()

    def iterative_fit(self, X, y, n_iter=1, **fit_params):
        """Run ``n_iter`` incremental fitting steps on the chosen estimator."""
        self.fitted_ = True
        if (fit_params is None):
            fit_params = {}
        return self.choice.iterative_fit(X, y, n_iter=n_iter, **fit_params)

    def configuration_fully_fitted(self):
        """True once the chosen estimator reports fitting is complete."""
        return self.choice.configuration_fully_fitted()
class NodeDistributedSampler(Sampler):
    """Distributed sampler that shards indices per node, then per rank.

    Each dataset index is first assigned to one *local* process
    (``index % local_size == local_rank``); the surviving indices are
    padded and then strided across the global replicas sharing that
    local rank, so every rank receives exactly ``num_samples`` items.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, local_rank=None, local_size=None, shuffle=True):
        # num_replicas / rank default to the torch.distributed group;
        # local_rank / local_size default to the launcher's env vars.
        if (num_replicas is None):
            if (not dist.is_available()):
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if (rank is None):
            if (not dist.is_available()):
                raise RuntimeError('Requires distributed package to be available')
            rank = dist.get_rank()
        if (local_rank is None):
            local_rank = int(os.environ.get('LOCAL_RANK', 0))
        if (local_size is None):
            local_size = int(os.environ.get('LOCAL_SIZE', 1))
        self.dataset = dataset
        self.shuffle = shuffle
        self.num_replicas = num_replicas
        self.num_parts = local_size
        self.rank = rank
        self.local_rank = local_rank
        self.epoch = 0
        # Per-rank sample count, rounded up so all ranks match.
        self.num_samples = int(math.ceil(((len(self.dataset) * 1.0) / self.num_replicas)))
        self.total_size = (self.num_samples * self.num_replicas)
        # Size of this node's shard of the (padded) index list.
        self.total_size_parts = ((self.num_samples * self.num_replicas) // self.num_parts)

    def __iter__(self):
        if self.shuffle:
            # Seed with the epoch so every rank produces the same order.
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = torch.arange(len(self.dataset)).tolist()
        # Keep only indices belonging to this node-local shard.
        indices = [i for i in indices if ((i % self.num_parts) == self.local_rank)]
        # Pad by repeating the head so the shard length is exact.
        indices += indices[:(self.total_size_parts - len(indices))]
        assert (len(indices) == self.total_size_parts)
        # Stride the shard across replicas that share this local rank.
        indices = indices[(self.rank // self.num_parts):self.total_size_parts:(self.num_replicas // self.num_parts)]
        assert (len(indices) == self.num_samples)
        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        # Must be called each epoch to reshuffle deterministically.
        self.epoch = epoch
class CNN(nn.Module):
    """Small 1-D convolutional classifier.

    Four conv stages (unpadded convolutions, so the input length must be
    large enough) followed by adaptive pooling to 4 positions and an MLP
    head. Input shape: (batch, in_channel, length).
    """

    def __init__(self, pretrained=False, in_channel=1, out_channel=10):
        super(CNN, self).__init__()
        # No pretrained weights exist for this architecture.
        if (pretrained == True):
            warnings.warn('Pretrained model is not available')
        self.layer1 = nn.Sequential(
            nn.Conv1d(in_channel, 16, kernel_size=15),
            nn.BatchNorm1d(16),
            nn.ReLU(inplace=True),
        )
        self.layer2 = nn.Sequential(
            nn.Conv1d(16, 32, kernel_size=3),
            nn.BatchNorm1d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2),
        )
        self.layer3 = nn.Sequential(
            nn.Conv1d(32, 64, kernel_size=3),
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True),
        )
        self.layer4 = nn.Sequential(
            nn.Conv1d(64, 128, kernel_size=3),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.AdaptiveMaxPool1d(4),
        )
        self.layer5 = nn.Sequential(
            nn.Linear(128 * 4, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, 64),
            nn.ReLU(inplace=True),
        )
        self.fc = nn.Linear(64, out_channel)

    def forward(self, x):
        # Convolutional feature extractor.
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        # Flatten to (batch, 128 * 4) for the MLP head.
        x = x.view(x.size(0), -1)
        x = self.layer5(x)
        return self.fc(x)
def get_dates(min_year, max_year):
    """List 'YYYY-MM-DD' strings from Mar 1 of min_year to Nov 10 of
    max_year (1-day step), skipping those already in ``already_done``.
    """
    start = date(min_year, 3, 1)
    end = date(max_year, 11, 10)

    def fmt(item):
        # assumes date_range yields tuples whose first element is the
        # date — TODO confirm against the date_range helper.
        return item[0].strftime('%Y-%m-%d')

    return [fmt(item) for item in date_range(start, end, 1) if (fmt(item) not in already_done)]
def test_DropColumns() -> None:
    """DropColumns must remove confound columns and expose a support mask."""
    transformer = DropColumns(apply_to=['confound'])
    transformer.fit(X_with_types)
    transformed = transformer.transform(X_with_types)
    mask = transformer.get_support()
    # Expected survivors: everything except the two confound columns.
    kept = ['a__:type:__continuous', 'b__:type:__continuous', 'e__:type:__categorical', 'f__:type:__categorical']
    assert_frame_equal(transformed, X_with_types[kept])
    dropped = X_with_types.drop(columns=['c__:type:__confound', 'd__:type:__confound'])
    assert_frame_equal(dropped, transformed)
    # Support mask mirrors which original columns were retained.
    assert all((mask == [1, 1, 0, 0, 1, 1]))
def conv_flops_counter_hook(conv_module, input, output):
    """Forward-hook that accumulates a 2-D convolution's FLOPs.

    Adds the count into ``conv_module.__flops__``. Hook signature is the
    standard (module, input-tuple, output). If ``conv_module.__mask__``
    is set, only masked-in spatial positions are counted.
    """
    inp = input[0]
    batch = inp.shape[0]
    (out_h, out_w) = output.shape[2:]
    (k_h, k_w) = conv_module.kernel_size
    groups = conv_module.groups
    out_ch = conv_module.out_channels
    # Cost of producing one output position across all filters
    # (grouped convs divide the filters among the groups).
    per_position = k_h * k_w * conv_module.in_channels * (out_ch // groups)
    # Number of output positions actually computed.
    positions = batch * out_h * out_w
    if conv_module.__mask__ is not None:
        mask = conv_module.__mask__.expand(batch, 1, out_h, out_w)
        positions = mask.sum()
    total = per_position * positions
    if conv_module.bias is not None:
        # One add per output channel per position.
        total = total + (out_ch * positions)
    conv_module.__flops__ += total
def _superimpose_single(reference, coords):
    """Superimpose ``coords`` onto ``reference`` via the numpy helper.

    Both arguments are torch tensors; the result tensors are created with
    ``coords.new_tensor`` so they share its dtype/device. Returns
    (aligned coordinates, rmsd).
    """
    ref_np = reference.detach().cpu().numpy()
    crd_np = coords.detach().cpu().numpy()
    (aligned, rmsd) = _superimpose_np(ref_np, crd_np)
    return (coords.new_tensor(aligned), coords.new_tensor(rmsd))
def get_bel_type_override(bt):
    """Map a raw BEL type name to its canonical family name.

    Collapses LUT/flip-flop variants to generic slice types, IO-logic
    prefixes to IOL_* families, and a handful of special cases; any
    unrecognized name is returned unchanged.
    """
    # All LUT flavours collapse to the generic slice LUT.
    if bt.endswith(('6LUT', '5LUT')) or bt in ('LUT_OR_MEM6', 'LUT6', 'LUT_OR_MEM5', 'LUT5'):
        return 'SLICE_LUTX'
    # Flip-flop flavours: short xFF / xFF2 names and the INIT variants.
    if ((len(bt) == 4 and bt.endswith('FF2'))
            or (len(bt) == 3 and bt.endswith('FF'))
            or bt in ('FF_INIT', 'REG_INIT')):
        return 'SLICE_FFX'
    # IO-logic cell prefixes map to IOL_<family>.
    for prefix in ('COMBUF_', 'IDDR_', 'IPFF_', 'OPFF_', 'OPTFF_', 'TFF_'):
        if bt.startswith(prefix):
            return 'IOL_' + prefix.rstrip('_')
    if bt.endswith('_VREF'):
        return 'IOB_VREF'
    if bt.endswith('_DIFFINBUF'):
        return 'IOB_DIFFINBUF'
    if bt.startswith('PSS_ALTO_CORE_PAD_'):
        return 'PSS_PAD'
    if bt.startswith(('LAGUNA_RX_REG', 'LAGUNA_TX_REG')):
        return 'LAGUNA_REGX'
    if bt.startswith('BSCAN'):
        return 'BSCAN'
    if bt == 'BUFGCTRL_BUFGCTRL':
        return 'BUFGCTRL'
    if bt in ('RAMB18E2_U_RAMB18E2', 'RAMB18E2_L_RAMB18E2'):
        return 'RAMB18E2_RAMB18E2'
    return bt
class ToNumpy():
    """Convert a PIL image (or array-like) to a CHW uint8 numpy array."""

    def __call__(self, pil_img):
        arr = np.array(pil_img, dtype=np.uint8)
        if arr.ndim < 3:
            # Grayscale: add a trailing channel axis so rollaxis works.
            arr = np.expand_dims(arr, axis=(- 1))
        # HWC -> CHW.
        return np.rollaxis(arr, 2)
def duplicate_dual_clean_bn(model):
    """Copy every clean-BN tensor over its noise-BN counterpart, in place.

    Parameters
    ----------
    model : nn.Module or dict
        Either a module (its ``state_dict()`` is used) or a state dict.
        For every key containing 'noise_bn', the tensor stored under the
        corresponding 'clean_bn' key is copied into it.

    Raises
    ------
    ValueError
        If no 'noise_bn' key exists, i.e. the model does not use dual BN.
        (Fix: corrected the typo 'make suer' in the original message and
        dropped the placeholder-free f-string.)
    """
    state = model if isinstance(model, dict) else model.state_dict()
    found_noise_bn = False
    for key in state:
        if 'noise_bn' in key:
            found_noise_bn = True
            # Mirror the clean-BN tensor into the noise-BN slot in place.
            clean = state[key.replace('noise_bn', 'clean_bn')].data
            state[key].data.copy_(clean)
    if not found_noise_bn:
        raise ValueError('Noise BN not found. Please make sure you are using dual BN in your model.')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.