code stringlengths 101 5.91M |
|---|
()
_option()
def cli():
    """CLI entry point: configure logging levels.

    Keeps this package's logger at INFO and silences chatty third-party
    libraries (py4j, botocore, boto3, PIL).
    """
    logging.getLogger('dgp2widker').setLevel(level=logging.INFO)
    for noisy_logger in ('py4j', 'botocore', 'boto3', 'PIL'):
        logging.getLogger(noisy_logger).setLevel(logging.CRITICAL)
class Scenario(BaseScenario):
    """Crypto-style communication scenario (speaker / listener / adversary).

    Three immobile agents: index 0 is the adversary, index 2 is the speaker
    (it also holds a private `key`), and index 1 is the good listener whose
    color displays the goal. Landmark colors are one-hot vectors over the
    communication dimension `dim_c`; one landmark is chosen as the goal.
    """

    def make_world(self):
        """Create the world: 3 non-moving CryptoAgents and 2 landmarks."""
        world = World()
        num_agents = 3
        num_adversaries = 1
        num_landmarks = 2
        world.dim_c = 4  # size of the communication channel / color vectors
        world.agents = [CryptoAgent() for i in range(num_agents)]
        for (i, agent) in enumerate(world.agents):
            agent.name = ('agent %d' % i)
            agent.collide = False
            agent.adversary = (True if (i < num_adversaries) else False)
            agent.speaker = (True if (i == 2) else False)
            agent.movable = False  # agents only communicate, never move
        world.landmarks = [Landmark() for i in range(num_landmarks)]
        for (i, landmark) in enumerate(world.landmarks):
            landmark.name = ('landmark %d' % i)
            landmark.collide = False
            landmark.movable = False
        self.reset_world(world)
        return world

    def reset_world(self, world):
        """Randomize goal, key, colors and agent/landmark states."""
        for (i, agent) in enumerate(world.agents):
            agent.color = np.array([0.25, 0.25, 0.25])
            if agent.adversary:
                agent.color = np.array([0.75, 0.25, 0.25])
            agent.key = None
        # give each landmark a one-hot color over dim_c
        color_list = [np.zeros(world.dim_c) for i in world.landmarks]
        for (i, color) in enumerate(color_list):
            color[i] += 1
        for (color, landmark) in zip(color_list, world.landmarks):
            landmark.color = color
        # goal color is displayed on agent 1; speaker (agent 2) gets a random key
        goal = np.random.choice(world.landmarks)
        world.agents[1].color = goal.color
        world.agents[2].key = np.random.choice(world.landmarks).color
        for agent in world.agents:
            agent.goal_a = goal
        for agent in world.agents:
            agent.state.p_pos = np.random.uniform((- 1), (+ 1), world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        for (i, landmark) in enumerate(world.landmarks):
            landmark.state.p_pos = np.random.uniform((- 1), (+ 1), world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)

    def benchmark_data(self, agent, world):
        """Diagnostics: (agent utterance, goal color)."""
        return (agent.state.c, agent.goal_a.color)

    def good_listeners(self, world):
        """Non-adversary, non-speaker agents (here: agent 1)."""
        return [agent for agent in world.agents if ((not agent.adversary) and (not agent.speaker))]

    def good_agents(self, world):
        """All non-adversary agents."""
        return [agent for agent in world.agents if (not agent.adversary)]

    def adversaries(self, world):
        """All adversary agents."""
        return [agent for agent in world.agents if agent.adversary]

    def reward(self, agent, world):
        """Dispatch to the role-specific reward."""
        return (self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world))

    def agent_reward(self, agent, world):
        """Good-team reward.

        Listeners close to the goal color reduce the penalty (negative
        squared error); adversaries far from the goal color increase the
        reward (their squared error is added). Silent agents (all-zero
        utterance) contribute nothing.
        """
        good_listeners = self.good_listeners(world)
        adversaries = self.adversaries(world)
        good_rew = 0
        adv_rew = 0
        for a in good_listeners:
            if (a.state.c == np.zeros(world.dim_c)).all():
                continue  # listener has not spoken yet
            else:
                good_rew -= np.sum(np.square((a.state.c - agent.goal_a.color)))
        for a in adversaries:
            if (a.state.c == np.zeros(world.dim_c)).all():
                continue
            else:
                # larger adversary reconstruction error => larger team reward
                adv_l1 = np.sum(np.square((a.state.c - agent.goal_a.color)))
                adv_rew += adv_l1
        return (adv_rew + good_rew)

    def adversary_reward(self, agent, world):
        """Adversary reward: negative squared distance of its utterance to
        the goal color; zero while it stays silent (all-zero utterance)."""
        rew = 0
        if (not (agent.state.c == np.zeros(world.dim_c)).all()):
            rew -= np.sum(np.square((agent.state.c - agent.goal_a.color)))
        return rew

    def observation(self, agent, world):
        """Role-dependent observation.

        speaker  -> concat(goal_color, key)
        listener -> concat(key, speaker utterances)
        adversary-> concat(speaker utterances)  (no key)
        """
        goal_color = np.zeros(world.dim_color)
        if (agent.goal_a is not None):
            goal_color = agent.goal_a.color
        # NOTE(review): entity_pos is computed but unused in the returns below
        entity_pos = []
        for entity in world.landmarks:
            entity_pos.append((entity.state.p_pos - agent.state.p_pos))
        # collect only the speaker's (non-None) utterances from other agents
        comm = []
        for other in world.agents:
            if ((other is agent) or (other.state.c is None) or (not other.speaker)):
                continue
            comm.append(other.state.c)
        confer = np.array([0])
        if (world.agents[2].key is None):
            # no key assigned yet: flag it and blank out key/goal
            confer = np.array([1])
            key = np.zeros(world.dim_c)
            goal_color = np.zeros(world.dim_c)
        else:
            key = world.agents[2].key
        prnt = False  # debug-print toggle
        if agent.speaker:
            if prnt:
                print('speaker')
                print(agent.state.c)
                print(np.concatenate(((([goal_color] + [key]) + [confer]) + [np.random.randn(1)])))
            return np.concatenate(([goal_color] + [key]))
        if ((not agent.speaker) and (not agent.adversary)):
            if prnt:
                print('listener')
                print(agent.state.c)
                print(np.concatenate((([key] + comm) + [confer])))
            return np.concatenate(([key] + comm))
        if ((not agent.speaker) and agent.adversary):
            if prnt:
                print('adversary')
                print(agent.state.c)
                print(np.concatenate((comm + [confer])))
            return np.concatenate(comm)
def IsTainted(split_line_of_utt):
    """Return True if field 8 of the split utterance line is 'tainted'."""
    if len(split_line_of_utt) <= 8:
        return False
    return split_line_of_utt[8] == 'tainted'
class ROIBoxHead(torch.nn.Module):
    """Generic box head: extract ROI features, predict class/box, then either
    post-process (inference) or compute losses (training)."""

    def __init__(self, cfg, in_channels):
        super(ROIBoxHead, self).__init__()
        # all sub-modules are built from the config via the project factories
        self.feature_extractor = make_roi_box_feature_extractor(cfg, in_channels)
        self.predictor = make_roi_box_predictor(cfg, self.feature_extractor.out_channels)
        self.post_processor = make_roi_box_post_processor(cfg)
        self.loss_evaluator = make_roi_box_loss_evaluator(cfg)

    def forward(self, features, proposals, targets=None):
        """Run the head.

        Returns (roi_features, proposals/detections, loss_dict); the loss
        dict is empty at inference time.
        """
        if self.training:
            # subsample proposals for loss computation; no gradients needed here
            with torch.no_grad():
                proposals = self.loss_evaluator.subsample(proposals, targets)
        roi_features = self.feature_extractor(features, proposals)
        class_logits, box_regression = self.predictor(roi_features)
        if not self.training:
            detections = self.post_processor((class_logits, box_regression), proposals)
            return (roi_features, detections, {})
        loss_classifier, loss_box_reg = self.loss_evaluator([class_logits], [box_regression])
        return (roi_features, proposals, {'loss_classifier': loss_classifier, 'loss_box_reg': loss_box_reg})
class ModulatedDeformConvPack(ModulatedDeformConv):
    """Modulated deformable convolution that predicts its own offsets/mask.

    `conv_offset` produces 3 * deformable_groups * kH * kW channels per
    location: x-offsets, y-offsets, and a modulation mask.
    """
    _version = 2  # state-dict layout version; see _load_from_state_dict

    def __init__(self, *args, **kwargs):
        super(ModulatedDeformConvPack, self).__init__(*args, **kwargs)
        # predicts 2 offset maps + 1 mask per deformable group and kernel cell
        self.conv_offset = nn.Conv2d(self.in_channels, (((self.deformable_groups * 3) * self.kernel_size[0]) * self.kernel_size[1]), kernel_size=self.kernel_size, stride=_pair(self.stride), padding=_pair(self.padding), bias=True)
        self.init_offset()

    def init_offset(self):
        """Zero-init offsets/mask so the layer starts as a plain conv."""
        self.conv_offset.weight.data.zero_()
        self.conv_offset.bias.data.zero_()

    def forward(self, x):
        out = self.conv_offset(x)
        # split into x-offsets, y-offsets and modulation mask
        (o1, o2, mask) = torch.chunk(out, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        mask = torch.sigmoid(mask)  # modulation scalars in (0, 1)
        return modulated_deform_conv(x, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        """Migrate version-1 checkpoints: old `*_offset.*` keys are renamed
        in place to the new `conv_offset.*` layout before loading."""
        version = local_metadata.get('version', None)
        if ((version is None) or (version < 2)):
            if (((prefix + 'conv_offset.weight') not in state_dict) and ((prefix[:(- 1)] + '_offset.weight') in state_dict)):
                state_dict[(prefix + 'conv_offset.weight')] = state_dict.pop((prefix[:(- 1)] + '_offset.weight'))
            if (((prefix + 'conv_offset.bias') not in state_dict) and ((prefix[:(- 1)] + '_offset.bias') in state_dict)):
                state_dict[(prefix + 'conv_offset.bias')] = state_dict.pop((prefix[:(- 1)] + '_offset.bias'))
        if ((version is not None) and (version > 1)):
            print_log('ModulatedDeformConvPack {} is upgraded to version 2.'.format(prefix.rstrip('.')), logger='root')
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
class SUNRGBDData(object):
    """Loader/converter for the SUN RGB-D detection dataset.

    Reads images, depth point clouds, calibration and 3D box labels from
    `<root_path>/sunrgbd_trainval` and converts them into per-sample info
    dicts (see `get_infos`).
    """

    def __init__(self, root_path, split='train', use_v1=False):
        """
        Args:
            root_path: dataset root containing `sunrgbd_trainval`.
            split: one of 'train' / 'val' / 'test'.
            use_v1: read labels from `label_v1` instead of `label`.
        """
        self.root_dir = root_path
        self.split = split
        self.split_dir = osp.join(root_path, 'sunrgbd_trainval')
        self.classes = ['bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser', 'night_stand', 'bookshelf', 'bathtub']
        self.cat2label = {cat: self.classes.index(cat) for cat in self.classes}
        self.label2cat = {label: self.classes[label] for label in range(len(self.classes))}
        assert (split in ['train', 'val', 'test'])
        split_file = osp.join(self.split_dir, f'{split}_data_idx.txt')
        mmcv.check_file_exist(split_file)
        # BUG FIX: materialize the ids. A bare `map` object has no len()
        # (breaking __len__) and can only be iterated once (breaking any
        # second pass over the sample ids).
        self.sample_id_list = list(map(int, mmcv.list_from_file(split_file)))
        self.image_dir = osp.join(self.split_dir, 'image')
        self.calib_dir = osp.join(self.split_dir, 'calib')
        self.depth_dir = osp.join(self.split_dir, 'depth')
        if use_v1:
            self.label_dir = osp.join(self.split_dir, 'label_v1')
        else:
            self.label_dir = osp.join(self.split_dir, 'label')

    def __len__(self):
        """Number of samples in the chosen split."""
        return len(self.sample_id_list)

    def get_image(self, idx):
        """Load the RGB image of sample `idx` (BGR ndarray via mmcv)."""
        img_filename = osp.join(self.image_dir, f'{idx:06d}.jpg')
        return mmcv.imread(img_filename)

    def get_image_shape(self, idx):
        """Return (height, width) of the image as int32."""
        image = self.get_image(idx)
        return np.array(image.shape[:2], dtype=np.int32)

    def get_depth(self, idx):
        """Load the depth point cloud stored in the .mat file's 'instance' key."""
        depth_filename = osp.join(self.depth_dir, f'{idx:06d}.mat')
        depth = sio.loadmat(depth_filename)['instance']
        return depth

    def get_calibration(self, idx):
        """Return (K, Rt): 3x3 intrinsics and 3x3 extrinsics, column-major."""
        calib_filepath = osp.join(self.calib_dir, f'{idx:06d}.txt')
        # context manager closes the file deterministically (was leaked before)
        with open(calib_filepath) as f:
            lines = [line.rstrip() for line in f]
        Rt = np.array([float(x) for x in lines[0].split(' ')])
        Rt = np.reshape(Rt, (3, 3), order='F').astype(np.float32)
        K = np.array([float(x) for x in lines[1].split(' ')])
        K = np.reshape(K, (3, 3), order='F').astype(np.float32)
        return (K, Rt)

    def get_label_objects(self, idx):
        """Parse the per-sample label file into SUNRGBDInstance objects."""
        label_filename = osp.join(self.label_dir, f'{idx:06d}.txt')
        with open(label_filename) as f:
            lines = [line.rstrip() for line in f]
        objects = [SUNRGBDInstance(line) for line in lines]
        return objects

    def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):
        """Convert samples to info dicts using a thread pool.

        Each info contains point-cloud path (subsampled points are written
        to `<root>/points`), image metadata, calibration and, optionally,
        annotations for objects of known classes.
        """

        def process_single_scene(sample_idx):
            # Convert one sample; also writes its subsampled points to disk.
            print(f'{self.split} sample_idx: {sample_idx}')
            SAMPLE_NUM = 50000
            pc_upright_depth = self.get_depth(sample_idx)
            pc_upright_depth_subsampled = random_sampling(pc_upright_depth, SAMPLE_NUM)
            info = dict()
            pc_info = {'num_features': 6, 'lidar_idx': sample_idx}
            info['point_cloud'] = pc_info
            mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))
            pc_upright_depth_subsampled.tofile(osp.join(self.root_dir, 'points', f'{sample_idx:06d}.bin'))
            info['pts_path'] = osp.join('points', f'{sample_idx:06d}.bin')
            img_path = osp.join('image', f'{sample_idx:06d}.jpg')
            image_info = {'image_idx': sample_idx, 'image_shape': self.get_image_shape(sample_idx), 'image_path': img_path}
            info['image'] = image_info
            (K, Rt) = self.get_calibration(sample_idx)
            calib_info = {'K': K, 'Rt': Rt}
            info['calib'] = calib_info
            if has_label:
                obj_list = self.get_label_objects(sample_idx)
                # filter once instead of re-filtering for every annotation field
                valid_objs = [obj for obj in obj_list if (obj.classname in self.cat2label.keys())]
                annotations = {}
                annotations['gt_num'] = len(valid_objs)
                if (annotations['gt_num'] != 0):
                    annotations['name'] = np.array([obj.classname for obj in valid_objs])
                    annotations['bbox'] = np.concatenate([obj.box2d.reshape(1, 4) for obj in valid_objs], axis=0)
                    annotations['location'] = np.concatenate([obj.centroid.reshape(1, 3) for obj in valid_objs], axis=0)
                    # stored dimensions are half-sizes; convert to full extents
                    annotations['dimensions'] = (2 * np.array([[obj.l, obj.h, obj.w] for obj in valid_objs]))
                    annotations['rotation_y'] = np.array([obj.heading_angle for obj in valid_objs])
                    # NOTE: index covers the unfiltered obj_list, as before
                    annotations['index'] = np.arange(len(obj_list), dtype=np.int32)
                    annotations['class'] = np.array([self.cat2label[obj.classname] for obj in valid_objs])
                    annotations['gt_boxes_upright_depth'] = np.stack([obj.box3d for obj in valid_objs], axis=0)
                info['annos'] = annotations
            return info

        sample_id_list = (sample_id_list if (sample_id_list is not None) else self.sample_id_list)
        with futures.ThreadPoolExecutor(num_workers) as executor:
            infos = executor.map(process_single_scene, sample_id_list)
        return list(infos)
def rar_wrapper(pde, model, conf):
    """Wrap `model.train` with residual-based adaptive refinement (RAR).

    Training runs in chunks of `conf['interval']` iterations; after each
    chunk (except the first) the `conf['count']` points with the largest
    PDE residual are added as anchors before training continues.
    """
    data = model.data
    train = model.train

    def wrapper(*args, **kwargs):
        total_iter = kwargs['iterations']
        interval = conf['interval']
        count = conf['count']
        assert total_iter % interval == 0
        kwargs['iterations'] = interval
        for chunk in range(total_iter // interval):
            if chunk == 0:
                # first chunk: plain training, nothing to refine yet
                train(*args, **kwargs)
                continue
            candidates = model.train_state.X_train
            residual = model.predict(candidates, operator=pde.pde)
            err = np.abs(residual).squeeze()
            if err.ndim == 2:
                err = np.sum(err, axis=0)
            elif err.ndim > 2:
                raise ValueError('RAR: Error occured when calculate pde residue: err.ndim > 2')
            mean_err = np.mean(err)
            print(f'mean residual: {mean_err}')
            # indices of the `count` worst-residual points
            worst = np.argsort(err)[(-count):]
            data.add_anchors(candidates[worst])
            train(*args, **kwargs, disregard_previous_best=True, save_model=False)

    return wrapper
def diverse_bleu(answers):
    """Diversity score: 1 minus the average pairwise BLEU over all answers.

    Each answer is an (id, text) pair; texts are lower-cased and
    whitespace-tokenized. A single answer is maximally non-diverse: 0.0.
    """
    m = len(answers)
    if m == 1:
        return 0.0
    total = 0
    for i in range(m):
        hypothesis = answers[i][1].lower().split()
        for j in range(i + 1, m):
            references = [answers[j][1].lower().split()]
            total += compute_bleu([references], [hypothesis], smooth=True)[0]
    # number of unordered pairs
    total /= (m * (m - 1)) / 2.0
    return 1.0 - total
def rotated_feature_align(features, best_rbboxes, spatial_scale=(1 / 8), points=1):
    """Functional wrapper around RotatedFeatureAlignFunction.

    Aligns `features` with the given rotated boxes; `spatial_scale` maps
    box coordinates to the feature map, `points` is the sample count.
    """
    aligned = RotatedFeatureAlignFunction.apply(features, best_rbboxes, spatial_scale, points)
    return aligned
class Filter2(nn.Module):
    """Frame scorer: additive attention between frame features and NetVLAD
    word clusters, min-max normalized to a per-frame score in (0, 1]."""

    def __init__(self, config):
        super().__init__()
        hidden_size = 512
        self.word_vlad = NetVLAD(cluster_size=4, feature_size=hidden_size)
        self.ws1 = nn.Linear(hidden_size, hidden_size, bias=True)
        self.ws2 = nn.Linear(hidden_size, hidden_size, bias=False)
        self.wst = nn.Linear(hidden_size, 1, bias=False)

    def forward(self, frames_feat, words_feat, words_len, words_mask, **kwargs):
        # treat both inputs as constants: no gradients flow back through them
        frames_feat = frames_feat.detach()
        words_feat = words_feat.detach()
        clusters = self.word_vlad(words_feat, words_mask, False)
        query = self.ws1(frames_feat)
        keys = self.ws2(clusters)
        # additive attention: score every (frame, cluster) pair
        scores = self.wst((query.unsqueeze(2) + keys.unsqueeze(1)).tanh()).squeeze((- 1))
        weights = F.softmax(scores, (- 1))
        frame_score = weights.mean(dim=(- 1))
        hi = frame_score.max(dim=(- 1), keepdim=True)[0]
        lo = frame_score.min(dim=(- 1), keepdim=True)[0]
        # min-max normalize; the epsilon guards against a 0/0 when all equal
        frame_score = (frame_score - lo + 1e-10) / (hi - lo + 1e-10)
        return frame_score.unsqueeze((- 1))
class SublayerConnection(nn.Module):
    """Pre-norm residual wrapper: x + dropout(sublayer(LayerNorm(x)))."""

    def __init__(self, d_model, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Apply `sublayer` to the normalized input, then add the residual."""
        residual = x
        out = sublayer(self.norm(x))
        return residual + self.dropout(out)
def test_actionAngleTorus_interppot_freqs():
    """Torus-machinery frequencies must agree between a potential and its
    interpolated version (loose relative tolerances)."""
    from galpy.actionAngle import actionAngleTorus
    from galpy.potential import LogarithmicHaloPotential, interpRZPotential
    lp = LogarithmicHaloPotential(normalize=1.0)
    # interpolated grid version of the same potential (C-accelerated)
    ip = interpRZPotential(RZPot=lp, interpPot=True, interpDens=True, interpRforce=True, interpzforce=True, enable_c=True)
    aAT = actionAngleTorus(pot=lp)
    aATi = actionAngleTorus(pot=ip)
    # test actions: (J_R, J_phi, J_z)
    (jr, jphi, jz) = (0.05, 1.1, 0.02)
    om = aAT.Freqs(jr, jphi, jz)
    omi = aATi.Freqs(jr, jphi, jz)
    # relative agreement; the vertical frequency tolerance is the loosest
    assert (numpy.fabs(((om[0] - omi[0]) / om[0])) < 0.2), 'Radial frequency computed using the torus machine does not agree between potential and interpolated potential'
    assert (numpy.fabs(((om[1] - omi[1]) / om[1])) < 0.2), 'Azimuthal frequency computed using the torus machine does not agree between potential and interpolated potential'
    assert (numpy.fabs(((om[2] - omi[2]) / om[2])) < 0.8), 'Vertical frequency computed using the torus machine does not agree between potential and interpolated potential'
    return None
class test_training(unittest.TestCase):
    """End-to-end training test: runs model.py on two sample folders and
    checks that a model directory containing 'onetwo' is produced."""

    def setUp(self, prevdir=prevdir, training_scripts=training_scripts):
        """Point settings.json at the scripts under test; disable optional stages."""
        os.chdir(prevdir)
        # context managers close the handles deterministically
        # (previously raw open()/close() pairs that leaked on error)
        with open('settings.json', 'r') as gopen:
            settings = json.load(gopen)
        settings['default_training_script'] = training_scripts
        settings['default_text_features'] = ['nltk_features']
        for flag in ('select_features', 'scale_features', 'reduce_dimensions',
                     'visualize_data', 'clean_data', 'augment_data', 'model_compress'):
            settings[flag] = False
        settings['remove_outliers'] = True
        with open('settings.json', 'w') as jsonfile:
            json.dump(settings, jsonfile)

    def tearDown(self, textdir=textdir, prevdir=prevdir, training_scripts=training_scripts, clean_data=clean_data, augment_data=augment_data, model_compress=model_compress):
        """Restore the settings that setUp overrode."""
        os.chdir(prevdir)
        with open('settings.json', 'r') as gopen:
            settings = json.load(gopen)
        settings['default_training_script'] = training_scripts
        settings['clean_data'] = clean_data
        settings['augment_data'] = augment_data
        settings['model_compress'] = model_compress
        with open('settings.json', 'w') as jsonfile:
            json.dump(settings, jsonfile)

    def test_training(self, cur_dir=cur_dir, train_dir=train_dir, model_dir=model_dir, clean_data=clean_data, augment_data=augment_data, test_dir=test_dir):
        """Copy fixtures, run the trainer, then verify and clean the output."""
        os.chdir(train_dir)
        shutil.copytree(test_dir + '/helpers/model_test/one', os.getcwd() + '/one')
        shutil.copytree(test_dir + '/helpers/model_test/two', os.getcwd() + '/two')
        os.chdir(model_dir)
        os.system('python3 model.py text 2 c onetwo one two')
        os.chdir(train_dir)
        shutil.rmtree('one')
        shutil.rmtree('two')
        os.chdir(textdir)
        found = False
        for entry in os.listdir():
            if entry.find('onetwo') >= 0:
                # produced model directory exists: remove it and stop
                found = True
                shutil.rmtree(entry)
                break
            # remove stray files preceding the match (original behavior)
            os.remove(entry)
        self.assertEqual(True, found)
class ConfigTester(object):
    """Reusable harness for testing a `PretrainedConfig` subclass.

    `parent` is the unittest.TestCase that supplies the assert* methods;
    `inputs_dict` holds the kwargs used to build test config instances.
    """

    def __init__(self, parent, config_class=None, has_text_modality=True, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs

    def create_and_test_config_common_properties(self):
        """Common properties exist, are settable, and accepted by __init__."""
        config = self.config_class(**self.inputs_dict)
        common_properties = ['hidden_size', 'num_attention_heads', 'num_hidden_layers']
        if self.has_text_modality:
            common_properties.extend(['vocab_size'])
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f'`{prop}` does not exist')
        # each property must be settable via attribute assignment...
        for (idx, name) in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(getattr(config, name), idx, msg=f'`{name} value {idx} expected, but was {getattr(config, name)}')
            except NotImplementedError:
                # some configs deliberately forbid setting a property
                pass
        # ...and via the constructor
        for (idx, name) in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(getattr(config, name), idx, msg=f'`{name} value {idx} expected, but was {getattr(config, name)}')
            except NotImplementedError:
                pass

    def create_and_test_config_to_json_string(self):
        """to_json_string round-trips every constructor kwarg."""
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for (key, value) in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        """Saving to and loading from a JSON file preserves the config."""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'config.json')
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        """save_pretrained / from_pretrained round-trip preserves the config."""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        """num_labels drives id2label/label2id sizes, also after reassignment."""
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        """Non-composite configs must be constructible with no arguments."""
        if self.config_class.is_composition:
            # composite configs require sub-configs; skip
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        """Every key in config_common_kwargs must land on the config unchanged."""
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for (key, value) in config_common_kwargs.items():
            if (key == 'torch_dtype'):
                # torch_dtype can only be verified when torch is installed
                if (not is_torch_available()):
                    continue
                else:
                    import torch
                    if (config.torch_dtype != torch.float16):
                        wrong_values.append(('torch_dtype', config.torch_dtype, torch.float16))
            elif (getattr(config, key) != value):
                wrong_values.append((key, getattr(config, key), value))
        if (len(wrong_values) > 0):
            errors = '\n'.join([f'- {v[0]}: got {v[1]} instead of {v[2]}' for v in wrong_values])
            raise ValueError(f'''The following keys were not properly set in the config:
{errors}''')

    def run_common_tests(self):
        """Run every check in sequence."""
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
_pipeline_test  # NOTE(review): appears to be a mangled decorator (likely `@is_pipeline_test`) — restore from upstream
class PipelinePadTest(unittest.TestCase):
    """Tests for the `_pad` helper that batches and pads pipeline tensors."""

    _torch  # NOTE(review): likely a mangled `@require_torch` decorator — confirm
    def test_pipeline_padding(self):
        """Right/left padding of input_ids and attention_mask to the batch max length."""
        import torch
        items = [{'label': 'label1', 'input_ids': torch.LongTensor([[1, 23, 24, 2]]), 'attention_mask': torch.LongTensor([[0, 1, 1, 0]])}, {'label': 'label2', 'input_ids': torch.LongTensor([[1, 23, 24, 43, 44, 2]]), 'attention_mask': torch.LongTensor([[0, 1, 1, 1, 1, 0]])}]
        # non-tensor fields are simply collected into a list
        self.assertEqual(_pad(items, 'label', 0, 'right'), ['label1', 'label2'])
        # pad value 10 is appended on the right / prepended on the left
        self.assertTrue(torch.allclose(_pad(items, 'input_ids', 10, 'right'), torch.LongTensor([[1, 23, 24, 2, 10, 10], [1, 23, 24, 43, 44, 2]])))
        self.assertTrue(torch.allclose(_pad(items, 'input_ids', 10, 'left'), torch.LongTensor([[10, 10, 1, 23, 24, 2], [1, 23, 24, 43, 44, 2]])))
        self.assertTrue(torch.allclose(_pad(items, 'attention_mask', 0, 'right'), torch.LongTensor([[0, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 0]])))

    _torch  # NOTE(review): likely a mangled `@require_torch` decorator — confirm
    def test_pipeline_image_padding(self):
        """Equal-size pixel tensors are stacked along the batch dimension."""
        import torch
        items = [{'label': 'label1', 'pixel_values': torch.zeros((1, 3, 10, 10))}, {'label': 'label2', 'pixel_values': torch.zeros((1, 3, 10, 10))}]
        self.assertEqual(_pad(items, 'label', 0, 'right'), ['label1', 'label2'])
        self.assertTrue(torch.allclose(_pad(items, 'pixel_values', 10, 'right'), torch.zeros((2, 3, 10, 10))))

    _torch  # NOTE(review): likely a mangled `@require_torch` decorator — confirm
    def test_pipeline_offset_mapping(self):
        """offset_mappings are padded along the sequence dimension."""
        import torch
        items = [{'offset_mappings': torch.zeros([1, 11, 2], dtype=torch.long)}, {'offset_mappings': torch.zeros([1, 4, 2], dtype=torch.long)}]
        self.assertTrue(torch.allclose(_pad(items, 'offset_mappings', 0, 'right'), torch.zeros((2, 11, 2), dtype=torch.long)))
def create_var(tensor, requires_grad=False):
    """Wrap `tensor` in an autograd Variable (legacy PyTorch API)."""
    wrapped = Variable(tensor, requires_grad=requires_grad)
    return wrapped
class UNet(nn.Module):
    """U-Net for 2-D segmentation with unpadded convolutions.

    Naming note: here `dec*` are the contracting (downsampling) blocks and
    `enc*` the expanding (upsampling) blocks — inverted w.r.t. the usual
    convention. Skip connections are resized with bilinear upsampling.
    """

    def __init__(self, input_dim=1, num_classes=2):
        super(UNet, self).__init__()
        self.num_classes = num_classes
        # contracting path: 64 -> 128 -> 256 -> 512 channels
        self.dec1 = UNetDec(input_dim, 64)
        self.dec2 = UNetDec(64, 128)
        self.dec3 = UNetDec(128, 256)
        self.dec4 = UNetDec(256, 512, dropout=True)
        # bottleneck with dropout, then transposed-conv upsample back to 512
        self.center = nn.Sequential(nn.Conv2d(512, 1024, 3), nn.ReLU(inplace=True), nn.Conv2d(1024, 1024, 3), nn.ReLU(inplace=True), nn.Dropout(), nn.ConvTranspose2d(1024, 512, 2, stride=2), nn.ReLU(inplace=True))
        # expanding path; input channels are doubled by skip concatenation
        self.enc4 = UNetEnc(1024, 512, 256)
        self.enc3 = UNetEnc(512, 256, 128)
        self.enc2 = UNetEnc(256, 128, 64)
        self.enc1 = nn.Sequential(nn.Conv2d(128, 64, 3), nn.ReLU(inplace=True), nn.Conv2d(64, 64, 3), nn.ReLU(inplace=True))
        self.final = nn.Conv2d(64, num_classes, 1)  # 1x1 conv to class logits

    def forward(self, x):
        """Return per-pixel class logits upsampled to the input's spatial size."""
        dec1 = self.dec1(x)
        dec2 = self.dec2(dec1)
        dec3 = self.dec3(dec2)
        dec4 = self.dec4(dec3)
        center = self.center(dec4)
        # each skip feature is bilinearly resized to match before concatenation
        enc4 = self.enc4(torch.cat([center, F.upsample_bilinear(dec4, center.size()[2:])], 1))
        enc3 = self.enc3(torch.cat([enc4, F.upsample_bilinear(dec3, enc4.size()[2:])], 1))
        enc2 = self.enc2(torch.cat([enc3, F.upsample_bilinear(dec2, enc3.size()[2:])], 1))
        enc1 = self.enc1(torch.cat([enc2, F.upsample_bilinear(dec1, enc2.size()[2:])], 1))
        return F.upsample_bilinear(self.final(enc1), x.size()[2:])
def gen_final(In, Out):
    """Yield the final conv block: a 3x3 conv (padding 1) then an in-place ReLU."""
    yield nn.Conv2d(In, Out, 3, padding=1)
    yield nn.ReLU(inplace=True)
def track_progress(func, tasks, bar_width=50, **kwargs):
    """Apply `func` to every task sequentially while rendering a progress bar.

    `tasks` is either a sized iterable, or an (iterator, length) tuple for
    iterators whose length cannot be inferred. Returns the list of results.
    """
    if isinstance(tasks, tuple):
        # (iterator, explicit length) form
        assert len(tasks) == 2
        assert isinstance(tasks[0], collections_abc.Iterable)
        assert isinstance(tasks[1], int)
        tasks, task_num = tasks[0], tasks[1]
    elif isinstance(tasks, collections_abc.Iterable):
        task_num = len(tasks)
    else:
        raise TypeError('"tasks" must be an iterable object or a (iterator, int) tuple')
    bar = ProgressBar(task_num, bar_width)
    results = []
    for item in tasks:
        results.append(func(item, **kwargs))
        bar.update()
    sys.stdout.write('\n')
    return results
class MultiLoss(nn.Module):
    """Weighted sum of several loss modules.

    Constructor positional args alternate weight and loss:
    (w0, loss0, w1, loss1, ...). `forward` returns (total, details) where
    `details` maps 'loss_<name>' to each component and 'loss' to the total.
    """

    def __init__(self, *args, dbg=()):
        nn.Module.__init__(self)
        assert len(args) % 2 == 0, 'args must be a list of (float, loss)'
        self.weights = []
        self.losses = nn.ModuleList()
        for pos in range(0, len(args), 2):
            weight = float(args[pos])
            criterion = args[pos + 1]
            assert isinstance(criterion, nn.Module), '%s is not a loss!' % criterion
            self.weights.append(weight)
            self.losses.append(criterion)

    def forward(self, select=None, **variables):
        """Evaluate the (optionally selected, 1-based) losses on `variables`."""
        assert (not select) or all(1 <= n <= len(self.losses) for n in select)
        details = dict()
        total = 0
        for num, (weight, criterion) in enumerate(zip(self.weights, self.losses), 1):
            if select is not None and num not in select:
                continue
            # pass a fresh copy of the shared variables to each loss
            value = criterion(**dict(variables))
            if isinstance(value, tuple):
                assert len(value) == 2 and isinstance(value[1], dict)
            else:
                value = (value, {criterion.name: value})
            total = total + weight * value[0]
            for key, val in value[1].items():
                details['loss_' + key] = float(val)
        details['loss'] = float(total)
        return total, details
def _compute_num_images_per_worker(cfg: CfgNode) -> int:
    """Return SOLVER.IMS_PER_BATCH divided evenly across distributed workers.

    Raises AssertionError if the batch size is not divisible by, or is
    smaller than, the worker count.
    """
    num_workers = get_world_size()
    batch = cfg.SOLVER.IMS_PER_BATCH
    assert batch % num_workers == 0, 'SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).'.format(batch, num_workers)
    assert batch >= num_workers, 'SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).'.format(batch, num_workers)
    return batch // num_workers
def main(args):
    """Detectron2-style launch entry: evaluate a checkpoint, or train."""
    cfg = setup(args)
    if args.eval_only:
        # evaluation-only path: build the model, load the weights, run tests
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            # additionally evaluate with test-time augmentation
            res.update(Trainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        return res
    # no-op string literal retained as an inline note from the upstream file
    "\n If you'd like to do anything fancier than the standard training logic,\n consider writing your own training loop (see plain_train_net.py) or\n subclassing the trainer.\n "
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        # run TTA evaluation via an EvalHook at the end of training
        trainer.register_hooks([hooks.EvalHook(0, (lambda : trainer.test_with_TTA(cfg, trainer.model)))])
    return trainer.train()
def compute_flat_grad(output, inputs, filter_input_ids=set(), retain_graph=False, create_graph=False):
    """Compute d(output)/d(inputs) flattened into one 1-D tensor.

    Inputs whose index appears in `filter_input_ids` are excluded from the
    autograd call and receive a zero-filled slot of matching size, so the
    output layout always follows `inputs`. Gradients stored on the params
    are cleared afterwards.
    """
    if create_graph:
        # higher-order gradients require keeping the graph alive
        retain_graph = True
    inputs = list(inputs)
    params = [p for i, p in enumerate(inputs) if i not in filter_input_ids]
    grads = torch.autograd.grad(output, params, retain_graph=retain_graph, create_graph=create_graph)
    flat_pieces = []
    next_grad = 0
    for i, param in enumerate(inputs):
        if i in filter_input_ids:
            # `zeros` is a project helper — presumably torch.zeros-like; confirm
            flat_pieces.append(zeros(param.view(-1).shape))
        else:
            flat_pieces.append(grads[next_grad].view(-1))
            next_grad += 1
    flat = torch.cat(flat_pieces)
    for param in params:
        param.grad = None
    return flat
class T5LMTokenizerWrapper(TokenizerWrapper):
    """Tokenizer wrapper for T5 prompt learning.

    Builds encoder inputs from a wrapped template and constructs decoder
    inputs / loss masks around T5's sentinel (extra_id) mask tokens.
    """

    def __init__(self, max_seq_length: int, tokenizer: PreTrainedTokenizer, truncate_method: Optional[str]='tail', decoder_max_length: Optional[int]=128, decode_from_pad: Optional[bool]=True, predict_eos_token: Optional[bool]=False, **kwargs):
        super().__init__(max_seq_length=max_seq_length, tokenizer=tokenizer, truncate_method=truncate_method)
        self.decoder_max_length = decoder_max_length
        self.decode_from_pad = decode_from_pad
        self.predict_eos = predict_eos_token
        if self.create_token_type_ids:
            # T5 has no segment embeddings
            logger.warning('token_type_ids is not valid in T5. will be depreciated.')

    def mask_token(self, i):
        """Return the i-th sentinel token (e.g. <extra_id_i>)."""
        return self.tokenizer.additional_special_tokens[i]

    def mask_token_ids(self, i):
        """Return the id of the i-th sentinel token."""
        return self.tokenizer.additional_special_tokens_ids[i]

    def num_special_tokens_to_add(self):
        # cached: the tokenizer call is comparatively expensive
        if (not hasattr(self, '_num_specials')):
            self._num_specials = self.tokenizer.num_special_tokens_to_add()
        return self._num_specials

    def tokenize_one_example(self, wrapped_example, teacher_forcing):
        """Turn one wrapped template example into model inputs.

        With teacher forcing the gold target tokens are spliced into the
        decoder inputs after each sentinel (loss on the target tokens);
        otherwise the decoder stops at the first sentinel (loss on that
        single position) and the rest of the template is ignored.
        """
        (wrapped_example, others) = wrapped_example
        if teacher_forcing:
            tgt_text = others['tgt_text']
            if isinstance(tgt_text, str):
                tgt_text = [tgt_text]
        encoder_inputs = defaultdict(list)
        num_mask_token_used = 0
        decoder_input_ids = []
        loss_ids = []
        for (piece_id, piece) in enumerate(wrapped_example):
            if (piece['text'] == self.template_mask_token):
                if teacher_forcing:
                    # sentinel id followed by the gold target tokens
                    decoder_input_ids.append(self.mask_token_ids(num_mask_token_used))
                    loss_ids.append(0)
                    encode_text = []
                    # leading space so the target starts a new word piece
                    tgt_text_ids = self.tokenizer.encode((' ' + tgt_text[num_mask_token_used]), add_special_tokens=False)
                    decoder_input_ids.extend(tgt_text_ids)
                    loss_ids.extend(([1] * len(tgt_text_ids)))
                else:
                    decoder_input_ids.append(self.mask_token_ids(num_mask_token_used))
                    encode_text = []
                    loss_ids.append(1)
                    break  # generation starts at the first mask; skip the rest
            else:
                # substitute placeholders for tokenizer-specific special tokens
                if (piece['text'] in self.special_tokens_maps.keys()):
                    to_replace = self.special_tokens_maps[piece['text']]
                    if (to_replace is not None):
                        piece['text'] = to_replace
                    else:
                        raise KeyError("This tokenizer doesn't specify {} token.".format(piece['text']))
                if (('soft_token_ids' in piece) and (piece['soft_token_ids'] != 0)):
                    encode_text = [0]  # placeholder id for soft tokens
                else:
                    encode_text = self.tokenizer.encode(piece['text'], add_special_tokens=False)
            # replicate per-piece metadata to token granularity
            encoding_length = len(encode_text)
            encoder_inputs['input_ids'].append(encode_text)
            for key in piece:
                if (key not in ['text', 'loss_ids']):
                    encoder_inputs[key].append(([piece[key]] * encoding_length))
        decoder_inputs = {'decoder_input_ids': decoder_input_ids, 'loss_ids': loss_ids}
        decoder_inputs = self.truncate_decoder_inputs(decoder_inputs)
        # truncate / flatten / add specials / pad the encoder side
        encoder_inputs = self.truncate(encoder_inputs=encoder_inputs)
        encoder_inputs.pop('shortenable_ids')
        encoder_inputs = self.concate_parts(input_dict=encoder_inputs)
        encoder_inputs = self.add_special_tokens(encoder_inputs=encoder_inputs)
        encoder_inputs['attention_mask'] = ([1] * len(encoder_inputs['input_ids']))
        encoder_inputs = self.padding(input_dict=encoder_inputs, max_len=self.max_seq_length, pad_id_for_inputs=self.tokenizer.pad_token_id)
        all_input_ids = {**encoder_inputs, **decoder_inputs}
        return all_input_ids

    def truncate_decoder_inputs(self, inputs):
        """Trim decoder inputs to decoder_max_length, append eos, then pad."""
        if self.decode_from_pad:
            # T5 decoding conventionally starts from the pad token
            inputs['decoder_input_ids'].insert(0, self.tokenizer.pad_token_id)
            inputs['loss_ids'].insert(0, 0)
        for key in inputs:
            # leave room for the eos token appended below
            inputs[key] = inputs[key][:(self.decoder_max_length - 1)]
        inputs['decoder_input_ids'].append(self.tokenizer.eos_token_id)
        if self.predict_eos:
            inputs['loss_ids'].append(1)
        else:
            inputs['loss_ids'].append(0)
        inputs = self.padding(inputs, max_len=self.decoder_max_length)
        return inputs
class ShowProcess():
    """Render a simple text progress bar on stdout.

    `i` is the current 1-based step, `max_steps` the total number of steps,
    and `max_arrow` the bar width in characters.
    """
    i = 1
    max_steps = 0
    max_arrow = 50

    def __init__(self, max_steps):
        self.max_steps = max_steps
        self.i = 1

    def show_process(self, i=None):
        """Draw the bar for step `i` (or the internal counter) and advance."""
        if i is not None:
            self.i = i
        filled = int(self.i * self.max_arrow / self.max_steps)
        remaining = self.max_arrow - filled
        percent = self.i * 100.0 / self.max_steps
        # e.g. [>>>>------]40.00%\r  (carriage return redraws in place)
        bar = '[' + '>' * filled + '-' * remaining + ']' + '%.2f' % percent + '%' + '\r'
        sys.stdout.write(bar)
        sys.stdout.flush()
        self.i += 1

    def close(self, words='done'):
        """Finish the bar: print `words` on a new line and reset the counter."""
        print('')
        print(words)
        self.i = 1
class Conv2dSame(nn.Module):
    """Conv2d with 'same' padding for arbitrary kernel/stride combinations.

    When the required padding is symmetric, a plain padded Conv2d is used;
    otherwise an explicit ConstantPad2d precedes an unpadded Conv2d.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super().__init__()
        padding = self.conv_same_pad(kernel_size, stride)
        if isinstance(padding, tuple):
            # asymmetric: (left, right) repeated for (top, bottom)
            pad_layer = nn.ConstantPad2d(padding * 2, 0)
            self.conv = nn.Sequential(pad_layer, nn.Conv2d(in_channels, out_channels, kernel_size, stride, 0))
        else:
            self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)

    def conv_same_pad(self, ksize, stride):
        """Return a symmetric pad int, or a (left, right) tuple when the
        required total padding is odd."""
        gap = ksize - stride
        if gap % 2 == 0:
            return gap // 2
        left = gap // 2
        return (left, left + 1)

    def forward(self, x):
        return self.conv(x)
def build_densepose_data_filter(cfg: CfgNode):
    """Factory: build a DensePoseDataFilter from the given config."""
    return DensePoseDataFilter(cfg)
def parse_config():
    """Declare and parse the training command-line options.

    All flags are optional (default None) except where noted; ``--cnn_filters``
    accepts one or more integers.
    """
    parser = argparse.ArgumentParser()
    # Flags grouped by value type; each becomes '--<name>'.
    str_flags = ['token_vocab', 'concept_vocab', 'predictable_token_vocab',
                 'token_char_vocab', 'concept_char_vocab', 'relation_vocab',
                 'train_data', 'dev_data', 'ckpt', 'MASTER_ADDR', 'MASTER_PORT']
    int_flags = ['token_char_dim', 'token_dim', 'concept_char_dim', 'concept_dim',
                 'char2word_dim', 'char2concept_dim', 'rel_dim', 'rnn_hidden_size',
                 'rnn_num_layers', 'embed_dim', 'ff_embed_dim', 'num_heads',
                 'snt_layers', 'graph_layers', 'inference_layers',
                 'total_train_steps', 'train_batch_size', 'dev_batch_size',
                 'warmup_steps', 'print_every', 'eval_every', 'world_size',
                 'gpus', 'start_rank']
    float_flags = ['dropout', 'unk_rate', 'lr']
    for name in str_flags:
        parser.add_argument('--' + name, type=str)
    for name in int_flags:
        parser.add_argument('--' + name, type=int)
    for name in float_flags:
        parser.add_argument('--' + name, type=float)
    # Flags that need a non-trivial spec.
    parser.add_argument('--pretrained_file', type=str, default=None)
    parser.add_argument('--cnn_filters', type=int, nargs='+')
    return parser.parse_args()
def get_data_loader(args):
    """Create PPI train/valid/test data loaders plus dataset metadata.

    Returns ((train, valid, test, fixed_train) loaders, info dict with
    'n_classes', 'num_feats' and the graph 'g' taken from the train split).
    """
    datasets = {mode: PPIDataset(mode=mode) for mode in ('train', 'valid', 'test')}

    def make_loader(dataset, workers, shuffle=False):
        # Every loader shares the batch size and collate function.
        return DataLoader(dataset, batch_size=args.batch_size, collate_fn=collate,
                          num_workers=workers, shuffle=shuffle)

    train_dataloader = make_loader(datasets['train'], 4, shuffle=True)
    # Unshuffled copy of the train loader for deterministic evaluation passes.
    fixed_train_dataloader = make_loader(datasets['train'], 4)
    valid_dataloader = make_loader(datasets['valid'], 2)
    test_dataloader = make_loader(datasets['test'], 2)
    data_info = {
        'n_classes': datasets['train'].labels.shape[1],
        'num_feats': datasets['train'].features.shape[1],
        'g': datasets['train'].graph,
    }
    return ((train_dataloader, valid_dataloader, test_dataloader, fixed_train_dataloader), data_info)
def dump_tabular(*args, **kwargs):
    """Flush the buffered tabular key/value pairs to the log and CSV files.

    Prints the current table (or only the tabular data when tabular-only
    logging is enabled), then appends one CSV row per registered tabular file.
    When new keys appear that the file's header does not cover, the whole CSV
    is rewritten with the union of old and new columns.  The tabular buffer is
    cleared at the end.

    NOTE(review): relies on module-level state (_disabled, _tabular,
    _log_tabular_only, _tabular_disabled, _tabular_fds, _tabular_headers,
    _tabular_header_written) defined elsewhere in this file.
    """
    if (not _disabled):
        # 'write_header' forces (True) or suppresses (False) the header row;
        # None means "write it once per file".
        wh = kwargs.pop('write_header', None)
        if (len(_tabular) > 0):
            if _log_tabular_only:
                table_printer.print_tabular(_tabular)
            else:
                for line in tabulate(_tabular).split('\n'):
                    log(line, *args, **kwargs)
            if (not _tabular_disabled):
                tabular_dict = dict(_tabular)
                for (tabular_file_name, tabular_fd) in list(_tabular_fds.items()):
                    keys = tabular_dict.keys()
                    if (tabular_file_name in _tabular_headers):
                        # Does the previously written header still cover every
                        # current key?  If not, rewrite the file with the union.
                        existing_keys = _tabular_headers[tabular_file_name]
                        if (not set(existing_keys).issuperset(set(keys))):
                            joint_keys = set(keys).union(set(existing_keys))
                            tabular_fd.flush()
                            read_fd = open(tabular_file_name, 'r')
                            reader = csv.DictReader(read_fd)
                            rows = list(reader)
                            read_fd.close()
                            tabular_fd.close()
                            # Reopen truncated and replay old rows under the new header.
                            tabular_fd = _tabular_fds[tabular_file_name] = open(tabular_file_name, 'w')
                            new_writer = csv.DictWriter(tabular_fd, fieldnames=list(joint_keys))
                            new_writer.writeheader()
                            for row in rows:
                                for key in joint_keys:
                                    if (key not in row):
                                        # Old rows lack the new columns; fill with NaN.
                                        row[key] = np.nan
                            new_writer.writerows(rows)
                            _tabular_headers[tabular_file_name] = list(joint_keys)
                    else:
                        _tabular_headers[tabular_file_name] = keys
                    writer = csv.DictWriter(tabular_fd, fieldnames=_tabular_headers[tabular_file_name])
                    # Header is written once per file unless 'write_header' overrides.
                    if (wh or ((wh is None) and (tabular_file_name not in _tabular_header_written))):
                        writer.writeheader()
                        _tabular_header_written.add(tabular_file_name)
                        _tabular_headers[tabular_file_name] = keys
                    # Columns missing from this dump are filled with NaN.
                    for key in _tabular_headers[tabular_file_name]:
                        if (key not in tabular_dict):
                            tabular_dict[key] = np.nan
                    writer.writerow(tabular_dict)
                    tabular_fd.flush()
        del _tabular[:]
def _draw_constraint(surface, constraint):
    """Draw a pymunk constraint on a pygame surface in dark gray.

    Dispatch by type: grooves and anchored joints become a line between their
    two attachment points; gear joints become a small circle on each connected
    body; anything else is a line between the two body positions.
    """
    color = pygame.color.THECOLORS['darkgray']
    if (isinstance(constraint, pymunk.GrooveJoint) and hasattr(constraint, 'groove_a')):
        # Groove endpoints are expressed relative to body a.
        pv1 = (constraint.a.position + constraint.groove_a)
        pv2 = (constraint.a.position + constraint.groove_b)
        pygame.draw.aalines(surface, color, False, [to_pygame(pv1, surface), to_pygame(pv2, surface)])
    elif isinstance(constraint, pymunk.PinJoint):
        pv1 = (constraint.a.position + constraint.anchr1.rotated(constraint.a.angle))
        pv2 = (constraint.b.position + constraint.anchr2.rotated(constraint.b.angle))
        pygame.draw.aalines(surface, color, False, [to_pygame(pv1, surface), to_pygame(pv2, surface)])
    elif isinstance(constraint, pymunk.GearJoint):
        # Fix: the second marker previously reused body a's position, so both
        # circles were drawn on the same body; a gear joint couples two bodies.
        pv1 = constraint.a.position
        pv2 = constraint.b.position
        pygame.draw.circle(surface, color, to_pygame(pv1, surface), 3)
        pygame.draw.circle(surface, color, to_pygame(pv2, surface), 3)
    elif hasattr(constraint, 'anchr1'):
        # Generic anchored joint (slide joint, damped spring, ...).
        pv1 = (constraint.a.position + constraint.anchr1.rotated(constraint.a.angle))
        pv2 = (constraint.b.position + constraint.anchr2.rotated(constraint.b.angle))
        pygame.draw.aalines(surface, color, False, [to_pygame(pv1, surface), to_pygame(pv2, surface)])
    else:
        # Fallback: connect the two body centres.
        pv1 = constraint.a.position
        pv2 = constraint.b.position
        pygame.draw.aalines(surface, color, False, [to_pygame(pv1, surface), to_pygame(pv2, surface)])
class CompressedWriteObjective(CompressedObjective):
    """Objective that minimizes write cost, with flops and peak size as
    secondary terms scaled by ``secondary_weight``."""

    __slots__ = ('chi', 'compress_late', 'secondary_weight')

    def __init__(self, chi='auto', compress_late=False, secondary_weight=0.001):
        self.secondary_weight = secondary_weight
        super().__init__(chi=chi, compress_late=compress_late)

    def get_compressed_stats_tracker(self, hg):
        """Return a write-cost stats tracker for hypergraph ``hg``."""
        return CompressedStatsTrackerWrite(hg, chi=self.chi, secondary_weight=self.secondary_weight)

    def __call__(self, trial):
        """Score ``trial``: log2(write) plus weighted log2(flops) and log2(peak size).

        Also records size/flops/write on the trial as a side effect.
        """
        stats = self.compute_compressed_stats(trial)
        w = self.secondary_weight
        score = math.log2(stats.write) + w * math.log2(stats.flops) + w * math.log2(stats.peak_size)
        trial['size'] = stats.write
        trial['flops'] = stats.flops
        trial['write'] = stats.write
        return score
def prepare_rnn(config):
    """Build train/test data loaders and a VAE_RNN model from a config dict.

    Loads a checkpoint when ``config['model']['load']`` is present (its stored
    vocabulary then overrides the one derived from the training data),
    otherwise constructs a fresh model.  Returns
    (train_loader, test_loader_or_None, model).
    """
    data_config = config['data']
    train_config = config['train']
    train_data = load_csv(data_config['train_path'])
    vocab = CharVocab.from_data(train_data)
    if 'load' in config['model']:
        print('LOADING')
        model = VAE_RNN.load(config['model']['load'])
        # The checkpoint's vocab is authoritative for a resumed model.
        vocab = model.vocab
    else:
        model = VAE_RNN(vocab=vocab, **config['model'])
    collate_pad = partial(collate, pad=vocab.pad)

    def make_loader(data, shuffle=False):
        # All loaders share the vocab, padding collate and batch size.
        dataset = StringDataset(vocab, data)
        return DataLoader(dataset, collate_fn=collate_pad,
                          batch_size=train_config['batch_size'], shuffle=shuffle)

    train_loader = make_loader(train_data, shuffle=True)
    if 'test_path' in data_config:
        test_loader = make_loader(load_csv(data_config['test_path']))
    else:
        test_loader = None
    return (train_loader, test_loader, model)
class BinPack(Environment[State]):
    """3-D bin-packing environment.

    At each step the agent selects an (EMS index, item index) pair; the item
    is placed at the lower corner of the chosen empty maximal space (EMS) and
    the EMS set is updated.  Observations expose the ``obs_num_ems`` largest
    EMSs, the item dimensions (optionally normalized by the container size)
    and a mask of feasible (EMS, item) actions.  An episode terminates when no
    feasible action remains or when an invalid action is taken.
    """

    def __init__(self, generator: Optional[Generator]=None, obs_num_ems: int=40, reward_fn: Optional[RewardFn]=None, normalize_dimensions: bool=True, debug: bool=False, viewer: Optional[Viewer[State]]=None):
        """Instantiate the environment; every injectable component has a default."""
        self.generator = (generator or RandomGenerator(max_num_items=20, max_num_ems=40, split_num_same_items=2))
        self.obs_num_ems = obs_num_ems
        self.reward_fn = (reward_fn or DenseReward())
        self.normalize_dimensions = normalize_dimensions
        self._viewer = (viewer or BinPackViewer('BinPack', render_mode='human'))
        # Debug mode additionally validates the EMS set at every step.
        self.debug = debug

    def __repr__(self) -> str:
        return '\n'.join(['BinPack environment:', f' - generator: {self.generator}', f' - max_num_items: {self.generator.max_num_items}', f' - obs_num_ems: {self.obs_num_ems}', f' - max_num_ems: {self.generator.max_num_ems}', f' - reward_fn: {self.reward_fn}', f' - normalize_dimensions: {self.normalize_dimensions}', f' - debug: {self.debug}'])

    def observation_spec(self) -> specs.Spec[Observation]:
        """Spec of the observation: EMS coordinates, item lengths and the masks.

        Coordinates/lengths are floats in [0, 1] when dimensions are
        normalized, otherwise int32 in [0, max container dimension].
        """
        obs_num_ems = self.obs_num_ems
        max_num_items = self.generator.max_num_items
        max_dim = max(self.generator.container_dims)
        if self.normalize_dimensions:
            ems_dict = {f'{coord_name}': specs.BoundedArray((obs_num_ems,), float, 0.0, 1.0, coord_name) for coord_name in ['x1', 'x2', 'y1', 'y2', 'z1', 'z2']}
        else:
            ems_dict = {f'{coord_name}': specs.BoundedArray((obs_num_ems,), jnp.int32, 0, max_dim, coord_name) for coord_name in ['x1', 'x2', 'y1', 'y2', 'z1', 'z2']}
        ems = specs.Spec(EMS, 'EMSSpec', **ems_dict)
        ems_mask = specs.BoundedArray((obs_num_ems,), bool, False, True, 'ems_mask')
        if self.normalize_dimensions:
            items_dict = {f'{axis}': specs.BoundedArray((max_num_items,), float, 0.0, 1.0, axis) for axis in ['x_len', 'y_len', 'z_len']}
        else:
            items_dict = {f'{axis}': specs.BoundedArray((max_num_items,), jnp.int32, 0, max_dim, axis) for axis in ['x_len', 'y_len', 'z_len']}
        items = specs.Spec(Item, 'ItemsSpec', **items_dict)
        items_mask = specs.BoundedArray((max_num_items,), bool, False, True, 'items_mask')
        items_placed = specs.BoundedArray((max_num_items,), bool, False, True, 'items_placed')
        action_mask = specs.BoundedArray((obs_num_ems, max_num_items), bool, False, True, 'action_mask')
        return specs.Spec(Observation, 'ObservationSpec', ems=ems, ems_mask=ems_mask, items=items, items_mask=items_mask, items_placed=items_placed, action_mask=action_mask)

    def action_spec(self) -> specs.MultiDiscreteArray:
        """An action is the pair (observed EMS index, item index)."""
        num_values = jnp.array([self.obs_num_ems, self.generator.max_num_items], jnp.int32)
        return specs.MultiDiscreteArray(num_values=num_values, name='action')

    def reset(self, key: chex.PRNGKey) -> Tuple[(State, TimeStep[Observation])]:
        """Sample a fresh instance from the generator and emit the first timestep."""
        state = self.generator(key)
        (state, observation, extras) = self._make_observation_and_extras(state)
        extras.update(invalid_action=jnp.array(False))
        if self.debug:
            extras.update(invalid_ems_from_env=jnp.array(False))
        timestep = restart(observation, extras)
        return (state, timestep)

    def step(self, state: State, action: chex.Array) -> Tuple[(State, TimeStep[Observation])]:
        """Place the chosen item in the chosen EMS (if valid) and advance the episode."""
        action_is_valid = state.action_mask[tuple(action)]
        (obs_ems_id, item_id) = action
        # The observation only shows the largest EMSs; map back to the true index.
        ems_id = state.sorted_ems_indexes[obs_ems_id]
        # Invalid actions leave the state untouched (but still end the episode below).
        next_state = jax.lax.cond(action_is_valid, (lambda s: self._pack_item(s, ems_id, item_id)), (lambda s: s), state)
        (next_state, observation, extras) = self._make_observation_and_extras(next_state)
        done = ((~ jnp.any(next_state.action_mask)) | (~ action_is_valid))
        reward = self.reward_fn(state, action, next_state, action_is_valid, done)
        extras.update(invalid_action=(~ action_is_valid))
        if self.debug:
            ems_are_all_valid = self._ems_are_all_valid(next_state)
            extras.update(invalid_ems_from_env=(~ ems_are_all_valid))
        timestep = jax.lax.cond(done, (lambda : termination(reward=reward, observation=observation, extras=extras)), (lambda : transition(reward=reward, observation=observation, extras=extras)))
        return (next_state, timestep)

    def render(self, state: State) -> Optional[NDArray]:
        """Delegate rendering of *state* to the viewer."""
        return self._viewer.render(state)

    def animate(self, states: Sequence[State], interval: int=200, save_path: Optional[str]=None) -> matplotlib.animation.FuncAnimation:
        """Animate a trajectory of states via the viewer."""
        return self._viewer.animate(states, interval, save_path)

    def close(self) -> None:
        self._viewer.close()

    def _make_observation_and_extras(self, state: State) -> Tuple[(State, Observation, Dict)]:
        """Build the observation and metrics from *state*, updating its caches.

        Side effects on *state*: stores sorted_ems_indexes and action_mask.
        """
        (obs_ems, obs_ems_mask, sorted_ems_indexes) = self._get_set_of_largest_ems(state.ems, state.ems_mask)
        state.sorted_ems_indexes = sorted_ems_indexes
        items = state.items
        action_mask = self._get_action_mask(obs_ems, obs_ems_mask, items, state.items_mask, state.items_placed)
        state.action_mask = action_mask
        if self.normalize_dimensions:
            (obs_ems, items) = self._normalize_ems_and_items(state, obs_ems, items)
        observation = Observation(ems=obs_ems, ems_mask=obs_ems_mask, items=items, items_mask=state.items_mask, items_placed=state.items_placed, action_mask=action_mask)
        extras = self._get_extras(state)
        return (state, observation, extras)

    def _get_extras(self, state: State) -> Dict:
        """Compute logging metrics: volume utilization, packed-item counts, active EMSs."""
        items_volume = jnp.sum((item_volume(state.items) * state.items_placed))
        volume_utilization = (items_volume / state.container.volume())
        packed_items = jnp.sum(state.items_placed)
        ratio_packed_items = (packed_items / jnp.sum(state.items_mask))
        active_ems = jnp.sum(state.ems_mask)
        extras = {'volume_utilization': volume_utilization, 'packed_items': packed_items, 'ratio_packed_items': ratio_packed_items, 'active_ems': active_ems}
        return extras

    def _normalize_ems_and_items(self, state: State, obs_ems: EMS, items: Item) -> Tuple[(EMS, Item)]:
        """Divide EMS coordinates and item lengths by the container dimensions."""
        (x_len, y_len, z_len) = container_item = item_from_space(state.container)
        # Each EMS coordinate pair is normalized by the matching container length.
        norm_space = Space(x1=x_len, x2=x_len, y1=y_len, y2=y_len, z1=z_len, z2=z_len)
        obs_ems = jax.tree_util.tree_map((lambda ems, container: (ems / container)), obs_ems, norm_space)
        items = jax.tree_util.tree_map((lambda item, container: (item / container)), items, container_item)
        return (obs_ems, items)

    def _ems_are_all_valid(self, state: State) -> chex.Array:
        """Debug check: no active EMS overlaps a placed item or leaves the container."""
        item_spaces = space_from_item_and_location(state.items, state.items_location)
        ems_intersect_items = jax.vmap(Space.intersect, in_axes=(0, None))(state.ems, item_spaces)
        ems_intersect_items &= jnp.outer(state.ems_mask, state.items_placed)
        ems_intersection_with_items = jnp.any(ems_intersect_items)
        ems_outside_container = jnp.any((state.ems_mask & (~ state.ems.is_included(state.container))))
        return ((~ ems_intersection_with_items) & (~ ems_outside_container))

    def _get_set_of_largest_ems(self, ems: EMS, ems_mask: chex.Array) -> Tuple[(EMS, chex.Array, chex.Array)]:
        """Select the obs_num_ems largest active EMSs (by volume, descending)."""
        ems_volumes = (ems.volume() * ems_mask)
        sorted_ems_indexes = jnp.argsort((- ems_volumes))
        obs_ems_indexes = sorted_ems_indexes[:self.obs_num_ems]
        obs_ems = jax.tree_util.tree_map((lambda x: x[obs_ems_indexes]), ems)
        obs_ems_mask = ems_mask[obs_ems_indexes]
        return (obs_ems, obs_ems_mask, sorted_ems_indexes)

    def _get_action_mask(self, obs_ems: EMS, obs_ems_mask: chex.Array, items: Item, items_mask: chex.Array, items_placed: chex.Array) -> chex.Array:
        """Feasibility mask of shape (obs_num_ems, max_num_items)."""
        def is_action_allowed(ems: EMS, ems_mask: chex.Array, item: Item, item_mask: chex.Array, item_placed: chex.Array) -> chex.Array:
            # Valid iff the item exists, is unplaced, the EMS is active and fits it.
            item_fits_in_ems = item_fits_in_item(item, item_from_space(ems))
            return ((((~ item_placed) & item_mask) & ems_mask) & item_fits_in_ems)
        action_mask = jax.vmap(jax.vmap(is_action_allowed, in_axes=(None, None, 0, 0, 0)), in_axes=(0, 0, None, None, None))(obs_ems, obs_ems_mask, items, items_mask, items_placed)
        return action_mask

    def _pack_item(self, state: State, ems_id: int, item_id: chex.Numeric) -> State:
        """Place item_id at the lower corner of the EMS and refresh the EMS set."""
        ems = tree_slice(state.ems, ems_id)
        state.items_location = tree_add_element(state.items_location, item_id, Location(ems.x1, ems.y1, ems.z1))
        state.items_placed = state.items_placed.at[item_id].set(True)
        state = self._update_ems(state, item_id)
        return state

    def _update_ems(self, state: State, item_id: chex.Numeric) -> State:
        """Recompute the EMS set after placing item_id.

        EMSs intersecting the new item are invalidated, then new EMSs obtained
        by cutting along the item's six faces are added back in.
        """
        item_space = space_from_item_and_location(tree_slice(state.items, item_id), tree_slice(state.items_location, item_id))
        ems_mask_after_intersect = ((~ item_space.intersect(state.ems)) & state.ems_mask)
        (intersections_ems_dict, intersections_mask_dict) = self._get_intersections_dict(state, item_space, ems_mask_after_intersect)
        new_ems = state.ems
        new_ems_mask = ems_mask_after_intersect
        for (intersection_ems, intersection_mask) in zip(intersections_ems_dict.values(), intersections_mask_dict.values()):
            (new_ems, new_ems_mask) = self._add_ems(intersection_ems, intersection_mask, new_ems, new_ems_mask)
        state.ems = new_ems
        state.ems_mask = new_ems_mask
        return state

    def _get_intersections_dict(self, state: State, item_space: Space, ems_mask_after_intersect: chex.Array) -> Tuple[(Dict[(str, Space)], Dict[(str, chex.Array)])]:
        """Candidate EMSs from cutting each EMS by the item's six bounding planes.

        Returns per-direction candidate spaces and masks; candidates that are
        empty, already covered by a surviving EMS, or strictly included in
        another candidate are masked out.
        """
        # One candidate set per face of the item (lower/upper along x, y, z).
        intersections_ems_dict: Dict[(str, Space)] = {f'{axis}_{direction}': item_space.hyperplane(axis, direction).intersection(state.ems) for (axis, direction) in itertools.product(['x', 'y', 'z'], ['lower', 'upper'])}
        intersections_mask_dict: Dict[(str, chex.Array)] = jax.tree_util.tree_map((lambda intersections_ems: ((state.ems_mask & (~ intersections_ems.is_empty())) & (~ (intersections_ems.is_included(state.ems) & ems_mask_after_intersect)))), intersections_ems_dict, is_leaf=(lambda x: isinstance(x, Space)))
        num_ems = len(state.ems_mask)
        # Drop candidates that are strictly included in candidates from another
        # (or the same) direction, keeping one of two mutually-included twins.
        for ((direction, direction_intersections_ems), (_, direction_intersections_mask)) in zip(intersections_ems_dict.items(), intersections_mask_dict.items()):
            for ((alt_direction, alt_direction_intersections_ems), (_, alt_direction_intersections_mask)) in zip(intersections_ems_dict.items(), intersections_mask_dict.items()):
                directions_included_in_alt_directions = jax.vmap(jax.vmap(Space.is_included, in_axes=(None, 0)), in_axes=(0, None))(direction_intersections_ems, alt_direction_intersections_ems)
                if (direction == alt_direction):
                    # A candidate trivially includes itself; ignore the diagonal.
                    directions_included_in_alt_directions = directions_included_in_alt_directions.at[(jnp.arange(num_ems), jnp.arange(num_ems))].set(False)
                directions_included_in_alt_directions = (directions_included_in_alt_directions & jnp.outer(direction_intersections_mask, alt_direction_intersections_mask))
                alt_directions_included_in_directions = jax.vmap(jax.vmap(Space.is_included, in_axes=(None, 0)), in_axes=(0, None))(alt_direction_intersections_ems, direction_intersections_ems)
                if (direction == alt_direction):
                    alt_directions_included_in_directions = alt_directions_included_in_directions.at[(jnp.arange(num_ems), jnp.arange(num_ems))].set(False)
                alt_directions_included_in_directions = (alt_directions_included_in_directions & jnp.outer(alt_direction_intersections_mask, direction_intersections_mask))
                # Remove a candidate only when the inclusion is one-way.
                to_remove = jnp.any((directions_included_in_alt_directions & (~ alt_directions_included_in_directions.T)), axis=(- 1))
                intersections_mask_dict[direction] &= (~ to_remove)
        return (intersections_ems_dict, intersections_mask_dict)

    def _add_ems(self, intersection_ems: EMS, intersection_mask: chex.Array, ems: EMS, ems_mask: chex.Array) -> Tuple[(EMS, chex.Array)]:
        """Insert each masked-in candidate EMS into the first free slot.

        A candidate is skipped when it is already included in an active EMS.
        """
        def add_one_ems(carry: Tuple[(EMS, chex.Array)], x: Tuple[(EMS, chex.Array)]) -> Tuple[(Tuple[(EMS, chex.Array)], None)]:
            (intersection_ems, intersection_mask) = x
            def add_the_ems(ems: EMS, ems_mask: chex.Array) -> Tuple[(EMS, chex.Array)]:
                # First inactive slot receives the new EMS.
                ems_index = jnp.argmin(ems_mask)
                ems = tree_add_element(ems, ems_index, intersection_ems)
                ems_mask = ems_mask.at[ems_index].set(True)
                return (ems, ems_mask)
            def inclusion_check(ems: EMS, ems_mask: chex.Array) -> chex.Array:
                # True when the candidate is not covered by any active EMS.
                is_included = (intersection_ems.is_included(ems) & ems_mask)
                return (~ is_included.any())
            (ems, ems_mask) = jax.lax.cond((intersection_mask & inclusion_check(*carry)), add_the_ems, (lambda *_: _), *carry)
            return ((ems, ems_mask), None)
        ((ems, ems_mask), _) = jax.lax.scan(add_one_ems, (ems, ems_mask), (intersection_ems, intersection_mask))
        return (ems, ems_mask)
class GCNSyntheticPerturb(nn.Module):
    """3-layer GCN whose adjacency is gated by a learnable perturbation mask.

    Used for counterfactual explanation: ``forward`` applies a differentiable
    sigmoid-relaxed perturbation, while ``forward_prediction`` thresholds it at
    0.5 to obtain a discrete counterfactual adjacency.  ``loss`` balances
    keeping/flipping the prediction against graph edit distance (weight beta).
    """

    def __init__(self, nfeat, nhid, nout, nclass, adj, dropout, beta, edge_additions=False):
        super(GCNSyntheticPerturb, self).__init__()
        self.adj = adj
        self.nclass = nclass
        # Trade-off between prediction loss and graph-distance loss.
        self.beta = beta
        self.num_nodes = self.adj.shape[0]
        # When True the mask may add edges absent from adj; otherwise it can only remove.
        self.edge_additions = edge_additions
        # One parameter per upper-triangle entry, diagonal included.
        self.P_vec_size = (int((((self.num_nodes * self.num_nodes) - self.num_nodes) / 2)) + self.num_nodes)
        if self.edge_additions:
            self.P_vec = Parameter(torch.FloatTensor(torch.zeros(self.P_vec_size)))
        else:
            self.P_vec = Parameter(torch.FloatTensor(torch.ones(self.P_vec_size)))
        self.reset_parameters()
        self.gc1 = GraphConvolutionPerturb(nfeat, nhid)
        self.gc2 = GraphConvolutionPerturb(nhid, nhid)
        self.gc3 = GraphConvolution(nhid, nout)
        # Classifier over the concatenation of all three GCN layer outputs.
        self.lin = nn.Linear(((nhid + nhid) + nout), nclass)
        self.dropout = dropout

    def reset_parameters(self, eps=(10 ** (- 4))):
        """Initialize P_vec slightly away from exact 0/1 values.

        NOTE(review): ``torch.add`` / ``torch.sub`` below return NEW tensors
        whose results are discarded, so P_vec is not actually modified here —
        confirm whether in-place ``self.P_vec.add_(...)`` / ``.sub_(...)`` was
        intended.
        """
        with torch.no_grad():
            if self.edge_additions:
                adj_vec = create_vec_from_symm_matrix(self.adj, self.P_vec_size).numpy()
                for i in range(len(adj_vec)):
                    if (i < 1):
                        adj_vec[i] = (adj_vec[i] - eps)
                    else:
                        adj_vec[i] = (adj_vec[i] + eps)
                torch.add(self.P_vec, torch.FloatTensor(adj_vec))
            else:
                torch.sub(self.P_vec, eps)

    def forward(self, x, sub_adj):
        """Differentiable forward pass using the sigmoid-relaxed perturbation."""
        self.sub_adj = sub_adj
        # Rebuild the symmetric perturbation matrix from the flat parameter vector.
        self.P_hat_symm = create_symm_matrix_from_vec(self.P_vec, self.num_nodes)
        A_tilde = torch.FloatTensor(self.num_nodes, self.num_nodes)
        A_tilde.requires_grad = True
        if self.edge_additions:
            A_tilde = (F.sigmoid(self.P_hat_symm) + torch.eye(self.num_nodes))
        else:
            # Element-wise gating of the existing edges, plus self-loops.
            A_tilde = ((F.sigmoid(self.P_hat_symm) * self.sub_adj) + torch.eye(self.num_nodes))
        # Symmetric normalization: D^(-1/2) A D^(-1/2); degrees are not differentiated through.
        D_tilde = get_degree_matrix(A_tilde).detach()
        D_tilde_exp = (D_tilde ** ((- 1) / 2))
        D_tilde_exp[torch.isinf(D_tilde_exp)] = 0
        norm_adj = torch.mm(torch.mm(D_tilde_exp, A_tilde), D_tilde_exp)
        x1 = F.relu(self.gc1(x, norm_adj))
        x1 = F.dropout(x1, self.dropout, training=self.training)
        x2 = F.relu(self.gc2(x1, norm_adj))
        x2 = F.dropout(x2, self.dropout, training=self.training)
        x3 = self.gc3(x2, norm_adj)
        x = self.lin(torch.cat((x1, x2, x3), dim=1))
        return F.log_softmax(x, dim=1)

    def forward_prediction(self, x):
        """Forward pass with the hard-thresholded perturbation; returns (log-probs, P).

        Requires ``forward`` to have been called first (uses self.P_hat_symm).
        """
        self.P = (F.sigmoid(self.P_hat_symm) >= 0.5).float()
        if self.edge_additions:
            A_tilde = (self.P + torch.eye(self.num_nodes))
        else:
            A_tilde = ((self.P * self.adj) + torch.eye(self.num_nodes))
        D_tilde = get_degree_matrix(A_tilde)
        D_tilde_exp = (D_tilde ** ((- 1) / 2))
        D_tilde_exp[torch.isinf(D_tilde_exp)] = 0
        norm_adj = torch.mm(torch.mm(D_tilde_exp, A_tilde), D_tilde_exp)
        x1 = F.relu(self.gc1(x, norm_adj))
        x1 = F.dropout(x1, self.dropout, training=self.training)
        x2 = F.relu(self.gc2(x1, norm_adj))
        x2 = F.dropout(x2, self.dropout, training=self.training)
        x3 = self.gc3(x2, norm_adj)
        x = self.lin(torch.cat((x1, x2, x3), dim=1))
        return (F.log_softmax(x, dim=1), self.P)

    def loss(self, output, y_pred_orig, y_pred_new_actual):
        """Counterfactual loss: negative NLL (active only while the prediction is
        unchanged) plus beta-weighted graph edit distance.  Returns
        (total, prediction loss, graph distance, counterfactual adjacency)."""
        pred_same = (y_pred_new_actual == y_pred_orig).float()
        output = output.unsqueeze(0)
        y_pred_orig = y_pred_orig.unsqueeze(0)
        if self.edge_additions:
            cf_adj = self.P
        else:
            cf_adj = (self.P * self.adj)
        cf_adj.requires_grad = True
        # Negated NLL: pushes the perturbed prediction AWAY from the original class.
        loss_pred = (- F.nll_loss(output, y_pred_orig))
        # Number of edge flips between original and counterfactual adjacency.
        loss_graph_dist = (sum(sum(abs((cf_adj - self.adj)))) / 2)
        loss_total = ((pred_same * loss_pred) + (self.beta * loss_graph_dist))
        return (loss_total, loss_pred, loss_graph_dist, cf_adj)
def pascal_palette():
    """Return the PASCAL VOC color -> class-index palette (RGB tuple keys).

    Classes 0-20 follow the standard VOC colormap; (224, 224, 192) is the
    void/ignore color mapped to 255.
    """
    class_colors = [
        (0, 0, 0), (128, 0, 0), (0, 128, 0), (128, 128, 0),
        (0, 0, 128), (128, 0, 128), (0, 128, 128), (128, 128, 128),
        (64, 0, 0), (192, 0, 0), (64, 128, 0), (192, 128, 0),
        (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
        (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0),
        (0, 64, 128),
    ]
    palette = {color: index for (index, color) in enumerate(class_colors)}
    palette[(224, 224, 192)] = 255
    return palette
def first_el(x: Any) -> Any:
    """Drill into nested lists/dicts and return the first leaf element."""
    while True:
        if is_listy(x):
            x = x[0]
        elif is_dict(x):
            # Descend into the value stored under the first key.
            x = x[list(x.keys())[0]]
        else:
            return x
def embed(*, header='', compile_flags=None, **kwargs):
    """Open an IPyflow interactive shell at the caller's frame.

    Mirrors ``IPython.embed``: temporarily swaps in an embedded shell
    instance, runs it, then restores the previously active shell singleton and
    the original ``sys.ps1``/``sys.ps2`` prompts.  ``using`` (default 'sync')
    selects the asyncio loop runner and autoawait behavior.
    """
    config = kwargs.get('config')
    if (config is None):
        config = load_default_config()
        # The embedded shell reuses the terminal shell's configuration section.
        config.InteractiveShellEmbed = config.TerminalInteractiveShell
        kwargs['config'] = config
    using = kwargs.get('using', 'sync')
    if using:
        kwargs['config'].update({'TerminalInteractiveShell': {'loop_runner': using, 'colors': 'NoColor', 'autoawait': (using != 'sync')}})
    # Remember the host prompts so the outer REPL can be restored afterwards.
    ps1 = None
    ps2 = None
    try:
        ps1 = sys.ps1
        ps2 = sys.ps2
    except AttributeError:
        # Not running inside an interactive interpreter; nothing to save.
        pass
    # Detach any existing shell singleton so the embedded one can be created.
    saved_shell_instance = InteractiveShell._instance
    if (saved_shell_instance is not None):
        cls = type(saved_shell_instance)
        cls.clear_instance()
    # Identify call sites so distinct embed() locations get distinct instances.
    frame = sys._getframe(1)
    shell = IPyflowInteractiveShellEmbed.instance(_init_location_id=('%s:%s' % (frame.f_code.co_filename, frame.f_lineno)), **kwargs)
    shell(header=header, stack_depth=2, compile_flags=compile_flags, _call_location_id=('%s:%s' % (frame.f_code.co_filename, frame.f_lineno)))
    IPyflowInteractiveShellEmbed.clear_instance()
    # Restore the previous shell singleton across its full MRO.
    if (saved_shell_instance is not None):
        cls = type(saved_shell_instance)
        cls.clear_instance()
        for subclass in cls._walk_mro():
            subclass._instance = saved_shell_instance
    if (ps1 is not None):
        sys.ps1 = ps1
        sys.ps2 = ps2
class mit_b4(Segformer_b0_b1):
    """SegFormer MiT-B4 variant: the shared B0/B1 architecture scaled up
    (embed dims 64/128/320/512, depths 3/8/27/3) with a 768-dim decoder."""

    def __init__(self, **kwargs):
        super(mit_b4, self).__init__(
            num_classes=21,
            patch_size=4,
            embed_dims=[64, 128, 320, 512],
            num_heads=[1, 2, 5, 8],
            mlp_ratios=[4, 4, 4, 4],
            qkv_bias=True,
            norm_layer=partial(nn.LayerNorm, eps=1e-06),
            depths=[3, 8, 27, 3],
            sr_ratios=[8, 4, 2, 1],
            drop_rate=0.0,
            drop_path_rate=0.1,
            decoder_dim=768,
        )
class VoxelBackBone8x(nn.Module):
    """Sparse 3-D convolutional backbone with 8x spatial downsampling.

    Four sparse conv stages (strides 1, 2, 4, 8) feed a final conv that
    compresses the height axis; multi-scale features and their strides are
    written into ``batch_dict`` for downstream heads.
    """

    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=0.001, momentum=0.01)
        # NOTE(review): grid_size is reversed to (z, y, x) ordering; the extra
        # [1, 0, 0] pads the z axis by one voxel — presumably so the strided
        # convs divide evenly; confirm against the voxelization config.
        self.sparse_shape = (grid_size[::(- 1)] + [1, 0, 0])
        self.conv_input = spconv.SparseSequential(spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'), norm_fn(16), nn.ReLU())
        block = post_act_block
        # Stage 1: stride 1, 16 channels.
        self.conv1 = spconv.SparseSequential(block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'))
        # Stage 2: downsample to stride 2, 32 channels.
        self.conv2 = spconv.SparseSequential(block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'), block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'), block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'))
        # Stage 3: downsample to stride 4, 64 channels.
        self.conv3 = spconv.SparseSequential(block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'), block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'), block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'))
        # Stage 4: downsample to stride 8; asymmetric padding (0, 1, 1) on z.
        self.conv4 = spconv.SparseSequential(block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'), block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'), block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'))
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        # Final conv compresses the z axis ((3,1,1) kernel, (2,1,1) stride) to 128 channels.
        self.conv_out = spconv.SparseSequential(spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad, bias=False, indice_key='spconv_down2'), norm_fn(128), nn.ReLU())
        self.num_point_features = 128
        self.backbone_channels = {'x_conv1': 16, 'x_conv2': 32, 'x_conv3': 64, 'x_conv4': 64}

    def forward(self, batch_dict):
        """Encode voxels and store encoded tensor, stride and multi-scale features in batch_dict."""
        (voxel_features, voxel_coords) = (batch_dict['voxel_features'], batch_dict['voxel_coords'])
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(features=voxel_features, indices=voxel_coords.int(), spatial_shape=self.sparse_shape, batch_size=batch_size)
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        out = self.conv_out(x_conv4)
        batch_dict.update({'encoded_spconv_tensor': out, 'encoded_spconv_tensor_stride': 8})
        batch_dict.update({'multi_scale_3d_features': {'x_conv1': x_conv1, 'x_conv2': x_conv2, 'x_conv3': x_conv3, 'x_conv4': x_conv4}})
        batch_dict.update({'multi_scale_3d_strides': {'x_conv1': 1, 'x_conv2': 2, 'x_conv3': 4, 'x_conv4': 8}})
        return batch_dict
def particle_freq(s, tokens=None):
    """Return the fraction of tokens in *s* tagged as particles (universal 'PRT').

    Args:
        s: raw text; tokenized with ``word_tokenize`` unless *tokens* is given.
        tokens: optional pre-computed token list (takes precedence over *s*).

    Returns:
        float in [0, 1]; 0.0 for empty input.
    """
    if tokens is None:  # fixed: was `tokens == None` (identity check is the idiom)
        tokens = word_tokenize(s)
    # Guard before tagging: avoids needless NLTK work and division by zero.
    if not tokens:
        return 0.0
    pos = pos_tag(tokens)
    particles = [token for (token, tag) in pos
                 if map_tag('en-ptb', 'universal', tag) == 'PRT']
    return float(len(particles)) / float(len(tokens))
def _get_items(split, mode):
    """Return item dicts (seq, drive, camera, frame index) for a split/mode.

    The benchmark test split ships no listing, so it yields an empty list.
    """
    file = kr.get_split_file(split, mode)
    if split == 'benchmark' and mode == 'test':
        return []
    # 'l'/'r' side codes map to the left/right camera directory names.
    side2cam = {'l': 'image_02', 'r': 'image_03'}
    items = []
    for raw in io.readlines(file):
        fields = raw.split()
        path_parts = fields[0].split('/')
        items.append({
            'seq': path_parts[0],
            'drive': path_parts[1],
            'cam': side2cam[fields[2]],
            'stem': int(fields[1]),
        })
    return items
def _deep_fusion_fc_layers(num_layers, layer_sizes, input_rois, input_weights, fusion_method, l2_weight_decay, keep_prob, num_final_classes, box_rep, is_training):
    """Build AVOD deep-fusion fully connected layers.

    Fuses the input ROI branches, then for ``num_layers`` rounds runs one
    FC+dropout stack per branch and re-fuses the branch outputs; finally
    builds the classification/regression output heads.
    """
    weights_regularizer = (slim.l2_regularizer(l2_weight_decay)
                           if l2_weight_decay > 0 else None)
    fusion_layer = avod_fc_layer_utils.feature_fusion(fusion_method, input_rois, input_weights)
    fusion_layer = slim.flatten(fusion_layer, scope='flatten')
    num_branches = len(input_rois)
    with slim.arg_scope([slim.fully_connected], weights_regularizer=weights_regularizer):
        for layer_idx in range(num_layers):
            # Scope numbering historically starts at fc6 (VGG heritage).
            fc_name_idx = 6 + layer_idx
            branches = []
            for branch_idx in range(num_branches):
                fc_layer = slim.fully_connected(
                    fusion_layer, layer_sizes[layer_idx],
                    scope='br{}_fc{}'.format(branch_idx, fc_name_idx))
                fc_drop = slim.dropout(
                    fc_layer, keep_prob=keep_prob, is_training=is_training,
                    scope='br{}_fc{}_drop'.format(branch_idx, fc_name_idx))
                branches.append(fc_drop)
            fusion_layer = avod_fc_layer_utils.feature_fusion(fusion_method, branches, input_weights)
        output_layers = build_output_layers(fusion_layer, num_final_classes, box_rep)
    return output_layers
class gpipe_encoder(nn.Module):
    """Wrapper that picks an encoder backbone by substring of *model_name*."""

    def __init__(self, model_name, **kwargs):
        super(gpipe_encoder, self).__init__()
        # Checked in order; the first matching substring wins.  As in the
        # original elif chain, no match leaves self.net unset.
        for marker, factory in (('MeSHProbeNet', MeSHProbeNet_encoder),
                                ('XMLCNN', XMLCNN_encoder),
                                ('BertXML', BaseBertModel)):
            if marker in model_name:
                self.net = factory(**kwargs)
                break

    def forward(self, input_variables):
        """Delegate to the selected encoder and return its context vectors."""
        return self.net(input_variables)
def gen_fixed_noise(noise, to_save):
    """Decode sentences from a fixed noise batch and write them to *to_save*.

    Runs the GAN generator and the autoencoder decoder in eval mode, cuts each
    sentence at its first '<eos>' token, and writes one sentence per line.
    """
    gan_gen.eval()
    autoencoder.eval()
    fake_hidden = gan_gen(noise)
    max_indices = autoencoder.generate(fake_hidden, args.maxlen, sample=args.sample)
    with open(to_save, 'w') as f:
        for row in max_indices.data.cpu().numpy():
            kept = []
            for ix in row:
                word = corpus.dictionary.idx2word[ix]
                if word == '<eos>':
                    # Everything after the first end-of-sentence marker is dropped.
                    break
                kept.append(word)
            f.write(' '.join(kept) + '\n')
def run_seq(cfg, scene, seq):
    """Compute radii for every point cloud of one sequence, optionally in parallel."""
    print(' Start {}'.format(seq))
    seq_dir = osp.join(cfg.dataset_root, scene, seq)
    pcd_names = uio.list_files(seq_dir, '*.ply', alphanum_sort=True)
    if cfg.threads > 1:
        # Lazy imports keep joblib optional for the single-threaded path.
        from joblib import Parallel, delayed
        import multiprocessing
        jobs = (delayed(compute_radius)(cfg, scene, seq, name) for name in pcd_names)
        Parallel(n_jobs=cfg.threads)(jobs)
    else:
        for name in pcd_names:
            compute_radius(cfg, scene, seq, name)
    print(' Finished {}'.format(seq))
def save_model(model, optimizer, save_variable_list, args, before_finetune=False):
    """Persist config, checkpoint and embeddings under ``args.save_path``.

    When *before_finetune* is true, every artifact uses its ``*_before`` name
    so the pre-finetune state survives next to the final one.
    """
    config_name = 'config.json' if not before_finetune else 'config_before.json'
    ckpt_name = 'checkpoint' if not before_finetune else 'checkpoint_before'
    entity_name = 'entity_embedding' if not before_finetune else 'entity_embedding_before'
    relation_name = 'relation_embedding' if not before_finetune else 'relation_embedding_before'

    with open(os.path.join(args.save_path, config_name), 'w') as fjson:
        json.dump(vars(args), fjson)

    checkpoint = dict(save_variable_list)
    checkpoint['model_state_dict'] = model.state_dict()
    checkpoint['optimizer_state_dict'] = optimizer.state_dict()
    torch.save(checkpoint, os.path.join(args.save_path, ckpt_name))

    # Embeddings are exported separately as plain numpy arrays.
    np.save(os.path.join(args.save_path, entity_name),
            model.entity_embedding.detach().cpu().numpy())
    np.save(os.path.join(args.save_path, relation_name),
            model.relation_embedding.detach().cpu().numpy())
def all_reg_init(y, delta_logp):
    """Return (y, delta_logp) plus three zero-initialized regularization accumulators.

    Each accumulator has shape (batch_size, 1), where batch_size is the
    leading dimension of *y*.
    """
    batch_size = y.shape[0]
    accumulators = tuple(jnp.zeros((batch_size, 1)) for _ in range(3))
    return (y, delta_logp) + accumulators
.parametrize('method', ['items', 'iteritems', 'iterkeys', 'itervalues', 'keys', 'popitem', 'update', 'values', 'viewitems', 'viewkeys', 'viewvalues', '__iter__', '__len__'])
def test_not_implemented(method, fbdict):
    """Each parametrized legacy-dict method on the fbdict fixture must raise
    NotImplementedError when called.

    NOTE(review): the @pytest.mark.parametrize decorator line above appears
    truncated in this chunk — confirm it is intact in the full file.
    """
    with pytest.raises(NotImplementedError):
        getattr(fbdict, method)()
def check_risk_budget(riskbudgets, n):
    """Validate a risk-budget vector against the number of assets ``n``.

    A ``None`` budget means "no budget" and is accepted as-is. Otherwise a
    ValueError is raised on missing (NaN) values, negative entries, a length
    mismatch with ``n``, or any budget below ``RISK_BUDGET_TOL``.
    """
    if riskbudgets is None:
        return
    if np.isnan(riskbudgets).sum() > 0:
        raise ValueError('Risk budget contains missing values')
    if (np.array(riskbudgets) < 0).sum() > 0:
        raise ValueError('Risk budget contains negative values')
    if n != len(riskbudgets):
        raise ValueError('Risk budget size is not equal to the number of asset.')
    # BUG FIX: the error text below says "One of the budget is smaller", so a
    # single budget under the tolerance must already fail; the previous
    # `all(...)` only fired when EVERY budget was below the tolerance.
    if any(v < RISK_BUDGET_TOL for v in riskbudgets):
        raise ValueError('One of the budget is smaller than {}. If you want a risk budget of 0 please remove the asset.'.format(RISK_BUDGET_TOL))
class MultiHeadAttention():
    """Keras/TF multi-head attention.

    mode=0: one large Dense per Q/K/V followed by reshape/transpose to split
    heads (batched in one matmul). mode=1: a separate TimeDistributed Dense
    per head, with results concatenated. Both paths end with a shared output
    projection (w_o) and dropout.
    """
    def __init__(self, n_head, d_model, dropout, mode=0):
        self.mode = mode
        self.n_head = n_head
        # Per-head key/query and value widths; assumes d_model divisible by n_head.
        self.d_k = self.d_v = d_k = d_v = (d_model // n_head)
        self.dropout = dropout
        if (mode == 0):
            # Single projections producing all heads at once.
            self.qs_layer = Dense((n_head * d_k), use_bias=False)
            self.ks_layer = Dense((n_head * d_k), use_bias=False)
            self.vs_layer = Dense((n_head * d_v), use_bias=False)
        elif (mode == 1):
            # One projection per head.
            self.qs_layers = []
            self.ks_layers = []
            self.vs_layers = []
            for _ in range(n_head):
                self.qs_layers.append(TimeDistributed(Dense(d_k, use_bias=False)))
                self.ks_layers.append(TimeDistributed(Dense(d_k, use_bias=False)))
                self.vs_layers.append(TimeDistributed(Dense(d_v, use_bias=False)))
        self.attention = ScaledDotProductAttention()
        self.w_o = TimeDistributed(Dense(d_model))
    def __call__(self, q, k, v, mask=None):
        """Apply multi-head attention; returns (outputs, attention_weights)."""
        (d_k, d_v) = (self.d_k, self.d_v)
        n_head = self.n_head
        if (self.mode == 0):
            qs = self.qs_layer(q)
            ks = self.ks_layer(k)
            vs = self.vs_layer(v)
            def reshape1(x):
                # [batch, len, n_head*d] -> [n_head*batch, len, d]
                s = tf.shape(x)
                x = tf.reshape(x, [s[0], s[1], n_head, (s[2] // n_head)])
                x = tf.transpose(x, [2, 0, 1, 3])
                x = tf.reshape(x, [(- 1), s[1], (s[2] // n_head)])
                return x
            qs = Lambda(reshape1)(qs)
            ks = Lambda(reshape1)(ks)
            vs = Lambda(reshape1)(vs)
            if (mask is not None):
                # Replicate the mask once per head along the batch axis.
                mask = Lambda((lambda x: K.repeat_elements(x, n_head, 0)))(mask)
            (head, attn) = self.attention(qs, ks, vs, mask=mask)
            def reshape2(x):
                # [n_head*batch, len, d_v] -> [batch, len, n_head*d_v]
                s = tf.shape(x)
                x = tf.reshape(x, [n_head, (- 1), s[1], s[2]])
                x = tf.transpose(x, [1, 2, 0, 3])
                x = tf.reshape(x, [(- 1), s[1], (n_head * d_v)])
                return x
            head = Lambda(reshape2)(head)
        elif (self.mode == 1):
            heads = []
            attns = []
            for i in range(n_head):
                qs = self.qs_layers[i](q)
                ks = self.ks_layers[i](k)
                vs = self.vs_layers[i](v)
                (head, attn) = self.attention(qs, ks, vs, mask)
                heads.append(head)
                attns.append(attn)
            head = (Concatenate()(heads) if (n_head > 1) else heads[0])
            attn = (Concatenate()(attns) if (n_head > 1) else attns[0])
        # Shared final projection back to d_model, then dropout.
        outputs = self.w_o(head)
        outputs = Dropout(self.dropout)(outputs)
        return (outputs, attn)
def generate_parameter_dict(seed, config, end_time, with_log):
    """Build the ABIDES old-vs-new comparison parameter dict plus its command.

    Logging knobs are either all enabled (with_log) or all left unset (None).
    """
    log_orders = True if with_log else None
    exchange_log_orders = True if with_log else None
    book_freq = 0 if with_log else None
    parameters = {'old': {'sha': 'f1968a56fdb55fd7c70be1db052be07cb701a5fb', 'script': 'abides_cmd.py', 'config': config}, 'new': {'sha': 'f1968a56fdb55fd7c70be1db052be07cb701a5fb', 'script': 'abides_cmd.py', 'config': config}, 'config_new': config, 'end-time': end_time, 'with_log': with_log, 'shared': {'end-time': end_time, 'end_time': end_time, 'seed': seed, 'verbose': 0, 'log_orders': log_orders, 'exchange_log_orders': exchange_log_orders, 'book_freq': book_freq}}
    # The launch command is derived from (and stored inside) the dict itself.
    parameters['command'] = generate_command(parameters)
    return parameters
def build_dataset(cfg):
    """Instantiate the dataset named by cfg.DATASET.NAME from the registry."""
    name = cfg.DATASET.NAME
    # Fails with a helpful message when the name is not registered.
    check_availability(name, DATASET_REGISTRY.registered_names())
    if cfg.VERBOSE:
        print('Loading dataset: {}'.format(name))
    dataset_cls = DATASET_REGISTRY.get(name)
    return dataset_cls(cfg)
class ColorInfo():
    """Display parameters for one channel: either a single base color or a
    color table, plus opacity/range/gamma settings. Convertible to the ctypes
    struct bpConverterTypesC_ColorInfo."""

    def __init__(self):
        self.mBaseColor = Color()
        self.mIsBaseColorMode = True
        self.mColorTableSize = 0
        self.mOpacity = 0
        self.mRangeMin = 0
        self.mRangeMax = 255
        self.mGammaCorrection = 1
        self.mColorTableList: List[Color] = []

    def set_base_color(self, base_color: Color):
        """Switch to single-color mode with the given base color."""
        self.mIsBaseColorMode = True
        self.mBaseColor = base_color

    def set_color_table(self, color_list: List[Color]):
        """Switch to color-table mode with the given table."""
        self.mIsBaseColorMode = False
        self.mColorTableList = color_list
        self.mColorTableSize = len(color_list)

    def get_c_color_info(self):
        """Marshal this object into a freshly allocated C ColorInfo struct."""
        info = bpConverterTypesC_ColorInfo()
        info.mIsBaseColorMode = self.mIsBaseColorMode
        info.mBaseColor = self.mBaseColor.get_c_color()
        self._create_color_table(info)
        info.mOpacity = self.mOpacity
        info.mRangeMin = self.mRangeMin
        info.mRangeMax = self.mRangeMax
        info.mGammaCorrection = self.mGammaCorrection
        return info

    def _create_color_table(self, c_color_info):
        """Fill the C struct's color table from mColorTableList."""
        table_len = len(self.mColorTableList)
        c_color_info.mColorTableSize = table_len
        # ctypes array type is created on the fly with the exact length.
        c_color_info.mColorTable = (bpConverterTypesC_Color * table_len)()
        for idx, entry in enumerate(self.mColorTableList):
            c_color_info.mColorTable[idx] = entry.get_c_color()

    def __str__(self):
        return '{} (IsBase: {} BaseColor: {} ColorTableSize: {} Opacity: {} RangeMin: {} RangeMax: {} GammaCorrection: {})'.format(self.__class__.__name__, self.mIsBaseColorMode, self.mBaseColor, self.mColorTableSize, self.mOpacity, self.mRangeMin, self.mRangeMax, self.mGammaCorrection)
class ReassembleBlocks(BaseModule):
    """Reassemble ViT token features into multi-scale spatial maps (DPT-style).

    Each selected transformer stage is projected to its own channel count and
    resized (4x up, 2x up, identity, 2x down) so the outputs form a pyramid.
    readout_type controls the class token: dropped ('ignore'), added to every
    patch token ('add'), or concatenated and projected back ('project').
    """
    def __init__(self, in_channels=768, out_channels=[96, 192, 384, 768], readout_type='ignore', patch_size=16, init_cfg=None):
        super(ReassembleBlocks, self).__init__(init_cfg)
        assert (readout_type in ['ignore', 'add', 'project'])
        self.readout_type = readout_type
        self.patch_size = patch_size
        # 1x1 convs mapping the transformer width to each pyramid level's width.
        self.projects = nn.ModuleList([ConvModule(in_channels=in_channels, out_channels=out_channel, kernel_size=1, act_cfg=None) for out_channel in out_channels])
        # Per-level spatial resampling: 4x up, 2x up, identity, 2x down.
        self.resize_layers = nn.ModuleList([nn.ConvTranspose2d(in_channels=out_channels[0], out_channels=out_channels[0], kernel_size=4, stride=4, padding=0), nn.ConvTranspose2d(in_channels=out_channels[1], out_channels=out_channels[1], kernel_size=2, stride=2, padding=0), nn.Identity(), nn.Conv2d(in_channels=out_channels[3], out_channels=out_channels[3], kernel_size=3, stride=2, padding=1)])
        if (self.readout_type == 'project'):
            # One projection per stage for folding the cls token back in.
            self.readout_projects = nn.ModuleList()
            for _ in range(len(self.projects)):
                self.readout_projects.append(nn.Sequential(Linear((2 * in_channels), in_channels), build_activation_layer(dict(type='GELU'))))
    def forward(self, inputs):
        """inputs: list of (feature_map, cls_token) pairs, one per stage."""
        assert isinstance(inputs, list)
        out = []
        for (i, x) in enumerate(inputs):
            assert (len(x) == 2)
            (x, cls_token) = (x[0], x[1])
            feature_shape = x.shape
            if (self.readout_type == 'project'):
                # Flatten to tokens, concat the cls token onto each, project back.
                x = x.flatten(2).permute((0, 2, 1))
                readout = cls_token.unsqueeze(1).expand_as(x)
                x = self.readout_projects[i](torch.cat((x, readout), (- 1)))
                x = x.permute(0, 2, 1).reshape(feature_shape)
            elif (self.readout_type == 'add'):
                # Broadcast-add the cls token over every spatial position.
                x = (x.flatten(2) + cls_token.unsqueeze((- 1)))
                x = x.reshape(feature_shape)
            else:
                # 'ignore': drop the cls token entirely.
                pass
            x = self.projects[i](x)
            x = self.resize_layers[i](x)
            out.append(x)
        return out
def copy_etomo_files(src, name, target):
    """Copy the etomo alignment/reconstruction files for tilt series `name`
    from `src` into `target`; the local alignment transform is optional."""
    # Optional local alignment transform, copied only when present.
    if exists(join(src, name + 'local.xf')):
        cp(join(src, name + 'local.xf'), target)
    # Remaining files, copied in the same order as before.
    required = (
        name + '.xf',
        'eraser.com',
        'ctfcorrection.com',
        'tilt.com',
        'newst.com',
        name + '.xtilt',
        name + '.tlt',
        name + '.defocus',
        'rotation.xf',
    )
    for fname in required:
        cp(join(src, fname), target)
def update_q(detectors_q, cost, r_detector_belief, r_accu_spam_beliefs, remain_reviews, lr1):
    """One gradient step on every detector weight q (updated in place).

    The gradient for detector d is the mean over the remaining reviews of
    -cost(review) * belief(review, d) * sigmoid(-accumulated_spam_belief),
    applied with learning rate lr1. Returns the (mutated) dict.
    """
    n_reviews = len(remain_reviews)
    for detector, weight in detectors_q.items():
        total = 0
        for review in remain_reviews:
            sig = expit(-r_accu_spam_beliefs[review])
            total += -1 * cost[review] * r_detector_belief[review][detector] * sig
        # Mean gradient over reviews, then a plain SGD step.
        detectors_q[detector] = weight - lr1 * (total / n_reviews)
    return detectors_q
class TFXGLMForCausalLM(metaclass=DummyObject):
    """Auto-generated placeholder: constructing it raises an informative error
    when the TensorFlow backend is not installed."""
    # Backends this dummy stands in for.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        # Raises unless the 'tf' backend is available.
        requires_backends(self, ['tf'])
class SyntheticImagesDdpmCIFAR10(torch.utils.data.Dataset):
    """Synthetic CIFAR-10 images stored in a flat uint8 binary file (HWC,
    3072 bytes per image) with labels in a separate .npy array."""

    def __init__(self, src, labels):
        # src: path to the raw image blob; labels: path to the .npy label file.
        self.src = src
        self.labels = np.load(labels)
        # Sample counts of the DDPM / IDDPM dumps (kept for reference).
        (self.nddpm, self.nIddpm) = (6002688, 9400000)

    def sample_image(self, df, idx):
        """Read image `idx` from the open binary file `df` and return a CHW
        float tensor scaled to [0, 1]."""
        df.seek(idx * 3072)  # 32 * 32 * 3 bytes per image
        image = np.array(np.frombuffer(df.read(3072), dtype='uint8').reshape(32, 32, 3))
        return (torch.from_numpy(image).permute(2, 0, 1).float() / 255.0)

    def __len__(self):
        # BUG FIX: this previously had a bare `return` (i.e. returned None),
        # which breaks len() and DataLoader. One label per stored image, so
        # the label array length is the dataset size.
        return len(self.labels)

    def __getitem__(self, idx):
        # The `with` block closes the file; the old explicit df.close() inside
        # it was redundant and has been removed.
        with open(self.src, 'rb') as df:
            img = self.sample_image(df, idx)
        label = self.labels[idx]
        return (img, label)
class VideoModelGlobalCoordLatent(nn.Module):
    """Action classifier combining a global I3D video feature with a latent
    per-object box-coordinate graph.

    Box coordinates (4 values per box over nr_frames // 2 frames) are embedded,
    fused with a learned box-category embedding, exchanged between objects via
    a mean "spatial message", aggregated over time per box, and mean-pooled
    over boxes. That coordinate feature is concatenated with the pooled I3D
    feature and classified by an MLP.
    """
    def __init__(self, opt):
        super(VideoModelGlobalCoordLatent, self).__init__()
        self.nr_boxes = opt.num_boxes
        self.nr_actions = opt.num_classes
        self.nr_frames = opt.num_frames
        self.img_feature_dim = opt.img_feature_dim
        self.coord_feature_dim = opt.coord_feature_dim
        # Global video backbone; returns (logits, conv feature volume).
        self.i3D = Net(self.nr_actions, extract_features=True, loss_type='softmax')
        self.dropout = nn.Dropout(0.3)
        self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
        # Channel reduction of the I3D feature volume: 2048 -> 512.
        self.conv = nn.Conv3d(2048, 512, kernel_size=(1, 1, 1), stride=1)
        # Box-category embedding (3 categories, index 0 is padding).
        self.category_embed_layer = nn.Embedding(3, (opt.coord_feature_dim // 2), padding_idx=0, scale_grad_by_freq=True)
        # Fuse coordinate feature with its category embedding.
        self.c_coord_category_fusion = nn.Sequential(nn.Linear((self.coord_feature_dim + (self.coord_feature_dim // 2)), self.coord_feature_dim, bias=False), nn.BatchNorm1d(self.coord_feature_dim), nn.ReLU(inplace=True))
        # Embed the raw (x, y, w, h)-style 4-vector per box per frame.
        self.c_coord_to_feature = nn.Sequential(nn.Linear(4, (self.coord_feature_dim // 2), bias=False), nn.BatchNorm1d((self.coord_feature_dim // 2)), nn.ReLU(inplace=True), nn.Linear((self.coord_feature_dim // 2), self.coord_feature_dim, bias=False), nn.BatchNorm1d(self.coord_feature_dim), nn.ReLU())
        # Fuse each box feature with the inter-object spatial message.
        self.c_spatial_node_fusion = nn.Sequential(nn.Linear((self.coord_feature_dim * 2), self.coord_feature_dim, bias=False), nn.BatchNorm1d(self.coord_feature_dim), nn.ReLU(inplace=True), nn.Linear(self.coord_feature_dim, self.coord_feature_dim, bias=False), nn.BatchNorm1d(self.coord_feature_dim), nn.ReLU())
        # Collapse the per-frame features of one box into a single vector.
        self.c_box_feature_fusion = nn.Sequential(nn.Linear(((self.nr_frames // 2) * self.coord_feature_dim), self.coord_feature_dim, bias=False), nn.BatchNorm1d(self.coord_feature_dim), nn.ReLU(inplace=True), nn.Linear(self.coord_feature_dim, self.coord_feature_dim, bias=False), nn.BatchNorm1d(self.coord_feature_dim), nn.ReLU())
        # Final classifier over [coord feature ; pooled global feature].
        self.classifier = nn.Sequential(nn.Linear((self.coord_feature_dim + (2 * self.img_feature_dim)), self.coord_feature_dim), nn.ReLU(inplace=True), nn.Linear(self.coord_feature_dim, 512), nn.ReLU(inplace=True), nn.Linear(512, self.nr_actions))
        # Optional restore paths; each also freezes part of the network.
        if opt.fine_tune:
            self.fine_tune(opt.fine_tune)
        if opt.restore_i3d:
            self.restore_i3d(opt.restore_i3d)
        if opt.restore_custom:
            self.restore_custom(opt.restore_custom)
    def train(self, mode=True):
        """Switch to train mode but keep every BatchNorm frozen in eval mode."""
        super(VideoModelGlobalCoordLatent, self).train(mode)
        for m in self.modules():
            if (isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm3d)):
                m.eval()
                m.weight.requires_grad = False
                m.bias.requires_grad = False
    def restore_custom(self, restore_path):
        """Load a checkpoint (stripping 'module.' prefixes) and freeze every
        parameter except the classifier head."""
        print('restoring path {}'.format(restore_path))
        weights = torch.load(restore_path)
        ks = list(weights.keys())
        print('\n\n BEFORE', weights[ks[0]][(0, 0, 0)])
        new_weights = {}
        for (k, v) in weights.items():
            # Checkpoints saved from DataParallel carry a 'module.' prefix.
            new_weights[k.replace('module.', '')] = v
        self.load_state_dict(new_weights, strict=False)
        print('\n\n AFTER', self.state_dict()[ks[0]][(0, 0, 0)])
        print('Num of weights in restore dict {}'.format(len(new_weights.keys())))
        frozen_weights = 0
        for (name, param) in self.named_parameters():
            if (not name.startswith('classifier')):
                param.requires_grad = False
                frozen_weights += 1
            else:
                print('Training : {}'.format(name))
        print('Number of frozen weights {}'.format(frozen_weights))
        assert (frozen_weights != 0), 'You are trying to fine tune, but no weights are frozen!!! Check the naming convention of the parameters'
    def restore_i3d(self, restore_path, parameters_to_train=['classifier']):
        """Load only the i3D weights from a checkpoint and freeze the i3D
        branch (including its BatchNorms).

        NOTE(review): `parameters_to_train` is currently unused.
        """
        weights = torch.load(restore_path)['state_dict']
        new_weights = {}
        for (k, v) in weights.items():
            if ('i3D' in k):
                new_weights[k.replace('module.', '')] = v
        self.load_state_dict(new_weights, strict=False)
        print('Num of weights in restore dict {}'.format(len(new_weights.keys())))
        for m in self.i3D.modules():
            if (isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm3d)):
                m.eval()
                m.weight.requires_grad = False
                m.bias.requires_grad = False
        frozen_weights = 0
        for (name, param) in self.named_parameters():
            if ('i3D' in name):
                param.requires_grad = False
                frozen_weights += 1
            else:
                print('Training : {}'.format(name))
        print('Number of frozen weights {}'.format(frozen_weights))
        assert (frozen_weights != 0), 'You are trying to fine tune, but no weights are frozen!!! Check the naming convention of the parameters'
    def fine_tune(self, restore_path, parameters_to_train=['classifier']):
        """Load a checkpoint and train only the final classifier layer.

        NOTE(review): `parameters_to_train` is currently unused.
        """
        weights = torch.load(restore_path)['state_dict']
        new_weights = {}
        for (k, v) in weights.items():
            # NOTE(review): `and 'i3D.classifier'` is a non-empty string and
            # therefore always truthy — the condition reduces to
            # `'classifier.4' not in k`. If the intent was to also skip
            # i3D.classifier weights, this should read
            # `('i3D.classifier' not in k)`. Confirm before changing.
            if ((not ('classifier.4' in k)) and 'i3D.classifier'):
                new_weights[k.replace('module.', '')] = v
        self.load_state_dict(new_weights, strict=False)
        print('Num of weights in restore dict {}'.format(len(new_weights.keys())))
        frozen_weights = 0
        for (name, param) in self.named_parameters():
            if (not ('classifier.4' in name)):
                param.requires_grad = False
                frozen_weights += 1
            else:
                print('Training : {}'.format(name))
        print('Number of frozen weights {}'.format(frozen_weights))
        assert (frozen_weights != 0), 'You are trying to fine tune, but no weights are frozen!!! Check the naming convention of the parameters'
    def forward(self, global_img_input, box_categories, box_input, video_label, is_inference=False):
        """Classify a clip from the global video tensor plus per-box inputs.

        NOTE(review): `video_label` and `is_inference` are accepted but unused
        here — presumably kept for interface parity with sibling models.
        """
        (bs, _, _, _, _) = global_img_input.shape
        (y_i3d, org_features) = self.i3D(global_img_input)
        videos_features = self.conv(org_features)
        b = bs
        # (b, 4, boxes, frames/2) -> flat (b * boxes * frames/2, 4)
        box_input = box_input.transpose(2, 1).contiguous()
        box_input = box_input.view(((b * self.nr_boxes) * (self.nr_frames // 2)), 4)
        box_categories = box_categories.long()
        box_categories = box_categories.transpose(2, 1).contiguous()
        box_categories = box_categories.view(((b * self.nr_boxes) * (self.nr_frames // 2)))
        box_category_embeddings = self.category_embed_layer(box_categories)
        # Embed coordinates and fuse them with the category embedding.
        bf = self.c_coord_to_feature(box_input)
        bf = torch.cat([bf, box_category_embeddings], dim=1)
        bf = self.c_coord_category_fusion(bf)
        bf = bf.view(b, self.nr_boxes, (self.nr_frames // 2), self.coord_feature_dim)
        # Mean feature of the OTHER boxes, exchanged as a spatial message.
        spatial_message = bf.sum(dim=1, keepdim=True)
        spatial_message = ((spatial_message - bf) / (self.nr_boxes - 1))
        bf_message_gf = torch.cat([bf, spatial_message], dim=3)
        bf_spatial = self.c_spatial_node_fusion(bf_message_gf.view(((b * self.nr_boxes) * (self.nr_frames // 2)), (- 1)))
        bf_spatial = bf_spatial.view(b, self.nr_boxes, (self.nr_frames // 2), self.coord_feature_dim)
        # Temporal aggregation per box, then mean over boxes.
        bf_temporal_input = bf_spatial.view(b, self.nr_boxes, ((self.nr_frames // 2) * self.coord_feature_dim))
        box_features = self.c_box_feature_fusion(bf_temporal_input.view((b * self.nr_boxes), (- 1)))
        coord_ft = torch.mean(box_features.view(b, self.nr_boxes, (- 1)), dim=1)
        # Spatially pool the reduced I3D features and average over time.
        _gf = videos_features.mean((- 1)).mean((- 1)).view(b, (self.nr_frames // 2), (2 * self.img_feature_dim))
        _gf = _gf.mean(1)
        video_features = torch.cat([_gf.view(b, (- 1)), coord_ft], dim=(- 1))
        cls_output = self.classifier(video_features)
        return cls_output
class Step3_DownloadText():
    """Pipeline step that persists the Gutenberg book texts, guarded by a
    done-marker so re-runs skip already completed work."""

    def __init__(self, GutenbergBookPersistenz: GutenbergBookPersistenz, savePath: str):
        self.GutenbergBookPersistenz = GutenbergBookPersistenz
        self.savePath = savePath

    def run(self):
        # DoneMarker only invokes `script` when no marker file exists yet.
        return DoneMarker(self.savePath).run(self.script)

    def script(self):
        # The actual work: write the books to disk.
        self.GutenbergBookPersistenz.save()
# BUG FIX: the route decorator had lost its "@app.route" prefix (a bare tuple
# expression above a def does nothing); restored here.
@app.route('/classify_url', methods=['GET'])
def classify_url():
    """Flask endpoint: fetch the image at ?imageurl=..., classify it, and
    render the result page (or an error page if the URL cannot be opened)."""
    imageurl = flask.request.args.get('imageurl', '')
    try:
        # NOTE: StringIO/urllib.urlopen is Python-2 style I/O, kept as-is.
        string_buffer = StringIO.StringIO(urllib.urlopen(imageurl).read())
        image = caffe.io.load_image(string_buffer)
    except Exception as err:
        logging.info('URL Image open error: %s', err)
        return flask.render_template('index.html', has_result=True, result=(False, 'Cannot open image from URL.'))
    logging.info('Image: %s', imageurl)
    result = app.clf.classify_image(image)
    return flask.render_template('index.html', has_result=True, result=result, imagesrc=imageurl)
def string_to_tree(string):
    """Convert a linearized node string into a brace-delimited tree string.

    Characters present in the IDCS mapping are internal nodes whose arity
    (looked up in IDCS) determines how many child brace pairs are opened;
    any other character is a leaf that closes the most recent open child
    slot. The result is wrapped in one extra outer pair of braces.
    """
    if ((string[0] in IDCS) and (len(string) != 1)):
        # Pending close/open braces, consumed right-to-left via pop().
        bracket_stack = []
        tree = []
        def add_brackets(num):
            # Queue the brace skeleton for a node with `num` children.
            if (num == 2):
                bracket_stack.extend(['}', '{', '}'])
            else:
                bracket_stack.extend(['}', '{', '}', '{', '}'])
        tree.append('{')
        global_just_put = '{'
        for c in string:
            tree.append(c)
            if (c in IDCS):
                # Internal node: must not directly follow a closed child slot.
                assert (global_just_put != '}')
                add_brackets(IDCS[c])
                global_just_put = '{'
            else:
                # Leaf: emit queued braces up to and including the next '{'.
                just_put = ''
                while ((just_put != '{') and bracket_stack):
                    just_put = bracket_stack.pop((- 1))
                    tree.append(just_put)
                global_just_put = just_put
        res = ''.join(tree)
        assert (res[(- 1)] == '}')
    else:
        # Single character (or the literal 'null'): a bare leaf.
        assert ((len(string) == 1) or (string == 'null'))
        res = string[0]
    return (('{' + res) + '}')
class FrameNetLoader(Loader):
    """Loader for FrameNet-style .bios files.

    Each non-blank line is tab-separated with the token in column 1, the
    frame label in column -3 and the element label in column -2; blank lines
    separate sentences. Sentences are keyed by their concatenated tokens, and
    a repeated sentence accumulates additional frame/element rows.
    """

    def __init__(self):
        super().__init__()
        # Label inventories accumulated across every _load() call.
        self.frame_set = set()
        self.element_set = set()

    def _load(self, path):
        """Parse one .bios file into {sentence-key: {'sentence', 'frames', 'elements'}}."""
        dataset = {}
        sentence = []
        frame = []
        element = []
        with open(path) as f:
            for line in f:
                if (len(line) == 0) or (line[0] == '\n'):
                    # BUG FIX: this used to test `len(words) > 0`, which raised
                    # NameError when the file began with a blank line and
                    # spuriously flushed empty records on consecutive blank
                    # lines; the current sentence is what decides a flush.
                    if len(sentence) > 0:
                        self._commit(dataset, sentence, frame, element)
                        sentence = []
                        frame = []
                        element = []
                    continue
                words = line.split('\t')
                sentence.append(words[1])
                # NOTE(review): `not in '_'` is a substring test; for the
                # expected single-character '_' placeholder it is equivalent
                # to `!= '_'`, so the original comparison is kept.
                if words[-3] not in '_':
                    frame.append(words[-3])
                    self.frame_set.add(words[-3])
                else:
                    frame.append('<unk>')
                element.append(words[-2])
                self.element_set.add(words[-2])
        # Flush the final sentence when the file does not end in a blank line.
        if len(sentence) > 0:
            self._commit(dataset, sentence, frame, element)
        return dataset

    @staticmethod
    def _commit(dataset, sentence, frame, element):
        """Append one annotation row for `sentence`, creating its entry if new."""
        key = ''.join(sentence)
        if key not in dataset:
            dataset[key] = {'sentence': sentence, 'frames': [], 'elements': []}
        dataset[key]['frames'].append(frame)
        dataset[key]['elements'].append(element)

    def load_all(self, path):
        """Load the train/dev/test splits found under `path`."""
        train_set = self._load(os.path.join(path, 'train.bios'))
        dev_set = self._load(os.path.join(path, 'dev.bios'))
        test_set = self._load(os.path.join(path, 'test.bios'))
        return (train_set, dev_set, test_set)

    def get_frame_labels(self):
        """Sorted list of every frame label encountered so far."""
        return sorted(self.frame_set)

    def get_element_labels(self):
        """Sorted list of every element label encountered so far."""
        return sorted(self.element_set)
def get_Future3D_visual_path(cfg: DictConfig, id: str) -> dict:
    """Return the {path, format} descriptor for a 3D-FUTURE model's image."""
    return {
        'path': os.path.join(cfg.data.future3d, id, 'image.jpg'),
        'format': 'future3d-jpg',
    }
class TimeCondition(AbstractCondition):
    """Condition that holds while the state clock has not passed `time`."""

    def __init__(self, time):
        super(TimeCondition, self).__init__()
        self.time = time

    def __call__(self, world, state, actor=None, prev_state=None):
        # True up to and including the configured time step.
        return state.t <= self.time

    def name(self):
        """Human-readable identifier including the threshold."""
        return 'time_condition(%d)' % self.time
class TCNForecaster(BasePytorchForecaster):
    """Forecaster wrapping a Temporal Convolutional Network (TCN).

    Collects the data/model/loss/optimizer configuration from the constructor
    arguments; the actual model creation is deferred to the
    BasePytorchForecaster machinery via the *_creator callables.
    """
    def __init__(self, past_seq_len, future_seq_len, input_feature_num, output_feature_num, dummy_encoder=False, num_channels=([16] * 3), kernel_size=3, normalization=True, decomposition_kernel_size=0, repo_initialization=True, dropout=0.1, optimizer='Adam', loss='mse', lr=0.001, metrics=['mse'], seed=None, distributed=False, workers_per_node=1, distributed_backend='ray'):
        # A dummy encoder passes features straight through, so the input and
        # output feature widths must agree.
        if dummy_encoder:
            invalidInputError((input_feature_num == output_feature_num), 'if dummy_encoder is set to True, then the model should have equal input_feature_num and output_feature_num.')
        # Shape-related settings consumed by the data pipeline.
        self.data_config = {'past_seq_len': past_seq_len, 'future_seq_len': future_seq_len, 'input_feature_num': input_feature_num, 'output_feature_num': output_feature_num}
        # Architecture hyperparameters of the TCN itself.
        self.model_config = {'num_channels': num_channels, 'kernel_size': kernel_size, 'repo_initialization': repo_initialization, 'dropout': dropout, 'seed': seed, 'normalization': normalization, 'decomposition_kernel_size': decomposition_kernel_size, 'dummy_encoder': dummy_encoder}
        self.loss_config = {'loss': loss}
        self.optim_config = {'lr': lr, 'optim': optimizer}
        self.model_creator = model_creator
        self.optimizer_creator = optimizer_creator
        if isinstance(loss, str):
            # Named loss: resolved by the shared loss factory.
            self.loss_creator = loss_creator
        else:
            # Custom loss object: wrap it so the trainer can "create" it.
            def customized_loss_creator(config):
                return config['loss']
            self.loss_creator = customized_loss_creator
        # Distributed-training settings.
        self.distributed = distributed
        self.remote_distributed_backend = distributed_backend
        self.local_distributed_backend = 'subprocess'
        self.workers_per_node = workers_per_node
        self.lr = lr
        self.metrics = metrics
        self.seed = seed
        # Thread/process planning based on the current torch thread budget.
        current_num_threads = torch.get_num_threads()
        self.thread_num = current_num_threads
        self.optimized_model_thread_num = current_num_threads
        if (current_num_threads >= 24):
            # Plenty of threads available: split local training into processes.
            self.num_processes = max(1, (current_num_threads // 8))
        else:
            self.num_processes = 1
        self.use_ipex = False
        # Capability flags read by the base class.
        self.onnx_available = True
        self.quantize_available = True
        self.checkpoint_callback = True
        self.use_hpo = True
        self.optimized_model_output_tensor = True
        super().__init__()
def siamese_model():
    """Build the two-branch (plate + car) siamese fusion network.

    Two shared-weight VGG towers embed the plate pair and the car pair; the
    per-pair L1 distances are concatenated and pushed through an MLP ending
    in a 2-way softmax.
    """
    plate_shape = (image_size_h_p, image_size_w_p, nchannels)
    car_shape = (image_size_h_c, image_size_w_c, nchannels)

    plate_left = Input(plate_shape)
    plate_right = Input(plate_shape)
    car_left = Input(car_shape)
    car_right = Input(car_shape)

    # One shared embedding network per modality.
    plate_net = small_vgg(plate_shape)
    car_net = small_vgg(car_shape)

    plate_dist = L1_layer([plate_net(plate_left), plate_net(plate_right)])
    car_dist = L1_layer([car_net(car_left), car_net(car_right)])
    merged = Concatenate()([plate_dist, car_dist])

    # Fusion MLP with dropout between the dense layers.
    x = Dropout(0.2)(Dense(1024)(merged))
    x = Dropout(0.2)(Dense(512)(x))
    x = Dropout(0.2)(Dense(256)(x))
    x = Activation('relu')(x)
    prediction = Dense(2, activation='softmax', name='fusion2_output')(x)

    model = Model(inputs=[plate_left, plate_right, car_left, car_right], outputs=prediction)
    model.compile(loss='binary_crossentropy', optimizer=Adam(0.001, decay=0.00025), metrics=['accuracy'])
    return model
def get_patch_embed(**kwargs) -> nn.Module:
    """Build a patch-embedding module.

    conv_type == 'identity' short-circuits to nn.Identity(); anything else is
    forwarded verbatim to PatchEmbed.
    """
    conv_type = kwargs['conv_type']
    if conv_type == 'identity':
        return nn.Identity()
    return PatchEmbed(**kwargs)
class TestTorchOP(unittest.TestCase):
    """Round-trip tests: trace a torch module, compile it to a Graph, reload
    the saved graph, and verify inference matches the traced model."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: unittest invokes setUpClass/tearDownClass on the class
        # object; as plain instance methods they raised TypeError (missing
        # `self`) before any test ran, so they must be classmethods.
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def _check_roundtrip(self, module):
        """Trace `module`, convert through Graph save/load, compare outputs."""
        example_in = torch.rand(3, 256)
        traced_model = torch.jit.trace(module, example_in)
        torch.jit.save(traced_model, '{}.pt'.format(file_name))
        ref_out = traced_model(example_in).detach().numpy()
        graph = compile('{}.pt'.format(file_name))
        graph.save(file_name)
        newgraph = Graph()
        newgraph.graph_init((file_name + '/conf.yaml'), (file_name + '/model.bin'))
        out = newgraph.inference([example_in.numpy()])
        np.testing.assert_almost_equal(ref_out, [*out.values()][0], decimal=5)
        os.remove('{}.pt'.format(file_name))

    def test_1(self):
        self._check_roundtrip(Net())

    def test_2(self):
        self._check_roundtrip(Net2())
        # Remove the saved graph directory after the final test.
        shutil.rmtree(file_name)
class SharedParamsModalityHallucinationModel(nn.Module):
    """Modality-hallucination network with a single shared conv trunk.

    RGB and hallucination branches both consume RGB conv features; the depth
    branch consumes the auxiliary modality (xstar). During training the
    hallucination branch's fc1 activations are regressed (squared error after
    sigmoid) toward the depth branch's.
    """

    def __init__(self, opt):
        super(SharedParamsModalityHallucinationModel, self).__init__()
        self.opt = opt
        # One conv trunk shared by every modality.
        self.conv = VGGTruncatedConv(opt)
        self.hallucination_classifier = VGGHallucinationClassifier(opt)
        self.rgb_classifier = VGGHallucinationClassifier(opt)
        self.depth_classifier = VGGHallucinationClassifier(opt)
        self.sigmoid = nn.Sigmoid()

    def forward(self, RGB_ims, xstar, train):
        rgb_feats = self.conv(RGB_ims)
        if not train:
            # Inference: just the logits of the two RGB-driven branches.
            (_, halluc_logits) = self.hallucination_classifier(rgb_feats)
            (_, rgb_logits) = self.rgb_classifier(rgb_feats)
            return (halluc_logits, rgb_logits)
        aux_feats = self.conv(xstar)
        (depth_fc1_act, depth_logits) = self.depth_classifier(aux_feats)
        (halluc_fc1_act, halluc_logits) = self.hallucination_classifier(rgb_feats)
        (_, rgb_logits) = self.rgb_classifier(rgb_feats)
        # Match hallucination activations to depth activations (post-sigmoid).
        diff = self.sigmoid(halluc_fc1_act) - self.sigmoid(depth_fc1_act)
        hallucination_loss = torch.pow(diff, 2).sum()
        return (halluc_fc1_act, halluc_logits, rgb_logits, depth_fc1_act, depth_logits, hallucination_loss)
def clean_up_tokenization(out_string):
    """Undo common tokenization artifacts.

    Re-attaches punctuation and contraction suffixes that the tokenizer split
    off as separate space-prefixed tokens, and rewrites " do not" as " don't".
    """
    # Applied in the same left-to-right order as the original chained calls.
    replacements = (
        (' .', '.'), (' ?', '?'), (' !', '!'), (' ,', ','),
        (" ' ", "'"), (" n't", "n't"), (" 'm", "'m"),
        (' do not', " don't"),
        (" 's", "'s"), (" 've", "'ve"), (" 're", "'re"),
    )
    for old, new in replacements:
        out_string = out_string.replace(old, new)
    return out_string
class FastExecutioner():
    """Batched executor for a set of node programs.

    Programs are topologically sorted, then executed level by level; at each
    level, nodes that invoke the same cell are grouped and their inputs
    concatenated along the batch axis so each cell runs once per group.
    """
    def __init__(self, progs, cells):
        self.cells = cells
        self.progs = progs
        self.sortProgs()
    def sortProgs(self):
        # Replace each program with its topologically sorted node list.
        for i in range(len(self.progs)):
            self.progs[i] = self.progs[i].topologicalSort()
    def execute(self):
        """Run all programs and return their final outputs batched together."""
        maxLen = max([len(e) for e in self.progs])
        for s in range(maxLen):
            # Collect the s-th node of every program that still has one.
            nodes = []
            for i in range(len(self.progs)):
                prog = self.progs[i]
                if (len(prog) <= s):
                    continue
                nodes += [prog[s]]
            # Group nodes by the cell they invoke.
            groupedNodes = {}
            for node in nodes:
                groupedNodes.setdefault(node.cellInd, []).append(node)
            for (cellInd, nodes) in groupedNodes.items():
                # All nodes in a group share the same cell, hence the same arity.
                arity = nodes[0].arity
                cell = self.cells[cellInd]
                outData = [node.inpData[0] for node in nodes]
                if (arity == 1):
                    # Unary cell: batch the inputs, split results back apart.
                    arg = t.cat(outData, 0)
                    outData = cell(arg)
                    outData = t.split(outData, 1, 0)
                elif (arity == 2):
                    # Binary cell: batch both argument lists in lockstep.
                    arg1 = t.cat(outData, 0)
                    arg2 = t.cat([node.inpData[1] for node in nodes], 0)
                    outData = cell(arg1, arg2)
                    outData = t.split(outData, 1, 0)
                for (node, outDat) in zip(nodes, outData):
                    if (node.prev is None):
                        # Root node: this is the program's final output.
                        node.outData = outDat
                    else:
                        # Forward the result as an input to the parent node.
                        node.prev.inpData += [outDat]
        outData = [prog[(- 1)].outData for prog in self.progs]
        return t.cat(outData, 0)
def is_video(ext: str):
    """Return True if `ext` (an extension or filename) ends with a known
    video file extension."""
    # str.endswith accepts a tuple of suffixes, so no any() loop is needed.
    allowed_exts = ('.mp4', '.webm', '.ogg', '.avi', '.wmv', '.mkv', '.3gp')
    return ext.endswith(allowed_exts)
class VGGTransformerModelTest_big(TestFairseqEncoderDecoderModelBase):
    """Smoke test for VGGTransformerModel with an enlarged encoder config."""

    def setUp(self):
        def override_config(args):
            # Three large encoder layers: 1024-dim, 16 heads, 4096 FFN.
            args.transformer_enc_config = '((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3'
        super().setUp()
        # Base preset first, then the size override on top of it.
        self.setUpModel(VGGTransformerModel, [vggtransformer_2, override_config])
        self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
class ShaConvBlock(nn.Module):
    """Conv + BatchNorm (+ optional activation) block whose convolution can be
    shared with other blocks via `shared_conv`.

    `activation` may be a factory callable (e.g. a lambda returning a module),
    the strings 'relu'/'relu6', or an already-instantiated module.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, activation=(lambda : nn.ReLU(inplace=True)), activate=True, shared_conv=None):
        super(ShaConvBlock, self).__init__()
        self.activate = activate
        if shared_conv is not None:
            # Reuse an externally supplied convolution (weight sharing).
            self.conv = shared_conv
        else:
            self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.bn = nn.BatchNorm2d(num_features=out_channels)
        if self.activate:
            assert activation is not None
            if isfunction(activation):
                # Factory callable: build the activation module now.
                self.activ = activation()
            elif isinstance(activation, str):
                if activation == 'relu':
                    self.activ = nn.ReLU(inplace=True)
                elif activation == 'relu6':
                    self.activ = nn.ReLU6(inplace=True)
                else:
                    raise NotImplementedError()
            else:
                # Already an activation module instance.
                self.activ = activation

    def forward(self, x):
        out = self.bn(self.conv(x))
        return self.activ(out) if self.activate else out
def test_elastic_net_coeffs():
    """Elastic-net coefficients must differ from both pure-L1 and pure-L2 fits."""
    (X, y) = make_classification(random_state=0)
    alpha = 2
    n_samples = 100
    lambda_1 = 1 / (n_samples * alpha)
    lambda_2 = 1 / (n_samples * alpha)
    coeffs = []
    for penalty in ('elasticnet', 'l1', 'l2'):
        if penalty in ['l1', 'l2']:
            # Pure penalties drop the secondary regularization term.
            lambda_2 = 0
        model = LogisticRegression(penalty=penalty, lambda_1=lambda_1, solver='qning-miso', random_state=0, lambda_2=lambda_2)
        model.fit(X, y)
        coeffs.append(model.coef_)
    (enet_coeffs, l1_coeffs, l2_coeffs) = coeffs
    # All three fits must be pairwise distinguishable.
    for a, b in ((enet_coeffs, l1_coeffs), (enet_coeffs, l2_coeffs), (l2_coeffs, l1_coeffs)):
        assert not np.allclose(a, b, rtol=0, atol=0.1)
class TFDebertaForQuestionAnswering(metaclass=DummyObject):
    """Auto-generated placeholder: constructing it raises an informative error
    when the TensorFlow backend is not installed."""
    # Backends this dummy stands in for.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        # Raises unless the 'tf' backend is available.
        requires_backends(self, ['tf'])
class ADALN1d(nn.Module):
    """Adaptive LayerNorm for 1-D feature maps.

    Layer-normalizes x over all non-batch dimensions, then modulates it with
    per-channel scale (gamma) and shift (beta) predicted from a conditioning
    feature vector.
    """

    def __init__(self, norm_nc, feature_nc):
        super().__init__()
        hidden_dim = 128
        with_bias = True
        # Shared trunk followed by two heads predicting the modulation params.
        self.mlp_shared = nn.Sequential(nn.Linear(feature_nc, hidden_dim, bias=with_bias), nn.ReLU())
        self.mlp_gamma = nn.Linear(hidden_dim, norm_nc, bias=with_bias)
        self.mlp_beta = nn.Linear(hidden_dim, norm_nc, bias=with_bias)

    def forward(self, x, feature):
        # Normalize over every non-batch dimension of x.
        norm_shape = x.size()[1:]
        hidden = self.mlp_shared(feature.view(feature.size(0), -1))
        # Trailing length-1 axis lets the params broadcast over positions.
        scale = self.mlp_gamma(hidden)[..., None]
        shift = self.mlp_beta(hidden)[..., None]
        return F.layer_norm(x, norm_shape) * (1 + scale) + shift
class SharedEncoder(super_sac.nets.Encoder):
    """Two-layer MLP encoder (dim -> 128 -> dim) over the 'obs' entry of an
    observation dict; the embedding dimension equals the input dimension."""

    def __init__(self, dim):
        super().__init__()
        self.fc0 = nn.Linear(dim, 128)
        self.fc1 = nn.Linear(128, dim)
        self._dim = dim

    def embedding_dim(self):
        # Output width equals the input width by construction.
        return self._dim

    def forward(self, obs_dict):
        hidden = F.relu(self.fc0(obs_dict['obs']))
        return F.relu(self.fc1(hidden))
def dummy_raw_polygon_masks(size):
    """Generate `num_obj` random polygons inside a (height, width) canvas.

    Each polygon is a single-component list holding one flat coordinate array
    with an even length between 6 and 14 (i.e. 3 to 7 points).
    """
    num_obj, height, width = size
    bound = min(height, width)
    masks = []
    for _ in range(num_obj):
        # Even coordinate count >= 6, so at least a triangle.
        n_coords = np.random.randint(5) * 2 + 6
        masks.append([np.random.uniform(0, bound, n_coords)])
    return masks
def test_seg_recognizer():
    """End-to-end check of SegRecognizer: required-component assertions, a
    forward-train pass, simple_test, and aug_test on dummy inputs."""
    tmp_dir = tempfile.TemporaryDirectory()
    dict_file = osp.join(tmp_dir.name, 'fake_chars.txt')
    _create_dummy_dict_file(dict_file)
    label_convertor = dict(type='SegConvertor', dict_file=dict_file, with_unknown=False)
    preprocessor = None
    backbone = dict(type='ResNet31OCR', layers=[1, 2, 5, 3], channels=[32, 64, 128, 256, 512, 512], out_indices=[0, 1, 2, 3], stage4_pool_cfg=dict(kernel_size=2, stride=2), last_stage_pool=True)
    neck = dict(type='FPNOCR', in_channels=[128, 256, 512, 512], out_channels=256)
    head = dict(type='SegHead', in_channels=256, upsample_param=dict(scale_factor=2.0, mode='nearest'))
    loss = dict(type='SegLoss', seg_downsample_ratio=1.0)
    # Every major component is mandatory; omitting any must raise.
    with pytest.raises(AssertionError):
        SegRecognizer(backbone=None)
    with pytest.raises(AssertionError):
        SegRecognizer(neck=None)
    with pytest.raises(AssertionError):
        SegRecognizer(head=None)
    with pytest.raises(AssertionError):
        SegRecognizer(loss=None)
    with pytest.raises(AssertionError):
        SegRecognizer(label_convertor=None)
    recognizer = SegRecognizer(preprocessor=preprocessor, backbone=backbone, neck=neck, head=head, loss=loss, label_convertor=label_convertor)
    recognizer.init_weights()
    recognizer.train()
    imgs = torch.rand(1, 3, 64, 256)
    # Backbone should yield a 4-level pyramid with halving spatial sizes.
    feats = recognizer.extract_feat(imgs)
    assert (len(feats) == 4)
    assert (feats[0].shape == torch.Size([1, 128, 32, 128]))
    assert (feats[1].shape == torch.Size([1, 256, 16, 64]))
    assert (feats[2].shape == torch.Size([1, 512, 8, 32]))
    assert (feats[3].shape == torch.Size([1, 512, 4, 16]))
    # Dummy attention/segmentation/mask targets packed as BitmapMasks.
    attn_tgt = np.zeros((64, 256), dtype=np.float32)
    segm_tgt = np.zeros((64, 256), dtype=np.float32)
    mask = np.zeros((64, 256), dtype=np.float32)
    gt_kernels = BitmapMasks([attn_tgt, segm_tgt, mask], 64, 256)
    img_metas = [{'text': 'hello', 'resize_shape': (64, 256, 3), 'valid_ratio': 1.0}]
    losses = recognizer.forward_train(imgs, img_metas, gt_kernels=[gt_kernels])
    assert isinstance(losses, dict)
    # Inference paths must produce per-image dicts with text and score.
    results = recognizer.simple_test(imgs, img_metas)
    assert isinstance(results, list)
    assert isinstance(results[0], dict)
    assert ('text' in results[0])
    assert ('score' in results[0])
    aug_results = recognizer.aug_test([imgs, imgs], [img_metas, img_metas])
    assert isinstance(aug_results, list)
    assert isinstance(aug_results[0], dict)
    assert ('text' in aug_results[0])
    assert ('score' in aug_results[0])
    tmp_dir.cleanup()
def check_kappa(kappa):
    """Sample 10k vMF draws at concentration `kappa` (dim = 2), report KL
    estimates, and compare the resulting angle histogram against uniform."""
    print(('Checking dim = 2, kappa = %f' % kappa))
    vmf_diff = VmfDiff(100, 100)
    dim = 2
    print(('KL Guu %f' % KL_guu(kappa, dim)))
    print(('KL Davidson %f' % KL_davidson(kappa, dim)))
    samples = []
    for _ in range(10000):
        result = vmf_diff.sample_cell(torch.tensor([[0.0, 1.0]]), norm=0.0, kappa=torch.tensor([kappa]))
        x = result.data[0][0][0]
        y = result.data[0][0][1]
        # BUG FIX: the previous quadrant if/elif chain left `angle_in_rads`
        # undefined whenever x or y was exactly 0. arctan2 mapped into
        # [0, 2*pi) reproduces the same value in all four open quadrants and
        # also covers points on the axes.
        angle_in_rads = (np.arctan2(y, x) % (2 * np.pi))
        samples.append(angle_in_rads.item())
    kl_histogram_vs_uniform(samples)
class ObjectListDataset(Dataset):
    """Dataset of object ids read from a JSON list file.

    Ids that already have a JSON summary file under ``<exp_dir>/summary``
    are filtered out, so only not-yet-processed objects are yielded.
    """

    def __init__(self, obj_list_path, exp_dir):
        super(ObjectListDataset, self).__init__()
        with open(obj_list_path, 'r') as fp:
            entries = json.load(fp)
        summary_dir = os.path.join(exp_dir, 'summary')
        # Ids considered "done": stems of the summary files already on disk.
        done = [os.path.splitext(name)[0]
                for name in os.listdir(summary_dir)
                if 'json' in name]
        self.obj_id_list = [entry['id'] for entry in entries if entry['id'] not in done]

    def __len__(self):
        return len(self.obj_id_list)

    def __getitem__(self, index):
        return self.obj_id_list[index]
def mol2graph(smiles_batch: List[str], args: Namespace) -> BatchMolGraph:
    """Convert a batch of SMILES strings into a single :class:`BatchMolGraph`.

    Previously-built graphs are reused from the global ``SMILES_TO_GRAPH``
    cache; newly built graphs are stored there unless ``args.no_cache`` is set.
    """
    graphs = []
    for smi in smiles_batch:
        if smi in SMILES_TO_GRAPH:
            graph = SMILES_TO_GRAPH[smi]
        else:
            graph = MolGraph(smi, args)
            if not args.no_cache:
                SMILES_TO_GRAPH[smi] = graph
        graphs.append(graph)
    return BatchMolGraph(graphs, args)
class MBartTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for mBART.

    Two deviations from plain sentencepiece:

    * fairseq id compatibility — ``<s>``/``<pad>``/``</s>``/``<unk>`` keep
      the fixed fairseq ids 0-3, and every sentencepiece id is shifted by
      ``fairseq_offset`` (= 1).
    * language codes — each entry of ``FAIRSEQ_LANGUAGE_CODES`` gets an id
      after the sentencepiece vocab, and encoded sequences are suffixed
      with ``</s> <lang_code>`` (no prefix tokens); see
      :meth:`set_src_lang_special_tokens` / :meth:`set_tgt_lang_special_tokens`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[(str, Any)]]=None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. absorbs the space before it.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=None, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Fixed fairseq ids for the core specials; all spm ids are shifted
        # by fairseq_offset so the two vocabularies line up.
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {code: ((self.sp_model_size + i) + self.fairseq_offset) for (i, code) in enumerate(FAIRSEQ_LANGUAGE_CODES)}
        self.id_to_lang_code = {v: k for (k, v) in self.lang_code_to_id.items()}
        # <mask> is the very last id, after all spm pieces and language codes.
        self.fairseq_tokens_to_ids['<mask>'] = ((len(self.sp_model) + len(self.lang_code_to_id)) + self.fairseq_offset)
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for (k, v) in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if (additional_special_tokens is not None):
            # Preserve caller order but skip duplicates already registered.
            self._additional_special_tokens.extend([t for t in additional_special_tokens if (t not in self._additional_special_tokens)])
        self._src_lang = (src_lang if (src_lang is not None) else 'en_XX')
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # SentencePieceProcessor is not picklable; ship the serialized proto instead.
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Backward compatibility with pickles created before sp_model_kwargs existed.
        if (not hasattr(self, 'sp_model_kwargs')):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        """Full vocab size: spm pieces + language codes + fairseq offset + 1 (<mask>)."""
        # BUG FIX: restored the @property decorator; without it
        # `range(self.vocab_size)` in get_vocab() receives a bound method.
        return (((len(self.sp_model) + len(self.lang_code_to_id)) + self.fairseq_offset) + 1)

    @property
    def src_lang(self) -> str:
        """Current source language code (e.g. 'en_XX')."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        # BUG FIX: the decorator line was corrupted to a bare `_lang.setter`
        # (a NameError at class-creation time). With @src_lang.setter
        # restored, `self.src_lang = ...` also refreshes the suffix tokens.
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = ([1] * len(self.prefix_tokens))
        suffix_ones = ([1] * len(self.suffix_tokens))
        if (token_ids_1 is None):
            return ((prefix_ones + ([0] * len(token_ids_0))) + suffix_ones)
        return (((prefix_ones + ([0] * len(token_ids_0))) + ([0] * len(token_ids_1))) + suffix_ones)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Add the mBART special tokens: ``tokens [tokens_1] </s> <lang_code>``."""
        if (token_ids_1 is None):
            return ((self.prefix_tokens + token_ids_0) + self.suffix_tokens)
        return (((self.prefix_tokens + token_ids_0) + token_ids_1) + self.suffix_tokens)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """mBART does not use token types, so the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Tokenize for translation and record the forced BOS language id."""
        if ((src_lang is None) or (tgt_lang is None)):
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token to its id, honoring fairseq specials and the offset."""
        if (token in self.fairseq_tokens_to_ids):
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # spm returns 0 for unknown pieces; map that to the unk id.
        return ((spm_id + self.fairseq_offset) if spm_id else self.unk_token_id)

    def _convert_id_to_token(self, index):
        """Convert an id to its token, honoring fairseq specials and the offset."""
        if (index in self.fairseq_ids_to_tokens):
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece((index - self.fairseq_offset))

    def convert_tokens_to_string(self, tokens):
        """Join subword pieces back into a plain string."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy (or re-serialize) the sentencepiece model into ``save_directory``."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if ((os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        elif (not os.path.isfile(self.vocab_file)):
            # Original file is gone (e.g. loaded from serialized proto): re-serialize.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str='en_XX', tgt_texts: Optional[List[str]]=None, tgt_lang: str='ro_RO', **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def as_target_tokenizer(self):
        """Context manager that temporarily switches to the target-language
        special tokens (used when encoding labels), restoring the source
        configuration on exit."""
        # BUG FIX: the original body was a bare generator (its decorator was
        # lost), so `with tokenizer.as_target_tokenizer():` failed. Wrapping
        # the generator locally restores context-manager behavior without
        # touching the (unseen) top-of-file imports.
        from contextlib import contextmanager

        @contextmanager
        def _switch():
            self.set_tgt_lang_special_tokens(self.tgt_lang)
            yield
            self.set_src_lang_special_tokens(self.src_lang)

        return _switch()

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Use no prefix and the ``[eos, src_lang_code]`` suffix."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Use no prefix and the ``[eos, tgt_lang_code]`` suffix."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
class ChatMessage(TypedDict):
    """Typed mapping describing one message in a chat exchange.

    NOTE(review): with TypedDict's default ``total=True``, every key here is
    *required*; ``Optional`` only allows ``None`` as a value, it does not
    make the key omittable — confirm that is the intended contract.
    """

    # Sender role (presumably 'system' / 'user' / 'assistant' — verify against callers).
    role: str
    # Message text content.
    content: str
    # Author name; None when unset.
    name: Optional[str]
    # Function-call payload attached to the message, if any.
    function_call: Optional[Dict]
def train_model(cfg, model, dataloaders, loss_fns, optimizer, start_epoch=0, num_epochs=250, save_epochs=25, scheduler=None, mlog=None, flog=None):
    """Run the train/validate loop for ``num_epochs`` epochs.

    Saves a checkpoint every ``save_epochs`` epochs (and at epoch 0), runs
    validation on the ``log_interval`` schedule, steps the LR scheduler on the
    validation loss when possible, then finishes with one final validation
    pass and a final checkpoint. Returns the accumulated list of background
    save/run context handles.
    """
    checkpoint_dir = os.path.join(cfg['saving']['log_dir'], cfg['saving']['save_dir'])
    run_kwargs = {'cfg': cfg, 'mlog': mlog, 'flog': flog, 'optimizer': optimizer, 'loss_fns': loss_fns, 'model': model, 'use_thread': cfg['saving']['in_background']}
    context = []
    log_interval = cfg['saving']['log_interval']
    # Intervals > 1 are whole-epoch counts; values <= 1 are kept as-is
    # (presumably meaning "validate every epoch" — confirm against config docs).
    log_interval = (int(log_interval) if (log_interval > 1) else log_interval)
    end_epoch = (start_epoch + num_epochs)
    print(f'training for {num_epochs} epochs')
    for epoch in range(start_epoch, end_epoch):
        torch.cuda.empty_cache()
        # Checkpoint at the very first epoch and on every save_epochs-th epoch.
        if ((epoch == 0) or ((epoch % save_epochs) == (save_epochs - 1))):
            context += save_checkpoint(model, optimizer, epoch, dataloaders, checkpoint_dir, use_thread=cfg['saving']['in_background'])
        should_run_validation = ((epoch == 0) or (log_interval <= 1) or ((epoch % log_interval) == (log_interval - 1)))
        if should_run_validation:
            assert math.isnan(mlog.peek_meter()['losses/total_0']), 'Loggers are not empty at the beginning of evaluation. Were training logs cleared?'
            (context1, loss_dict) = run_one_epoch(dataloader=dataloaders['val'], epoch=epoch, train=False, **run_kwargs)
            context += context1
        if (scheduler is not None):
            # ReduceLROnPlateau wants the validation metric; other schedulers
            # take no argument (TypeError). NameError covers the case where
            # validation has not run yet (resumed training off-schedule) and
            # KeyError a missing 'total' entry. BUG FIX: this used to be a
            # bare `except:`, which also swallowed KeyboardInterrupt and any
            # genuine error raised *inside* scheduler.step().
            try:
                scheduler.step(loss_dict['total'])
            except (TypeError, NameError, KeyError):
                scheduler.step()
        (context1, _) = run_one_epoch(dataloader=dataloaders['train'], epoch=(epoch + 1), train=True, **run_kwargs)
        context += context1
        post_training_epoch(dataloader=dataloaders['train'], epoch=epoch, **run_kwargs)
    # Final validation pass and checkpoint at end_epoch.
    (context1, _) = run_one_epoch(dataloader=dataloaders['val'], epoch=end_epoch, train=False, **run_kwargs)
    context += context1
    context += save_checkpoint(model, optimizer, end_epoch, dataloaders, checkpoint_dir, use_thread=cfg['saving']['in_background'])
    return context
class RandomHorizontalFlip(object):
    """Horizontally flip every frame of a clip with probability ``p``.

    A single coin toss covers the whole clip, so all frames stay consistent.
    """

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, frames):
        # Guard clause: with probability 1 - p the clip passes through untouched.
        if random.random() >= self.p:
            return frames
        return [F.hflip(frame) for frame in frames]

    def __repr__(self):
        return '{}(p={})'.format(self.__class__.__name__, self.p)
class NonLocalBlock2D(NonLocalBlockND):
    """2D specialization of the generic non-local block (``dimension=2``)."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
        super(NonLocalBlock2D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=2,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
        )
def White_Space_Remove_From_Word(word, filler_string=''):
    """Collapse all whitespace in ``word``, prefixing each remaining chunk
    with ``filler_string``.

    With the default empty filler this simply strips every whitespace
    character; with e.g. ``filler_string='-'`` the input ``'a b'`` yields
    ``'-a-b'`` (note the filler also precedes the first chunk, matching the
    original loop's behavior).
    """
    # str.split() already drops empty chunks, so the old per-chunk
    # `w.strip() == ''` check was dead code; join also replaces the
    # quadratic `+=` string concatenation.
    return ''.join(filler_string + chunk for chunk in word.split())
def create_temporary_vocab_file(words, counts=None):
    """Write ``words`` to a NamedTemporaryFile, one entry per line, and
    return the open file handle.

    When ``counts`` is given, each line is ``token<TAB>count``; the file
    is flushed but left open, so it lives until the handle is closed or
    garbage collected.
    """
    vocab_file = tempfile.NamedTemporaryFile()
    if counts is None:
        lines = ((token + '\n') for token in words)
    else:
        lines = ('{}\t{}\n'.format(token, count) for (token, count) in zip(words, counts))
    for line in lines:
        vocab_file.write(line.encode('utf-8'))
    vocab_file.flush()
    return vocab_file
def xception_featurize(file):
    """Featurize one image file with an ImageNet-pretrained Xception.

    Returns a tuple ``(features, labels)`` where ``features`` is the
    flattened model output and ``labels`` holds a matching
    ``'xception_feature_<i>'`` name per feature (1-based).
    """
    model = Xception(include_top=True, weights='imagenet')
    img = load_img(file, target_size=(299, 299))
    batch = preprocess_input(np.expand_dims(img_to_array(img), axis=0))
    features = np.ndarray.flatten(model.predict(batch))
    labels = [('xception_feature_%s' % str(idx + 1)) for idx in range(len(features))]
    return (features, labels)
def loss_G_fn(P, D, options, images, gen_images):
    """Generator loss on augmented generated images.

    Uses the non-saturating form ``mean(softplus(-D(G)))`` when
    ``options['loss'] == 'nonsat'``, otherwise the plain ``-mean(D(G))``.
    ``images`` is accepted but unused here.
    """
    d_gen = D(P.augment_fn(gen_images))
    if options['loss'] != 'nonsat':
        return -d_gen.mean()
    return F.softplus(-d_gen).mean()
def make_disc_backbones(configs, cfg):
    """Build one ProjectionDiscriminator per config tuple.

    Each entry of ``configs`` is ``(dim_in, dim_base, max_dim, num_layers,
    num_strides)``; the first seven discriminators get ``hw=169`` and the
    rest ``hw=144``.
    """
    backbones = []
    for idx, (dim_in, dim_base, max_dim, num_layers, num_strides) in enumerate(configs):
        backbones.append(ProjectionDiscriminator(
            dim_in=dim_in,
            dim_base=dim_base,
            max_dim=max_dim,
            num_layers=num_layers,
            num_strides=num_strides,
            dilate=False,
            no_out=False,
            cfg=cfg,
            hw=(169 if idx < 7 else 144),
        ))
    return backbones
class MultiCorpusDataset(FairseqDataset):
    """Concatenation of several FairseqDatasets sampled per a fixed distribution.

    Indices are global: dataset ``i`` owns the half-open range
    ``[dataset_offsets[i], dataset_offsets[i] + len(dataset_i))``, and
    ``_map_index`` translates a global index back to ``(local_index, key)``.
    Each epoch, :meth:`ordered_indices` draws (possibly with repetition)
    enough indices so dataset ``i`` contributes roughly ``distribution[i]``
    of ``total_num_instances`` samples.

    NOTE(review): ``self.epoch`` is only assigned in :meth:`set_epoch`, so
    :meth:`ordered_indices` assumes ``set_epoch`` has been called first —
    confirm the trainer guarantees that ordering.
    """

    def __init__(self, datasets: Dict[(str, FairseqDataset)], distribution: List[float], seed: int, sort_indices: bool=False, batch_sample: bool=False, distributed_rank: Optional[int]=None):
        super().__init__()
        # Ordering matters — it defines the per-dataset index offsets.
        assert isinstance(datasets, OrderedDict)
        assert (len(datasets) == len(distribution))
        # NOTE(review): exact float equality; a distribution like [1/3]*3
        # can fail this due to rounding — confirm intended.
        assert (sum(distribution) == 1)
        self.datasets = datasets
        self.distribution = distribution
        self.seed = seed
        self.sort_indices = sort_indices
        self.batch_sample = batch_sample
        self.distributed_rank = distributed_rank
        self.dataset_list = list(datasets.values())
        self.total_num_instances = 0
        first_dataset = self.dataset_list[0]
        self.num_instances_per_dataset = []
        self.dataset_offsets = []
        for (i, dataset) in enumerate(self.dataset_list):
            assert isinstance(dataset, FairseqDataset)
            # All sub-datasets must be the same concrete type so one
            # collater can handle samples from any of them.
            assert (type(dataset) is type(first_dataset))
            # Datasets with zero sampling weight contribute no index range.
            self.num_instances_per_dataset.append((0 if (self.distribution[i] == 0) else len(dataset)))
            self.dataset_offsets.append(self.total_num_instances)
            self.total_num_instances += self.num_instances_per_dataset[i]

    def ordered_indices(self):
        """Sample ``total_num_instances`` global indices according to
        ``distribution`` (deterministic per (seed, epoch)), then shuffle,
        optionally sorting by token count."""
        start = time.time()
        with data_utils.numpy_seed(self.seed, self.epoch):
            logger.info(f'sampling new dataset with seed {self.seed} epoch {self.epoch}')
            sampled_indices = []
            num_selected_instances = 0
            for (i, key) in enumerate(self.datasets):
                if (self.distribution[i] == 0):
                    continue
                if (i < (len(self.datasets) - 1)):
                    num_instances = int((self.distribution[i] * self.total_num_instances))
                    high = self.dataset_offsets[(i + 1)]
                else:
                    # Last dataset absorbs the rounding remainder so the
                    # totals add up exactly.
                    num_instances = (self.total_num_instances - num_selected_instances)
                    high = self.total_num_instances
                logger.info(f'sampling {num_instances} from {key} dataset')
                num_selected_instances += num_instances
                dataset_size = len(self.datasets[key])
                # When more instances are requested than the dataset holds,
                # repeat the whole dataset num_copies times and top up with
                # a random permutation slice for the remainder.
                num_copies = (num_instances // dataset_size)
                dataset_indices = (np.random.permutation((high - self.dataset_offsets[i])) + self.dataset_offsets[i])[:(num_instances - (num_copies * dataset_size))]
                if (num_copies > 0):
                    sampled_indices += list(np.concatenate((np.repeat(np.arange(self.dataset_offsets[i], high), num_copies), dataset_indices)))
                else:
                    sampled_indices += list(dataset_indices)
            assert (len(sampled_indices) == self.total_num_instances), f'{len(sampled_indices)} vs {self.total_num_instances}'
            np.random.shuffle(sampled_indices)
            if self.sort_indices:
                sampled_indices.sort(key=(lambda i: self.num_tokens(i)))
            logger.info('multi_corpus_dataset ordered_indices took {}s'.format((time.time() - start)))
            return np.array(sampled_indices, dtype=np.int64)

    def _map_index(self, index: int):
        """Translate a global index into ``(local_index, dataset_key)``."""
        counter = 0
        for (num_instances, key) in zip(self.num_instances_per_dataset, self.datasets):
            if (index < (counter + num_instances)):
                return ((index - counter), key)
            counter += num_instances
        raise ValueError('Invalid index: {}, max: {}'.format(index, self.total_num_instances))

    def __len__(self):
        # Length is the sum of all (weighted-in) sub-dataset sizes, not the
        # per-epoch sample count.
        return self.total_num_instances

    def __getitem__(self, index):
        (new_index, key) = self._map_index(index)
        try:
            item = self.datasets[key][new_index]
            # Stash the global id so collater() can recover the source dataset.
            item['full_id'] = index
            return item
        except Exception as e:
            # Prepend provenance so failures are attributable to a dataset.
            e.args = (f'Error from {key} dataset', *e.args)
            raise

    def collater(self, samples):
        """Collate with the source dataset's collater.

        All sub-datasets share one concrete type (asserted in __init__), so
        any single dataset's collater can batch the samples.
        """
        if (len(samples) == 0):
            return None
        if ('full_id' in samples[0]):
            (_, key) = self._map_index(samples[0]['full_id'])
            try:
                batch = self.datasets[key].collater(samples)
            except Exception:
                print(f'Collating failed for key {key}', flush=True)
                raise
            return batch
        else:
            # No provenance available: fall back to the first dataset's collater.
            return list(self.datasets.values())[0].collater(samples)

    def num_tokens(self, index: int):
        (index, key) = self._map_index(index)
        return self.datasets[key].num_tokens(index)

    def size(self, index: int):
        (index, key) = self._map_index(index)
        return self.datasets[key].size(index)

    # NOTE(review): in the upstream FairseqDataset API the next three are
    # @property accessors; the decorators appear to have been stripped in
    # this copy — confirm against the installed fairseq version.
    def can_reuse_epoch_itr_across_epochs(self):
        # Each epoch re-samples indices, so the iterator cannot be reused.
        return False

    def set_epoch(self, epoch, **unused):
        super().set_epoch(epoch)
        logger.info(f'setting epoch of multi_corpus_dataset to {epoch}')
        self.epoch = epoch

    def supports_prefetch(self):
        return False

    def supports_fetch_outside_dataloader(self):
        return all((self.datasets[key].supports_fetch_outside_dataloader for key in self.datasets))

    def batch_by_size(self, indices, max_tokens=None, max_sentences=None, required_batch_size_multiple=1):
        """Batch indices; with ``batch_sample`` set, batch each sub-dataset
        separately (so no batch mixes corpora) and shuffle the batch order
        per (seed, epoch, rank)."""
        if (not self.batch_sample):
            return super().batch_by_size(indices, max_tokens, max_sentences, required_batch_size_multiple)
        dataset_indices = {key: [] for key in self.datasets}
        for i in indices:
            (_, key) = self._map_index(i)
            dataset_indices[key].append(i)
        batches = []
        for key in dataset_indices:
            cur_batches = super().batch_by_size(np.array(dataset_indices[key], dtype=np.int64), max_tokens, max_sentences, required_batch_size_multiple)
            logger.info(f'Created {len(cur_batches)} batches for dataset {key}')
            batches += cur_batches
        if (self.distributed_rank is not None):
            # Rank-dependent shuffle so workers do not all start on the same corpus.
            with data_utils.numpy_seed(self.seed, self.epoch, self.distributed_rank):
                np.random.shuffle(batches)
        return batches
class FlaxXLMRobertaForMultipleChoice(metaclass=DummyObject):
    """Auto-generated placeholder used when the 'flax' backend is unavailable."""

    # Backend(s) this dummy stands in for; checked on instantiation.
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # Presumably raises with an install hint when flax is missing — see
        # requires_backends in the utils module.
        requires_backends(self, ['flax'])
def OutputCtm(utterance_id, edits_array, ctm_array):
    """Print one ctm-edits line per entry to the global ``ctm_edits_out`` file.

    ``edits_array`` holds ``(hyp_word, ref_word)`` pairs and ``ctm_array``
    the matching ``(start_time, duration, hyp_word, confidence)`` tuples;
    the two must agree element-wise on the hypothesis word, otherwise the
    program aborts.
    """
    global ctm_edits_out
    assert (len(edits_array) == len(ctm_array))
    channel = '1'
    for (edit, ctm) in zip(edits_array, ctm_array):
        (hyp_word, ref_word) = edit
        (start_time, duration, hyp_word2, confidence) = ctm
        if hyp_word != hyp_word2:
            # Mismatched inputs are a fatal error.
            print('Error producing output CTM for edit = {0} and ctm = {1}'.format(edit, ctm), file=sys.stderr)
            sys.exit(1)
        assert (hyp_word == hyp_word2)
        edit_type = GetEditType(hyp_word, ref_word, duration)
        print(utterance_id, channel, FloatToString(start_time), FloatToString(duration), hyp_word, confidence, ref_word, edit_type, file=ctm_edits_out)
class Subtokenizer(object):
    """Encodes/decodes strings to and from integer subtoken ids.

    The vocabulary is a list of subtoken strings loaded from ``vocab_file``;
    encoding splits each whitespace token greedily into known subtokens.
    """

    def __init__(self, vocab_file, reserved_tokens=None):
        """Load the subtoken vocabulary from ``vocab_file`` and build lookup tables."""
        tf.compat.v1.logging.info(('Initializing Subtokenizer from file %s.' % vocab_file))
        if (reserved_tokens is None):
            reserved_tokens = RESERVED_TOKENS
        self.subtoken_list = _load_vocab_file(vocab_file, reserved_tokens)
        self.alphabet = _generate_alphabet_dict(self.subtoken_list)
        self.subtoken_to_id_dict = _list_to_index_dict(self.subtoken_list)
        # Longest subtoken bounds the greedy-matching window used by
        # _split_token_to_subtokens.
        self.max_subtoken_length = 0
        for subtoken in self.subtoken_list:
            self.max_subtoken_length = max(self.max_subtoken_length, len(subtoken))
        # Fixed-size direct-mapped cache of token -> subtoken-id list; a hash
        # collision simply overwrites the slot.
        self._cache_size = (2 ** 20)
        self._cache = ([(None, None)] * self._cache_size)

    # NOTE(review): takes no `self` and returns a new Subtokenizer —
    # presumably decorated @staticmethod upstream (decorator stripped in
    # this copy); call it via the class, not an instance.
    def init_from_files(vocab_file, files, target_vocab_size, threshold, min_count=None, file_byte_limit=1000000.0, reserved_tokens=None):
        """Create a vocab file from ``files`` (unless it exists) and return a Subtokenizer."""
        if (reserved_tokens is None):
            reserved_tokens = RESERVED_TOKENS
        if tf.io.gfile.exists(vocab_file):
            tf.compat.v1.logging.info(('Vocab file already exists (%s)' % vocab_file))
        else:
            tf.compat.v1.logging.info('Begin steps to create subtoken vocabulary...')
            token_counts = _count_tokens(files, file_byte_limit)
            alphabet = _generate_alphabet_dict(token_counts)
            subtoken_list = _generate_subtokens_with_target_vocab_size(token_counts, alphabet, target_vocab_size, threshold, min_count, reserved_tokens)
            tf.compat.v1.logging.info(('Generated vocabulary with %d subtokens.' % len(subtoken_list)))
            mlperf_log.transformer_print(key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(subtoken_list))
            _save_vocab_file(vocab_file, subtoken_list)
        return Subtokenizer(vocab_file)

    def encode(self, raw_string, add_eos=False):
        """Encode a string into a list of subtoken ids, optionally appending EOS."""
        ret = []
        tokens = _split_string_to_tokens(_native_to_unicode(raw_string))
        for token in tokens:
            ret.extend(self._token_to_subtoken_ids(token))
        if add_eos:
            ret.append(EOS_ID)
        return ret

    def _token_to_subtoken_ids(self, token):
        """Encode one whitespace-delimited token, using the direct-mapped cache."""
        cache_location = (hash(token) % self._cache_size)
        (cache_key, cache_value) = self._cache[cache_location]
        if (cache_key == token):
            return cache_value
        # Note: despite the loop-variable name below, _split_token_to_subtokens
        # returns subtoken *strings*, which are then mapped to ids.
        ret = _split_token_to_subtokens(_escape_token(token, self.alphabet), self.subtoken_to_id_dict, self.max_subtoken_length)
        ret = [self.subtoken_to_id_dict[subtoken_id] for subtoken_id in ret]
        self._cache[cache_location] = (token, ret)
        return ret

    def decode(self, subtokens):
        """Decode a list (or ndarray) of subtoken ids back into a native string."""
        if isinstance(subtokens, np.ndarray):
            subtokens = subtokens.tolist()
        if (not subtokens):
            return ''
        assert (isinstance(subtokens, list) and isinstance(subtokens[0], int)), 'Subtokens argument passed into decode() must be a list of integers.'
        return _unicode_to_native(_join_tokens_to_string(self._subtoken_ids_to_tokens(subtokens)))

    def _subtoken_ids_to_tokens(self, subtokens):
        """Map subtoken ids back to unescaped token strings (out-of-range ids are dropped)."""
        escaped_tokens = ''.join([self.subtoken_list[s] for s in subtokens if (s < len(self.subtoken_list))])
        # '_' marks token boundaries in the escaped representation.
        escaped_tokens = escaped_tokens.split('_')
        ret = []
        for token in escaped_tokens:
            if token:
                ret.append(_unescape_token(token))
        return ret
class CategoricalMLPModel(MLPModel):
    """MLP whose softmax output parameterizes a OneHotCategorical distribution.

    The base MLP is always built with a softmax output nonlinearity; the
    user-supplied ``output_nonlinearity`` (if any) is applied on top of the
    resulting probabilities before the distribution is constructed.
    """

    def __init__(self, output_dim, name=None, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), output_nonlinearity=None, output_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), output_b_init=tf.zeros_initializer(), layer_normalization=False):
        # Force softmax at the MLP head; keep the caller's nonlinearity for _build.
        super().__init__(output_dim, name, hidden_sizes, hidden_nonlinearity, hidden_w_init, hidden_b_init, tf.nn.softmax, output_w_init, output_b_init, layer_normalization)
        self._output_normalization_fn = output_nonlinearity

    def network_output_spec(self):
        """Name(s) of the network outputs."""
        return ['dist']

    def _build(self, state_input, name=None):
        """Build the MLP and wrap its probabilities in a OneHotCategorical."""
        probs = super()._build(state_input, name=name)
        if self._output_normalization_fn:
            probs = self._output_normalization_fn(probs)
        return tfp.distributions.OneHotCategorical(probs=probs)
# Non-source residue captured from a dataset-viewer web page (kept verbatim):
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.