code stringlengths 101 5.91M |
|---|
def parse_args():
    """Parse command-line options for video-classification train/test runs.

    Returns:
        argparse.Namespace: parsed options (mode, model, dataset, optimizer
        hyper-parameters, checkpoint/log paths, batch and worker settings).
    """
    parser = argparse.ArgumentParser(description='Video Classification')
    # (flag, type, default, help) — compact table form of the option list.
    specs = [
        ('--mode', str, 'test', 'train/test'),
        ('--model', str, 'r3d', 'c3d/r3d/r21d'),
        ('--dataset', str, 'ucf101', 'ucf101/hmdb51'),
        ('--split', str, '1', 'dataset split'),
        ('--cl', int, 16, 'clip length'),
        ('--gpu', int, 0, 'GPU id'),
        ('--lr', float, 1e-05, 'learning rate'),
        ('--momentum', float, 0.9, 'momentum'),
        ('--wd', float, 0.0005, 'weight decay'),
        ('--log', str, None, 'log directory'),
        ('--ckpt', str, 'log/UCF101_TCGL_split1_112_acc_finetuned_r3d_cl16_/best_model_144.pt', 'checkpoint path'),
        ('--desp', str, None, 'additional description'),
        ('--epochs', int, 20, 'number of total epochs to run'),
        ('--start-epoch', int, 1, 'manual epoch number (useful on restarts)'),
        ('--bs', int, 16, 'mini-batch size'),
        ('--workers', int, 4, 'number of data loading workers'),
        ('--pf', int, 100, 'print frequency every batch'),
        ('--seed', int, 632, 'seed for initializing training.'),
    ]
    for flag, typ, default, text in specs:
        parser.add_argument(flag, type=typ, default=default, help=text)
    return parser.parse_args()
_model
def ig_resnext101_32x48d(pretrained=True, **kwargs):
    """ResNeXt-101 32x48d model constructor.

    Args:
        pretrained: load pretrained weights when True.
        **kwargs: extra keyword arguments forwarded to the model factory
            (duplicating a fixed key raises TypeError, as before).
    """
    arch_kwargs = dict(
        block=Bottleneck,
        layers=[3, 4, 23, 3],
        cardinality=32,
        base_width=48,
        **kwargs,
    )
    return _create_resnet('ig_resnext101_32x48d', pretrained, **arch_kwargs)
def generate_data_ratio(rows):
    """Generate a random ratio-regression dataset.

    Each sample is ``x = [a, b]`` with ``a ~ U(0, 1)`` and ``b ~ U(0.01, 1)``
    (the lower bound keeps the denominator away from zero); the target is
    ``y = a / b``.

    Args:
        rows: number of samples to generate (0 yields empty arrays).

    Returns:
        Tuple ``(x, y)`` of float32 arrays with shapes ``(rows, 2)`` and
        ``(rows,)``.
    """
    x_array = []
    y_array = []
    # NOTE: the original wrapped the division in try/except
    # (ValueError, ZeroDivisionError), but the denominator is drawn from
    # U(0.01, 1) and can never be zero, so the handler was dead code.
    for _ in range(rows):
        x = [np.random.uniform(0, 1), np.random.uniform(0.01, 1)]
        x_array.append(x)
        y_array.append(x[0] / x[1])
    return (np.array(x_array, dtype=np.float32), np.array(y_array, dtype=np.float32))
def create_reverse_dependency_tree():
    """Collect all (dependency, dependent) edges across source and test files.

    Returns:
        List of pairs where the first element is a file the second element
        depends on, covering ``src/transformers`` modules first and the
        ``tests`` folder second.
    """
    repo = Path(PATH_TO_TRANFORMERS)
    modules = [str(f.relative_to(repo)) for f in (repo / 'src/transformers').glob('**/*.py')]
    tests = [str(f.relative_to(repo)) for f in (repo / 'tests').glob('**/*.py')]
    edges = [(dep, mod) for mod in modules for dep in get_module_dependencies(mod)]
    edges += [(dep, test) for test in tests for dep in get_test_dependencies(test)]
    return edges
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return ((1.0 + math.erf((x / math.sqrt(2.0)))) / 2.0)
if ((mean < (a - (2 * std))) or (mean > (b + (2 * std)))):
warnings.warn('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.', stacklevel=2)
with torch.no_grad():
l = norm_cdf(((a - mean) / std))
u = norm_cdf(((b - mean) / std))
tensor.uniform_(((2 * l) - 1), ((2 * u) - 1))
tensor.erfinv_()
tensor.mul_((std * math.sqrt(2.0)))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor |
class ROIAlign(nn.Module):
    """Region-of-Interest Align layer.

    Thin module wrapper over the ``roi_align`` op: crops and resamples the
    input feature map for each RoI into a fixed ``output_size`` grid.

    Args:
        output_size: spatial size of the pooled output.
        spatial_scale: factor mapping RoI coordinates to feature-map scale.
        sampling_ratio: number of sampling points per output bin.
    """

    def __init__(self, output_size, spatial_scale, sampling_ratio):
        super(ROIAlign, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio

    # NOTE: a stray bare `_function` token (a garbled decorator remnant)
    # stood here in the original and raised NameError at class creation;
    # it has been removed.
    def forward(self, input, rois):
        return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio)

    def __repr__(self):
        tmpstr = (self.__class__.__name__ + '(')
        tmpstr += ('output_size=' + str(self.output_size))
        tmpstr += (', spatial_scale=' + str(self.spatial_scale))
        tmpstr += (', sampling_ratio=' + str(self.sampling_ratio))
        tmpstr += ')'
        return tmpstr
class TripletNet(nn.Module):
    """Siamese-style wrapper applying one shared embedding network to a
    triplet of inputs (e.g. anchor/positive/negative)."""

    def __init__(self, embedding_net):
        super(TripletNet, self).__init__()
        self.embedding_net = embedding_net

    def forward(self, x1, x2, x3):
        """Embed all three inputs with the shared network.

        Returns:
            Tuple of the three embeddings, in input order.
        """
        return tuple(self.embedding_net(x) for x in (x1, x2, x3))

    def get_embedding(self, x):
        """Embed a single input."""
        return self.embedding_net(x)
def get_parser():
    """Build the argument parser for the Cumulative Reasoning runner.

    Returns:
        argparse.ArgumentParser: configured parser (not yet invoked).
    """
    parser = argparse.ArgumentParser(description='Cumulative Reasoning')
    parser.add_argument('--temperature', type=float, default=0.1, help='temperature')
    count_range = range(0, 21)  # shared bound for the small count options
    parser.add_argument('--propnum', type=int, choices=count_range, default=2, help='numbers of props')
    parser.add_argument('--reasoningnum', type=int, choices=count_range, default=16, help='numbers of reasoning, when > 1, majority voting is used')
    parser.add_argument('--choices', type=int, choices=count_range, default=5, help='numbers of premises to be chosen')
    parser.add_argument('--trycnt', type=int, choices=range(1, 1001), default=16, help='numbers of try times')
    parser.add_argument('--exploration_prob', type=float, default=1.0, help='exploration probability')
    # ast.literal_eval lets callers pass Python literals (True/False) on the CLI.
    parser.add_argument('--verified_reasoning', type=ast.literal_eval, default=False, help='self verified reasoning, may not work well for small models')
    parser.add_argument('--model', type=str, default='gpt-4', help='model to use')
    parser.add_argument('--dataset', type=str, default='data/folio/folio-wiki.jsonl', help='dataset to use')
    parser.add_argument('--verbose', type=ast.literal_eval, default=True, help='verbose mode')
    return parser
def read_labeled_image_list(image_list_file, isSkip=True):
    """Read an image-pair list file into two path lists plus labels.

    Each parsed line is expected to look like ``"<file1> <file2> <label>"``;
    both file names are prefixed with ``FLAGS.training_img_dir``.

    Args:
        image_list_file: path of the text list file.
        isSkip: when True, start at line index 2 and take every 25th line
            (sub-sampling the list); when False, read every line.

    Returns:
        Tuple ``(pair1, pair2, labels)`` — two lists of image paths and an
        int numpy array of labels.
    """
    # Context manager so the handle is closed even on parse errors
    # (the original never closed the file).
    with open(image_list_file, 'r') as f:
        content = f.readlines()
    start, step = (2, 25) if isSkip else (0, 1)
    pair1 = []
    pair2 = []
    labels = []
    for i in range(start, len(content), step):
        # rstrip('\n') instead of line[:-1]: the old slicing chopped the
        # final character of a last line that had no trailing newline.
        (fn1, fn2, label) = content[i].rstrip('\n').split(' ')
        pair1.append(FLAGS.training_img_dir + fn1)
        pair2.append(FLAGS.training_img_dir + fn2)
        labels.append(int(label))
    return (pair1, pair2, np.asarray(labels))
def modularize(f):
    """Wrap a plain callable ``f`` in an ``nn.Module`` so it can be used
    wherever a module is expected (e.g. inside ``nn.Sequential``)."""
    class Transform(nn.Module):
        def __init__(self, fn):
            super(Transform, self).__init__()
            self.f = fn

        def forward(self, x):
            # Delegate straight to the wrapped callable.
            return self.f(x)

    return Transform(f)
def bbox_ious(boxes1, boxes2, x1y1x2y2=True):
    """Element-wise IoU between two sets of boxes.

    Args:
        boxes1, boxes2: tensors whose first dimension holds the 4 box
            coordinates — corners (x1, y1, x2, y2) when ``x1y1x2y2`` is
            True, otherwise centers and sizes (cx, cy, w, h).
        x1y1x2y2: coordinate-format flag.

    Returns:
        Tensor of IoU values (intersection / union), zero for
        non-overlapping pairs.
    """
    if x1y1x2y2:
        # Corner format: sizes come straight from the corner differences.
        w1, h1 = boxes1[2] - boxes1[0], boxes1[3] - boxes1[1]
        w2, h2 = boxes2[2] - boxes2[0], boxes2[3] - boxes2[1]
        mx = torch.min(boxes1[0], boxes2[0])
        Mx = torch.max(boxes1[2], boxes2[2])
        my = torch.min(boxes1[1], boxes2[1])
        My = torch.max(boxes1[3], boxes2[3])
    else:
        # Center format: derive extremes via half-sizes.
        w1, h1 = boxes1[2], boxes1[3]
        w2, h2 = boxes2[2], boxes2[3]
        mx = torch.min(boxes1[0] - (w1 / 2.0), boxes2[0] - (w2 / 2.0))
        Mx = torch.max(boxes1[0] + (w1 / 2.0), boxes2[0] + (w2 / 2.0))
        my = torch.min(boxes1[1] - (h1 / 2.0), boxes2[1] - (h2 / 2.0))
        My = torch.max(boxes1[1] + (h1 / 2.0), boxes2[1] + (h2 / 2.0))
    # Intersection size per axis by inclusion-exclusion against the
    # bounding box of the union.
    cw = (w1 + w2) - (Mx - mx)
    ch = (h1 + h2) - (My - my)
    carea = cw * ch
    # Pairs separated on either axis do not intersect at all.
    no_overlap = (((cw <= 0) + (ch <= 0)) > 0)
    carea[no_overlap] = 0
    uarea = (w1 * h1) + (w2 * h2) - carea
    return carea / uarea
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """Decode an audio byte payload to a mono float32 waveform via ffmpeg.

    Note: the return annotation was ``np.array`` (a function, not a type);
    it is corrected to ``np.ndarray``.

    Args:
        bpayload: raw audio file bytes (any container/codec ffmpeg accepts).
        sampling_rate: target sampling rate to resample to.

    Returns:
        1-D float32 numpy array of samples.

    Raises:
        ValueError: if ffmpeg is not installed, or the payload decodes to
            zero samples (malformed input).
    """
    ar = f'{sampling_rate}'
    ac = '1'  # downmix to mono
    format_for_conversion = 'f32le'  # raw little-endian float32 → np.float32
    ffmpeg_command = ['ffmpeg', '-i', 'pipe:0', '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-hide_banner', '-loglevel', 'quiet', 'pipe:1']
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio
def test_fpn():
    """Exercise the FPN neck: constructor validation, every
    ``add_extra_convs`` mode, norm/upsample configurations, and output
    shapes."""
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [(s // (2 ** i)) for i in range(4)]
    out_channels = 8

    def _check_outputs(model, outs):
        # The original evaluated these comparisons as bare expressions with
        # no `assert`, so they never checked anything.  With start_level=1
        # the i-th output has spatial size s / 2**(i + 1) (the original
        # no-op expressions also used the wrong exponent, s / 2**i).
        assert len(outs) == model.num_outs
        for i in range(model.num_outs):
            assert outs[i].shape[1] == out_channels
            assert outs[i].shape[2] == outs[i].shape[3] == s // (2 ** (i + 1))

    # `num_outs` must be consistent with start_level/end_level.
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels, out_channels=out_channels, start_level=1, num_outs=2)
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels, out_channels=out_channels, start_level=1, end_level=4, num_outs=2)
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels, out_channels=out_channels, start_level=1, end_level=3, num_outs=1)
    # Invalid `add_extra_convs` string must be rejected.
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels, out_channels=out_channels, start_level=1, add_extra_convs='on_xxx', num_outs=5)

    feats = [torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) for i in range(len(in_channels))]

    # Legacy `add_extra_convs=True` maps to 'on_input'.
    fpn_model = FPN(in_channels=in_channels, out_channels=out_channels, start_level=1, add_extra_convs=True, num_outs=5)
    outs = fpn_model(feats)
    assert fpn_model.add_extra_convs == 'on_input'
    _check_outputs(fpn_model, outs)

    # No extra convs at all.
    fpn_model = FPN(in_channels=in_channels, out_channels=out_channels, start_level=1, add_extra_convs=False, num_outs=5)
    outs = fpn_model(feats)
    assert not fpn_model.add_extra_convs
    _check_outputs(fpn_model, outs)

    # Normalization on lateral convs should create BatchNorm modules.
    fpn_model = FPN(in_channels=in_channels, out_channels=out_channels, start_level=1, add_extra_convs=True, no_norm_on_lateral=False, norm_cfg=dict(type='BN', requires_grad=True), num_outs=5)
    outs = fpn_model(feats)
    assert fpn_model.add_extra_convs == 'on_input'
    _check_outputs(fpn_model, outs)
    assert any(isinstance(m, _BatchNorm) for m in fpn_model.modules())

    # Bilinear upsampling (run twice, as in the original, to cover re-entry).
    fpn_model = FPN(in_channels=in_channels, out_channels=out_channels, start_level=1, add_extra_convs=True, upsample_cfg=dict(mode='bilinear', align_corners=True), num_outs=5)
    fpn_model(feats)
    outs = fpn_model(feats)
    assert fpn_model.add_extra_convs == 'on_input'
    _check_outputs(fpn_model, outs)

    # Scale-factor upsampling.
    fpn_model = FPN(in_channels=in_channels, out_channels=out_channels, start_level=1, add_extra_convs=True, upsample_cfg=dict(scale_factor=2), num_outs=5)
    outs = fpn_model(feats)
    _check_outputs(fpn_model, outs)

    # Explicit string modes selecting the extra-conv source feature.
    for source in ('on_input', 'on_lateral', 'on_output'):
        fpn_model = FPN(in_channels=in_channels, out_channels=out_channels, add_extra_convs=source, start_level=1, num_outs=5)
        assert fpn_model.add_extra_convs == source
        outs = fpn_model(feats)
        _check_outputs(fpn_model, outs)

    # Deprecated bool `extra_convs_on_inputs` flag still maps correctly.
    fpn_model = FPN(in_channels=in_channels, out_channels=out_channels, add_extra_convs=True, extra_convs_on_inputs=False, start_level=1, num_outs=5)
    assert fpn_model.add_extra_convs == 'on_output'
    outs = fpn_model(feats)
    _check_outputs(fpn_model, outs)
    fpn_model = FPN(in_channels=in_channels, out_channels=out_channels, add_extra_convs=True, extra_convs_on_inputs=True, start_level=1, num_outs=5)
    assert fpn_model.add_extra_convs == 'on_input'
    outs = fpn_model(feats)
    _check_outputs(fpn_model, outs)
class InceptionV4(nn.Module):
    """Inception-v4 backbone with a linear classification head.

    The body is a fixed sequence of stem convs plus Mixed / Inception /
    Reduction blocks; pooled features of width 1536 feed the classifier.
    """

    def __init__(self, num_classes=1001):
        super(InceptionV4, self).__init__()
        # Metadata consulted by pretrained-weight loaders; set externally.
        self.input_space = None
        self.input_size = (299, 299, 3)
        self.mean = None
        self.std = None
        stem = [
            BasicConv2d(3, 32, kernel_size=3, stride=2),
            BasicConv2d(32, 32, kernel_size=3, stride=1),
            BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1),
            Mixed_3a(),
            Mixed_4a(),
            Mixed_5a(),
        ]
        # 4x Inception-A, Reduction-A, 7x Inception-B, Reduction-B, 3x Inception-C.
        body = [Inception_A() for _ in range(4)]
        body.append(Reduction_A())
        body.extend(Inception_B() for _ in range(7))
        body.append(Reduction_B())
        body.extend(Inception_C() for _ in range(3))
        self.features = nn.Sequential(*(stem + body))
        self.avg_pool = nn.AvgPool2d(8, count_include_pad=False)
        self.last_linear = nn.Linear(1536, num_classes)

    def logits(self, features):
        """Pool, flatten and classify backbone features."""
        pooled = self.avg_pool(features)
        flat = pooled.view(pooled.size(0), -1)
        return self.last_linear(flat)

    def forward(self, input):
        return self.logits(self.features(input))
def session_indexed(s):
    """Encode a session's action names as integer ids with start/end markers.

    Args:
        s: iterable of action-name strings (must be keys of the table below).

    Returns:
        List of int ids: [start] + encoded actions + [end].
    """
    action_to_idx = {'start': 0, 'end': 1, 'add': 2, 'remove': 3, 'purchase': 4, 'detail': 5, 'view': 6}
    encoded = [action_to_idx['start']]
    encoded.extend(action_to_idx[e] for e in s)
    encoded.append(action_to_idx['end'])
    return encoded
def is_number(s):
    """Return True if ``s`` can be interpreted as a float, else False.

    Accepts anything ``float()`` accepts: numeric strings ('3', '1e3',
    ' 2.5 '), ints, floats.

    Args:
        s: value to test.
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-numeric, non-string inputs such as None or
        # lists, which the original let propagate as an exception.
        return False
def test_register_new_sensors_and_measures():
    """Smoke-test the example's main(); skipped when the Habitat test
    dataset is not present on disk."""
    dataset_config = habitat.get_config().DATASET
    if not PointNavDatasetV1.check_config_paths_exist(config=dataset_config):
        pytest.skip('Please download Habitat test data to data folder.')
    register_new_sensors_and_measures.main()
class VisualNavigationModel(nn.Module):
    """Actor-critic model for goal-driven visual navigation.

    Encodes (image, goal-image) observation pairs plus the previous
    reward/action through shared convolutions and an LSTM, producing
    policy logits and a value estimate.  Auxiliary heads provide pixel
    control, reward prediction and RGB/depth reconstruction
    (NOTE(review): UNREAL-style auxiliary tasks — inferred from the head
    names; confirm against the training loop).
    """
    def init_weights(self, module):
        """Initialize one submodule in-place (used via ``self.apply``)."""
        if (type(module) in [nn.GRU, nn.LSTM, nn.RNN]):
            # Recurrent layers: Xavier for input-to-hidden weights,
            # orthogonal for hidden-to-hidden, zero biases.
            for (name, param) in module.named_parameters():
                if ('weight_ih' in name):
                    nn.init.xavier_uniform_(param.data)
                elif ('weight_hh' in name):
                    nn.init.orthogonal_(param.data)
                elif ('bias' in name):
                    param.data.fill_(0)
        elif (type(module) in [nn.Conv2d, nn.ConvTranspose2d, nn.Linear]):
            # Fan-in scaled uniform weights with zero bias.
            # NOTE(review): assumes `module.bias` is not None — a
            # bias-free conv/linear would raise AttributeError here.
            nn.init.zeros_(module.bias.data)
            (fan_in, fan_out) = nn.init._calculate_fan_in_and_fan_out(module.weight.data)
            d = (1.0 / math.sqrt(fan_in))
            nn.init.uniform_(module.weight.data, (- d), d)
    def __init__(self, num_inputs, num_outputs):
        """Build the network.

        Args:
            num_inputs: channel count of each observation image.
            num_outputs: size of the discrete action space.
        """
        super().__init__()
        self.main_output_size = 512
        # Shared conv stem applied to both the image and the goal image.
        self.shared_base = TimeDistributed(nn.Sequential(nn.Conv2d(num_inputs, 16, 8, stride=4), nn.ReLU(True)))
        # Trunk over the channel-concatenated (image, goal) features.
        self.conv_base = TimeDistributed(nn.Sequential(nn.Conv2d(32, 32, 4, stride=2), nn.ReLU(True), nn.Conv2d(32, 32, 1), nn.ReLU()))
        # Flatten the 32 x 9 x 9 conv features into the main embedding.
        self.conv_merge = TimeDistributed(nn.Sequential(Flatten(), nn.Linear(((9 ** 2) * 32), self.main_output_size), nn.ReLU()))
        self.critic = TimeDistributed(nn.Linear(self.main_output_size, 1))
        self.policy_logits = TimeDistributed(nn.Linear(self.main_output_size, num_outputs))
        self.lstm_layers = 1
        self.lstm_hidden_size = self.main_output_size
        # LSTM input = embedding + num_outputs + 1 extra features
        # (presumably one-hot last action plus last reward — TODO confirm).
        self.rnn = MaskedRNN(nn.LSTM(((self.main_output_size + num_outputs) + 1), hidden_size=self.lstm_hidden_size, num_layers=self.lstm_layers, batch_first=True))
        self._create_pixel_control_network(num_outputs)
        self._create_rp_network()
        self._create_deconv_networks()
        self.apply(self.init_weights)
        self.pc_cell_size = 2
        self.deconv_cell_size = 2
    def _create_deconv_networks(self):
        """Deconv heads reconstructing depth and the two RGB views."""
        self.deconv_depth = TimeDistributed(nn.Sequential(Unflatten(32, 9, 9), nn.ConvTranspose2d(32, 16, kernel_size=4, stride=2), nn.ReLU(), nn.ConvTranspose2d(16, 1, kernel_size=4, stride=2)))
        self.deconv_rgb = TimeDistributed(nn.Sequential(Unflatten(32, 9, 9), nn.ConvTranspose2d(32, 16, kernel_size=4, stride=2), nn.ReLU()))
        self.deconv_rgb_goal = TimeDistributed(nn.Sequential(Unflatten(32, 9, 9), nn.ConvTranspose2d(32, 16, kernel_size=4, stride=2), nn.ReLU()))
        # Final RGB layer shared between the image and goal branches.
        self.deconv_rgb_shared = TimeDistributed(nn.Sequential(nn.ConvTranspose2d(16, 3, kernel_size=4, stride=2)))
        self.deconv_depth.apply(self.init_weights)
        self.deconv_rgb_goal.apply(self.init_weights)
        self.deconv_rgb.apply(self.init_weights)
        self.deconv_rgb_shared.apply(self.init_weights)
    def initial_states(self, batch_size):
        """Zero-initialized (h, c) LSTM states for a new episode batch."""
        return tuple([torch.zeros([batch_size, self.lstm_layers, self.lstm_hidden_size], dtype=torch.float32) for _ in range(2)])
    def forward(self, inputs, masks, states):
        """Return [policy_logits, value, new_states] for a batch."""
        (features, states) = self._forward_base(inputs, masks, states)
        policy_logits = self.policy_logits(features)
        critic = self.critic(features)
        return [policy_logits, critic, states]
    def _forward_base(self, inputs, masks, states):
        """Shared encoder: conv features + last reward/action through the RNN."""
        (observations, last_reward_action) = inputs
        image = observations[0]
        goal = observations[1]
        (image, goal) = (self.shared_base(image), self.shared_base(goal))
        # Concatenate image/goal feature maps along dim 2.
        features = torch.cat((image, goal), 2)
        features = self.conv_base(features)
        features = self.conv_merge(features)
        features = torch.cat((features, last_reward_action), dim=2)
        return self.rnn(features, masks, states)
    def _create_pixel_control_network(self, num_outputs):
        """Deconv head predicting per-pixel action values (value + advantage)."""
        self.pc_base = TimeDistributed(nn.Sequential(nn.Linear(self.lstm_hidden_size, ((32 * 9) * 9)), nn.ReLU()))
        self.pc_action = TimeDistributed(nn.Sequential(nn.ConvTranspose2d(32, 32, kernel_size=4, stride=2), nn.ReLU(), nn.ConvTranspose2d(32, 1, kernel_size=4, stride=2), nn.ReLU()))
        self.pc_value = TimeDistributed(nn.Sequential(nn.ConvTranspose2d(32, 32, kernel_size=4, stride=2), nn.ReLU(), nn.ConvTranspose2d(32, num_outputs, kernel_size=4, stride=2), nn.ReLU()))
    def _create_rp_network(self):
        """Reward-prediction head: linear map from 3 stacked flattened
        feature frames to a 3-dim output."""
        self.rp = nn.Sequential(Flatten(), nn.Linear((((9 ** 2) * 32) * 3), 3))
    def reward_prediction(self, inputs):
        """Predict the 3-dim reward output from raw observations (no RNN)."""
        (observations, _) = inputs
        image = observations[0]
        goal = observations[1]
        (image, goal) = (self.shared_base(image), self.shared_base(goal))
        features = torch.cat((image, goal), 2)
        features = self.conv_base(features)
        features = self.rp(features)
        return features
    def pixel_control(self, inputs, masks, states):
        """Pixel-control output combining value and advantage streams."""
        (features, states) = self._forward_base(inputs, masks, states)
        features = self.pc_base(features)
        features = features.view(*(features.size()[:2] + (32, 9, 9)))
        action_features = self.pc_action(features)
        # Dueling-style combination: value + (advantage - mean advantage).
        features = ((self.pc_value(features) + action_features) - action_features.mean(2, keepdim=True))
        return (features, states)
    def value_prediction(self, inputs, masks, states):
        """Value head only (auxiliary value replay)."""
        (features, states) = self._forward_base(inputs, masks, states)
        critic = self.critic(features)
        return (critic, states)
    def forward_deconv(self, inputs, masks, states):
        """Reconstruct (rgb, rgb_goal, depth) from the conv features;
        ``states`` is passed through unchanged."""
        (observations, _) = inputs
        image = observations[0]
        goal = observations[1]
        (image, goal) = (self.shared_base(image), self.shared_base(goal))
        features = torch.cat((image, goal), 2)
        features = self.conv_base(features)
        depth = self.deconv_depth(features)
        rgb = self.deconv_rgb_shared(self.deconv_rgb(features))
        rgb_goal = self.deconv_rgb_shared(self.deconv_rgb_goal(features))
        return ((rgb, rgb_goal, depth), states)
class TFBertForMaskedLM(metaclass=DummyObject):
    """Placeholder emitted when TensorFlow is unavailable; instantiating
    it raises an informative error via ``requires_backends``."""

    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def get_bs_per_stream(batch_size, stream_number):
    """Split a batch as evenly as possible across streams.

    Args:
        batch_size: total number of samples.
        stream_number: number of available streams.

    Returns:
        List of per-stream batch sizes.  When there are at least as many
        samples as streams, the first ``batch_size % stream_number``
        streams get one extra sample; otherwise only ``batch_size``
        streams are used, one sample each.
    """
    base = batch_size // stream_number
    if base >= 1:
        streams_used = stream_number
        extras = batch_size % stream_number
    else:
        # Fewer samples than streams: one sample per used stream.
        base = 1
        streams_used = batch_size
        extras = 0
    # Equivalent to the original running start/end-index bookkeeping.
    return [base + 1 if j < extras else base for j in range(streams_used)]
def init_weights(m):
    """Initialize a layer in-place: N(0, 0.001) weights with zero bias for
    conv/linear/transposed-conv layers; unit weight and zero bias for
    BatchNorm2d.  Other module types are left untouched."""
    # The original had identical code in two branches (Conv2d/Linear and
    # ConvTranspose2d); a single isinstance tuple covers all three.
    if isinstance(m, (nn.Conv2d, nn.Linear, nn.ConvTranspose2d)):
        m.weight.data.normal_(0, 0.001)
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()
# NOTE(review): the three ".parametrize" lines below are truncated decorator
# remnants — the leading "@pytest.mark" (or benchmark-fixture) text was lost
# in extraction, so this snippet is not runnable as-is; restore the full
# decorators before use.
.parametrize('size', list_tensor_sizes())
.parametrize('dtype', list_non_bool_dtypes())
.parametrize('op', list_binary_ops())
def test_binary_ew_ops(benchmark, size, dtype, op):
    """Benchmark one binary element-wise op on Open3D CPU tensors built
    from random numpy inputs drawn from U(1, 127) (nonzero values)."""
    np_a = np.array(np.random.uniform(1, 127, size), dtype=to_numpy_dtype(dtype))
    np_b = np.array(np.random.uniform(1, 127, size), dtype=to_numpy_dtype(dtype))
    a = o3c.Tensor(np_a, dtype=dtype, device=o3c.Device('CPU:0'))
    b = o3c.Tensor(np_b, dtype=dtype, device=o3c.Device('CPU:0'))
    benchmark(op, a, b)
_vision
class CLIPProcessorTest(unittest.TestCase):
    """Tests CLIPProcessor save/load round-trips and its delegation to the
    underlying tokenizer and image processor."""
    def setUp(self):
        """Write a minimal BPE vocab/merges pair and an image-processor
        config into a fresh temp dir so components can load from disk."""
        self.tmpdirname = tempfile.mkdtemp()
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(vocab_tokens) + '\n'))
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
        image_processor_map = {'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0., 0.4578275, 0.], 'image_std': [0., 0., 0.]}
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        """Slow (Python) tokenizer loaded from the temp dir."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        """Fast (Rust) tokenizer loaded from the temp dir."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        """Image processor loaded from the temp dir."""
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        # Remove the per-test temp dir and everything written into it.
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Return a single random PIL image (converted from a CHW uint8
        array by moving channels last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, (- 1))) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        """Round-tripping a processor through save_pretrained preserves
        tokenizer vocab and image-processor config, for both the slow and
        fast tokenizer variants."""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs passed to from_pretrained are forwarded to the
        tokenizer and image processor."""
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        """Processing images through the processor matches calling the
        image processor directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=0.01)
    def test_tokenizer(self):
        """Processing text through the processor matches calling the
        tokenizer directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        """Text+image calls produce the expected keys; calling with no
        arguments raises ValueError."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        """batch_decode is delegated to the tokenizer unchanged."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        """Output keys match the processor's declared model_input_names."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
class Abstractor(object):
    """Wrapper around a trained CopySumm abstractive summarizer.

    Loads the checkpoint, args and vocabulary from ``abs_dir`` and decodes
    tokenized article sentences into summary token lists, copying the
    most-attended source word wherever the model emits UNK.
    """
    def __init__(self, abs_dir, max_len=30, cuda=True):
        # Load model metadata and verify the expected architecture.
        # NOTE(review): these open() handles are never explicitly closed.
        abs_meta = json.load(open(os.path.join(abs_dir, 'meta.json')))
        assert (abs_meta['net'] == 'base_abstractor')
        abs_args = abs_meta['net_args']
        abs_ckpt = load_best_ckpt(abs_dir)
        word2id = pkl.load(open(os.path.join(abs_dir, 'vocab.pkl'), 'rb'))
        abstractor = CopySumm(**abs_args)
        abstractor.load_state_dict(abs_ckpt)
        self._device = torch.device(('cuda' if cuda else 'cpu'))
        self._net = abstractor.to(self._device)
        self._word2id = word2id
        self._id2word = {i: w for (w, i) in word2id.items()}
        self._max_len = max_len
    def _prepro(self, raw_article_sents):
        """Convert tokenized sentences into padded id tensors.

        Builds an extended vocabulary covering article OOV words (so the
        copy mechanism can emit them) and returns the decoder argument
        tuple plus the extended id→word map.
        """
        ext_word2id = dict(self._word2id)
        ext_id2word = dict(self._id2word)
        for raw_words in raw_article_sents:
            for w in raw_words:
                if (not (w in ext_word2id)):
                    # Assign the next free id to each unseen article word.
                    ext_word2id[w] = len(ext_word2id)
                    ext_id2word[len(ext_id2word)] = w
        articles = conver2id(UNK, self._word2id, raw_article_sents)
        art_lens = [len(art) for art in articles]
        article = pad_batch_tensorize(articles, PAD, cuda=False).to(self._device)
        extend_arts = conver2id(UNK, ext_word2id, raw_article_sents)
        extend_art = pad_batch_tensorize(extend_arts, PAD, cuda=False).to(self._device)
        extend_vsize = len(ext_word2id)
        dec_args = (article, art_lens, extend_art, extend_vsize, START, END, UNK, self._max_len)
        return (dec_args, ext_id2word)
    def __call__(self, raw_article_sents):
        """Decode a batch of tokenized sentences into summary token lists."""
        self._net.eval()
        (dec_args, id2word) = self._prepro(raw_article_sents)
        (decs, attns) = self._net.batch_decode(*dec_args)
        def argmax(arr, keys):
            # Element of `arr` whose attention weight in `keys` is largest.
            return arr[max(range(len(arr)), key=(lambda i: keys[i].item()))]
        dec_sents = []
        for (i, raw_words) in enumerate(raw_article_sents):
            dec = []
            for (id_, attn) in zip(decs, attns):
                if (id_[i] == END):
                    # Stop at the end-of-sequence token.
                    break
                elif (id_[i] == UNK):
                    # Copy the most-attended source word in place of UNK.
                    dec.append(argmax(raw_words, attn[i]))
                else:
                    dec.append(id2word[id_[i].item()])
            dec_sents.append(dec)
        return dec_sents
class BNMomentumScheduler(object):
    """Schedules BatchNorm momentum over epochs.

    ``bn_lambda(epoch)`` yields the momentum value for an epoch;
    ``setter(momentum)`` returns a function applied to every submodule of
    ``model`` (defaults to ``set_bn_momentum_default``).
    """
    def __init__(self, model, bn_lambda, last_epoch=(- 1), setter=set_bn_momentum_default):
        if (not isinstance(model, nn.Module)):
            raise RuntimeError("Class '{}' is not a PyTorch nn Module".format(type(model).__name__))
        self.model = model
        self.setter = setter
        self.lmbd = bn_lambda
        # Apply the initial momentum immediately...
        self.step((last_epoch + 1))
        # ...then rewind the counter so an external argument-less step()
        # re-applies epoch last_epoch + 1 (mirrors torch LR schedulers).
        # NOTE(review): step() above already set self.last_epoch; this
        # assignment deliberately overwrites it.
        self.last_epoch = last_epoch
    def step(self, epoch=None):
        """Apply the momentum for ``epoch`` (defaults to the next epoch)."""
        if (epoch is None):
            epoch = (self.last_epoch + 1)
        self.last_epoch = epoch
        self.model.apply(self.setter(self.lmbd(epoch)))
    def get_momentum(self, epoch=None):
        """Return the momentum value for ``epoch`` without applying it."""
        if (epoch is None):
            epoch = (self.last_epoch + 1)
        return self.lmbd(epoch)
def lossfun(x, alpha, scale, approximate=False, epsilon=1e-06):
    """Element-wise general robust loss parameterized by ``alpha``/``scale``.

    NOTE(review): the formula matches the 'general and adaptive robust
    loss' formulation (Barron) — confirm against the accompanying ``util``
    module.

    Args:
        x: residual tensor.
        alpha: shape-parameter tensor (same dtype as ``x``).
        scale: positive scale tensor (same dtype as ``x``).
        approximate: use a single smoothed expression instead of the exact
            piecewise form; requires ``epsilon`` above float32 eps.
        epsilon: smoothing constant for the approximate branch.

    Returns:
        Tensor of per-element losses.
    """
    assert torch.is_tensor(x)
    assert torch.is_tensor(scale)
    assert torch.is_tensor(alpha)
    assert (alpha.dtype == x.dtype)
    assert (scale.dtype == x.dtype)
    assert (scale > 0).all()
    if approximate:
        # Smoothed single-expression form: shift b and d away from the
        # singular points alpha == 2 and alpha == 0.
        assert (epsilon > np.finfo(np.float32).eps)
        b = (torch.abs((alpha - 2)) + epsilon)
        d = torch.where((alpha >= 0), (alpha + epsilon), (alpha - epsilon))
        loss = ((b / d) * (torch.pow(((((x / scale) ** 2) / b) + 1.0), (0.5 * d)) - 1.0))
    else:
        # Exact form: compute each special case and select with nested
        # torch.where so the result stays finite at the removable
        # singularities (alpha in {-inf, 0, 2, +inf}).
        squared_scaled_x = ((x / scale) ** 2)
        loss_two = (0.5 * squared_scaled_x)  # alpha == 2: scaled L2
        loss_zero = util.log1p_safe((0.5 * squared_scaled_x))  # alpha == 0
        loss_neginf = (- torch.expm1(((- 0.5) * squared_scaled_x)))  # alpha == -inf
        loss_posinf = util.expm1_safe((0.5 * squared_scaled_x))  # alpha == +inf
        # Clamp |alpha - 2| and |alpha| away from zero to avoid division
        # by zero in the general-case expression below.
        machine_epsilon = torch.tensor(np.finfo(np.float32).eps).to(x)
        beta_safe = torch.max(machine_epsilon, torch.abs((alpha - 2.0)))
        alpha_safe = (torch.where((alpha >= 0), torch.ones_like(alpha), (- torch.ones_like(alpha))) * torch.max(machine_epsilon, torch.abs(alpha)))
        loss_otherwise = ((beta_safe / alpha_safe) * (torch.pow(((squared_scaled_x / beta_safe) + 1.0), (0.5 * alpha)) - 1.0))
        loss = torch.where((alpha == (- float('inf'))), loss_neginf, torch.where((alpha == 0), loss_zero, torch.where((alpha == 2), loss_two, torch.where((alpha == float('inf')), loss_posinf, loss_otherwise))))
    return loss
def one_of_k_encoding_unk(x, allowable_set):
    """One-hot encode ``x`` against ``allowable_set``.

    Returns:
        List of 0/1 ints with a single 1 at the position of ``x``, or
        None when ``x`` is not in the set.  NOTE(review): many variants of
        this helper map unknown values onto the last set element instead
        of returning None — confirm callers expect None here.
    """
    if x not in allowable_set:
        return None
    return [int(x == item) for item in allowable_set]
# NOTE(review): the line below reads like a stripped hypothesis decorator,
# most likely ``@given(others=sampled_from([...]))`` -- as written it is not
# valid Python; confirm against the upstream test file.
(others=sampled_from([{'box': TFBoxTensor(tf.Variable([[[1, 1], [3, 5]], [[2, 0], [6, 2]]], dtype=tf.float32)), 'weights': None, 'mask': None, 'keepdim': True, 'dim': 0, 'expected': TFBoxTensor(tf.Variable([[(3.0 / 2.0), (1.0 / 2.0)], [(9.0 / 2.0), (7.0 / 2.0)]]))}, {'box': TFBoxTensor(tf.Variable([[[1, 1], [3, 5]], [[2, 0], [6, 2]]], dtype=tf.float32)), 'weights': tf.reshape(tf.Variable([0.5, 0.5]), (2, 1)), 'mask': None, 'keepdim': True, 'dim': 0, 'expected': TFBoxTensor(tf.Variable([[(3.0 / 2.0), (1.0 / 2.0)], [(9.0 / 2.0), (7.0 / 2.0)]]))}, {'box': TFBoxTensor(tf.Variable([[[1, 1], [3, 5]], [[2, 0], [6, 2]]], dtype=tf.float32)), 'weights': tf.reshape(tf.Variable([0.1, 0.9]), (2, 1)), 'mask': None, 'keepdim': True, 'dim': 0, 'expected': TFBoxTensor(tf.Variable([[((0.1 * 1.0) + (0.9 * 2.0)), ((0.1 * 1.0) + (0.9 * 0))], [((0.1 * 3) + (0.9 * 6.0)), ((0.1 * 5) + (0.9 * 2))]]))}, {'box': TFBoxTensor(tf.Variable([[[[1, 1], [3, 5]], [[2, 0], [6, 2]]]], dtype=tf.float32)), 'weights': tf.reshape(tf.Variable([0.1, 0.9]), (1, 2, 1)), 'mask': None, 'keepdim': True, 'dim': 1, 'expected': TFBoxTensor(tf.Variable([[[((0.1 * 1.0) + (0.9 * 2.0)), ((0.1 * 1.0) + (0.9 * 0))], [((0.1 * 3) + (0.9 * 6.0)), ((0.1 * 5) + (0.9 * 2))]]], dtype=tf.float32))}, {'box': TFBoxTensor(tf.Variable([[[1, 1, 1], [3, 5, 6]], [[2, 0, 1], [6, 2, 3]]], dtype=tf.float32)), 'weights': tf.Variable([0.5, 0.5], dtype=tf.float32), 'mask': None, 'keepdim': True, 'dim': 0, 'expected': InternalError}]))
def test_bob(others) -> None:
    """Check TFBagOfBoxesBoxPooler against hand-computed (weighted) box means.

    Each sampled case either carries an expected TFBoxTensor (compare z/Z
    corners numerically) or an exception type that pooling must raise.
    """
    box = others['box']
    bob = TFBagOfBoxesBoxPooler(dim=others['dim'], keepdim=others['keepdim'])
    expected = others['expected']
    if isinstance(expected, TFBoxTensor):
        result = bob(box, mask=others['mask'], weights=others['weights'])
        # z/Z are the lower/upper corners of the pooled box
        assert np.allclose(result.z, others['expected'].z)
        assert np.allclose(result.Z, others['expected'].Z)
    else:
        # 'expected' holds an exception class (e.g. InternalError)
        with pytest.raises(expected):
            result = bob(box, mask=others['mask'], weights=others['weights'])
def merge_new_config(config, new_config):
    """Recursively merge ``new_config`` into ``config`` (an EasyDict tree).

    A '_base_' key is expanded by loading the referenced YAML file and
    merging its contents under that key.  Nested dicts are merged
    recursively; scalars overwrite.  Returns the mutated ``config``.
    """
    for key, val in new_config.items():
        if not isinstance(val, dict):
            if key == '_base_':
                with open(new_config['_base_'], 'r') as f:
                    try:
                        val = yaml.load(f, Loader=yaml.FullLoader)
                    except Exception:
                        # fallback for old PyYAML without FullLoader
                        # (was a bare ``except:``, which also swallowed
                        # KeyboardInterrupt/SystemExit)
                        val = yaml.load(f)
                config[key] = EasyDict()
                merge_new_config(config[key], val)
            else:
                config[key] = val
            continue
        if key not in config:
            config[key] = EasyDict()
        merge_new_config(config[key], val)
    return config
class TransNorm2d(_TransNorm):
    """TransNorm specialization for 4-D (N, C, H, W) inputs."""

    def _check_input(self, x):
        """Raise ValueError unless ``x`` is a 4-D tensor."""
        if x.dim() == 4:
            return
        raise ValueError('Expected the input to be 4-D, but got {}-D'.format(x.dim()))
def dc_state_dict(dc_vars, *name_list):
    """Collect '<name>_state_dict' entries for each named object in ``dc_vars``.

    Objects without a ``state_dict`` attribute are silently skipped.
    """
    collected = {}
    for name in name_list:
        obj = dc_vars[name]
        if hasattr(obj, 'state_dict'):
            collected[name + '_state_dict'] = obj.state_dict()
    return collected
def eval(args, val_loader, model, criterion):
    """Evaluate ``model`` on ``val_loader``; return (avg_loss, top1, top5).

    Runs the model in eval mode under ``torch.no_grad()`` and accumulates
    loss / top-1 / top-5 accuracy with AverageMeter.  Note: the function
    name shadows the ``eval`` builtin; kept for caller compatibility.
    """
    model.eval()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    device = args.device
    if args.cuda:
        torch.cuda.empty_cache()
    # inference only: disabling autograd avoids storing activations
    # (the original loop tracked gradients needlessly)
    with torch.no_grad():
        for data, y in val_loader:
            data = data.to(device, non_blocking=True)
            y = y.to(device, non_blocking=True)
            outputs = model(data)
            loss = criterion(outputs, y)
            (prec1, prec5) = accuracy(outputs.data, y.data, topk=(1, 5))
            losses.update(loss.item(), data.size(0))
            top1.update(prec1.item(), data.size(0))
            top5.update(prec5.item(), data.size(0))
    logging('Avg loss: {:.4f}, top1: {:.2f}%, top5: {:.2f}%'.format(losses.avg, top1.avg, top5.avg), args.log)
    return (losses.avg, top1.avg, top5.avg)
def convert_coco_poly_to_mask(segmentations, height, width):
    """Rasterize COCO polygon segmentations into an (N, H, W) uint8 tensor.

    Each entry of ``segmentations`` is one instance's polygon list; RLE parts
    are OR-ed together per instance.  An empty input yields a (0, H, W) tensor.
    """
    per_instance = []
    for polygons in segmentations:
        rles = coco_mask.frPyObjects(polygons, height, width)
        decoded = coco_mask.decode(rles)
        if decoded.ndim < 3:
            # single RLE decodes to (H, W); add the parts axis
            decoded = decoded[..., None]
        instance_mask = torch.as_tensor(decoded, dtype=torch.uint8).any(dim=2)
        per_instance.append(instance_mask)
    if not per_instance:
        return torch.zeros((0, height, width), dtype=torch.uint8)
    return torch.stack(per_instance, dim=0)
def weights_to_cpu(state_dict):
    """Return a copy of ``state_dict`` with every tensor moved to the CPU.

    Key order is preserved (OrderedDict in, OrderedDict out).
    """
    return OrderedDict((name, tensor.cpu()) for name, tensor in state_dict.items())
class CmsPfSinglePi(tfds.core.GeneratorBasedBuilder):
    """TFDS builder for the CMS particle-flow single pi-minus sample.

    Yields ragged float32 tensors X / ygen / ycand whose per-element feature
    columns are described by the module-level X_FEATURES / Y_FEATURES lists.
    Data must be rsynced manually (see MANUAL_DOWNLOAD_INSTRUCTIONS).
    """
    VERSION = tfds.core.Version('1.6.0')
    RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'Add muon type, fix electron GSF association', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add genjet information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'}
    MANUAL_DOWNLOAD_INSTRUCTIONS = '\n rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_pi ~/tensorflow_datasets/\n '
    def __init__(self, *args, **kwargs):
        # force the ARRAY_RECORD on-disk format (see the 1.6.0 release note)
        kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
        super(CmsPfSinglePi, self).__init__(*args, **kwargs)
    def _info(self) -> tfds.core.DatasetInfo:
        """Describe the dataset schema: (n_elements, n_features) tensors."""
        return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))
    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        """Locate the manually downloaded raw files and delegate split logic."""
        path = dl_manager.manual_dir
        sample_dir = 'SinglePiMinusFlatPt0p7To1000_cfi'
        return cms_utils.split_sample(((path / sample_dir) / 'raw'))
    def _generate_examples(self, files):
        """Delegate example generation to the shared CMS utilities."""
        return cms_utils.generate_examples(files)
def connect_nodes(n1, n2):
    """Record a directed edge n1 -> n2 in both node dicts.

    Appends n2's name to n1['outputs'] and n1's name to n2['inputs'];
    a duplicate edge is reported and left unchanged.
    """
    target_name = n2['node'].name
    if target_name in n1['outputs']:
        print('{} -> {} already connected'.format(n1['node'].name, target_name))
        return
    n1['outputs'].append(target_name)
    n2['inputs'].append(n1['node'].name)
def load_actor(checkpointpath):
    """Rebuild (env, model, args) from the meta.json next to a model.tar.

    The saved args dict is patched with a default 'breath' for configs
    written before that option existed.
    """
    import json
    meta_path = checkpointpath.replace('model.tar', 'meta.json')
    with open(meta_path, 'r') as meta_file:
        args = json.load(meta_file)['args']
    # default for checkpoints saved before the 'breath' option existed
    args.setdefault('breath', 2)
    env = create_gymenv(AttributeDict(args))
    return (env, create_model(AttributeDict(args), env), AttributeDict(args))
class Res16UNetSN14(Res16UNet14):
    """Res16UNet14 variant using sparse switch normalization."""
    # swap the parent's norm layer for sparse switch norm
    NORM_TYPE = NormType.SPARSE_SWITCH_NORM
    # basic blocks built with switch norm (SN) instead of the default
    BLOCK = BasicBlockSN
class MyMetric_keras(MyMetric):
    """Keras-flavored alias of MyMetric; behavior is inherited unchanged."""

    def __init__(self, *args):
        super().__init__(*args)
class BasePose(nn.Module):
    """Base class for pose estimation models.

    Subclasses implement forward_train / forward_test; forward() dispatches
    between them via ``return_loss``.

    NOTE(review): in this copy the bodies of forward_train/forward_test/
    forward were missing (likely stripped together with @abstractmethod
    decorators) and ``_parse_losses`` had no self/cls parameter although it
    is called as ``self._parse_losses(...)``.  Abstract stubs, the
    conventional dispatch, and @staticmethod are restored here -- confirm
    against the upstream implementation.
    """
    __metaclass__ = ABCMeta  # Py2-style marker; inert under Py3, kept as-is

    def forward_train(self, img, img_metas, **kwargs):
        """Compute and return the loss dict for one training batch."""
        raise NotImplementedError

    def forward_test(self, img, img_metas, **kwargs):
        """Run inference for one batch and return results."""
        raise NotImplementedError

    def forward(self, img, img_metas, return_loss=True, **kwargs):
        """Dispatch to forward_train when ``return_loss`` else forward_test."""
        if return_loss:
            return self.forward_train(img, img_metas, **kwargs)
        return self.forward_test(img, img_metas, **kwargs)

    @staticmethod
    def _parse_losses(losses):
        """Reduce a dict of raw losses to (total_loss, log_vars).

        Tensors are mean-reduced, lists of tensors summed, floats passed
        through; keys containing 'loss' are summed into the total.  When
        torch.distributed is initialized, logged values are all-reduced
        means across ranks.  Raises TypeError on unsupported value types.
        """
        log_vars = OrderedDict()
        for loss_name, loss_value in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, float):
                log_vars[loss_name] = loss_value
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
            else:
                raise TypeError(f'{loss_name} is not a tensor or list of tensors or float')
        loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key)
        log_vars['loss'] = loss
        for loss_name, loss_value in log_vars.items():
            if not isinstance(loss_value, float):
                if dist.is_available() and dist.is_initialized():
                    loss_value = loss_value.data.clone()
                    dist.all_reduce(loss_value.div_(dist.get_world_size()))
                log_vars[loss_name] = loss_value.item()
            else:
                log_vars[loss_name] = loss_value
        return loss, log_vars

    def train_step(self, data_batch, optimizer, **kwargs):
        """One training step: forward, parse losses, package runner outputs."""
        losses = self.forward(**data_batch)
        loss, log_vars = self._parse_losses(losses)
        outputs = dict(loss=loss, log_vars=log_vars, num_samples=len(next(iter(data_batch.values()))))
        return outputs

    def val_step(self, data_batch, optimizer, **kwargs):
        """One validation step: inference only, no loss computation."""
        results = self.forward(return_loss=False, **data_batch)
        return dict(results=results)

    def show_result(self, **kwargs):
        """Visualization hook; subclasses may override."""
        raise NotImplementedError
def symlink_force(target, link_name):
    """Create symlink ``link_name`` -> ``target``, replacing an existing one.

    Any OSError other than EEXIST propagates unchanged.
    """
    try:
        os.symlink(target, link_name)
    except OSError as e:
        if e.errno != errno.EEXIST:
            # bare ``raise`` preserves the original traceback
            # (the original ``raise e`` re-raised from this frame)
            raise
        os.remove(link_name)
        os.symlink(target, link_name)
class Trainer():
    """Training driver for the NR-Hint relighting pipeline.

    Handles (multi-GPU/DDP) device setup, wandb logging, checkpoint and
    RNG-state persistence, the main optimization loop, test-view rendering,
    mesh extraction and turntable-video rendering.

    NOTE(review): ``pipeline``, ``is_main_process``, ``use_ddp``,
    ``model_states`` and ``rng_states`` are consumed as attributes throughout
    (e.g. ``if self.is_main_process:``, ``torch.save(self.model_states, ...)``)
    although they are defined below as plain methods -- their ``@property``
    decorators were almost certainly stripped from this copy.  As written,
    truthiness tests on the bound methods are always True and the method
    objects themselves would be pickled.  Confirm against upstream.
    """
    # the pipeline, possibly wrapped in DistributedDataParallel
    _pipeline: Union[(BaseNRHintPipeline, DDP)]
    def __init__(self, config: SystemConfig, shm_info: NRDataSHMInfo):
        """Initialize devices, logging, pipeline, optimizer, scheduler, data."""
        self.config = config
        self.rank = local_rank()
        self.world_size = 1
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
            torch.backends.cudnn.benchmark = True
            gpu_count = torch.cuda.device_count()
            if (gpu_count > 1):
                # multi-GPU: one process per GPU, NCCL rendezvous via env vars
                torch.distributed.init_process_group(backend='nccl', init_method='env://')
                self.world_size = torch.distributed.get_world_size()
                torch_rank = torch.distributed.get_rank()
                assert (torch_rank == self.rank), f'torch_rank {torch_rank} != rank {self.rank}, initialization might have failed...'
                self.device = torch.device('cuda', self.rank)
                torch.cuda.set_device(self.device)
        else:
            self.device = torch.device('cpu')
        self.log_dir = ((pathlib.Path(self.config.base_dir) / self.config.exp_name) / self.config.scene_name)
        self.log_dir.mkdir(parents=True, exist_ok=True)
        if self.is_main_process:
            # persist the resolved config and start the wandb run (rank 0 only)
            with open((self.log_dir / 'config.yaml'), 'w') as f:
                f.write(tyro.to_yaml(self.config))
            wandb.init(project='NR2023', name=f'{config.exp_name}_{config.scene_name}', group=config.exp_name, notes='NR Hint Relighting', config=asdict(config), resume='allow', id=str(self.log_dir).replace('/', '_'))
            wandb.run.log_code()
        # per-rank seed offset keeps sampling decorrelated across processes
        seed_everything((config.seed + self.rank))
        self._pipeline = BaseNRHintPipeline(config, shm_info).to(self.device)
        if (self.world_size > 1):
            self._pipeline = DDP(self._pipeline, device_ids=[self.rank], find_unused_parameters=False, broadcast_buffers=True)
        if self.is_main_process:
            summary(self.pipeline)
        self.optimizer = torch.optim.Adam(self.pipeline.get_param_groups())
        alpha = config.model.lr_alpha
        warm_up_end = config.model.warm_up_end
        end_iter = config.model.end_iter
        def lr_lambda(iter_step):
            # linear warm-up, then cosine decay from 1.0 down to alpha
            if (iter_step < warm_up_end):
                learning_factor = (iter_step / warm_up_end)
            else:
                progress = ((iter_step - warm_up_end) / (end_iter - warm_up_end))
                learning_factor = ((((np.cos((np.pi * progress)) + 1.0) * 0.5) * (1 - alpha)) + alpha)
            return learning_factor
        self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda)
        # the global batch is split evenly across ranks
        self.data_manager = NRSHMDataManager(shm_info=shm_info, batch_size=(config.model.batch_size // self.world_size), strategy=PixelSamplingStrategy.ALL_IMAGES, image_idx_rng_seed=config.seed, pixel_idx_rng_seed=config.seed, local_rank=self.rank)
        self.global_step = 0
        self.load_ckpt()
    def pipeline(self):
        """The unwrapped pipeline (looks through a DDP wrapper if present)."""
        if isinstance(self._pipeline, DDP):
            return cast(BaseNRHintPipeline, self._pipeline.module)
        return self._pipeline
    def is_main_process(self):
        """True on rank 0 (the process that logs, saves and aggregates)."""
        return (self.rank == 0)
    def use_ddp(self):
        """True when running distributed (more than one process)."""
        return (self.world_size > 1)
    def wait_all(self):
        """Barrier across all ranks (no-op semantics intended for single GPU).

        NOTE(review): ``if self.use_ddp:`` tests a bound method and is always
        truthy as written -- consistent with a stripped @property.
        """
        if self.use_ddp:
            torch.distributed.barrier()
    def model_states(self) -> Dict:
        """Everything needed to resume training, as one dict."""
        return {'world_size': self.world_size, 'global_step': self.global_step, 'pipeline': self.pipeline.state_dict(), 'optimizer': self.optimizer.state_dict(), 'scheduler': self.lr_scheduler.state_dict()}
    def rng_states(self):
        """Snapshot of every RNG stream used by training (for exact resume)."""
        return {'python.random': random.getstate(), 'np.random': np.random.get_state(), 'torch.random': torch.random.get_rng_state(), 'torch.cuda.random': torch.cuda.random.get_rng_state(self.device), 'ray_generator.image': self.data_manager.sampler.image_rng.__getstate__(), 'ray_generator.pixel': self.data_manager.sampler.pixel_rng.__getstate__()}
    def save_ckpt(self):
        """Write model states (rank 0) and this rank's RNG states to disk."""
        if self.is_main_process:
            ckpt_path = ((self.log_dir / 'ckpt') / f'step_{self.global_step:07d}.ckpt')
            torch.save(self.model_states, ckpt_path)
        rng_state_path = ((self.log_dir / 'rng_state') / f'step_{self.global_step:07d}_device_{self.rank}.pickle')
        pickle.dump(self.rng_states, open(rng_state_path, 'wb'))
    def load_ckpt(self):
        """Resume from an explicit or the newest checkpoint, if any.

        Falls back to the second-newest checkpoint on EOFError (truncated
        file from an interrupted save).  RNG states are only restored when
        the world size matches the one that produced them.
        """
        ckpt_path = (self.log_dir / 'ckpt')
        ckpt_path.mkdir(parents=True, exist_ok=True)
        rng_state_path = (self.log_dir / 'rng_state')
        rng_state_path.mkdir(parents=True, exist_ok=True)
        if (self.config.ckpt_path is not None):
            ckpts = [self.config.ckpt_path]
            print(f'Loading given ckpt from {self.config.ckpt_path}')
        else:
            ckpts = [os.path.join(ckpt_path, f) for f in sorted(os.listdir(ckpt_path)) if ('ckpt' in f)]
            print(f'Found {len(ckpts)} ckpts in {ckpt_path}')
        if (len(ckpts) > 0):
            try:
                ckpt_path = ckpts[(- 1)]
                print(f'Resume from ckpt: {ckpt_path}')
                last_world_size = self._load_ckpt_file(ckpt_path)
            except EOFError:
                ckpt_path = ckpts[(- 2)]
                print(f'Retrying resume from ckpt: {ckpt_path}')
                last_world_size = self._load_ckpt_file(ckpt_path)
            if (last_world_size == self.world_size):
                try:
                    rng_state_path = (rng_state_path / f'step_{self.global_step:07d}_device_{self.rank}.pickle')
                    self._load_rng_states(rng_state_path)
                except Exception as e:
                    print(e)
                    print('rng state resume failed, the results might not be fully reproducible')
    def _load_ckpt_file(self, ckpt_file):
        """Load one checkpoint file; returns the world size that wrote it."""
        ckpt = torch.load(ckpt_file, map_location=self.device)
        self.global_step = ckpt['global_step']
        self.pipeline.load_state_dict(ckpt['pipeline'])
        self.optimizer.load_state_dict(ckpt['optimizer'])
        self.lr_scheduler.load_state_dict(ckpt['scheduler'])
        return ckpt['world_size']
    def _load_rng_states(self, rng_state_path):
        """Restore every RNG stream from a pickled snapshot."""
        rng_states = pickle.load(open(rng_state_path, 'rb'))
        random.setstate(rng_states['python.random'])
        np.random.set_state(rng_states['np.random'])
        torch.random.set_rng_state(rng_states['torch.random'])
        torch.cuda.set_rng_state(rng_states['torch.cuda.random'])
        self.data_manager.sampler.image_rng.__setstate__(rng_states['ray_generator.image'])
        self.data_manager.sampler.pixel_rng.__setstate__(rng_states['ray_generator.pixel'])
    def run(self):
        """Main entry: optimize until end_iter, then final mesh + test views."""
        if (not self.config.evaluation_only):
            start_step = self.global_step
            for _ in tqdm(range(start_step, self.config.model.end_iter), desc=f'Training: ', initial=start_step, total=self.config.model.end_iter, dynamic_ncols=True, disable=(self.rank != 0)):
                loss_dict = self.train_iter()
                if (((self.global_step % self.config.intervals.log_metrics) == 0) and self.is_main_process):
                    wandb.log(loss_dict, step=self.global_step)
                self.global_step += 1
                # periodic side tasks, each on its own interval
                if ((self.global_step % self.config.intervals.save_ckpt) == 0):
                    self.save_ckpt()
                if ((self.global_step % self.config.intervals.render_test_views) == 0):
                    self.render_test_views()
                if ((self.global_step % self.config.intervals.dump_mesh) == 0):
                    self.dump_mesh()
                if ((self.global_step % self.config.intervals.render_video) == 0):
                    self.render_video()
        self.dump_mesh(resolution=1024)
        self.render_test_views(is_final=True)
    def train_iter(self):
        """One optimization step; returns the loss dict for logging."""
        pixel_bundle = self.data_manager.next_train_batch()
        pixel_bundle = pixel_bundle.to(self.device)
        rendering_res = self._pipeline.forward(pixel_bundle, global_step=self.global_step)
        loss_dict = self.pipeline.get_train_loss_dict(rendering_res, pixel_bundle)
        loss = loss_dict['loss']
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.lr_scheduler.step()
        return loss_dict
    def render_test_views(self, is_final=False):
        """Render held-out views (strided across ranks) and log mean metrics."""
        total_test_view = self.data_manager.test_view_num()
        # the final pass renders every view; intermediate passes subsample
        skip_num = (self.config.data.testset_skip if (not is_final) else 1)
        start_idx = (self.rank * skip_num)
        metrics_dicts = []
        for idx in tqdm(range(start_idx, total_test_view, (skip_num * self.world_size)), desc=f'Rendering test views on process {self.rank}: '):
            metrics_dict = self.render_single_view(idx)
            metrics_dicts.append(metrics_dict)
        self.wait_all()
        if self.use_ddp:
            output_list = [None for _ in range(self.world_size)]
            torch.distributed.gather_object(obj=metrics_dicts, object_gather_list=(output_list if self.is_main_process else None), dst=0)
        else:
            output_list = [metrics_dicts]
        if self.is_main_process:
            # average each metric over all rendered views
            gather_output = {}
            image_cnt = 0
            for item in itertools.chain(*output_list):
                image_cnt += 1
                for (k, v) in item.items():
                    gather_output.setdefault(k, 0.0)
                    gather_output[k] += v
            final_output = {}
            for (k, v) in gather_output.items():
                # NOTE(review): both ternary branches are identical ('val/{k}');
                # the is_final branch was presumably meant to use a different
                # prefix (e.g. 'test/{k}') -- confirm against upstream.
                final_output[(f'val/{k}' if is_final else f'val/{k}')] = (v / image_cnt)
            wandb.log(final_output, step=self.global_step)
        self.wait_all()
    # NOTE(review): the line below is garbled -- it reads like the remnant of
    # a ``@torch.no_grad()`` decorator for render_single_view; as written it
    # would call an undefined ``_grad`` at class-body execution time.
    _grad()
    def render_single_view(self, view_index, is_training_view: bool=False):
        """Render one test view, save its dumps, and return its metrics."""
        img_pixel_bundle = self.data_manager.get_test_view(view_index)
        (img_dict, metrics_dict, tensor_dict) = self.pipeline.get_eval_dicts(img_pixel_bundle, self.device)
        self.save_dumps(view_index, img_dict, tensor_dict)
        if ((view_index == 0) and self.is_main_process):
            self.log_images(img_dict)
        return metrics_dict
    def save_dumps(self, view_idx, image_dict, tensor_dict):
        """Write per-view images (PNG) and raw tensors (NPY) to the log dir."""
        dump_dir = ((self.log_dir / 'test_views') / f'step_{self.global_step:07d}')
        dump_dir.mkdir(parents=True, exist_ok=True)
        for (k, v) in image_dict.items():
            if ('normal' in k):
                # normals are in [-1, 1]; remap to [0, 1] for image output
                v = ((v * 0.5) + 0.5)
            if (v.shape[(- 1)] == 1):
                v = v[(..., 0)]
            imageio.v3.imwrite((dump_dir / f'{k}_{view_idx:03d}.png'), (v * 255).clip(0, 255).astype(np.uint8))
        for (k, v) in tensor_dict.items():
            np.save((dump_dir / f'{k}_{view_idx:03d}.npy'), v)
    def log_images(self, image_dict):
        """Push one view's rendered images to wandb."""
        for (k, v) in image_dict.items():
            if ('normal' in k):
                v = ((v * 0.5) + 0.5)
            wandb.log({k: wandb.Image((v * 255).clip(0, 255).astype(np.uint8))}, step=self.global_step)
    def dump_mesh(self, resolution: int=256):
        """Extract and export the current geometry as an OBJ (rank 0 only)."""
        if self.is_main_process:
            mesh_dir = (self.log_dir / 'mesh')
            mesh_dir.mkdir(parents=True, exist_ok=True)
            mesh_file_path = (mesh_dir / f'step_{self.global_step:07d}_res_{resolution}.obj')
            # slightly padded unit cube as the marching bounds
            bound_min = torch.tensor([(- 1.01), (- 1.01), (- 1.01)], dtype=torch.float32)
            bound_max = torch.tensor([1.01, 1.01, 1.01], dtype=torch.float32)
            (vertices, triangles) = self.pipeline.renderer.extract_geometry(bound_min, bound_max, resolution=resolution, threshold=0.0)
            mesh = trimesh.Trimesh(vertices, triangles)
            mesh.export(mesh_file_path)
        self.wait_all()
    def render_video(self):
        """Render turntable frames across ranks and assemble two MP4s on rank 0.

        Non-main ranks send (index, frame) pairs to rank 0 via point-to-point
        sends; rank 0 collects them into a CPU frame buffer and encodes a
        rotating-view video and a rotating-light video.
        """
        video_dir = (self.log_dir / 'video')
        video_dir.mkdir(parents=True, exist_ok=True)
        video_frame_dir = (video_dir / f'step_{self.global_step:07d}')
        video_frame_dir.mkdir(parents=True, exist_ok=True)
        video_pixel_bundle = self.data_manager.get_video_pixel_bundle(self.config.data.video_frame_num, is_z_up=self.config.data.is_z_up)
        if self.is_main_process:
            video_frame_buffer = torch.empty(*video_pixel_bundle.shape, 3, dtype=torch.uint8, device='cpu')
        assert ((len(video_pixel_bundle) % self.world_size) == 0), 'video frame number should be divisible by world size'
        frames_per_process = (len(video_pixel_bundle) // self.world_size)
        for i in tqdm(range(frames_per_process), desc=f'Rendering video frames on process {self.rank}: '):
            idx = (i + (frames_per_process * self.rank))
            (img_dict, _, _) = self.pipeline.get_eval_dicts(video_pixel_bundle[idx], self.device)
            uint8_rgb = (img_dict['rgb'] * 255).clip(0, 255).astype(np.uint8)
            imageio.v3.imwrite((video_frame_dir / f'{idx:03d}.png'), uint8_rgb)
            if self.is_main_process:
                video_frame_buffer[idx] = torch.tensor(uint8_rgb, dtype=torch.uint8, device='cpu')
                single_idx_buffer = torch.empty((1,), dtype=torch.long, device=self.device)
                single_frame_buffer = torch.empty_like(video_frame_buffer[idx], dtype=torch.uint8, device=self.device)
                for sub_process_rank in range(1, self.world_size):
                    torch.distributed.recv(tensor=single_idx_buffer, src=sub_process_rank, tag=0)
                    torch.distributed.recv(tensor=single_frame_buffer, src=sub_process_rank, tag=1)
                    video_frame_buffer[single_idx_buffer.item()] = single_frame_buffer.cpu()
            else:
                torch.distributed.send(torch.tensor([idx], device=self.device), dst=0, tag=0)
                torch.distributed.send(torch.tensor(uint8_rgb, dtype=torch.uint8, device=self.device), dst=0, tag=1)
        self.wait_all()
        if self.is_main_process:
            video_rgb = video_frame_buffer.numpy().astype(np.uint8)
            # first half of the frames: rotating view; second half: rotating light
            imageio.v3.imwrite(os.path.join(video_dir, f'step_{self.global_step:06d}_rot_view.mp4'), video_rgb[:self.config.data.video_frame_num], fps=30, quality=9)
            imageio.v3.imwrite(os.path.join(video_dir, f'step_{self.global_step:06d}_rot_light.mp4'), video_rgb[self.config.data.video_frame_num:], fps=30, quality=9)
        self.wait_all()
    def release_shm(self):
        """Detach from the shared-memory dataset and drop the data manager."""
        self.data_manager.release_shm()
        del self.data_manager
class ConvVAE(nn.Module):
    """Convolutional VAE over 3-channel images.

    encode() yields the posterior (mu, logvar); reparameterize() samples with
    the standard trick while training and returns mu at eval time; decode()
    maps a latent back to an image in [0, 1] (Sigmoid output).  Grayscale
    inputs are tiled to three channels.
    """

    def __init__(self, latent_size):
        super(ConvVAE, self).__init__()
        self.latent_size = latent_size
        # two stride-2 convs downsample, then a linear bottleneck to 1024
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),
            nn.ReLU(),
            Flatten(),
            nn.Linear(131072, 1024),
            nn.ReLU(),
        )
        # separate heads for the posterior mean and log-variance
        self.fc1 = nn.Linear(1024, self.latent_size)
        self.fc2 = nn.Linear(1024, self.latent_size)
        # mirror of the encoder, ending in a Sigmoid so outputs lie in [0, 1]
        self.decoder = nn.Sequential(
            nn.Linear(self.latent_size, 1024),
            nn.ReLU(),
            nn.Linear(1024, 131072),
            nn.ReLU(),
            Unflatten(128, 32, 32),
            nn.ReLU(),
            nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 3, kernel_size=4, stride=2, padding=1),
            nn.Sigmoid(),
        )

    def encode(self, x):
        """Return (mu, logvar) of q(z|x)."""
        hidden = self.encoder(x)
        return self.fc1(hidden), self.fc2(hidden)

    def decode(self, z):
        """Map a latent sample back to image space."""
        return self.decoder(z)

    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, sigma^2) during training; return mu at eval."""
        if not self.training:
            return mu
        std = (0.5 * logvar).exp()
        noise = torch.randn_like(std)
        return mu + noise * std

    def reparameterize_eval(self, mu, logvar):
        """Always-stochastic variant of reparameterize."""
        std = (0.5 * logvar).exp()
        noise = torch.randn_like(std)
        return mu + noise * std

    def forward(self, x):
        # grayscale inputs are tiled to 3 channels to match the encoder
        if x.shape[1] == 1:
            x = x.repeat(1, 3, 1, 1)
        mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar

    def loss_function(self, x: Tensor, y: Tensor, mu: Tensor, logvar: Tensor) -> dict:
        """Mean-reduced BCE reconstruction plus mean KL divergence."""
        if x.shape[1] == 1:
            x = x.repeat(1, 3, 1, 1)
        recon_loss = F.binary_cross_entropy(y, x, reduction='mean')
        kl_loss = torch.mean(-0.5 * (1 + logvar - mu ** 2 - logvar.exp()))
        total = recon_loss + 1 * kl_loss
        return {'loss': total, 'recon_loss': recon_loss, 'kl_loss': kl_loss}
def get_sorted_wordlist(path):
    """Return the vocabulary of a whitespace-tokenized UTF-8 corpus,
    sorted by descending frequency.

    sorted() is stable, so equal-frequency words keep first-seen order.
    """
    # defaultdict(int) instead of defaultdict(lambda: 0); same semantics
    freqs = defaultdict(int)
    with codecs.open(path, 'r', encoding='utf-8') as fin:
        for line in fin:
            for word in line.strip().split():
                freqs[word] += 1
    # the original built sorted_words and then copied it element-by-element;
    # the copy was redundant
    return sorted(freqs, key=freqs.get, reverse=True)
def make_layers(cfg, batch_norm=False):
    """Build a VGG-style feature extractor from a layer spec.

    ``cfg`` entries are 'M' (2x2 max-pool), an int (output channels,
    padding 1), or a (channels, padding) tuple.  When ``batch_norm`` is
    True a non-affine BatchNorm2d follows each conv.  Input is 3-channel.
    """
    layers = []
    in_channels = 3
    # (the original iterated with enumerate but never used the index)
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        if isinstance(v, tuple):
            out_channels, padding = v[0], v[1]
        else:
            out_channels, padding = v, 1
        conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
        if batch_norm:
            layers += [conv2d, nn.BatchNorm2d(out_channels, affine=False), nn.ReLU()]
        else:
            layers += [conv2d, nn.ReLU()]
        in_channels = out_channels
    return nn.Sequential(*layers)
class Conv2dIndepBeta(_DeepIndepBeta):
    """Pixel-wise independent Beta head: 1x1 convs map the backbone's hidden
    channels to the alpha and beta parameter maps."""

    def __init__(self, backbone: nn.Module, hidden_channels: int=1, out_channels: int=1):
        # alpha_head / beta_head are parallel 1x1 convolutions over the
        # backbone output; the base class wires them together
        super().__init__(backbone=backbone, alpha_head=nn.Conv2d(hidden_channels, out_channels=out_channels, kernel_size=1), beta_head=nn.Conv2d(hidden_channels, out_channels=out_channels, kernel_size=1))
def prune(model, amount=0.3):
    """One-shot L1 unstructured pruning of every Conv2d weight, baked in."""
    import torch.nn.utils.prune as prune
    print('Pruning model... ', end='')
    for _, module in model.named_modules():
        if not isinstance(module, nn.Conv2d):
            continue
        prune.l1_unstructured(module, name='weight', amount=amount)
        # make the pruning permanent (drop the mask reparametrization)
        prune.remove(module, 'weight')
    print((' %.3g global sparsity' % sparsity(model)))
class MelFilterBank():
    """Triangular mel filter bank for linear-spectrogram <-> mel conversion.

    Builds a (specSize x numCoefficients) forward matrix of triangular
    filters with unit column sums, plus its normalized transpose for
    approximate inversion.  Spectrograms are (frames, specSize) arrays.
    """

    def __init__(self, specSize, numCoefficients, sampleRate):
        numBands = int(numCoefficients)
        # equally spaced filter edges on the mel scale, from 0 to Nyquist
        minMel = 0
        maxMel = self.freqToMel(sampleRate / 2.0)
        melStep = (maxMel - minMel) / (numBands + 1)
        melFilterEdges = np.arange(0, numBands + 2) * melStep
        # convert each mel edge back to a spectrogram bin index
        centerIndices = list(map(lambda x: self.freqToBin(math.floor(self.melToFreq(x)), sampleRate, specSize), melFilterEdges))
        filterMatrix = np.zeros((numBands, specSize))
        for i in range(numBands):
            start, center, end = centerIndices[i:(i + 3)]
            # np.float was removed in NumPy 1.20+; the builtin float is the
            # exact equivalent (np.float was an alias of it)
            k1 = float(center - start)
            k2 = float(end - center)
            up = (np.array(range(start, center)) - start) / k1
            down = (end - np.array(range(center, end))) / k2
            filterMatrix[i][start:center] = up
            filterMatrix[i][center:end] = down
        self.melMatrix = filterMatrix.transpose()
        self.melMatrix = self.makeNormal(self.melMatrix / self.normSum(self.melMatrix))
        self.melInvMatrix = self.melMatrix.transpose()
        self.melInvMatrix = self.makeNormal(self.melInvMatrix / self.normSum(self.melInvMatrix))

    def normSum(self, x):
        """Column sums of ``x`` with zeros replaced by 1 (safe divisor)."""
        retSum = np.sum(x, axis=0)
        retSum[np.where(retSum == 0)] = 1.0
        return retSum

    def fuzz(self, x):
        """Tiny additive offset so a following log never sees zero."""
        return x + 1e-07

    def freqToBin(self, freq, sampleRate, specSize):
        """Map a frequency in Hz to a spectrogram bin index."""
        return int(math.floor((freq / (sampleRate / 2.0)) * specSize))

    def freqToMel(self, freq):
        """Hz -> mel (HTK formula)."""
        return 2595.0 * math.log10(1.0 + (freq / 700.0))

    def melToFreq(self, mel):
        """Mel -> Hz (inverse HTK formula)."""
        return 700.0 * (math.pow(10.0, mel / 2595.0) - 1.0)

    def toMelScale(self, spectrogram):
        """Project a (frames, specSize) spectrogram into mel space."""
        return np.dot(spectrogram, self.melMatrix)

    def fromMelScale(self, melSpectrogram):
        """Approximately reconstruct a linear spectrogram from mel space."""
        return np.dot(melSpectrogram, self.melInvMatrix)

    def makeNormal(self, x):
        """Replace NaN/Inf entries with 0 in place and return ``x``."""
        nanIdx = np.isnan(x)
        x[nanIdx] = 0
        infIdx = np.isinf(x)
        x[infIdx] = 0
        return x

    def toMels(self, spectrogram):
        """Alias of toMelScale."""
        return self.toMelScale(spectrogram)

    def fromMels(self, melSpectrogram):
        """Alias of fromMelScale."""
        return self.fromMelScale(melSpectrogram)

    def toLogMels(self, spectrogram):
        """Log-mel spectrogram (fuzzed before the log; NaN/Inf zeroed)."""
        return self.makeNormal(np.log(self.fuzz(self.toMelScale(spectrogram))))

    def fromLogMels(self, melSpectrogram):
        """Linear spectrogram back from log-mels (exp then inverse projection)."""
        return self.makeNormal(self.fromMelScale(np.exp(melSpectrogram)))
class Softmax(Module):
    """Module applying softmax along a fixed dimension."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, input):
        """Softmax of ``input`` along ``self.dim``."""
        return input.softmax(self.dim)

    @staticmethod
    def from_onnx(parameters=None, attributes=None):
        """Build a Softmax from ONNX attributes (uses the 'axis' attribute).

        NOTE(review): the original had no self/cls parameter, so the
        @staticmethod decorator was presumably stripped; restored here so
        instance-bound calls also work.
        """
        if attributes is None:
            attributes = {}
        return Softmax(attributes['axis'])
def tf2th(conv_weights):
    """Convert TF-layout conv weights to a torch tensor.

    4-D arrays are permuted HWIO -> OIHW; other ranks pass through untouched.
    """
    is_conv_kernel = conv_weights.ndim == 4
    if is_conv_kernel:
        conv_weights = conv_weights.transpose([3, 2, 0, 1])
    return torch.from_numpy(conv_weights)
def add_args(parser):
    """Register the GRAC/CAQL training CLI options on ``parser``.

    Covers run length, optimization hyperparameters, replay buffer,
    evaluation cadence, logging/saving, and GRAC-specific critic-update
    controls (K and alpha from the paper).
    """
    parser.add_argument('--num_steps', type=int, default=(10 ** 6), help='number of steps in training')
    # (fixed help-text typo: 'repaly' -> 'replay')
    parser.add_argument('--transitions_per_step', type=int, default=1, help='env transitions per training step. Defaults to 1, but will need to be set higher for replay ratios < 1')
    parser.add_argument('--max_episode_steps', type=int, default=100000, help='maximum steps per episode')
    parser.add_argument('--batch_size', type=int, default=512, help='training batch size')
    parser.add_argument('--actor_lr', type=float, default=0.0001, help='actor learning rate')
    parser.add_argument('--critic_lr', type=float, default=0.0001, help='critic learning rate')
    parser.add_argument('--gamma', type=float, default=0.99, help='gamma, the discount factor')
    parser.add_argument('--buffer_size', type=int, default=1000000, help='replay buffer size')
    parser.add_argument('--eval_interval', type=int, default=5000, help='how often to test the agent without exploration (in episodes)')
    parser.add_argument('--eval_episodes', type=int, default=10, help='how many episodes to run for when testing')
    parser.add_argument('--warmup_steps', type=int, default=1000, help='warmup length, in steps')
    parser.add_argument('--render', action='store_true', help='flag to enable env rendering during training')
    parser.add_argument('--actor_clip', type=float, default=None, help='gradient clipping for actor updates')
    parser.add_argument('--critic_clip', type=float, default=None, help='gradient clipping for critic updates')
    parser.add_argument('--name', type=str, default='tsr_caql_run', help='dir name for saves')
    parser.add_argument('--actor_l2', type=float, default=0.0, help='L2 regularization coeff for actor network')
    parser.add_argument('--critic_l2', type=float, default=0.0, help='L2 regularization coeff for critic network')
    parser.add_argument('--save_interval', type=int, default=100000, help='How many steps to go between saving the agent params to disk')
    parser.add_argument('--verbosity', type=int, default=1, help='verbosity > 0 displays a progress bar during training')
    parser.add_argument('--max_critic_updates_per_step', type=int, default=10, help='Max critic updates to make per step. The GRAC paper calls this K')
    parser.add_argument('--prioritized_replay', action='store_true', help='flag that enables use of prioritized experience replay')
    parser.add_argument('--skip_save_to_disk', action='store_true', help='flag to skip saving agent params to disk during training')
    parser.add_argument('--skip_log_to_disk', action='store_true', help='flag to skip saving agent performance logs to disk during training')
    parser.add_argument('--log_std_low', type=float, default=(- 10), help='Lower bound for log std of action distribution.')
    parser.add_argument('--log_std_high', type=float, default=2, help='Upper bound for log std of action distribution.')
    parser.add_argument('--critic_target_improvement_init', type=float, default=0.7, help='Stop critic updates when loss drops by this factor. The GRAC paper calls this alpha')
    parser.add_argument('--critic_target_improvement_final', type=float, default=0.9, help='Stop critic updates when loss drops by this factor. The GRAC paper calls this alpha')
    parser.add_argument('--debug_logs', action='store_true')
def read_reco2vol(volumes_file):
    """Parse a '<recording-id> <volume>' file into a {id: float} dict.

    Blank lines are skipped; any other line that does not split into
    exactly two fields raises RuntimeError.
    """
    volumes = {}
    with open(volumes_file) as volume_reader:
        # iterate the file object directly instead of slurping via readlines()
        for line in volume_reader:
            parts = line.strip().split()
            if not parts:
                continue
            if len(parts) != 2:
                raise RuntimeError('Unable to parse the line {0} in file {1}.'.format(line.strip(), volumes_file))
            volumes[parts[0]] = float(parts[1])
    return volumes
def gelu(x):
    """Exact (erf-based) GELU activation: x * Phi(x)."""
    x = tf.convert_to_tensor(x)
    # Phi(x), the standard normal CDF, via the error function
    normal_cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
    return x * normal_cdf
# NOTE(review): the call below reads like a stripped registry decorator,
# most likely ``@operator_registry(operator_type='LayerNorm')`` -- as written
# it is a bare call whose result is discarded; confirm against upstream.
_registry(operator_type='LayerNorm')
class LayerNorm(Operator):
    """LayerNorm operator wrapper that extracts attributes from a graph node."""
    def __init__(self):
        super().__init__()
    def set_attr(self, framework, node):
        """Populate epsilon / normalized_shape from a torch JIT node.

        Input index 4 is read as epsilon when present (presumably the eps
        argument of aten::layer_norm -- confirm); input 1 carries the
        normalized-shape list.
        """
        if (framework == 'torch'):
            if (node.inputsSize() > 4):
                self._attr['epsilon'] = node.inputsAt(4).toIValue()
            self._attr['normalized_shape'] = list2str(parseTorchListConstruct(node.inputsAt(1)))
def get_train_type(train_type, checkpoint):
    """Validate the training mode.

    'NORMAL' always passes; 'FPD' additionally requires ``checkpoint`` to
    name an existing teacher checkpoint.  Anything else terminates the
    process with an error message.
    """
    has_teacher_ckpt = bool(checkpoint and os.path.exists(checkpoint))
    if train_type == 'NORMAL':
        return train_type
    if train_type == 'FPD':
        if has_teacher_ckpt:
            return 'FPD'
        exit('ERROR: teacher checkpoint is not existed.')
    else:
        exit('ERROR: please change train type {} to NORMAL or FPD.'.format(train_type))
class Indexer():
    """Word <-> index vocabulary with reserved PAD/UNK/BOS/EOS symbols.

    ``self.d`` maps word -> index (specials occupy 0..3), ``self.vocab``
    accumulates raw corpus counts (filled externally), and ``idx2word`` is
    rebuilt by prune_vocab / load_vocab.
    """

    def __init__(self, symbols=None):
        """symbols: the four special tokens [PAD, UNK, BOS, EOS]."""
        # None default instead of a mutable list default argument
        if symbols is None:
            symbols = ['<pad>', '<unk>', '<s>', '</s>']
        self.vocab = defaultdict(int)
        self.PAD = symbols[0]
        self.UNK = symbols[1]
        self.BOS = symbols[2]
        self.EOS = symbols[3]
        self.d = {self.PAD: 0, self.UNK: 1, self.BOS: 2, self.EOS: 3}
        self.idx2word = {}

    def add_w(self, ws):
        """Add each unseen word in ``ws``, assigning the next free index."""
        for w in ws:
            if w not in self.d:
                self.d[w] = len(self.d)

    def convert(self, w):
        """Map a word to its index, falling back to the UNK index."""
        return self.d.get(w, self.d[self.UNK])

    def convert_sequence(self, ls):
        """Map a sequence of words to indices."""
        return [self.convert(l) for l in ls]

    def write(self, outfile):
        """Dump 'word index' pairs to ``outfile``, sorted by index."""
        # context manager guarantees the file is closed even on error
        with open(outfile, 'w') as out:
            items = sorted((v, k) for k, v in self.d.items())
            for v, k in items:
                out.write(' '.join([k, str(v)]) + '\n')

    def prune_vocab(self, k, cnt=False):
        """Keep words with count > k (cnt=True) or the top-k words (cnt=False),
        add them to the index, and rebuild idx2word."""
        vocab_list = list(self.vocab.items())
        if cnt:
            self.pruned_vocab = {word: count for word, count in vocab_list if count > k}
        else:
            vocab_list.sort(key=lambda pair: pair[1], reverse=True)
            k = min(k, len(vocab_list))
            self.pruned_vocab = dict(vocab_list[:k])
        for word in self.pruned_vocab:
            if word not in self.d:
                self.d[word] = len(self.d)
        for word, idx in self.d.items():
            self.idx2word[idx] = word

    def load_vocab(self, vocab_file):
        """Replace the index with one read from 'word index' lines."""
        self.d = {}
        with open(vocab_file, 'r') as fin:
            for line in fin:
                v, k = line.strip().split()
                self.d[v] = int(k)
        for word, idx in self.d.items():
            self.idx2word[idx] = word
# NOTE(review): the call below looks like a stripped registry decorator
# (e.g. ``@PIPELINES.register_module()``); as written it is a bare call --
# confirm against the upstream transform-registry code.
_module()
class Sharpness(object):
    """Randomly adjust image sharpness (augmentation pipeline transform).

    With probability ``prob``, sharpens every image listed in
    ``results['img_fields']`` (default ['strong']) by factor
    ``1 + magnitude``; the magnitude sign is randomly flipped with
    probability ``random_negative_prob`` (magnitude < 0 blurs).
    """
    def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5):
        assert isinstance(magnitude, (int, float)), f'The magnitude type must be int or float, but got {type(magnitude)} instead.'
        assert (0 <= prob <= 1.0), f'The prob should be in range [0,1], got {prob} instead.'
        assert (0 <= random_negative_prob <= 1.0), f'The random_negative_prob should be in range [0,1], got {random_negative_prob} instead.'
        self.magnitude = magnitude
        self.prob = prob
        self.random_negative_prob = random_negative_prob
    def __call__(self, results):
        """Apply the transform in place to ``results`` and return it."""
        if (np.random.rand() > self.prob):
            return results
        # possibly flip the sign of the magnitude (project helper)
        magnitude = random_negative(self.magnitude, self.random_negative_prob)
        for key in results.get('img_fields', ['strong']):
            img = results[key]
            img_sharpened = mmcv.adjust_sharpness(img, factor=(1 + magnitude))
            # adjust_sharpness may change dtype; restore the original
            results[key] = img_sharpened.astype(img.dtype)
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(magnitude={self.magnitude}, '
        repr_str += f'prob={self.prob}, '
        repr_str += f'random_negative_prob={self.random_negative_prob})'
        return repr_str
def parse_args():
    """CLI options for the periodicity-threshold figure script.

    Unknown arguments are tolerated (parse_known_args) and discarded.
    """
    parser = argparse.ArgumentParser(description='Create periodicity threshold figure')
    parser.add_argument('--names', required=True, nargs='+', help='Corresponding labels for each evaluation')
    parser.add_argument('--evaluations', type=Path, required=True, nargs='+', help='The evaluations to plot')
    parser.add_argument('--output_file', type=Path, required=True, help='The output jpg file')
    known_args, _unknown = parser.parse_known_args()
    return known_args
def get_messages_tokens(messages):
    """Total token count across every message's 'content' field."""
    return sum(count_tokens(message['content']) for message in messages)
def D_logistic(G, D, opt, training_set, minibatch_size, reals, labels):
    """Logistic (non-saturating GAN) discriminator loss, TF1 graph mode.

    Scores a fresh fake batch and the real batch with D and returns
    (loss, None); the second slot matches a (loss, regularizer) call
    convention, with no regularizer here.
    """
    _ = (opt, training_set)  # unused; kept to satisfy the loss-fn signature
    latents = tf.random_normal(([minibatch_size] + G.input_shapes[0][1:]))
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = D.get_output_for(reals, labels, is_training=True)
    fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
    # autosummary logs the raw scores and passes the values through
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    # softplus(f) + softplus(-r) = -log(1 - sigmoid(f)) - log(sigmoid(r))
    loss = tf.nn.softplus(fake_scores_out)
    loss += tf.nn.softplus((- real_scores_out))
    return (loss, None)
def run(api_key, api_url, index):
    """Index arXivDigest articles into Elasticsearch, then push recommendations.

    Creates the index on first use, ingests articles from the arXivDigest
    API, and submits recommendations back through the connector.
    """
    es = Elasticsearch(hosts=[ELASTICSEARCH_HOST])
    connector = ArxivdigestConnector(api_key, api_url)
    if not es.indices.exists(index=index):
        logger.info('Creating index')
        init_index(es, index)
    logger.info('Indexing articles from arXivDigest API.')
    run_indexing(es, index, connector)
    recommend(es, connector, index)
    logger.info('\nFinished recommending articles.')
class nnUNetTrainerV2_lReLU_biasInSegOutput(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant: LeakyReLU nonlinearity and a bias term on the
    segmentation output convolution (seg_output_use_bias=True)."""

    def initialize_network(self):
        """Build the Generic_UNet used by this trainer.

        Chooses 3D vs 2D conv/dropout/instance-norm ops from self.threeD,
        moves the network to GPU if available, and sets softmax as the
        inference nonlinearity.
        """
        if self.threeD:
            conv_op = nn.Conv3d
            dropout_op = nn.Dropout3d
            norm_op = nn.InstanceNorm3d
        else:
            conv_op = nn.Conv2d
            dropout_op = nn.Dropout2d
            norm_op = nn.InstanceNorm2d
        norm_op_kwargs = {'eps': 1e-05, 'affine': True}
        # p=0: dropout layers are present but effectively disabled.
        dropout_op_kwargs = {'p': 0, 'inplace': True}
        net_nonlin = nn.LeakyReLU
        net_nonlin_kwargs = {'negative_slope': 0.01, 'inplace': True}
        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, (lambda x: x), InitWeights_He(0), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True, seg_output_use_bias=True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
def iso_recal(exp_props, obs_props):
    """Fit an isotonic recalibration map obs_props -> exp_props.

    Restricts the fit to an index window [beg_idx, end_idx) chosen so the
    window spans the full observed range (global min/max of obs_props are
    included), then fits sklearn's IsotonicRegression with clipping outside
    the fitted range.

    Args:
        exp_props: tensor of expected proportions (flattened to 1-D).
        obs_props: tensor of observed proportions (flattened to 1-D);
            assumed aligned and ordered consistently with exp_props.

    Returns:
        A fitted IsotonicRegression model.
    """
    exp_props = exp_props.flatten()
    obs_props = obs_props.flatten()
    min_obs = torch.min(obs_props)
    max_obs = torch.max(obs_props)
    iso_model = IsotonicRegression(increasing=True, out_of_bounds='clip')
    # NOTE(review): bare except + assert used purely as a warning; ideally
    # `except AssertionError` with proper logging.
    try:
        assert (torch.min(obs_props) == 0.0)
        assert (torch.max(obs_props) == 1.0)
    except:
        print('Obs props not ideal: from {} to {}'.format(min_obs, max_obs))
    # Indices where expected proportions first reach 0 and 1.
    exp_0_idx = get_q_idx(exp_props, 0.0)
    exp_1_idx = get_q_idx(exp_props, 1.0)
    within_01 = obs_props[exp_0_idx:(exp_1_idx + 1)]
    (beg_idx, end_idx) = (None, None)
    # --- choose beg_idx so the window contains the global minimum ---
    # NOTE(review): torch.min on an empty slice raises if exp_0_idx == 0 —
    # confirm callers guarantee exp_0_idx > 0 and exp_1_idx < len - 1.
    min_obs_below = torch.min(obs_props[:exp_0_idx])
    min_obs_within = torch.min(within_01)
    if (min_obs_below < min_obs_within):
        # Global min lies before the window: walk left until we include it.
        i = (exp_0_idx - 1)
        while (obs_props[i] > min_obs_below):
            i -= 1
        beg_idx = i
    elif (torch.sum((within_01 == min_obs_within).float()) > 1):
        # Min occurs multiple times inside the window: scan down from the top
        # to keep the last occurrence.
        i = (exp_1_idx - 1)
        while (obs_props[i] > min_obs_within):
            i -= 1
        beg_idx = i
    elif (torch.sum((within_01 == min_obs_within).float()) == 1):
        beg_idx = (torch.argmin(within_01) + exp_0_idx)
    else:
        # NOTE(review): debugger left in source — unreachable-state trap.
        import pudb
        pudb.set_trace()
    # --- choose end_idx so the window contains the global maximum ---
    max_obs_above = torch.max(obs_props[(exp_1_idx + 1):])
    max_obs_within = torch.max(within_01)
    if (max_obs_above > max_obs_within):
        i = (exp_1_idx + 1)
        while (obs_props[i] < max_obs_above):
            i += 1
        end_idx = (i + 1)
    elif (torch.sum((within_01 == max_obs_within).float()) > 1):
        i = beg_idx
        while (obs_props[i] < max_obs_within):
            i += 1
        end_idx = (i + 1)
    elif (torch.sum((within_01 == max_obs_within).float()) == 1):
        end_idx = ((exp_0_idx + torch.argmax(within_01)) + 1)
    else:
        import pudb
        pudb.set_trace()
    assert (end_idx > beg_idx)
    filtered_obs_props = obs_props[beg_idx:end_idx]
    filtered_exp_props = exp_props[beg_idx:end_idx]
    # NOTE(review): bare except hiding fit failures behind a debugger; should
    # re-raise or at least catch a specific exception type.
    try:
        iso_model = iso_model.fit(filtered_obs_props, filtered_exp_props)
    except:
        import pudb
        pudb.set_trace()
    return iso_model
class AdapterResnetBlock(nn.Module):
    """Residual block: 3x3 conv -> ReLU -> 1x1 conv, added to the input.

    Channel count and spatial size are preserved, so the skip connection
    needs no projection.
    """

    def __init__(self, channels: int):
        super().__init__()
        self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.act = nn.ReLU()
        self.block2 = nn.Conv2d(channels, channels, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return block2(relu(block1(x))) + x."""
        residual = x
        out = self.block1(x)
        out = self.act(out)
        out = self.block2(out)
        return out + residual
class CrossEntropyLoss(nn.CrossEntropyLoss):
    """Cross-entropy with optional label smoothing.

    Stores `smooth_eps` / `smooth_dist` and forwards them (plus the usual
    weight / ignore_index / reduction settings) to the project-level
    `cross_entropy` helper. Without smoothing it behaves like the stock
    nn.CrossEntropyLoss.
    """

    def __init__(self, weight=None, ignore_index=(- 100), reduction='mean', smooth_eps=None, smooth_dist=None):
        super().__init__(weight=weight, ignore_index=ignore_index, reduction=reduction)
        self.smooth_eps = smooth_eps
        self.smooth_dist = smooth_dist

    def forward(self, input, target, smooth_dist=None):
        """Compute the (optionally smoothed) cross-entropy loss.

        A per-call `smooth_dist` overrides the one given at construction.
        """
        dist = self.smooth_dist if smooth_dist is None else smooth_dist
        return cross_entropy(input, target, self.weight, self.ignore_index, self.reduction, self.smooth_eps, dist)
def getModelFiles():
    """List the .tflite models in models_dir as {'name', 'mtime'} dicts
    (mtime truncated to whole seconds)."""
    entries = []
    for path in glob.glob(os.path.join(models_dir, '*.tflite')):
        entries.append({'name': os.path.basename(path), 'mtime': int(os.path.getmtime(path))})
    return entries
def test_poisson_time_generator():
    """Smoke test: a seeded PoissonTimeGenerator yields ten values."""
    rng = np.random.RandomState(seed=1)
    generator = PoissonTimeGenerator(lambda_time=2, random_generator=rng)
    for _ in range(10):
        print(generator.next())
def test_capsule(capture):
    """Capsules must emit creation/destruction messages once each, table-driven
    over the three capsule factory variants."""
    cases = [
        (m.return_capsule_with_destructor,
         '\n creating capsule\n destructing capsule\n '),
        (m.return_capsule_with_destructor_2,
         '\n creating capsule\n destructing capsule: 1234\n '),
        (m.return_capsule_with_name_and_destructor,
         "\n created capsule (1234, 'pointer type description')\n destructing capsule (1234, 'pointer type description')\n "),
    ]
    pytest.gc_collect()
    for factory, expected in cases:
        with capture:
            obj = factory()
            del obj
            pytest.gc_collect()
        assert capture.unordered == expected
# NOTE(review): this looks like the residue of a stripped decorator —
# presumably `@register_model('s2spect2_conformer')` in upstream fairseq;
# restore the decorator prefix.
_model('s2spect2_conformer')
class S2SpecT2ConformerModel(S2SpecTConformerModel):
    """Two-pass speech-to-spectrogram model with a Conformer encoder:
    a first-pass translation decoder (multitask) feeds a second-pass
    spectrogram synthesizer.

    NOTE(review): in upstream fairseq, add_args is a @staticmethod and the
    build_* methods are @classmethods — the decorators appear stripped in
    this copy; confirm before use.
    """

    def add_args(parser):
        """Register CLI arguments on top of the base Conformer model's."""
        S2SpecTConformerModel.add_args(parser)
        parser.add_argument('--translation-decoder-layers', type=int, default=4, metavar='N', help='num decoder layers in the first-pass translation module')
        parser.add_argument('--synthesizer', default='transformer', choices=['transformer'], help='')
        parser.add_argument('--synthesizer-encoder-layers', type=int, default=0, metavar='N', help='num encoder layers in the second-pass synthesizer module')

    def build_multitask_decoder(cls, args, tgt_dict, in_dim, is_mt_decoder, decoder_layers, decoder_embed_dim, decoder_attention_heads):
        """Build a per-task decoder: a TransformerDecoder (with the MT arch
        when this is the first-pass decoder) or a CTCDecoder."""
        decoder_args = args.decoder_args
        decoder_args.encoder_embed_dim = in_dim
        if (args.decoder_type == 'transformer'):
            if is_mt_decoder:
                multitask_text_transformer_decoder_arch(decoder_args, decoder_layers, decoder_embed_dim, decoder_attention_heads)
            else:
                base_multitask_text_transformer_decoder_arch(decoder_args)
            task_decoder = TransformerDecoder(decoder_args, tgt_dict, embed_tokens=TransformerModelBase.build_embedding(decoder_args, tgt_dict, decoder_args.decoder_embed_dim))
        elif (args.decoder_type == 'ctc'):
            task_decoder = CTCDecoder(dictionary=tgt_dict, in_dim=in_dim)
        else:
            raise NotImplementedError("currently only support multitask decoder_type 'transformer', 'ctc'")
        return task_decoder

    def build_decoder(cls, args):
        """Build the second-pass spectrogram synthesizer (TTS transformer)."""
        _args = copy.deepcopy(args)
        # The synthesizer attends over first-pass decoder states, so its
        # "encoder" dimension is the decoder embedding dimension.
        _args.encoder_embed_dim = args.decoder_embed_dim
        if (args.synthesizer == 'transformer'):
            return TTSTransformerDecoder(_args, None, padding_idx=1)
        else:
            raise NotImplementedError(args.synthesizer)

    def build_model(cls, args, task):
        """Assemble encoder, synthesizer decoder, one decoder per multitask
        task (at least one non-CTC first-pass decoder required), and an
        optional synthesizer text encoder."""
        encoder = cls.build_encoder(args)
        decoder = cls.build_decoder(args)
        base_model = cls(encoder, decoder)
        base_model.mt_task_name = None
        base_model.multitask_decoders = {}
        has_first_pass_decoder = False
        for (task_name, task_obj) in task.multitask_tasks.items():
            if task_obj.is_first_pass_decoder:
                has_first_pass_decoder = True
                base_model.mt_task_name = task_name
            in_dim = (args.encoder_embed_dim if (task_obj.args.input_from == 'encoder') else args.decoder_embed_dim)
            task_decoder = cls.build_multitask_decoder(task_obj.args, task_obj.target_dictionary, in_dim, task_obj.is_first_pass_decoder, getattr(args, 'translation_decoder_layers', 4), getattr(args, 'decoder_embed_dim', 256), getattr(args, 'decoder_attention_heads', 4))
            setattr(base_model, f'{task_name}_decoder', task_decoder)
            decoder_model_cls = (FairseqEncoderModel if (task_obj.args.decoder_type == 'ctc') else FairseqLanguageModel)
            base_model.multitask_decoders[task_name] = decoder_model_cls(getattr(base_model, f'{task_name}_decoder'))
        assert has_first_pass_decoder, 'set at least one intermediate non-CTC decoder'
        if (getattr(args, 'synthesizer_encoder_layers', 0) > 0):
            base_model.synthesizer_encoder = cls.build_text_encoder(args)
        else:
            base_model.synthesizer_encoder = None
        return base_model

    def build_text_encoder(cls, args):
        """Build the embedding-free transformer encoder that re-encodes
        first-pass decoder states for the synthesizer."""
        _args = copy.deepcopy(args)
        _args.encoder_layers = args.synthesizer_encoder_layers
        _args.encoder_embed_dim = args.decoder_embed_dim
        _args.encoder_ffn_embed_dim = args.decoder_ffn_embed_dim
        _args.encoder_attention_heads = args.decoder_attention_heads
        _args.encoder_normalize_before = True
        return TransformerEncoderNoEmb(_args)

    def forward(self, src_tokens, src_lengths, prev_output_tokens, prev_output_tokens_mt, tgt_speaker=None, incremental_state=None, target_lengths=None, speaker=None, return_all_hiddens=False):
        """Two-pass forward: speech encoder -> first-pass MT decoder ->
        (optional) synthesizer encoder -> spectrogram decoder.

        Returns the synthesizer decoder output; when return_all_hiddens is
        set, encoder states and the MT decoder output are attached to the
        extra dict of the result.
        """
        encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, tgt_speaker=tgt_speaker, return_all_hiddens=return_all_hiddens)
        mt_decoder = getattr(self, f'{self.mt_task_name}_decoder')
        mt_decoder_out = mt_decoder(prev_output_tokens_mt, encoder_out=encoder_out)
        # Use the last hidden layer of the MT decoder as synthesizer input.
        x = mt_decoder_out[1]['inner_states'][(- 1)]
        if (mt_decoder.layer_norm is not None):
            x = mt_decoder.layer_norm(x)
        mt_decoder_padding_mask = None
        if prev_output_tokens_mt.eq(mt_decoder.padding_idx).any():
            mt_decoder_padding_mask = prev_output_tokens_mt.eq(mt_decoder.padding_idx)
        if (self.synthesizer_encoder is not None):
            tts_encoder_out = self.synthesizer_encoder(x, mt_decoder_padding_mask, return_all_hiddens=return_all_hiddens)
        else:
            # No extra encoder: wrap the MT states in the encoder-out format.
            tts_encoder_out = {'encoder_out': [x], 'encoder_padding_mask': [mt_decoder_padding_mask]}
        decoder_out = self.decoder(prev_output_tokens, encoder_out=tts_encoder_out, incremental_state=incremental_state, target_lengths=target_lengths, speaker=speaker)
        if return_all_hiddens:
            decoder_out[(- 1)]['encoder_states'] = encoder_out['encoder_states']
            decoder_out[(- 1)]['encoder_padding_mask'] = encoder_out['encoder_padding_mask']
            decoder_out[(- 1)]['mt_decoder_out'] = mt_decoder_out
        return decoder_out
def resnet18(pretrained=False, filter_size=1, pool_only=True, **kwargs):
    """Construct a ResNet-18 with the project's anti-aliasing filter options.

    When `pretrained`, loads the torchvision resnet18 weights from model_urls.
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], filter_size=filter_size, pool_only=pool_only, **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet18'])
        net.load_state_dict(state)
    return net
def test_digits_corr_two_stage_init():
    """Seeding SaturatedCoverageSelection with the first five reference picks
    should reproduce the tail of the known ranking/gains on digits."""
    selector = SaturatedCoverageSelection(100, 'corr', optimizer='two-stage', initial_subset=digits_corr_ranking[:5])
    selector.fit(X_digits)
    assert_array_equal(selector.ranking[:-5], digits_corr_ranking[5:])
    assert_array_almost_equal(selector.gains[:-5], digits_corr_gains[5:], 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
# NOTE(review): residue of a stripped decorator — presumably
# `@pytest.mark.parametrize('as_frame', [True, False])`; as written this
# line is not valid standalone syntax and the prefix must be restored.
.parametrize('as_frame', [True, False])
def test_load_movielens100k(as_frame):
    """load_movielens100k must return (data, users, items) with shapes
    (100000, 4), (943, 5), (1682, 24) — as DataFrames when as_frame is True,
    otherwise as ndarrays."""
    (df_data, df_users, df_items) = load_movielens100k(as_frame=as_frame)
    if as_frame:
        assert ((df_data.shape, df_users.shape, df_items.shape, type(df_data), type(df_users), type(df_items)) == ((100000, 4), (943, 5), (1682, 24), pd.DataFrame, pd.DataFrame, pd.DataFrame))
    else:
        assert ((df_data.shape, df_users.shape, df_items.shape, type(df_data), type(df_users), type(df_items)) == ((100000, 4), (943, 5), (1682, 24), np.ndarray, np.ndarray, np.ndarray))
def objective(trial):
    """Optuna objective: R^2 of a RandomForestRegressor on the held-out split.

    Relies on module-level X_train / y_train / X_test / y_test.
    """
    # Suggest calls stay in the original order for study reproducibility.
    params = {
        'min_samples_leaf': trial.suggest_int('min_samples_leaf', 2, 20),
        'min_samples_split': trial.suggest_int('min_samples_split', 2, 20),
        'max_depth': trial.suggest_int('max_depth', 2, 32),
        'n_estimators': trial.suggest_int('n_estimators', 100, 1000, step=100),
    }
    model = RandomForestRegressor(**params).fit(X_train, y_train)
    return r2_score(y_test, model.predict(X_test))
class MyDataloader(data.Dataset):
    """RGB(-D) depth-estimation dataset.

    Loads (rgb, depth) pairs via `loader`, applies the split-specific
    transform, and optionally sparsifies the depth channel. Subclasses are
    expected to override train_transform / val_transform.
    """

    modality_names = ['rgb', 'rgbd', 'd']
    color_jitter = transforms.ColorJitter(0.4, 0.4, 0.4)

    def __init__(self, root, type, sparsifier=None, modality='rgb', loader=h5_loader):
        """
        Args:
            root: dataset directory containing one subfolder per class.
            type: 'train' or 'val' — selects the transform.
            sparsifier: optional dense->sparse depth sampler.
            modality: one of modality_names ('rgb', 'rgbd', 'd').
            loader: callable(path) -> (rgb, depth).
        """
        (classes, class_to_idx) = find_classes(root)
        imgs = make_dataset(root, class_to_idx)
        assert (len(imgs) > 0), (('Found 0 images in subfolders of: ' + root) + '\n')
        print('Found {} images in {} folder.'.format(len(imgs), type))
        self.root = root
        self.imgs = imgs
        self.classes = classes
        self.class_to_idx = class_to_idx
        if (type == 'train'):
            self.transform = self.train_transform
        elif (type == 'val'):
            self.transform = self.val_transform
        else:
            raise RuntimeError((('Invalid dataset type: ' + type) + '\nSupported dataset types are: train, val'))
        self.loader = loader
        self.sparsifier = sparsifier
        assert (modality in self.modality_names), (((('Invalid modality type: ' + modality) + '\n') + 'Supported dataset types are: ') + ''.join(self.modality_names))
        self.modality = modality

    def train_transform(self, rgb, depth):
        """Per-split augmentation hook; must be overridden by subclasses."""
        raise RuntimeError('train_transform() is not implemented. ')

    # BUGFIX: val_transform was declared without `self`, yet it is bound via
    # `self.transform = self.val_transform` and called as a bound method —
    # the implicit self would be passed as `rgb`. Signature now matches
    # train_transform.
    def val_transform(self, rgb, depth):
        """Validation transform hook; must be overridden by subclasses."""
        raise RuntimeError('val_transform() is not implemented.')

    def create_sparse_depth(self, rgb, depth):
        """Return depth with only the sparsifier-selected pixels kept
        (or the dense depth when no sparsifier is configured)."""
        if (self.sparsifier is None):
            return depth
        else:
            mask_keep = self.sparsifier.dense_to_sparse(rgb, depth)
            sparse_depth = np.zeros(depth.shape)
            sparse_depth[mask_keep] = depth[mask_keep]
            return sparse_depth

    def create_rgbd(self, rgb, depth):
        """Stack rgb with the (possibly sparsified) depth as a 4th channel."""
        sparse_depth = self.create_sparse_depth(rgb, depth)
        rgbd = np.append(rgb, np.expand_dims(sparse_depth, axis=2), axis=2)
        return rgbd

    def __getraw__(self, index):
        """Load the raw (rgb, depth) pair for `index` without transforms."""
        (path, target) = self.imgs[index]
        (rgb, depth) = self.loader(path)
        return (rgb, depth)

    def __getitem__(self, index):
        """Return (input_tensor, depth_tensor); input channels depend on
        self.modality (validated in __init__)."""
        (rgb, depth) = self.__getraw__(index)
        if (self.transform is not None):
            (rgb_np, depth_np) = self.transform(rgb, depth)
        else:
            raise RuntimeError('transform not defined')
        if (self.modality == 'rgb'):
            input_np = rgb_np
        elif (self.modality == 'rgbd'):
            input_np = self.create_rgbd(rgb_np, depth_np)
        elif (self.modality == 'd'):
            input_np = self.create_sparse_depth(rgb_np, depth_np)
        input_tensor = to_tensor(input_np)
        # Ensure CHW: promote 2-D (depth-only) inputs to a single channel.
        while (input_tensor.dim() < 3):
            input_tensor = input_tensor.unsqueeze(0)
        depth_tensor = to_tensor(depth_np)
        depth_tensor = depth_tensor.unsqueeze(0)
        return (input_tensor, depth_tensor)

    def __len__(self):
        return len(self.imgs)
class Config():
    """Hard-coded training configuration for the PIP landmark detector."""

    def __init__(self):
        # Detection head and backbone.
        self.det_head = 'pip'
        self.backbone = 'resnet18'
        self.pretrained = True
        self.net_stride = 32
        self.input_size = 256
        # Optimization schedule.
        self.batch_size = 16
        self.init_lr = 0.0001
        self.num_epochs = 60
        self.decay_steps = [30, 50]
        # Losses and their weights.
        self.criterion_cls = 'l2'
        self.criterion_reg = 'l1'
        self.cls_loss_weight = 10
        self.reg_loss_weight = 1
        # Landmark layout.
        self.num_lms = 68
        self.num_nb = 10
        # Checkpointing / hardware.
        self.save_interval = self.num_epochs
        self.use_gpu = True
        self.gpu_id = 2
def static_baseline(num, probas):
    """Tile log-probabilities into a (num, len(probas)) float32 tensor.

    A tiny epsilon (1e-19) is added before the log to avoid log(0).
    """
    eps = 1e-19
    log_row = [np.log(p + eps) for p in probas]
    tiled = [log_row for _ in range(num)]
    return torch.tensor(tiled).float()
def build_optim(model, optim_opt):
    """Create an nmt.Optim from the option namespace and attach the model's
    parameters to it."""
    optimizer = nmt.Optim(
        optim_opt.optim_method,
        optim_opt.learning_rate,
        optim_opt.max_grad_norm,
        optim_opt.learning_rate_decay,
        optim_opt.weight_decay,
        optim_opt.start_decay_at,
    )
    optimizer.set_parameters(model.parameters())
    return optimizer
def DPT_Hybrid(pretrained=True, **kwargs):
    """Build the MiDaS DPT-Hybrid monocular depth model (ViT-B/R50 backbone).

    When `pretrained`, downloads and loads the official checkpoint on CPU.
    """
    model = DPTDepthModel(path=None, backbone='vitb_rn50_384', non_negative=True)
    if pretrained:
        # BUGFIX: the URL literal was truncated to an unterminated string
        # ("checkpoint = '"), a syntax error. Restored to the official MiDaS
        # v3 DPT-Hybrid release asset — verify against the MiDaS repository.
        checkpoint = 'https://github.com/intel-isl/MiDaS/releases/download/v3/dpt_hybrid-midas-501f0c75.pt'
        state_dict = torch.hub.load_state_dict_from_url(checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True)
        model.load_state_dict(state_dict)
    return model
def identify_and_tag_authors(line, authors_kb):
    """Find author groups in a reference line and wrap them in CDS author tags.

    Applies the authors knowledge-base replacements, matches author patterns
    on the tag-stripped (and, if it matches better, unidecoded) text, then
    rewrites the line right-to-left inserting <cds.AUTHetal>/<cds.AUTHstnd>/
    <cds.AUTHincl> markers. Matches preceded by an et-al phrase, or weak
    "and"-continuations that fail the near-miss regex, are left untagged
    (dumped into misc).
    """
    (re_auth, re_auth_near_miss) = get_author_regexps()
    # Knowledge-base substitutions (literal pattern -> replacement).
    for (pattern, repl) in authors_kb:
        line = line.replace(pattern, repl)
    output_line = line
    # Match on tag-stripped text so offsets ignore markup.
    line = strip_tags(output_line)
    matched_authors = list(re_auth.finditer(line))
    # Retry on the unidecoded text; keep whichever finds more authors.
    unidecoded_line = strip_tags(unidecode(output_line))
    matched_authors_unidecode = list(re_auth.finditer(unidecoded_line))
    if (len(matched_authors_unidecode) > len(matched_authors)):
        output_line = unidecode(output_line)
        matched_authors = matched_authors_unidecode
    if matched_authors:
        matched_positions = []
        preceeding_text_string = line
        preceeding_text_start = 0
        for (auth_no, match) in enumerate(matched_authors):
            # Skip matches that overlap placeholder underscores.
            if (line[match.start():match.end()].find('_') == (- 1)):
                matched_positions.append({'start': match.start(), 'end': match.end(), 'etal': (match.group('et') or match.group('et2')), 'ed_start': match.group('es'), 'ed_end': match.group('ee'), 'multi_auth': match.group('multi_auth'), 'multi_surs': match.group('multi_surs'), 'text_before': preceeding_text_string[preceeding_text_start:match.start()], 'auth_no': auth_no, 'author_names': match.group('author_names')})
            preceeding_text_start = match.end()
        # Rewrite from the end so earlier offsets stay valid.
        matched_positions.reverse()
        for m in matched_positions:
            dump_in_misc = False
            start = m['start']
            end = m['end']
            lower_text_before = m['text_before'].strip().lower()
            # An et-al phrase right before the match means this "author" is
            # part of the previous group: don't tag it.
            for e in etal_matches:
                if lower_text_before.endswith(e):
                    dump_in_misc = True
                    break
            # Single-author match after " and": try the weaker regex to pull
            # in the preceding name, otherwise dump in misc.
            if ((not dump_in_misc) and (not (m['multi_auth'] or m['multi_surs'])) and lower_text_before.endswith(' and')):
                weaker_match = re_auth_near_miss.match(m['text_before'])
                if (weaker_match and (not (weaker_match.group('es') or weaker_match.group('ee')))):
                    start = (start - (len(m['text_before']) - weaker_match.start()))
                else:
                    dump_in_misc = True
            add_to_misc = ''
            # Preserve a trailing semicolon separator.
            if (len(output_line) > m['end']):
                if (output_line[m['end']].strip(' ,.') == ';'):
                    add_to_misc = ';'
            # NOTE(review): the 4th positional argument of re.sub is `count`,
            # not `flags` — re.IGNORECASE (== 2) here limits the substitution
            # to 2 occurrences instead of making it case-insensitive. Likely
            # intended: flags=re.IGNORECASE (same for the two re.sub calls
            # below).
            tmp_output_line = re.sub(re_ed_notation, '(ed.)', output_line[start:end], re.IGNORECASE)
            tmp_output_line = re.sub(re_etal, 'et al.', tmp_output_line, re.IGNORECASE)
            tmp_output_line = tmp_output_line.lstrip('.').strip(',:;- [](')
            if (not tmp_output_line.endswith('(ed.)')):
                tmp_output_line = tmp_output_line.strip(')')
            if (m['etal'] and (not (m['ed_start'] or m['ed_end'] or dump_in_misc))):
                output_line = (((((output_line[:start] + '<cds.AUTHetal>') + tmp_output_line) + CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_ETAL) + add_to_misc) + output_line[end:])
            elif (not (m['ed_start'] or m['ed_end'] or dump_in_misc)):
                output_line = (((((output_line[:start] + '<cds.AUTHstnd>') + tmp_output_line) + CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_STND) + add_to_misc) + output_line[end:])
            elif (m['ed_start'] or m['ed_end']):
                # Editor notation present: tag as an "included" author group.
                ed_notation = ' (eds.)'
                tmp_output_line = re.sub(re_etal, 'et al.', m['author_names'], re.IGNORECASE)
                output_line = ((((((output_line[:start] + '<cds.AUTHincl>') + tmp_output_line.strip(',:;- [](')) + ed_notation) + CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_INCL) + add_to_misc) + output_line[end:])
    return output_line
def build_feature_extractor(cfg):
    """Instantiate the backbone feature extractor named in cfg.MODEL.NAME.

    cfg.MODEL.NAME has the form '<model>_<backbone>'; only the backbone part
    is used here. Supported families: resnetv2*, resnet*, vgg*.
    """
    (_model_name, backbone_name) = cfg.MODEL.NAME.split('_')
    if backbone_name.startswith('resnetv2'):
        return resnet_feature_extractor_v2(backbone_name.replace('v2', ''), pretrained_weights=cfg.MODEL.WEIGHTS, aux=False, pretrained_backbone=True, eval_bn=cfg.MODEL.EVAL_BN)
    if backbone_name.startswith('resnet'):
        return resnet_feature_extractor(backbone_name, pretrained_weights=cfg.MODEL.WEIGHTS, aux=False, pretrained_backbone=True, freeze_bn=cfg.MODEL.FREEZE_BN)
    if backbone_name.startswith('vgg'):
        return vgg_feature_extractor(backbone_name, pretrained_weights=cfg.MODEL.WEIGHTS, aux=False, pretrained_backbone=True, freeze_bn=cfg.MODEL.FREEZE_BN)
    raise NotImplementedError
def GetPlanToJointStateService():
    """Return a proxy for the '/costar/PlanToJointState' ROS service
    (ServoToJointState service type)."""
    service_name = '/costar/PlanToJointState'
    return GetService(service_name, ServoToJointState)
class LengthGroupedSampler(Sampler):
    """Sampler that yields dataset indices grouped by sample length.

    Grouping keeps similarly-sized samples in the same batch (less padding);
    when group_by_modality is set, grouping additionally separates samples
    by modality.
    """

    def __init__(self, batch_size: int, world_size: int, lengths: Optional[List[int]]=None, generator=None, group_by_modality: bool=False):
        if (lengths is None):
            raise ValueError('Lengths must be provided.')
        self.batch_size = batch_size
        self.world_size = world_size
        self.lengths = lengths
        self.generator = generator
        self.group_by_modality = group_by_modality

    def __len__(self):
        return len(self.lengths)

    def __iter__(self):
        """Yield indices in length-grouped order."""
        grouper = (get_modality_length_grouped_indices if self.group_by_modality
                   else get_length_grouped_indices)
        indices = grouper(self.lengths, self.batch_size, self.world_size, generator=self.generator)
        return iter(indices)
def get_split_time(num_domain=2, mode='pre_process', data_file=None, station=None, dis_type='coral'):
    """Return time-range splits for temporal domain adaptation.

    mode 'pre_process': fixed hand-picked ranges (only num_domain=2 defined).
    mode 'tdc': delegate to the TDC splitter.
    Any other mode prints an error and returns None.
    """
    fixed_splits = {'2': [('2013-3-6 0:0', '2015-5-31 23:0'), ('2015-6-2 0:0', '2016-6-30 23:0')]}
    if mode == 'pre_process':
        return fixed_splits[str(num_domain)]
    if mode == 'tdc':
        return TDC(num_domain, data_file, station, dis_type=dis_type)
    print('error in mode')
# NOTE(review): residue of a stripped decorator — presumably
# `@pytest.fixture(scope='session')`; the prefix must be restored for this
# line to be valid syntax.
(scope='session')
def t2(dummy: ep.Tensor) -> ep.Tensor:
    """Session-scoped fixture: float32 tensor [7, 9, 11, 13, 15] created on
    the same backend as `dummy`."""
    return ep.arange(dummy, 7, 17, 2).float32()
def fake_output_machine(t, beam_size):
    """Fake a beam-search step's (values, indices) output for tests.

    For t >= 0: the top two slots score 0.5/0.3 with two distinct random
    indices from {1, 2, 3, 4}; the remaining beam_size - 2 slots score 0.001
    with index 0. Returns None for t < 0. Requires beam_size >= 2.
    """
    assert beam_size >= 2
    if t < 0:
        return None
    tail = beam_size - 2
    values = [0.5, 0.3] + [0.001] * tail
    indices = random.sample([1, 2, 3, 4], k=2) + [0] * tail
    return (values, indices)
def partition_data(datadir, partition, n_nets, alpha, logger):
    """Partition the Tiny-ImageNet training set (200 classes) across n_nets
    federated clients.

    partition:
        'homo'    - IID: a random permutation split into equal shards.
        'hetero'  - Dirichlet(alpha) label skew, retried until every client
                    has at least 200 samples.
        'n_cls'   - each client draws uniformly from `alpha` randomly chosen
                    classes; client sizes are lognormal (sigma=0 -> equal).
        'dir'     - per-client Dirichlet(alpha) class priors.
        'my_part' - clients share one of `alpha` Dirichlet(0.3) prior
                    "shards" (groups of clients with identical priors).

    Returns:
        (X_train, y_train, X_test, y_test, net_dataidx_map,
         traindata_cls_counts), where net_dataidx_map maps client id ->
        list/array of training indices.
    """
    logger.info('partition data')
    (X_train, y_train, X_test, y_test) = load_tiny_data(datadir)
    n_train = X_train.shape[0]
    if (partition == 'homo'):
        total_num = n_train
        idxs = np.random.permutation(total_num)
        batch_idxs = np.array_split(idxs, n_nets)
        net_dataidx_map = {i: batch_idxs[i] for i in range(n_nets)}
    elif (partition == 'hetero'):
        min_size = 0
        K = 200  # number of classes
        N = y_train.shape[0]
        logger.info(('N = ' + str(N)))
        net_dataidx_map = {}
        # Resample until the smallest client has >= 200 samples.
        while (min_size < 200):
            idx_batch = [[] for _ in range(n_nets)]
            for k in range(K):
                idx_k = np.where((y_train == k))[0]
                np.random.shuffle(idx_k)
                proportions = np.random.dirichlet(np.repeat(alpha, n_nets))
                # Zero out clients that already hold >= N/n_nets samples.
                proportions = np.array([(p * (len(idx_j) < (N / n_nets))) for (p, idx_j) in zip(proportions, idx_batch)])
                proportions = (proportions / proportions.sum())
                proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:(- 1)]
                idx_batch = [(idx_j + idx.tolist()) for (idx_j, idx) in zip(idx_batch, np.split(idx_k, proportions))]
                min_size = min([len(idx_j) for idx_j in idx_batch])
        for j in range(n_nets):
            np.random.shuffle(idx_batch[j])
            net_dataidx_map[j] = idx_batch[j]
    elif (partition == 'n_cls'):
        n_client = n_nets
        n_cls = 200
        n_data_per_clnt = (len(y_train) / n_client)
        # sigma=0 makes all client sizes (nearly) equal.
        clnt_data_list = np.random.lognormal(mean=np.log(n_data_per_clnt), sigma=0, size=n_client)
        clnt_data_list = ((clnt_data_list / np.sum(clnt_data_list)) * len(y_train)).astype(int)
        # Each client gets a uniform prior over `alpha` randomly chosen classes.
        cls_priors = np.zeros(shape=(n_client, n_cls))
        for i in range(n_client):
            cls_priors[i][random.sample(range(n_cls), int(alpha))] = (1.0 / alpha)
        prior_cumsum = np.cumsum(cls_priors, axis=1)
        idx_list = [np.where((y_train == i))[0] for i in range(n_cls)]
        cls_amount = [len(idx_list[i]) for i in range(n_cls)]
        net_dataidx_map = {}
        for j in range(n_client):
            net_dataidx_map[j] = []
        # Draw one sample at a time for a random client via inverse-CDF
        # sampling of its class prior.
        while (np.sum(clnt_data_list) != 0):
            curr_clnt = np.random.randint(n_client)
            if (clnt_data_list[curr_clnt] <= 0):
                continue
            clnt_data_list[curr_clnt] -= 1
            curr_prior = prior_cumsum[curr_clnt]
            while True:
                cls_label = np.argmax((np.random.uniform() <= curr_prior))
                if (cls_amount[cls_label] <= 0):
                    # Exhausted class: refill with a random budget (samples
                    # may then be assigned to more than one client).
                    cls_amount[cls_label] = np.random.randint(0, len(idx_list[cls_label]))
                    continue
                cls_amount[cls_label] -= 1
                net_dataidx_map[curr_clnt].append(idx_list[cls_label][cls_amount[cls_label]])
                break
    elif (partition == 'dir'):
        n_client = n_nets
        n_cls = 200
        n_data_per_clnt = (len(y_train) / n_client)
        clnt_data_list = np.random.lognormal(mean=np.log(n_data_per_clnt), sigma=0, size=n_client)
        clnt_data_list = ((clnt_data_list / np.sum(clnt_data_list)) * len(y_train)).astype(int)
        # Per-client Dirichlet(alpha) prior over all classes.
        cls_priors = np.random.dirichlet(alpha=([alpha] * n_cls), size=n_client)
        prior_cumsum = np.cumsum(cls_priors, axis=1)
        idx_list = [np.where((y_train == i))[0] for i in range(n_cls)]
        cls_amount = [len(idx_list[i]) for i in range(n_cls)]
        net_dataidx_map = {}
        for j in range(n_client):
            net_dataidx_map[j] = []
        while (np.sum(clnt_data_list) != 0):
            curr_clnt = np.random.randint(n_client)
            if (clnt_data_list[curr_clnt] <= 0):
                continue
            clnt_data_list[curr_clnt] -= 1
            curr_prior = prior_cumsum[curr_clnt]
            while True:
                cls_label = np.argmax((np.random.uniform() <= curr_prior))
                # NOTE(review): unlike 'n_cls'/'my_part', an exhausted class
                # is never refilled here — if all of a client's prior mass
                # sits on exhausted classes this inner loop can spin forever.
                if (cls_amount[cls_label] <= 0):
                    continue
                cls_amount[cls_label] -= 1
                net_dataidx_map[curr_clnt].append(idx_list[cls_label][cls_amount[cls_label]])
                break
    elif (partition == 'my_part'):
        n_shards = alpha  # number of distinct prior "shards" shared by clients
        n_client = n_nets
        n_cls = 200
        n_data_per_clnt = (len(y_train) / n_client)
        clnt_data_list = np.random.lognormal(mean=np.log(n_data_per_clnt), sigma=0, size=n_client)
        clnt_data_list = ((clnt_data_list / np.sum(clnt_data_list)) * len(y_train)).astype(int)
        cls_priors = np.zeros(shape=(n_client, n_cls))
        cls_priors_tmp = np.random.dirichlet(alpha=([0.3] * n_cls), size=int(n_shards))
        # Consecutive clients share the same shard prior.
        for i in range(n_client):
            cls_priors[i] = cls_priors_tmp[int((i / n_shards))]
        prior_cumsum = np.cumsum(cls_priors, axis=1)
        idx_list = [np.where((y_train == i))[0] for i in range(n_cls)]
        cls_amount = [len(idx_list[i]) for i in range(n_cls)]
        net_dataidx_map = {}
        for j in range(n_client):
            net_dataidx_map[j] = []
        while (np.sum(clnt_data_list) != 0):
            curr_clnt = np.random.randint(n_client)
            if (clnt_data_list[curr_clnt] <= 0):
                continue
            clnt_data_list[curr_clnt] -= 1
            curr_prior = prior_cumsum[curr_clnt]
            while True:
                cls_label = np.argmax((np.random.uniform() <= curr_prior))
                if (cls_amount[cls_label] <= 0):
                    cls_amount[cls_label] = np.random.randint(0, len(idx_list[cls_label]))
                    continue
                cls_amount[cls_label] -= 1
                net_dataidx_map[curr_clnt].append(idx_list[cls_label][cls_amount[cls_label]])
                break
    traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map)
    return (X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts)
# NOTE(review): residue of a stripped decorator — presumably a registry
# decorator such as `@HEADS.register_module()`; restore the prefix.
_module()
class BaseConvBboxHead(nn.Module):
    """Conv-based box head: shared convs followed by separate classification
    and regression conv branches, each ending in a 1x1(-equivalent) conv
    producing the raw outputs.

    Layout: feats -> shared convs -> (cls convs -> conv_cls,
                                      reg convs -> conv_reg).
    """

    def __init__(self, in_channels=0, shared_conv_channels=(), cls_conv_channels=(), num_cls_out_channels=0, reg_conv_channels=(), num_reg_out_channels=0, conv_cfg=dict(type='Conv1d'), norm_cfg=dict(type='BN1d'), act_cfg=dict(type='ReLU'), bias='auto', *args, **kwargs):
        """
        Args:
            in_channels: input feature channels (> 0 required).
            shared_conv_channels: channels of the shared conv stack.
            cls_conv_channels / num_cls_out_channels: classification branch.
            reg_conv_channels / num_reg_out_channels: regression branch.
            conv_cfg / norm_cfg / act_cfg / bias: ConvModule settings.
        """
        super(BaseConvBboxHead, self).__init__(*args, **kwargs)
        assert (in_channels > 0)
        assert (num_cls_out_channels > 0)
        assert (num_reg_out_channels > 0)
        self.in_channels = in_channels
        self.shared_conv_channels = shared_conv_channels
        self.cls_conv_channels = cls_conv_channels
        self.num_cls_out_channels = num_cls_out_channels
        self.reg_conv_channels = reg_conv_channels
        self.num_reg_out_channels = num_reg_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.bias = bias
        if (len(self.shared_conv_channels) > 0):
            self.shared_convs = self._add_conv_branch(self.in_channels, self.shared_conv_channels)
            out_channels = self.shared_conv_channels[(- 1)]
        else:
            out_channels = self.in_channels
        prev_channel = out_channels
        if (len(self.cls_conv_channels) > 0):
            self.cls_convs = self._add_conv_branch(prev_channel, self.cls_conv_channels)
            prev_channel = self.cls_conv_channels[(- 1)]
        self.conv_cls = build_conv_layer(conv_cfg, in_channels=prev_channel, out_channels=num_cls_out_channels, kernel_size=1)
        # Regression branch restarts from the shared-conv output channels.
        prev_channel = out_channels
        if (len(self.reg_conv_channels) > 0):
            self.reg_convs = self._add_conv_branch(prev_channel, self.reg_conv_channels)
            prev_channel = self.reg_conv_channels[(- 1)]
        self.conv_reg = build_conv_layer(conv_cfg, in_channels=prev_channel, out_channels=num_reg_out_channels, kernel_size=1)

    def _add_conv_branch(self, in_channels, conv_channels):
        """Build a Sequential of kernel-size-1 ConvModules with the given
        channel progression."""
        conv_spec = ([in_channels] + list(conv_channels))
        conv_layers = nn.Sequential()
        for i in range((len(conv_spec) - 1)):
            conv_layers.add_module(f'layer{i}', ConvModule(conv_spec[i], conv_spec[(i + 1)], kernel_size=1, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, bias=self.bias, inplace=True))
        return conv_layers

    def init_weights(self):
        # Intentionally empty: ConvModule/build_conv_layer handle their own init.
        pass

    def forward(self, feats):
        """Return (cls_score, bbox_pred) for the input features.

        NOTE(review): if shared_conv_channels is empty, `x` is never bound
        and the assignments below raise NameError — presumably `x = feats`
        was intended as the fallback; confirm this head is always configured
        with shared convs.
        """
        if (len(self.shared_conv_channels) > 0):
            x = self.shared_convs(feats)
        x_cls = x
        x_reg = x
        if (len(self.cls_conv_channels) > 0):
            x_cls = self.cls_convs(x_cls)
        cls_score = self.conv_cls(x_cls)
        if (len(self.reg_conv_channels) > 0):
            x_reg = self.reg_convs(x_reg)
        bbox_pred = self.conv_reg(x_reg)
        return (cls_score, bbox_pred)
class Scale(object):
    """Resize the 'image' and 'depth' of a sample dict.

    With an int size, the shorter side is scaled to `size` (aspect ratio
    preserved); with an (h, w) pair, both are resized exactly. Depth maps
    use nearest-neighbour interpolation so no interpolated (invalid) depth
    values are introduced.
    """

    def __init__(self, size):
        # size: int (target shorter side) or (h, w) sequence of length 2.
        self.size = size

    def __call__(self, sample):
        (image, depth) = (sample['image'], sample['depth'])
        image = self.changeScale(image, self.size)
        depth = self.changeScale(depth, self.size, Image.NEAREST)
        return {'image': image, 'depth': depth}

    def changeScale(self, img, size, interpolation=Image.BILINEAR):
        """Resize a PIL image as described on the class.

        Raises:
            TypeError: if img is not a PIL Image or size is neither an int
                nor a length-2 iterable.
        """
        if (not _is_pil_image(img)):
            raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
        # BUGFIX: `collections.Iterable` (a deprecated alias) was removed in
        # Python 3.10; the ABC lives in collections.abc.
        if (not (isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and (len(size) == 2)))):
            raise TypeError('Got inappropriate size arg: {}'.format(size))
        if isinstance(size, int):
            (w, h) = img.size
            # Shorter side already matches: no work to do.
            if (((w <= h) and (w == size)) or ((h <= w) and (h == size))):
                return img
            if (w < h):
                ow = size
                oh = int(((size * h) / w))
                return img.resize((ow, oh), interpolation)
            else:
                oh = size
                ow = int(((size * w) / h))
                return img.resize((ow, oh), interpolation)
        else:
            # (h, w) given; PIL expects (w, h), hence the reversal.
            return img.resize(size[::(- 1)], interpolation)
class HierarchicalDecoder(nn.Module):
    """Convolutional decoder with hierarchical latent inputs and per-stage
    context gating.

    `forward` consumes a list of latent tensors back-to-front (x.pop()): the
    deepest one enters through `fc`, the others are concatenated into the
    stages listed in `hierarchical_layers`. At each stage the feature map is
    gated channel-wise by a sigmoid attention over the context vector `ctx`.
    """

    def __init__(self, num_convolutions=3, filters=(256, 128, 64, 32, 16), latent_dim=100, output_size=(1, 128, 128), upconv=False, use_weight_norm=False, use_spectral_norm=False, hierarchical_layers=(1, 3, 5), context_dim=4, div_factor=8):
        """
        Args:
            num_convolutions: convs per resolution stage (last is the upsample).
            filters: channel count per stage, deepest first.
            latent_dim: size of the deepest latent vector (fed through fc).
            output_size: (channels, H, W) of the decoded output.
            upconv: nearest-upsample + conv instead of transposed conv.
            use_weight_norm / use_spectral_norm: mutually exclusive conv norms.
            hierarchical_layers: stage indices (from the output side) that
                receive an extra latent input; len(filters) means the fc input.
            context_dim: dimensionality of the conditioning vector.
            div_factor: latent channels at stage i are cur_channels/div_factor.
        """
        super().__init__()
        self.num_convolutions = num_convolutions
        self.filters = filters
        self.upconv = upconv
        if (use_weight_norm and use_spectral_norm):
            raise ValueError('Cannot use both weight norm and spectral norm.')
        self.use_weight_norm = use_weight_norm
        self.use_spectral_norm = use_spectral_norm
        self.hierarchical_layers = hierarchical_layers
        # Stages fed by concatenation; the deepest latent goes through fc instead.
        hierarchical_layers_ = [h for h in hierarchical_layers if (h != len(filters))]
        self.context_dim = context_dim
        self.div_factor = div_factor
        self.resolution_layers = nn.ModuleList([])
        self.up_layers = nn.ModuleList([])
        self.intermediate_shapes = []
        self.context_attention = nn.ModuleList([])
        cur_channels = filters[0]
        self.start_context_attention = self._attn(cur_channels)
        self.start_up_layer = nn.Sequential(*self._upsample_layer(cur_channels, cur_channels))
        if (len(filters) in hierarchical_layers):
            self.intermediate_shapes.append((np.array(output_size) // (2 ** len(filters))))
            self.intermediate_shapes[(- 1)][0] = cur_channels
        for (i, c) in enumerate(filters[1:], 1):
            resolution_layer = []
            i = (len(filters) - i)  # stage index counted from the output side
            input_layer = (i in hierarchical_layers_)
            in_channels = max((cur_channels // div_factor), 1)
            for j in range(0, (num_convolutions - 1)):
                # First conv of an input stage takes the concatenated latent.
                ci = ((in_channels + cur_channels) if ((j == 0) and input_layer) else cur_channels)
                resolution_layer += self._conv_layer(ci, cur_channels)
            self.resolution_layers.append(nn.Sequential(*resolution_layer))
            self.context_attention.append(self._attn(cur_channels))
            self.up_layers.append(nn.Sequential(*self._upsample_layer(cur_channels, c)))
            if input_layer:
                self.intermediate_shapes.append((np.array(output_size) // (2 ** i)))
                self.intermediate_shapes[(- 1)][0] = in_channels
            cur_channels = c
        final_layer = self._conv_layer(cur_channels, cur_channels)
        final_layer.append(self._conv(cur_channels, output_size[0], 1, 1, bias=True))
        self.final_layer = nn.Sequential(*final_layer)
        self.fc = nn.Sequential(nn.Linear(latent_dim, np.prod(self.intermediate_shapes[0]), bias=False), nn.BatchNorm1d(np.prod(self.intermediate_shapes[0])), nn.LeakyReLU(0.1, inplace=True))

    # BUGFIX: _conv and _conv_transpose were plain zero-argument methods
    # returning a partial, yet every call site invokes them WITH conv
    # arguments (e.g. self._conv(ci, co, 3, 1, 1, bias=False)), which raised
    # TypeError. Exposing them as properties returns the partially-applied
    # constructor the call sites expect.
    @property
    def _conv(self):
        """Conv2d constructor pre-bound with this decoder's norm options."""
        return partial(Conv2d, use_weight_norm=self.use_weight_norm, use_spectral_norm=self.use_spectral_norm)

    @property
    def _conv_transpose(self):
        """ConvTranspose2d constructor pre-bound with the norm options."""
        return partial(ConvTranspose2d, use_weight_norm=self.use_weight_norm, use_spectral_norm=self.use_spectral_norm)

    def _conv_layer(self, ci, co):
        """3x3 conv -> BatchNorm -> LeakyReLU block as a flat list."""
        return [self._conv(ci, co, 3, 1, 1, bias=False), nn.BatchNorm2d(co, momentum=0.05), nn.LeakyReLU(0.1, inplace=True)]

    def _upsample_layer(self, ci, co):
        """2x upsampling block: nearest+conv when self.upconv, else
        transposed conv; both followed by BatchNorm and LeakyReLU."""
        if self.upconv:
            layer = [nn.Upsample(scale_factor=2, mode='nearest'), self._conv(ci, co, kernel_size=5, stride=1, padding=2, bias=False)]
        else:
            layer = [self._conv_transpose(ci, co, kernel_size=4, stride=2, padding=1, bias=False)]
        layer += [nn.BatchNorm2d(co, momentum=0.05), nn.LeakyReLU(0.1, inplace=True)]
        return layer

    def _attn(self, co):
        """Sigmoid MLP mapping the context vector to per-channel gates."""
        hidden_dim = max((co // 4), self.context_dim)
        return nn.Sequential(nn.Linear(self.context_dim, hidden_dim), nn.LeakyReLU(0.1, inplace=True), nn.Linear(hidden_dim, co), nn.Sigmoid())

    def forward(self, x, ctx):
        """Decode the latent list `x` conditioned on `ctx`.

        Args:
            x: list of latent tensors, consumed back-to-front via x.pop()
               (the list is mutated).
            ctx: (batch, context_dim) conditioning vector.
        Returns:
            Tensor of shape (batch, *output_size).
        """
        assert (x[0].size(0) == ctx.size(0))
        batch_size = ctx.size(0)
        layers = zip(self.resolution_layers, self.up_layers, self.context_attention)
        ctx_attn = self.start_context_attention(ctx).view(batch_size, (- 1), 1, 1)
        y = self.fc(x.pop()).view((- 1), *self.intermediate_shapes[0])
        y = (self.start_up_layer(y) * ctx_attn)
        for (i, (conv, up, attn)) in enumerate(layers, 1):
            i = (len(self.filters) - i)
            output_layer = (i in self.hierarchical_layers)
            ctx_attn = attn(ctx).view(batch_size, (- 1), 1, 1)
            if output_layer:
                # Inject the next latent by channel concatenation.
                y = torch.cat([y, x.pop()], 1)
            y = (conv(y) * ctx_attn)
            y = up(y)
        y = self.final_layer(y)
        return y
def fit_to_block_size(sequence, block_size, pad_token_id):
    """Force `sequence` to exactly `block_size` tokens.

    Over-long sequences are truncated (a new list is returned); short or
    exact-length ones are padded with `pad_token_id` *in place* and the same
    list object is returned.
    """
    shortfall = block_size - len(sequence)
    if shortfall < 0:
        return sequence[:block_size]
    sequence.extend([pad_token_id] * shortfall)
    return sequence
class SparseMaxPool2d(SparseMaxPool):
    """Two-dimensional specialization of SparseMaxPool (spatial rank fixed to 2)."""

    def __init__(self, kernel_size, stride=1, padding=0, dilation=1):
        # First positional argument of the base class is the dimensionality.
        super().__init__(2, kernel_size, stride, padding, dilation)
class GCKNet(nn.Module):
    """Graph convolutional kernel network classifier.

    Wraps a GCKNetFeature encoder and a linear read-out (with optional
    weight decay), optionally separated by a BatchNorm1d layer.
    """

    def __init__(self, nclass, input_size, hidden_sizes, path_sizes, kernel_funcs=None, kernel_args_list=None, pooling='mean', global_pooling='sum', heads=1, out_size=3, max_iter=100, eps=0.1, aggregation=False, weight_decay=0.0, batch_norm=False, **kwargs):
        super().__init__()
        # Kernel feature encoder; it determines the representation width below.
        self.features = GCKNetFeature(input_size, hidden_sizes, path_sizes, kernel_funcs, kernel_args_list, pooling, global_pooling, heads, out_size, max_iter, eps, aggregation, **kwargs)
        self.output_size = self.features.output_size
        self.nclass = nclass
        self.batch_norm = batch_norm
        if batch_norm:
            self.bn_layer = nn.BatchNorm1d(self.output_size)
        self.classifier = Linear(self.output_size, nclass, weight_decay)

    def reset_parameters(self):
        # Delegate to every registered child module (features, bn, classifier).
        for child in self.children():
            child.reset_parameters()

    def representation(self, input, paths_indices, other_info):
        """Return the encoder features for a batch, without the classifier head."""
        return self.features(input, paths_indices, other_info)

    def forward(self, input, paths_indices, other_info):
        out = self.representation(input, paths_indices, other_info)
        if self.batch_norm:
            out = self.bn_layer(out)
        return self.classifier(out)

    def unsup_train(self, data_loader, n_sampling_paths=100000, init=None, use_cuda=False):
        """Unsupervised initialization of the kernel features (delegates to the encoder)."""
        self.features.unsup_train(data_loader=data_loader, n_sampling_paths=n_sampling_paths, init=init, use_cuda=use_cuda)

    def unsup_train_classifier(self, data_loader, criterion, use_cuda=False):
        """Fit the linear head directly on pre-encoded features."""
        encoded, targets = self.features.predict(data_loader, use_cuda)
        print(encoded.shape)
        self.classifier.fit(encoded, targets, criterion)
class Epanechnikov(torch.nn.Module):
    """Epanechnikov kernel K(u) = 0.75 * (1 - u**2) on [-1, 1], zero elsewhere."""

    def __init__(self):
        super().__init__()
        # Support interval of the kernel, kept as an inspectable attribute.
        self.support = (-1.0, 1.0)

    def forward(self, u):
        # Indicator of the support, applied multiplicatively.
        inside = (u >= -1.0) * (u <= 1.0)
        return 0.75 * inside * (1.0 - (u ** 2))
def get_mask_paths(metadata):
    """Read the localization CSV into per-image mask and ignore path tables.

    Each line has the form `image_id,mask_path,ignore_path`. The first record
    for an image id stores both its mask and ignore paths; subsequent records
    may only append extra masks and must carry an empty ignore path.

    Returns a pair `(mask_paths, ignore_paths)` mapping image_id -> [mask, ...]
    and image_id -> ignore_path respectively.
    """
    mask_paths = {}
    ignore_paths = {}
    with open(metadata.localization) as f:
        for raw_line in f:
            image_id, mask_path, ignore_path = raw_line.strip('\n').split(',')
            if image_id not in mask_paths:
                mask_paths[image_id] = [mask_path]
                ignore_paths[image_id] = ignore_path
            else:
                mask_paths[image_id].append(mask_path)
                # Only the first record per image may define an ignore path.
                assert (len(ignore_path) == 0)
    return (mask_paths, ignore_paths)
def new_func(*args, **kwds):
    """Wrap `config_tree_get_ori` so a missing key falls back to the caller's default.

    The default may be supplied either as the keyword argument ``default`` or
    as the third positional argument; when neither is given, ``None`` is used.
    Returns the wrapped getter's result, or the default on ConfigMissingException.
    """
    # Resolve the default explicitly instead of `kw or positional`: the old
    # truthiness-based fallback silently discarded falsy defaults such as
    # 0, '' or False.
    if 'default' in kwds:
        default = kwds['default']
    elif len(args) > 2:
        default = args[2]
    else:
        default = None
    try:
        return config_tree_get_ori(*args, **kwds)
    except ConfigMissingException:
        # args[1] is the looked-up key in the wrapped getter's signature.
        logger.info(f'key {args[1]}, def {default}')
        return default
def parse_args():
    """Build and parse CLI arguments for GLUE-style text-classification finetuning.

    Validates that a task name or data file is supplied, that data files are
    csv/json, and that --push_to_hub comes with an --output_dir.
    Returns the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description='Finetune a transformers model on a text classification task')
    parser.add_argument('--task_name', type=str, default=None, help='The name of the glue task to train on.', choices=list(task_to_keys.keys()))
    parser.add_argument('--train_file', type=str, default=None, help='A csv or a json file containing the training data.')
    parser.add_argument('--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
    parser.add_argument('--max_length', type=int, default=128, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_length` is passed.')
    parser.add_argument('--pad_to_max_length', action='store_true', help='If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.')
    parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=True)
    parser.add_argument('--use_slow_tokenizer', action='store_true', help='If passed, will use a slow tokenizer (not backed by the Tokenizers library).')
    parser.add_argument('--per_device_train_batch_size', type=int, default=8, help='Batch size (per device) for the training dataloader.')
    parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.')
    parser.add_argument('--learning_rate', type=float, default=5e-05, help='Initial learning rate (after the potential warmup period) to use.')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.')
    parser.add_argument('--num_train_epochs', type=int, default=3, help='Total number of training epochs to perform.')
    parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'])
    parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.')
    parser.add_argument('--output_dir', type=str, default=None, help='Where to store the final model.')
    parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
    parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
    parser.add_argument('--hub_model_id', type=str, help='The name of the repository to keep in sync with the local `output_dir`.')
    parser.add_argument('--hub_token', type=str, help='The token to use to push to the Model Hub.')
    parser.add_argument('--checkpointing_steps', type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.")
    parser.add_argument('--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.')
    parser.add_argument('--with_tracking', action='store_true', help='Whether to enable experiment trackers for logging.')
    parser.add_argument('--report_to', type=str, default='all', help='The integration to report the results and logs to. Supported platforms are `"tensorboard"`, `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.Only applicable when `--with_tracking` is passed.')
    parser.add_argument('--ignore_mismatched_sizes', action='store_true', help='Whether or not to enable to load a pretrained model whose head dimensions are different.')
    args = parser.parse_args()
    # At least one data source is mandatory.
    if args.task_name is None and args.train_file is None and args.validation_file is None:
        raise ValueError('Need either a task name or a training/validation file.')
    # Any supplied data file must be csv or json (checked by extension).
    for arg_name, data_file in (('train_file', args.train_file), ('validation_file', args.validation_file)):
        if data_file is not None:
            extension = data_file.split('.')[-1]
            assert extension in ['csv', 'json'], f'`{arg_name}` should be a csv or a json file.'
    if args.push_to_hub:
        assert args.output_dir is not None, 'Need an `output_dir` to create a repo when `--push_to_hub` is passed.'
    return args
def _find_stack(stacks, item):
for (stack_id, stack) in enumerate(stacks):
if (stack[0] == item):
return (stack, stack_id)
return (None, None) |
class FlaxElectraForMaskedLM(metaclass=DummyObject):
    """Import-time placeholder for the flax-backed ElectraForMaskedLM model.

    NOTE(review): `DummyObject` and `requires_backends` are defined elsewhere;
    presumably any instantiation raises an informative error telling the user
    to install the `flax` backend — confirm against the dummy-object utilities.
    """
    # Backend(s) this dummy stands in for.
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def loadmat(file):
    """Load an HDF5-based .mat file (MATLAB v7.3) into a dict of numpy arrays.

    Fix: the h5py handle was previously opened and never closed (resource
    leak); a context manager now guarantees closure. Every dataset is fully
    materialized via np.array, so closing the file before returning is safe.
    """
    with h5py.File(file, 'r') as f:
        return {k: np.array(v) for (k, v) in f.items()}
@_model_architecture('transformer_lm', 'transformer_lm_gpt3_2_7')
def transformer_lm_gpt3_2_7(args):
    """GPT-3 2.7B decoder config: 32 layers, 2560-dim embeddings, 32 heads.

    Each field is only filled in when the caller has not already set it
    (getattr with a default), then the shared GPT-3 base config is applied.
    """
    # NOTE(review): the original invoked _model_architecture(...) as a bare
    # statement above the def, discarding the returned decorator so the
    # architecture was never registered; restored as a decorator (fairseq-style
    # register_model_architecture usage) — confirm against the registry helper.
    args.decoder_layers = getattr(args, 'decoder_layers', 32)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 2560)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 32)
    base_gpt3_architecture(args)
# NOTE(review): the following three lines are scraped web-page residue
# (dataset-site chrome), not code; preserved as comments so the file parses.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.