code stringlengths 17 6.64M |
|---|
def run_model(method: str, classes: List[str], backbone: str):
    """Train and evaluate an anomaly-detection method on each dataset class.

    Args:
        method: one of 'spade', 'padim', 'patchcore' (already lower-cased).
        classes: non-empty list of MVTec class names to run on.
        backbone: TIMM-compatible backbone name passed to the model.

    Returns:
        dict with per-class [image_rocauc, pixel_rocauc] scores, their
        averages, and the parameters of the last fitted model.

    Raises:
        ValueError: if `method` is not a recognized method name.
    """
    results = {}  # class name -> [image_rocauc, pixel_rocauc]
    for cls in classes:
        # Re-instantiate the model per class so no fitted state leaks between runs.
        if method == 'spade':
            model = SPADE(k=50, backbone_name=backbone)
        elif method == 'padim':
            model = PaDiM(d_reduced=350, backbone_name=backbone)
        elif method == 'patchcore':
            model = PatchCore(f_coreset=0.1, backbone_name=backbone)
        else:
            # BUG FIX: an unknown method previously fell through and crashed
            # later with UnboundLocalError on `model`; fail fast instead.
            raise ValueError(f"Unknown method '{method}', choose from spade, padim, patchcore.")
        print(f'''
 โโ Running {method} on {cls} dataset.''')
        print(f''' โฐ{('โ' * ((len(method) + len(cls)) + 23))}
''')
        (train_ds, test_ds) = MVTecDataset(cls).get_dataloaders()
        print(' Training ...')
        model.fit(train_ds)
        print(' Testing ...')
        (image_rocauc, pixel_rocauc) = model.evaluate(test_ds)
        print(f'''
 โญ{('โ' * (len(cls) + 15))}โฌ{('โ' * 20)}โฌ{('โ' * 20)}โฎ''')
        print(f' โ Test results {cls} โ image_rocauc: {image_rocauc:.2f} โ pixel_rocauc: {pixel_rocauc:.2f} โ')
        print(f" โฐ{('โ' * (len(cls) + 15))}โด{('โ' * 20)}โด{('โ' * 20)}โฏ")
        results[cls] = [float(image_rocauc), float(pixel_rocauc)]
    # BUG FIX: the pixel average previously reused (and overwrote) the
    # `image_results` variable; use two clearly named lists instead.
    image_scores = [v[0] for v in results.values()]
    average_image_roc_auc = sum(image_scores) / len(image_scores)
    pixel_scores = [v[1] for v in results.values()]
    average_pixel_roc_auc = sum(pixel_scores) / len(pixel_scores)
    total_results = {'per_class_results': results, 'average image rocauc': average_image_roc_auc, 'average pixel rocauc': average_pixel_roc_auc, 'model parameters': model.get_parameters()}
    return total_results
|
@click.command()
@click.argument('method')
@click.option('--dataset', default='all', help='Dataset name, defaults to all datasets.')
@click.option('--backbone', default='wide_resnet50_2', help='The TIMM compatible backbone.')
def cli_interface(method: str, dataset: str, backbone: str):
    """CLI entry point: run the chosen method on one or all MVTec datasets."""
    method = method.lower()
    assert (method in ALLOWED_METHODS), f'Select from {ALLOWED_METHODS}.'
    # 'all' expands to every known class; otherwise run a single class.
    classes = ALL_CLASSES if dataset == 'all' else [dataset]
    total_results = run_model(method, classes, backbone)
    print_and_export_results(total_results, method)
|
def get_tqdm_params():
    """Return the module-level tqdm settings so callers share one progress-bar style."""
    return TQDM_PARAMS
|
class GaussianBlur():
    """Gaussian-blur a single-image tensor batch via a PIL round-trip.

    The first image of the batch is rescaled to [0, 1], converted to PIL,
    blurred, converted back, and restored to its original value scale.
    """

    def __init__(self, radius: int = 4):
        # Blur radius in pixels for the PIL Gaussian kernel.
        self.radius = radius
        self.unload = transforms.ToPILImage()
        self.load = transforms.ToTensor()
        # BUG FIX: the kernel previously hard-coded radius=4, silently
        # ignoring the `radius` argument.
        self.blur_kernel = ImageFilter.GaussianBlur(radius=radius)

    def __call__(self, img):
        # Normalize to [0, 1] for the PIL round-trip, then restore the scale.
        map_max = img.max()
        blurred = self.unload(img[0] / map_max).filter(self.blur_kernel)
        final_map = self.load(blurred) * map_max
        return final_map
|
def get_coreset_idx_randomp(z_lib: tensor, n: int=1000, eps: float=0.9, float16: bool=True, force_cpu: bool=False) -> tensor:
    """Returns n coreset idx for given z_lib.

    Performance on AMD3700, 32GB RAM, RTX3080 (10GB):
    CPU: 40-60 it/s, GPU: 500+ it/s (float32), 1500+ it/s (float16)

    Args:
        z_lib: (n, d) tensor of patches.
        n: Number of patches to select.
        eps: Agression of the sparse random projection.
        float16: Cast all to float16, saves memory and is a bit faster (on GPU).
        force_cpu: Force cpu, useful in case of GPU OOM.

    Returns:
        coreset indices
    """
    print(f' Fitting random projections. Start dim = {z_lib.shape}.')
    try:
        # Sparse random projection (Johnson-Lindenstrauss) reduces the patch
        # dimensionality to speed up the distance computations below.
        transformer = random_projection.SparseRandomProjection(eps=eps)
        z_lib = torch.tensor(transformer.fit_transform(z_lib))
        print(f' DONE. Transformed dim = {z_lib.shape}.')
    except ValueError:
        # Projection failed; selection continues on the unprojected library.
        print(' Error: could not project vectors. Please increase `eps`.')
    # Greedy k-center (farthest-point) selection, seeded with index 0.
    select_idx = 0
    last_item = z_lib[select_idx:(select_idx + 1)]
    coreset_idx = [torch.tensor(select_idx)]
    # NOTE(review): `keepdims` appears to rely on PyTorch's NumPy-style kwarg
    # alias for `keepdim` — confirm against the installed torch version.
    min_distances = torch.linalg.norm((z_lib - last_item), dim=1, keepdims=True)
    if float16:
        last_item = last_item.half()
        z_lib = z_lib.half()
        min_distances = min_distances.half()
    if (torch.cuda.is_available() and (not force_cpu)):
        last_item = last_item.to('cuda')
        z_lib = z_lib.to('cuda')
        min_distances = min_distances.to('cuda')
    for _ in tqdm(range((n - 1)), **TQDM_PARAMS):
        # Distance of every patch to the most recently selected center.
        distances = torch.linalg.norm((z_lib - last_item), dim=1, keepdims=True)
        min_distances = torch.minimum(distances, min_distances)
        # Pick the patch farthest from the current coreset.
        select_idx = torch.argmax(min_distances)
        last_item = z_lib[select_idx:(select_idx + 1)]
        # Zero out the chosen patch so it is never selected again.
        min_distances[select_idx] = 0
        coreset_idx.append(select_idx.to('cpu'))
    return torch.stack(coreset_idx)
|
def print_and_export_results(results: dict, method: str):
    """Print a summary box and write results to ./results/ as .yml and .txt.

    Args:
        results: dict produced by run_model (per-class scores + averages).
        method: method name used in the output file names.
    """
    import os  # local import keeps this fix self-contained
    print('\n โญโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฎ')
    print(' โ Results summary โ')
    print(' โขโโโโโโโโโโโโโโโโโโโโโโโโโโโโโช')
    print(f" โ average image rocauc: {results['average image rocauc']:.2f} โ")
    print(f" โ average pixel rocauc: {results['average pixel rocauc']:.2f} โ")
    print(' โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ\n')
    timestamp = datetime.now().strftime('%d_%m_%Y_%H_%M_%S')
    name = f'{method}_{timestamp}'
    results_yaml_path = f'./results/{name}.yml'
    scoreboard_path = f'./results/{name}.txt'
    # BUG FIX: open() failed with FileNotFoundError when ./results was missing.
    os.makedirs('./results', exist_ok=True)
    with open(results_yaml_path, 'w') as outfile:
        yaml.safe_dump(results, outfile, default_flow_style=False)
    with open(scoreboard_path, 'w') as outfile:
        outfile.write(serialize_results(results['per_class_results']))
    print(f' Results written to {results_yaml_path}')
|
def serialize_results(results: dict) -> str:
    """Render per-class results as markdown-style table rows.

    Each row is the class name padded to a fixed first-column width,
    followed by image and pixel ROC-AUC as percentages.
    """
    first_col_width = 20
    rows = []
    for name, scores in results.items():
        padded = name + (' ' * (first_col_width - len(name)))
        rows.append(padded + f'| {(scores[0] * 100):.1f} | {(scores[1] * 100):.1f} |')
    return '\n'.join(rows)
|
def tensor_to_img(x, normalize=False):
    """Convert a CHW tensor to an HWC numpy image clipped to [0, 1].

    Args:
        x: CHW image tensor.
        normalize: when True, undo ImageNet normalization first.

    Returns:
        numpy array of shape (H, W, C) with values in [0, 1].
    """
    if normalize:
        # BUG FIX: previously used in-place *=/+= which mutated the caller's
        # tensor; build a new tensor instead.
        x = x * IMAGENET_STD.unsqueeze(-1).unsqueeze(-1) + IMAGENET_MEAN.unsqueeze(-1).unsqueeze(-1)
    return x.clip(0.0, 1.0).permute(1, 2, 0).detach().numpy()
|
def pred_to_img(x, range):
    """Min-max normalize an anomaly map into [0, 1] and convert it to an image.

    Note: the `range` parameter shadows the builtin; kept for caller
    compatibility. It is a (min, max) pair.
    """
    range_min, range_max = range
    # BUG FIX: previously used in-place -=//= which mutated the caller's tensor.
    x = x - range_min
    if range_max - range_min > 0:
        x = x / (range_max - range_min)
    return tensor_to_img(x)
|
def show_pred(sample, score, fmap, range):
    """Display test sample, anomaly map, and blended overlay in Streamlit.

    Args:
        sample: input image tensor (CHW, ImageNet-normalized).
        score: image-level anomaly score (not rendered here).
        fmap: pixel-level anomaly map tensor.
        range: (min, max) pair used to normalize the anomaly map colors.
    """
    sample_img = tensor_to_img(sample, normalize=True)
    fmap_img = pred_to_img(fmap, range)
    # Composite the anomaly map over the sample with matplotlib, then grab
    # the rendered figure from an in-memory PNG buffer.
    plt.imshow(sample_img)
    plt.imshow(fmap_img, cmap='jet', alpha=0.5)
    plt.axis('off')
    buf = io.BytesIO()
    plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0, transparent=True)
    buf.seek(0)
    overlay_img = Image.open(buf)
    # Three-column layout: original, raw anomaly map, overlay.
    cols = st.columns(3)
    cols[0].subheader('Test sample')
    cols[0].image(sample_img)
    cols[1].subheader('Anomaly map')
    cols[1].image(fmap_img)
    cols[2].subheader('Overlay')
    cols[2].image(overlay_img)
|
def get_sample_images(dataset, n):
    """Return up to n samples from `dataset` as de-normalized numpy images.

    Samples are chosen at random without replacement when the dataset has
    more than n items; otherwise every item is returned in order.
    """
    total = len(dataset)
    if n < total:
        chosen = np.random.choice(total, n, replace=False)
    else:
        chosen = list(range(total))
    images = []
    for idx in chosen:
        sample, _ = dataset[idx]
        images.append(tensor_to_img(sample, normalize=True))
    return images
|
def main():
    """Streamlit app: pick a dataset/method/backbone, train, then browse predictions.

    Uses st.session_state to cache the trained model and datasets so that
    changing the test-sample index does not retrain on every rerun.
    """
    # Intro / instructions markdown.
    with open('./docs/streamlit_instructions.md', 'r') as file:
        md_file = file.read()
    st.markdown(md_file)
    # --- Sidebar configuration ---
    st.sidebar.title('Config')
    app_custom_dataset = st.sidebar.checkbox('Custom dataset', False)
    if app_custom_dataset:
        app_custom_train_images = st.sidebar.file_uploader('Select 3 or more TRAINING images.', accept_multiple_files=True)
        app_custom_test_images = st.sidebar.file_uploader('Select 1 or more TEST images.', accept_multiple_files=True)
        app_mvtec_dataset = None
    else:
        app_mvtec_dataset = st.sidebar.selectbox('Choose an MVTec dataset', mvtec_classes)
        app_custom_train_images = []
        app_custom_test_images = None
    app_method = st.sidebar.selectbox('Choose a method', METHODS)
    app_backbone = st.sidebar.selectbox('Choose a backbone', BACKBONES)
    manualRange = st.sidebar.checkbox('Manually set color range', value=False)
    if manualRange:
        app_color_min = st.sidebar.number_input('set color min ', (- 1000), 1000, 0)
        app_color_max = st.sidebar.number_input('set color max ', (- 1000), 1000, 200)
        color_range = (app_color_min, app_color_max)
    app_start = st.sidebar.button('Start')
    # Reset cached state on a fresh start (or the very first run).
    if (app_start or ('reached_test_phase' not in st.session_state)):
        st.session_state.train_dataset = None
        st.session_state.test_dataset = None
        st.session_state.sample_images = None
        st.session_state.model = None
        st.session_state.reached_test_phase = False
        st.session_state.test_idx = 0
    if (app_start or st.session_state.reached_test_phase):
        # --- Data loading (only before the test phase has been reached) ---
        if (not st.session_state.reached_test_phase):
            flag_data_ok = False
            if app_custom_dataset:
                # Require at least 3 training images and 1 test image.
                if ((len(app_custom_train_images) > 2) and (len(app_custom_test_images) > 0)):
                    train_dataset = StreamingDataset()
                    test_dataset = StreamingDataset()
                    for training_image in app_custom_train_images:
                        bytes_data = training_image.getvalue()
                        train_dataset.add_pil_image(Image.open(io.BytesIO(bytes_data)))
                    for test_image in app_custom_test_images:
                        bytes_data = test_image.getvalue()
                        test_dataset.add_pil_image(Image.open(io.BytesIO(bytes_data)))
                    flag_data_ok = True
                else:
                    st.error('Please upload 3 or more training images and 1 test image.')
            else:
                with st_stdout('info', 'Checking or downloading dataset ...'):
                    (train_dataset, test_dataset) = MVTecDataset(app_mvtec_dataset).get_datasets()
                    st.success(f"Loaded '{app_mvtec_dataset}' dataset.")
                flag_data_ok = True
            if (not flag_data_ok):
                st.stop()
        else:
            # Reuse cached datasets after the first successful run.
            train_dataset = st.session_state.train_dataset
            test_dataset = st.session_state.test_dataset
        st.header('Random (healthy) training samples')
        cols = st.columns(N_IMAGE_GALLERY)
        if (not st.session_state.reached_test_phase):
            col_imgs = get_sample_images(train_dataset, N_IMAGE_GALLERY)
        else:
            col_imgs = st.session_state.sample_images
        for (col, img) in zip(cols, col_imgs):
            col.image(img, use_column_width=True)
        # --- Model construction / training (skipped once cached) ---
        if (not st.session_state.reached_test_phase):
            if (app_method == 'SPADE'):
                model = SPADE(k=3, backbone_name=app_backbone)
            elif (app_method == 'PaDiM'):
                model = PaDiM(d_reduced=75, backbone_name=app_backbone)
            elif (app_method == 'PatchCore'):
                model = PatchCore(f_coreset=0.01, backbone_name=app_backbone, coreset_eps=0.95)
            st.success(f'Loaded {app_method} model.')
        else:
            model = st.session_state.model
        if (not st.session_state.reached_test_phase):
            with st_stdout('info', 'Setting up training ...'):
                model.fit(DataLoader(train_dataset))
        # Cache everything so subsequent reruns jump straight to prediction.
        if (not st.session_state.reached_test_phase):
            st.session_state.reached_test_phase = True
            st.session_state.sample_images = col_imgs
            st.session_state.model = model
            st.session_state.train_dataset = train_dataset
            st.session_state.test_dataset = test_dataset
        # --- Prediction on the chosen test sample ---
        st.session_state.test_idx = st.number_input('Test sample index', min_value=0, max_value=(len(test_dataset) - 1))
        (sample, *_) = test_dataset[st.session_state.test_idx]
        (img_lvl_anom_score, pxl_lvl_anom_score) = model.predict(sample.unsqueeze(0))
        score_range = (pxl_lvl_anom_score.min(), pxl_lvl_anom_score.max())
        if (not manualRange):
            color_range = score_range
        show_pred(sample, img_lvl_anom_score, pxl_lvl_anom_score, color_range)
        st.write('pixel score min:{:.0f}'.format(score_range[0]))
        st.write('pixel score max:{:.0f}'.format(score_range[1]))
|
@contextmanager
def st_redirect(src, dst, msg):
    """Temporarily route writes on stream `src` to the Streamlit widget method `dst`.

    Shows `msg` in an info placeholder while the redirection is active.
    https://discuss.streamlit.io/t/cannot-print-the-terminal-output-in-streamlit/6602
    """
    placeholder = st.info(msg)
    # Give the user a moment to read the message before output replaces it.
    sleep(3)
    output_func = getattr(placeholder, dst)
    with StringIO() as buffer:
        old_write = src.write

        def new_write(b):
            # Only redirect writes originating from a Streamlit script thread
            # (detected via the report-context attribute); other threads keep
            # writing to the original stream.
            if getattr(current_thread(), REPORT_CONTEXT_ATTR_NAME, None):
                buffer.write(b)
                output_func(b)
            else:
                old_write(b)

        try:
            # Monkey-patch the stream's write for the duration of the context.
            src.write = new_write
            (yield)
        finally:
            # Always restore the original write and clear the placeholder.
            src.write = old_write
            placeholder.empty()
|
@contextmanager
def st_stdout(dst, msg):
    """Route sys.stdout into the Streamlit widget method `dst` while active.

    https://discuss.streamlit.io/t/cannot-print-the-terminal-output-in-streamlit/6602
    """
    with st_redirect(sys.stdout, dst, msg):
        yield
|
@contextmanager
def st_stderr(dst, msg='Redirecting stderr ...'):
    """Route sys.stderr into the Streamlit widget method `dst` while active.

    https://discuss.streamlit.io/t/cannot-print-the-terminal-output-in-streamlit/6602

    BUG FIX: st_redirect requires a `msg` argument but none was passed,
    so every call raised TypeError. A default message is now supplied
    (new optional parameter keeps the call signature backward-compatible).
    """
    with st_redirect(sys.stderr, dst, msg):
        yield
|
def main():
    """Benchmark PoseNet inference FPS over a directory of images (TF1 session)."""
    with tf.Session() as sess:
        (model_cfg, model_outputs) = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']
        num_images = args.num_images
        filenames = [f.path for f in os.scandir(args.image_dir) if (f.is_file() and f.path.endswith(('.png', '.jpg')))]
        if (len(filenames) > num_images):
            filenames = filenames[:num_images]
        # Pre-load and preprocess every image so the timed loop measures
        # inference (and decoding) only, not disk I/O.
        images = {f: posenet.read_imgfile(f, 1.0, output_stride)[0] for f in filenames}
        start = time.time()
        for i in range(num_images):
            # Cycle through the available images when num_images exceeds the set.
            (heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result) = sess.run(model_outputs, feed_dict={'image:0': images[filenames[(i % len(filenames))]]})
            # Decoded poses are discarded; this loop only measures throughput.
            output = posenet.decode_multiple_poses(heatmaps_result.squeeze(axis=0), offsets_result.squeeze(axis=0), displacement_fwd_result.squeeze(axis=0), displacement_bwd_result.squeeze(axis=0), output_stride=output_stride, max_pose_detections=10, min_pose_score=0.25)
        print('Average FPS:', (num_images / (time.time() - start)))
|
def main():
    """Download the PoseNet test images from the GCS bucket into args.image_dir."""
    if not os.path.exists(args.image_dir):
        os.makedirs(args.image_dir)
    for name in TEST_IMAGES:
        source_url = os.path.join(GOOGLE_CLOUD_IMAGE_BUCKET, name)
        print('Downloading %s' % name)
        urllib.request.urlretrieve(source_url, os.path.join(args.image_dir, name))
|
def load_config(config_name='config.yaml'):
    """Load and parse a YAML config file located in BASE_DIR.

    BUG FIX: the file was opened in 'r+' and never closed, and yaml.load()
    was called without a Loader — deprecated in PyYAML 5.x and a TypeError
    in PyYAML >= 6. safe_load covers plain config data (no custom tags).
    """
    with open(os.path.join(BASE_DIR, config_name), 'r') as cfg_f:
        return yaml.safe_load(cfg_f)
|
def download_file(checkpoint, filename, base_dir):
    """Download one checkpoint file from cloud storage, gunzipping if needed.

    Args:
        checkpoint: checkpoint directory name (both remote and local).
        filename: file within the checkpoint to fetch.
        base_dir: local root directory; the file lands in base_dir/checkpoint/.
    """
    output_path = os.path.join(base_dir, checkpoint, filename)
    url = posixpath.join(GOOGLE_CLOUD_STORAGE_DIR, checkpoint, filename)
    req = urllib.request.Request(url)
    # BUG FIX: the HTTP response was never closed; use it as a context manager.
    with urllib.request.urlopen(req) as response:
        if response.info().get('Content-Encoding') == 'gzip':
            # 32 + MAX_WBITS tells zlib to auto-detect the gzip/zlib header.
            data = zlib.decompress(response.read(), zlib.MAX_WBITS | 32)
        else:
            data = response.read()
    with open(output_path, 'wb') as f:
        f.write(data)
|
def download(checkpoint, base_dir='./weights/'):
    """Fetch a checkpoint's manifest and then every file the manifest lists."""
    save_dir = os.path.join(base_dir, checkpoint)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # The manifest enumerates all remaining files to download.
    download_file(checkpoint, 'manifest.json', base_dir)
    with open(os.path.join(save_dir, 'manifest.json'), 'r') as f:
        manifest = json.load(f)
    for entry in manifest:
        target = manifest[entry]['filename']
        print('Downloading', target)
        download_file(checkpoint, target, base_dir)
|
def main():
    """Download the checkpoint selected by the module-level CHK index."""
    download(CHECKPOINTS[CHK])
|
def traverse_to_targ_keypoint(edge_id, source_keypoint, target_keypoint_id, scores, offsets, output_stride, displacements):
    """Follow one displacement edge from a source keypoint to locate its target.

    Returns the target keypoint's heatmap score and its image-space coordinate.
    """
    height, width = scores.shape[0], scores.shape[1]
    clip_max = [height - 1, width - 1]

    # Snap the source to the nearest valid heatmap cell.
    src_idx = np.clip(np.round(source_keypoint / output_stride), a_min=0, a_max=clip_max).astype(np.int32)
    # Displace toward the target, then snap again.
    displaced = source_keypoint + displacements[src_idx[0], src_idx[1], edge_id]
    dst_idx = np.clip(np.round(displaced / output_stride), a_min=0, a_max=clip_max).astype(np.int32)

    score = scores[dst_idx[0], dst_idx[1], target_keypoint_id]
    # Refine the coarse cell position with the sub-cell offset.
    image_coord = dst_idx * output_stride + offsets[dst_idx[0], dst_idx[1], target_keypoint_id]
    return score, image_coord
|
def decode_pose(root_score, root_id, root_image_coord, scores, offsets, output_stride, displacements_fwd, displacements_bwd):
    """Grow a full pose outward from a single root keypoint along the part tree.

    Performs a backward pass (child -> parent) followed by a forward pass
    (parent -> child), filling in each keypoint at most once.
    """
    num_parts = scores.shape[2]
    num_edges = len(PARENT_CHILD_TUPLES)
    keypoint_scores = np.zeros(num_parts)
    keypoint_coords = np.zeros((num_parts, 2))
    keypoint_scores[root_id] = root_score
    keypoint_coords[root_id] = root_image_coord

    # Backward pass: traverse edges in reverse using backward displacements.
    for edge in reversed(range(num_edges)):
        target_id, source_id = PARENT_CHILD_TUPLES[edge]
        if keypoint_scores[source_id] > 0.0 and keypoint_scores[target_id] == 0.0:
            score, coords = traverse_to_targ_keypoint(edge, keypoint_coords[source_id], target_id, scores, offsets, output_stride, displacements_bwd)
            keypoint_scores[target_id] = score
            keypoint_coords[target_id] = coords

    # Forward pass: traverse edges in order using forward displacements.
    for edge in range(num_edges):
        source_id, target_id = PARENT_CHILD_TUPLES[edge]
        if keypoint_scores[source_id] > 0.0 and keypoint_scores[target_id] == 0.0:
            score, coords = traverse_to_targ_keypoint(edge, keypoint_coords[source_id], target_id, scores, offsets, output_stride, displacements_fwd)
            keypoint_scores[target_id] = score
            keypoint_coords[target_id] = coords

    return keypoint_scores, keypoint_coords
|
def model_id_to_ord(model_id):
    """Map a model id to its ordinal (0-3).

    Accepts either an ordinal directly (0-3) or a MobileNet multiplier
    (50/75/100); anything else falls back to ordinal 3.
    """
    if 0 <= model_id < 4:
        return model_id
    multiplier_to_ord = {50: 0, 75: 1, 100: 2}
    return multiplier_to_ord.get(model_id, 3)
|
def load_config(model_ord):
    """Build a model config dict (output stride + checkpoint name) for an ordinal."""
    converter_cfg = posenet.converter.config.load_config()
    return {
        'output_stride': converter_cfg['outputStride'],
        'checkpoint_name': converter_cfg['checkpoints'][model_ord],
    }
|
def load_model(model_id, sess, model_dir=MODEL_DIR):
    """Load a frozen PoseNet graph into `sess`, converting from tfjs on first use.

    Args:
        model_id: model selector (ordinal 0-3 or multiplier 50/75/100).
        sess: TF1 session whose graph receives the imported model.
        model_dir: directory holding (or receiving) the .pb model files.

    Returns:
        (model_cfg, [heatmaps, offsets, displacement_fwd, displacement_bwd])
        where the list contains the graph's output tensors.
    """
    model_ord = model_id_to_ord(model_id)
    model_cfg = load_config(model_ord)
    model_path = os.path.join(model_dir, ('model-%s.pb' % model_cfg['checkpoint_name']))
    if (not os.path.exists(model_path)):
        # First run: convert the tfjs checkpoint into a frozen TF graph.
        print(('Cannot find model file %s, converting from tfjs...' % model_path))
        from posenet.converter.tfjs2python import convert
        convert(model_ord, model_dir, check=False)
        assert os.path.exists(model_path)
    with tf.gfile.GFile(model_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    sess.graph.as_default()
    tf.import_graph_def(graph_def, name='')
    if DEBUG_OUTPUT:
        # Dump every node name for debugging tensor-name mismatches.
        graph_nodes = [n for n in graph_def.node]
        names = []
        for t in graph_nodes:
            names.append(t.name)
            print('Loaded graph node:', t.name)
    offsets = sess.graph.get_tensor_by_name('offset_2:0')
    displacement_fwd = sess.graph.get_tensor_by_name('displacement_fwd_2:0')
    displacement_bwd = sess.graph.get_tensor_by_name('displacement_bwd_2:0')
    heatmaps = sess.graph.get_tensor_by_name('heatmap:0')
    return (model_cfg, [heatmaps, offsets, displacement_fwd, displacement_bwd])
|
def main():
    """Run PoseNet live on a webcam (or video file) and show the skeleton overlay."""
    with tf.Session() as sess:
        (model_cfg, model_outputs) = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']
        if (args.file is not None):
            cap = cv2.VideoCapture(args.file)
        else:
            cap = cv2.VideoCapture(args.cam_id)
        # Properties 3/4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
        cap.set(3, args.cam_width)
        cap.set(4, args.cam_height)
        start = time.time()
        frame_count = 0
        while True:
            (input_image, display_image, output_scale) = posenet.read_cap(cap, scale_factor=args.scale_factor, output_stride=output_stride)
            (heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result) = sess.run(model_outputs, feed_dict={'image:0': input_image})
            (pose_scores, keypoint_scores, keypoint_coords) = posenet.decode_multi.decode_multiple_poses(heatmaps_result.squeeze(axis=0), offsets_result.squeeze(axis=0), displacement_fwd_result.squeeze(axis=0), displacement_bwd_result.squeeze(axis=0), output_stride=output_stride, max_pose_detections=10, min_pose_score=0.15)
            # Decoded coordinates are in model space; scale to display resolution.
            keypoint_coords *= output_scale
            overlay_image = posenet.draw_skel_and_kp(display_image, pose_scores, keypoint_scores, keypoint_coords, min_pose_score=0.15, min_part_score=0.1)
            cv2.imshow('posenet', overlay_image)
            frame_count += 1
            # Quit when 'q' is pressed in the display window.
            if ((cv2.waitKey(1) & 255) == ord('q')):
                break
        print('Average FPS: ', (frame_count / (time.time() - start)))
|
def pooling_factor(pool_type='avg'):
    """Channel multiplier for a pooling mode: 'avgmaxc' concatenates avg and
    max results, doubling the channel count; every other mode keeps it.
    """
    if pool_type == 'avgmaxc':
        return 2
    return 1
|
def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
    """Global pooling over the full spatial extent of x.

    Modes: 'avgmaxc' concatenates avg+max along channels, 'avgmax' averages
    them element-wise, 'max' and 'avg' apply a single pooling; unknown modes
    fall back to average pooling with a warning.
    """
    # Kernel spans the whole feature map, so the output is 1x1 spatially.
    kernel_size = (x.size(2), x.size(3))
    if pool_type == 'avgmaxc':
        x = torch.cat([
            F.avg_pool2d(x, kernel_size=kernel_size, padding=padding, count_include_pad=count_include_pad),
            F.max_pool2d(x, kernel_size=kernel_size, padding=padding),
        ], dim=1)
    elif pool_type == 'avgmax':
        pooled_avg = F.avg_pool2d(x, kernel_size=kernel_size, padding=padding, count_include_pad=count_include_pad)
        pooled_max = F.max_pool2d(x, kernel_size=kernel_size, padding=padding)
        x = 0.5 * (pooled_avg + pooled_max)
    elif pool_type == 'max':
        x = F.max_pool2d(x, kernel_size=kernel_size, padding=padding)
    else:
        if pool_type != 'avg':
            print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
        x = F.avg_pool2d(x, kernel_size=kernel_size, padding=padding, count_include_pad=count_include_pad)
    return x
|
class AdaptiveAvgMaxPool2d(torch.nn.Module):
    """Global pooling layer whose strategy is fixed at construction time.

    Supports 'avg', 'max', 'avgmax' (element-wise mean of both), and
    'avgmaxc' (channel concatenation of both); input kernel size is dynamic.
    """

    def __init__(self, output_size=1, pool_type='avg'):
        super(AdaptiveAvgMaxPool2d, self).__init__()
        self.output_size = output_size
        self.pool_type = pool_type
        if pool_type in ('avgmaxc', 'avgmax'):
            # Combined modes need an avg pool and a max pool side by side.
            self.pool = nn.ModuleList([nn.AdaptiveAvgPool2d(output_size), nn.AdaptiveMaxPool2d(output_size)])
        elif pool_type == 'max':
            self.pool = nn.AdaptiveMaxPool2d(output_size)
        else:
            if pool_type != 'avg':
                print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
            self.pool = nn.AdaptiveAvgPool2d(output_size)

    def forward(self, x):
        if self.pool_type == 'avgmaxc':
            # Concatenate avg and max results along the channel axis.
            return torch.cat([p(x) for p in self.pool], dim=1)
        if self.pool_type == 'avgmax':
            # Element-wise mean of the stacked avg and max results.
            stacked = torch.stack([p(x) for p in self.pool])
            return 0.5 * torch.sum(stacked, 0).squeeze(dim=0)
        return self.pool(x)

    def factor(self):
        # Channel multiplier implied by the configured pooling mode.
        return pooling_factor(self.pool_type)

    def __repr__(self):
        return (self.__class__.__name__ + ' ('
                + 'output_size=' + str(self.output_size)
                + ', pool_type=' + self.pool_type + ')')
|
def _convert_bn(k):
aux = False
if (k == 'bias'):
add = 'beta'
elif (k == 'weight'):
add = 'gamma'
elif (k == 'running_mean'):
aux = True
add = 'moving_mean'
elif (k == 'running_var'):
aux = True
add = 'moving_var'
else:
assert False, ('Unknown key: %s' % k)
return (aux, add)
|
def convert_from_mxnet(model, checkpoint_prefix, debug=False):
    """Populate a PyTorch DPN model from an MXNet checkpoint.

    Walks the PyTorch state_dict, derives the corresponding MXNet parameter
    name for each key, copies the array over, and finally loads the remapped
    state dict into `model`.

    Args:
        model: PyTorch DPN model instance (must expose attribute `b`).
        checkpoint_prefix: MXNet checkpoint path prefix (epoch 0 is loaded).
        debug: print each key mapping and tensor statistics.

    Returns:
        The same `model`, with weights loaded.
    """
    (_, mxnet_weights, mxnet_aux) = mxnet.model.load_checkpoint(checkpoint_prefix, 0)
    remapped_state = {}
    for state_key in model.state_dict().keys():
        k = state_key.split('.')
        aux = False
        mxnet_key = ''
        # PyTorch-only bookkeeping tensor; has no MXNet counterpart.
        if (k[(- 1)] == 'num_batches_tracked'):
            continue
        if (k[0] == 'features'):
            if (k[1] == 'conv1_1'):
                # Stem convolution block.
                mxnet_key += 'conv1_x_1__'
                if (k[2] == 'bn'):
                    mxnet_key += 'relu-sp__bn_'
                    (aux, key_add) = _convert_bn(k[3])
                    mxnet_key += key_add
                else:
                    assert (k[3] == 'weight')
                    mxnet_key += ('conv_' + k[3])
            elif (k[1] == 'conv5_bn_ac'):
                # Final BN+activation block before the classifier.
                mxnet_key += 'conv5_x_x__relu-sp__bn_'
                assert (k[2] == 'bn')
                (aux, key_add) = _convert_bn(k[3])
                mxnet_key += key_add
            else:
                # Regular dual-path blocks. `bc_block` marks the 1x1 "c" conv
                # in the bottleneck variant, which uses a shorter key layout.
                if (model.b and ('c1x1_c' in k[2])):
                    bc_block = True
                else:
                    bc_block = False
                ck = k[1].split('_')
                mxnet_key += (((ck[0] + '_x__') + ck[1]) + '_')
                ck = k[2].split('_')
                mxnet_key += ((ck[0] + '-') + ck[1])
                # Width convs encode their stride in the MXNet name.
                if ((ck[1] == 'w') and (len(ck) > 2)):
                    mxnet_key += ('(s/2)' if (ck[2] == 's2') else '(s/1)')
                mxnet_key += '__'
                if (k[3] == 'bn'):
                    mxnet_key += ('bn_' if bc_block else 'bn__bn_')
                    (aux, key_add) = _convert_bn(k[4])
                    mxnet_key += key_add
                else:
                    ki = (3 if bc_block else 4)
                    assert (k[ki] == 'weight')
                    mxnet_key += ('conv_' + k[ki])
        elif (k[0] == 'classifier'):
            # Some checkpoints ship an ImageNet-1k specific classifier head.
            if ('fc6-1k_weight' in mxnet_weights):
                mxnet_key += 'fc6-1k_'
            else:
                mxnet_key += 'fc6_'
            mxnet_key += k[1]
        else:
            assert False, 'Unexpected token'
        if debug:
            print(mxnet_key, '=> ', state_key, end=' ')
        # Running stats come from aux params, trained weights from arg params.
        mxnet_array = (mxnet_aux[mxnet_key] if aux else mxnet_weights[mxnet_key])
        torch_tensor = torch.from_numpy(mxnet_array.asnumpy())
        # The classifier is a 1x1 conv in PyTorch, so append spatial dims.
        if ((k[0] == 'classifier') and (k[1] == 'weight')):
            torch_tensor = torch_tensor.view((torch_tensor.size() + (1, 1)))
        remapped_state[state_key] = torch_tensor
        if debug:
            print(list(torch_tensor.size()), torch_tensor.mean(), torch_tensor.std())
    model.load_state_dict(remapped_state)
    return model
|
def main():
    """Convert an MXNet DPN checkpoint to a PyTorch .pth state-dict file."""
    args = parser.parse_args()
    if 'dpn' not in args.model:
        print('Error: Can only convert DPN models.')
        exit(1)
    if not has_mxnet:
        print('Error: Cannot import MXNet module. Please install.')
        exit(1)
    model = model_factory.create_model(args.model, num_classes=1000, pretrained=False)
    prefix = args.model
    # These variants ship with '-extra' checkpoint files.
    if prefix in ['dpn107', 'dpn68b', 'dpn92']:
        prefix += '-extra'
    checkpoint_base = os.path.join(args.checkpoint_path, prefix)
    convert_from_mxnet(model, checkpoint_base)
    torch.save(model.state_dict(), os.path.join(args.checkpoint_path, prefix + '.pth'))
|
def natural_key(string_):
    """Sort key for natural ordering: digit runs compare numerically.

    See http://www.codinghorror.com/blog/archives/001018.html
    """
    chunks = re.split('(\\d+)', string_.lower())
    return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]
|
def find_images_and_targets(folder, types=IMG_EXTENSIONS, class_to_idx=None, leaf_name_only=True, sort=True):
    """Walk `folder` collecting (image path, class index) pairs.

    Labels come from directory names (leaf name only, or the full relative
    path with separators replaced by '_'). When `class_to_idx` is None, the
    mapping is built from the leaf directories found, natural-sorted, and is
    returned along with the class list.

    Returns:
        (images_and_targets, classes, class_to_idx) when the mapping was
        built here, otherwise just images_and_targets. Note: when sort=False
        and a mapping was supplied, the result is a lazy zip object.
    """
    if (class_to_idx is None):
        class_to_idx = dict()
        build_class_idx = True
    else:
        build_class_idx = False
    labels = []
    filenames = []
    # topdown=False so leaf directories are visited before their parents.
    for (root, subdirs, files) in os.walk(folder, topdown=False):
        rel_path = (os.path.relpath(root, folder) if (root != folder) else '')
        label = (os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_'))
        # Only leaf directories (no subdirs) define classes.
        if (build_class_idx and (not subdirs)):
            class_to_idx[label] = None
        for f in files:
            (base, ext) = os.path.splitext(f)
            if (ext.lower() in types):
                filenames.append(os.path.join(root, f))
                labels.append(label)
    if build_class_idx:
        # Assign indices in natural-sort order of class names.
        classes = sorted(class_to_idx.keys(), key=natural_key)
        for (idx, c) in enumerate(classes):
            class_to_idx[c] = idx
    images_and_targets = zip(filenames, [class_to_idx[l] for l in labels])
    if sort:
        images_and_targets = sorted(images_and_targets, key=(lambda k: natural_key(k[0])))
    if build_class_idx:
        return (images_and_targets, classes, class_to_idx)
    else:
        return images_and_targets
|
class Dataset(data.Dataset):
    """Image-folder dataset; labels are derived from sub-directory names.

    Images without a label (target None) yield a zero LongTensor target.
    """

    def __init__(self, root, transform=None):
        imgs, _, _ = find_images_and_targets(root)
        if len(imgs) == 0:
            raise RuntimeError('Found 0 images in subfolders of: ' + root + '\nSupported image extensions are: ' + ','.join(IMG_EXTENSIONS))
        self.root = root
        self.imgs = imgs  # list of (path, target) pairs
        self.transform = transform

    def __getitem__(self, index):
        """Return (image, target) for the given index."""
        path, target = self.imgs[index]
        img = Image.open(path).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        if target is None:
            target = torch.zeros(1).long()
        return img, target

    def __len__(self):
        return len(self.imgs)

    def set_transform(self, transform):
        self.transform = transform

    def filenames(self, indices=None, basename=False):
        """Return image paths, optionally restricted to `indices` and/or basenames.

        BUG FIX: `indices` previously used a mutable default argument ([]);
        None is equivalent here since the empty list was treated as falsy.
        """
        if indices:
            if basename:
                return [os.path.basename(self.imgs[i][0]) for i in indices]
            return [self.imgs[i][0] for i in indices]
        if basename:
            return [os.path.basename(x[0]) for x in self.imgs]
        return [x[0] for x in self.imgs]
|
def main():
    """Predict top-5 ImageNet classes for every image in a folder, write a CSV."""
    args = parser.parse_args()
    num_classes = 1000
    model = model_factory.create_model(args.model, num_classes=num_classes, pretrained=args.pretrained, test_time_pool=args.test_time_pool)
    if (args.restore_checkpoint and os.path.isfile(args.restore_checkpoint)):
        print("=> loading checkpoint '{}'".format(args.restore_checkpoint))
        checkpoint = torch.load(args.restore_checkpoint)
        # Checkpoints may be a raw state_dict or a dict wrapping one.
        if (isinstance(checkpoint, dict) and ('state_dict' in checkpoint)):
            model.load_state_dict(checkpoint['state_dict'])
        else:
            model.load_state_dict(checkpoint)
        print("=> loaded checkpoint '{}'".format(args.restore_checkpoint))
    elif (not args.pretrained):
        # Neither checkpoint nor pretrained weights: nothing sensible to run.
        print("=> no checkpoint found at '{}'".format(args.restore_checkpoint))
        exit(1)
    if args.multi_gpu:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()
    transforms = model_factory.get_transforms_eval(args.model, args.img_size)
    dataset = Dataset(args.data, transforms)
    loader = data.DataLoader(dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    model.eval()
    batch_time = AverageMeter()
    end = time.time()
    top5_ids = []
    with torch.no_grad():
        for (batch_idx, (input, _)) in enumerate(loader):
            input = input.cuda()
            labels = model(input)
            # topk returns (values, indices); keep the class indices only.
            top5 = labels.topk(5)[1]
            top5_ids.append(top5.cpu().numpy())
            batch_time.update((time.time() - end))
            end = time.time()
            if ((batch_idx % args.print_freq) == 0):
                print('Predict: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(batch_idx, len(loader), batch_time=batch_time))
    top5_ids = np.concatenate(top5_ids, axis=0).squeeze()
    # One CSV row per image: filename followed by the five predicted class ids.
    with open(os.path.join(args.output_dir, './top5_ids.csv'), 'w') as out_file:
        filenames = dataset.filenames()
        for (filename, label) in zip(filenames, top5_ids):
            filename = os.path.basename(filename)
            out_file.write('{0},{1},{2},{3},{4},{5}\n'.format(filename, label[0], label[1], label[2], label[3], label[4]))
|
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count, and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0    # most recent value
        self.avg = 0    # running mean
        self.sum = 0    # weighted sum of all values
        self.count = 0  # total weight seen

    def update(self, val, n=1):
        """Record `val` with weight `n` and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
def create_model(model_name, num_classes=1000, pretrained=False, **kwargs):
    """Instantiate a classification model by architecture name.

    DPN variants receive `test_time_pool` (popped from kwargs, default True);
    torchvision-style models receive the remaining kwargs unchanged.
    """
    test_time_pool = kwargs.pop('test_time_pool', True)
    if model_name == 'dpn68':
        return dpn68(pretrained=pretrained, test_time_pool=test_time_pool, num_classes=num_classes)
    if model_name == 'dpn68b':
        return dpn68b(pretrained=pretrained, test_time_pool=test_time_pool, num_classes=num_classes)
    if model_name == 'dpn92':
        return dpn92(pretrained=pretrained, test_time_pool=test_time_pool, num_classes=num_classes)
    if model_name == 'dpn98':
        return dpn98(pretrained=pretrained, test_time_pool=test_time_pool, num_classes=num_classes)
    if model_name == 'dpn131':
        return dpn131(pretrained=pretrained, test_time_pool=test_time_pool, num_classes=num_classes)
    if model_name == 'dpn107':
        return dpn107(pretrained=pretrained, test_time_pool=test_time_pool, num_classes=num_classes)
    if model_name == 'resnet18':
        return resnet18(pretrained=pretrained, num_classes=num_classes, **kwargs)
    if model_name == 'resnet34':
        return resnet34(pretrained=pretrained, num_classes=num_classes, **kwargs)
    if model_name == 'resnet50':
        return resnet50(pretrained=pretrained, num_classes=num_classes, **kwargs)
    if model_name == 'resnet101':
        return resnet101(pretrained=pretrained, num_classes=num_classes, **kwargs)
    if model_name == 'resnet152':
        return resnet152(pretrained=pretrained, num_classes=num_classes, **kwargs)
    if model_name == 'densenet121':
        return densenet121(pretrained=pretrained, num_classes=num_classes, **kwargs)
    if model_name == 'densenet161':
        return densenet161(pretrained=pretrained, num_classes=num_classes, **kwargs)
    if model_name == 'densenet169':
        return densenet169(pretrained=pretrained, num_classes=num_classes, **kwargs)
    if model_name == 'densenet201':
        return densenet201(pretrained=pretrained, num_classes=num_classes, **kwargs)
    if model_name == 'inception_v3':
        return inception_v3(pretrained=pretrained, num_classes=num_classes, transform_input=False, **kwargs)
    assert False, ('Unknown model architecture (%s)' % model_name)
|
class LeNormalize(object):
    """Normalize to -1..1 in Google Inception style, in place."""

    def __call__(self, tensor):
        # Per-channel in-place transform: t -> (t - 0.5) * 2.
        for channel in tensor:
            channel.sub_(0.5).mul_(2.0)
        return tensor
|
def get_transforms_eval(model_name, img_size=224, crop_pct=None):
    """Build the eval transform pipeline (resize, center-crop, tensor, normalize)
    with per-model-family normalization.
    """
    crop_pct = (crop_pct or DEFAULT_CROP_PCT)
    if ('dpn' in model_name):
        # NOTE(review): crop_pct can no longer be None here (defaulted above),
        # so this inner branch is dead and the else is always taken. The intent
        # was presumably to use img_size directly for non-224 DPN inputs when
        # the caller did not pass crop_pct — confirm before changing behavior.
        if (crop_pct is None):
            if (img_size == 224):
                scale_size = int(math.floor((img_size / DEFAULT_CROP_PCT)))
            else:
                scale_size = img_size
        else:
            scale_size = int(math.floor((img_size / crop_pct)))
        # DPN models were trained with MXNet-style mean/std.
        normalize = transforms.Normalize(mean=[(124 / 255), (117 / 255), (104 / 255)], std=([(1 / (0.0167 * 255))] * 3))
    elif ('inception' in model_name):
        scale_size = int(math.floor((img_size / crop_pct)))
        # Inception expects inputs scaled to [-1, 1].
        normalize = LeNormalize()
    else:
        scale_size = int(math.floor((img_size / crop_pct)))
        # Standard torchvision ImageNet normalization.
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return transforms.Compose([transforms.Resize(scale_size, Image.BICUBIC), transforms.CenterCrop(img_size), transforms.ToTensor(), normalize])
|
def main():
    """Evaluate a classification model on an image folder, printing top-1/top-5.

    Builds the model from CLI args, optionally restores a checkpoint, then
    runs a single no-grad evaluation pass on GPU with running statistics.
    """
    args = parser.parse_args()
    # DPN models evaluated above their native 224 resolution get test-time
    # pooling unless explicitly disabled via --no-test-pool
    test_time_pool = False
    if (('dpn' in args.model) and (args.img_size > 224) and (not args.no_test_pool)):
        test_time_pool = True
    # fall back to pretrained weights when no checkpoint was supplied
    if ((not args.checkpoint) and (not args.pretrained)):
        args.pretrained = True
    num_classes = 1000  # ImageNet-1k
    model = model_factory.create_model(args.model, num_classes=num_classes, pretrained=args.pretrained, test_time_pool=test_time_pool)
    print(('Model %s created, param count: %d' % (args.model, sum([m.numel() for m in model.parameters()]))))
    if (args.checkpoint and os.path.isfile(args.checkpoint)):
        print("=> loading checkpoint '{}'".format(args.checkpoint))
        checkpoint = torch.load(args.checkpoint)
        # accept either a training checkpoint dict or a raw state_dict
        if (isinstance(checkpoint, dict) and ('state_dict' in checkpoint)):
            model.load_state_dict(checkpoint['state_dict'])
        else:
            model.load_state_dict(checkpoint)
        print("=> loaded checkpoint '{}'".format(args.checkpoint))
    elif args.checkpoint:
        # a checkpoint path was given but does not exist: abort
        print("=> no checkpoint found at '{}'".format(args.checkpoint))
        exit(1)
    if args.multi_gpu:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    cudnn.benchmark = True  # autotune conv kernels for the fixed input size
    # NOTE(review): this local name shadows any `transforms` module inside main
    transforms = model_factory.get_transforms_eval(args.model, args.img_size)
    dataset = Dataset(args.data, transforms)
    loader = data.DataLoader(dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.eval()
    end = time.time()
    with torch.no_grad():
        for (i, (input, target)) in enumerate(loader):
            target = target.cuda()
            input = input.cuda()
            output = model(input)
            loss = criterion(output, target)
            # running top-1 / top-5 accuracy over the evaluated batches
            (prec1, prec5) = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            top5.update(prec5.item(), input.size(0))
            batch_time.update((time.time() - end))
            end = time.time()
            if ((i % args.print_freq) == 0):
                print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tPrec@1 {top1.val:.3f} ({top1.avg:.3f})\tPrec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i, len(loader), batch_time=batch_time, loss=losses, top1=top1, top5=top5))
    print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format(top1=top1, top1a=(100 - top1.avg), top5=top5, top5a=(100.0 - top5.avg)))
|
class AverageMeter(object):
    """Tracks the most recent value and a running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        # clear current value plus the running sum/count and derived average
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: iterable of k values to report.

    Returns:
        List of 0-dim tensors with top-k accuracy in percent, one per k.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    (_, pred) = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
    res = []
    for k in topk:
        # BUGFIX: use reshape, not view — correct[:k] is a non-contiguous
        # slice of a transposed tensor and .view(-1) raises on recent PyTorch
        correct_k = correct[:k].reshape((- 1)).float().sum(0)
        res.append(correct_k.mul_((100.0 / batch_size)))
    return res
|
def main():
    """Benchmark a Caffe2 model (init + predict protobufs) on random input.

    NOTE(review): this redefines main() from the PyTorch eval script earlier
    in this file — confirm the two scripts belong in separate modules.
    """
    args = parser.parse_args()
    args.gpu_id = 0
    # derive both protobuf paths from a common prefix when one is given
    if args.c2_prefix:
        args.c2_init = (args.c2_prefix + '.init.pb')
        args.c2_predict = (args.c2_prefix + '.predict.pb')
    model = model_helper.ModelHelper(name='le_net', init_params=False)
    # load the weight-initialization net
    init_net_proto = caffe2_pb2.NetDef()
    with open(args.c2_init, 'rb') as f:
        init_net_proto.ParseFromString(f.read())
    model.param_init_net = core.Net(init_net_proto)
    # load the inference net
    predict_net_proto = caffe2_pb2.NetDef()
    with open(args.c2_predict, 'rb') as f:
        predict_net_proto.ParseFromString(f.read())
    model.net = core.Net(predict_net_proto)
    input_blob = model.net.external_inputs[0]
    # feed the network random gaussian data of the requested batch/image size
    model.param_init_net.GaussianFill([], input_blob.GetUnscopedName(), shape=(args.batch_size, 3, args.img_size, args.img_size), mean=0.0, std=1.0)
    workspace.RunNetOnce(model.param_init_net)
    workspace.CreateNet(model.net, overwrite=True)
    # 5 warmup iterations, 20 timed iterations, per-operator stats enabled
    workspace.BenchmarkNet(model.net.Proto().name, 5, 20, True)
|
def natural_key(string_):
    """Sort key for natural ordering, so 'img10' sorts after 'img9'.

    See http://www.codinghorror.com/blog/archives/001018.html
    """
    parts = re.split('(\\d+)', string_.lower())
    return [int(part) if part.isdigit() else part for part in parts]
|
def find_images_and_targets(folder, types=None, class_to_idx=None, leaf_name_only=True, sort=True):
    """Recursively collect image files and integer class targets under ``folder``.

    Labels come from directory names: the leaf directory by default, else the
    full relative path with separators replaced by '_'.

    Args:
        folder: root directory to walk.
        types: allowed (lowercase) file extensions; defaults to IMG_EXTENSIONS.
        class_to_idx: optional pre-built label -> index mapping; when None a
            mapping is built from the leaf directories found.
        leaf_name_only: use only the leaf directory name as the label.
        sort: sort results by naturally-ordered filename.

    Returns:
        ``(images_and_targets, classes, class_to_idx)`` when the index was
        built here, otherwise just ``images_and_targets`` — a list of
        ``(path, idx)`` pairs.
    """
    if types is None:
        # resolved lazily at call time instead of at def time
        types = IMG_EXTENSIONS
    if class_to_idx is None:
        class_to_idx = dict()
        build_class_idx = True
    else:
        build_class_idx = False
    labels = []
    filenames = []
    for (root, subdirs, files) in os.walk(folder, topdown=False):
        rel_path = (os.path.relpath(root, folder) if (root != folder) else '')
        label = (os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_'))
        if (build_class_idx and (not subdirs)):
            class_to_idx[label] = None  # leaf dir: reserve a class slot
        for f in files:
            (base, ext) = os.path.splitext(f)
            if (ext.lower() in types):
                filenames.append(os.path.join(root, f))
                labels.append(label)
    if build_class_idx:
        # assign indices in natural-sort order of the class names
        classes = sorted(class_to_idx.keys(), key=natural_key)
        for (idx, c) in enumerate(classes):
            class_to_idx[c] = idx
    # BUGFIX: materialize as a list so the unsorted path returns a reusable,
    # indexable sequence rather than a one-shot zip iterator
    images_and_targets = list(zip(filenames, [class_to_idx[l] for l in labels]))
    if sort:
        images_and_targets = sorted(images_and_targets, key=(lambda k: natural_key(k[0])))
    if build_class_idx:
        return (images_and_targets, classes, class_to_idx)
    else:
        return images_and_targets
|
class Dataset(data.Dataset):
    """Image-folder dataset returning (image, target) pairs.

    Images and integer targets are discovered via find_images_and_targets;
    a target may be None (no labelled subdirs), in which case a zero tensor
    is returned in its place.
    """

    def __init__(self, root, transform=None, load_bytes=False):
        (imgs, _, _) = find_images_and_targets(root)
        if (len(imgs) == 0):
            raise RuntimeError(((('Found 0 images in subfolders of: ' + root) + '\nSupported image extensions are: ') + ','.join(IMG_EXTENSIONS)))
        self.root = root
        self.imgs = imgs
        self.transform = transform
        self.load_bytes = load_bytes

    def __getitem__(self, index):
        (path, target) = self.imgs[index]
        if self.load_bytes:
            # BUGFIX: use a context manager so the file handle is closed
            # promptly instead of leaking until GC
            with open(path, 'rb') as f:
                img = f.read()
        else:
            img = Image.open(path).convert('RGB')
        if (self.transform is not None):
            img = self.transform(img)
        if (target is None):
            target = torch.zeros(1).long()
        return (img, target)

    def __len__(self):
        return len(self.imgs)

    def filenames(self, indices=(), basename=False):
        """Return image paths (optionally basenames), for all or selected indices."""
        # BUGFIX: immutable () default instead of a shared mutable list
        if indices:
            if basename:
                return [os.path.basename(self.imgs[i][0]) for i in indices]
            else:
                return [self.imgs[i][0] for i in indices]
        elif basename:
            return [os.path.basename(x[0]) for x in self.imgs]
        else:
            return [x[0] for x in self.imgs]
|
def fast_collate(batch):
    """Collate (uint8 numpy image, int target) samples into batched tensors."""
    targets = torch.tensor([sample[1] for sample in batch], dtype=torch.int64)
    n = len(targets)
    # allocate the uint8 batch buffer up front, then copy each sample in
    images = torch.zeros((n, *batch[0][0].shape), dtype=torch.uint8)
    for idx in range(n):
        images[idx] += torch.from_numpy(batch[idx][0])
    return (images, targets)
|
class PrefetchLoader():
    """Wraps a DataLoader to overlap host->GPU transfer with compute.

    Uploads and normalizes the *next* batch on a side CUDA stream while the
    caller consumes the current one (one-batch lookahead). Normalization is
    done on GPU in 0-255 space, hence mean/std are scaled by 255.
    """
    def __init__(self, loader, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD):
        self.loader = loader
        # (1, 3, 1, 1) buffers broadcast over NCHW batches
        self.mean = torch.tensor([(x * 255) for x in mean]).cuda().view(1, 3, 1, 1)
        self.std = torch.tensor([(x * 255) for x in std]).cuda().view(1, 3, 1, 1)
    def __iter__(self):
        stream = torch.cuda.Stream()
        first = True
        for (next_input, next_target) in self.loader:
            with torch.cuda.stream(stream):
                # async copy + normalize on the side stream
                next_input = next_input.cuda(non_blocking=True)
                next_target = next_target.cuda(non_blocking=True)
                next_input = next_input.float().sub_(self.mean).div_(self.std)
            if (not first):
                # yield the batch prepared during the previous iteration;
                # `input`/`target` deliberately carry over loop iterations
                (yield (input, target))
            else:
                first = False
            # make the default stream wait for the side-stream work
            torch.cuda.current_stream().wait_stream(stream)
            input = next_input
            target = next_target
        # flush the final buffered batch
        (yield (input, target))
    def __len__(self):
        return len(self.loader)
    @property
    def sampler(self):
        # expose the wrapped loader's sampler for API compatibility
        return self.loader.sampler
|
def create_loader(dataset, input_size, batch_size, is_training=False, use_prefetcher=True, interpolation='bilinear', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_workers=1, crop_pct=None, tensorflow_preprocessing=False):
    """Attach an eval transform to ``dataset`` and wrap it in a DataLoader.

    Optionally swaps in TF-style preprocessing and/or the CUDA PrefetchLoader
    (which pairs with fast_collate's uint8 batches).
    """
    img_size = input_size[-2:] if isinstance(input_size, tuple) else input_size
    if tensorflow_preprocessing and use_prefetcher:
        from data.tf_preprocessing import TfPreprocessTransform
        transform = TfPreprocessTransform(
            is_training=is_training, size=img_size, interpolation=interpolation)
    else:
        transform = transforms_imagenet_eval(
            img_size, interpolation=interpolation, use_prefetcher=use_prefetcher,
            mean=mean, std=std, crop_pct=crop_pct)
    dataset.transform = transform
    collate = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=False,
        num_workers=num_workers, collate_fn=collate)
    if use_prefetcher:
        loader = PrefetchLoader(loader, mean=mean, std=std)
    return loader
|
def distorted_bounding_box_crop(image_bytes, bbox, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0), max_attempts=100, scope=None):
    """Decode a randomly-distorted crop of a JPEG around the given bboxes.

    See ``tf.image.sample_distorted_bounding_box`` for full parameter docs.

    Args:
        image_bytes: `Tensor` of binary JPEG image data.
        bbox: `[1, num_boxes, 4]` tensor of `[ymin, xmin, ymax, xmax]` boxes
            with coordinates in [0, 1); zero boxes means use the whole image.
        min_object_covered: minimum fraction of any bbox the crop must cover.
        aspect_ratio_range: allowed width/height ratio range of the crop.
        area_range: allowed fraction of the image area inside the crop.
        max_attempts: sampling attempts before falling back to the full image.
        scope: optional name scope string.

    Returns:
        The cropped, decoded 3-channel image `Tensor`.
    """
    with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]):
        jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
        sampled_box = tf.image.sample_distorted_bounding_box(
            jpeg_shape, bounding_boxes=bbox,
            min_object_covered=min_object_covered,
            aspect_ratio_range=aspect_ratio_range, area_range=area_range,
            max_attempts=max_attempts, use_image_if_no_bounding_boxes=True)
        (box_begin, box_size, _) = sampled_box
        (offset_y, offset_x, _) = tf.unstack(box_begin)
        (crop_h, crop_w, _) = tf.unstack(box_size)
        window = tf.stack([offset_y, offset_x, crop_h, crop_w])
        # decode only the crop window straight from the JPEG bytes
        image = tf.image.decode_and_crop_jpeg(image_bytes, window, channels=3)
        return image
|
def _at_least_x_are_equal(a, b, x):
    """Boolean Tensor: do `a` and `b` agree in at least `x` positions?"""
    matches = tf.cast(tf.equal(a, b), tf.int32)
    return tf.greater_equal(tf.reduce_sum(matches), x)
|
def _decode_and_random_crop(image_bytes, image_size, resize_method):
    """Make a random crop of image_size, falling back to a center crop when
    the sampled crop degenerates to (nearly) the whole image."""
    bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
    image = distorted_bounding_box_crop(
        image_bytes, bbox, min_object_covered=0.1,
        aspect_ratio_range=((3.0 / 4), (4.0 / 3.0)), area_range=(0.08, 1.0),
        max_attempts=10, scope=None)
    original_shape = tf.image.extract_jpeg_shape(image_bytes)
    bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
    image = tf.cond(
        bad,
        # BUGFIX: forward resize_method — _decode_and_center_crop requires it
        # as a positional argument; omitting it raised a TypeError at runtime
        (lambda: _decode_and_center_crop(image_bytes, image_size, resize_method)),
        (lambda: tf.image.resize([image], [image_size, image_size], resize_method)[0]))
    return image
|
def _decode_and_center_crop(image_bytes, image_size, resize_method):
    """Decode a padded center crop of the JPEG, then resize it to image_size."""
    shape = tf.image.extract_jpeg_shape(image_bytes)
    height = shape[0]
    width = shape[1]
    # crop fraction is image_size / (image_size + CROP_PADDING), applied to
    # the shorter image side
    crop_size = tf.cast(
        ((image_size / (image_size + CROP_PADDING)) *
         tf.cast(tf.minimum(height, width), tf.float32)),
        tf.int32)
    top = (((height - crop_size) + 1) // 2)
    left = (((width - crop_size) + 1) // 2)
    window = tf.stack([top, left, crop_size, crop_size])
    image = tf.image.decode_and_crop_jpeg(image_bytes, window, channels=3)
    return tf.image.resize([image], [image_size, image_size], resize_method)[0]
|
def _flip(image):
    """Randomly mirror the image horizontally."""
    return tf.image.random_flip_left_right(image)
|
def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
    """Preprocesses the given image for training.

    (Docstring corrected: this is the *training* path — random distorted
    crop plus random horizontal flip — not evaluation.)

    Args:
        image_bytes: `Tensor` representing an image binary of arbitrary size.
        use_bfloat16: `bool` for whether to use bfloat16.
        image_size: image size.
        interpolation: image interpolation method

    Returns:
        A preprocessed image `Tensor`.
    """
    resize_method = (tf.image.ResizeMethod.BICUBIC if (interpolation == 'bicubic') else tf.image.ResizeMethod.BILINEAR)
    image = _decode_and_random_crop(image_bytes, image_size, resize_method)
    image = _flip(image)
    image = tf.reshape(image, [image_size, image_size, 3])
    image = tf.image.convert_image_dtype(image, dtype=(tf.bfloat16 if use_bfloat16 else tf.float32))
    return image
|
def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
    """Preprocess a binary JPEG for evaluation: center crop, resize, cast.

    Args:
        image_bytes: `Tensor` representing an image binary of arbitrary size.
        use_bfloat16: cast output to bfloat16 instead of float32.
        image_size: output square image size.
        interpolation: 'bicubic' or 'bilinear'.

    Returns:
        A preprocessed `[image_size, image_size, 3]` image `Tensor`.
    """
    if interpolation == 'bicubic':
        resize_method = tf.image.ResizeMethod.BICUBIC
    else:
        resize_method = tf.image.ResizeMethod.BILINEAR
    image = _decode_and_center_crop(image_bytes, image_size, resize_method)
    image = tf.reshape(image, [image_size, image_size, 3])
    target_dtype = tf.bfloat16 if use_bfloat16 else tf.float32
    return tf.image.convert_image_dtype(image, dtype=target_dtype)
|
def preprocess_image(image_bytes, is_training=False, use_bfloat16=False, image_size=IMAGE_SIZE, interpolation='bicubic'):
    """Dispatch a binary JPEG tensor to train or eval preprocessing.

    Args:
        image_bytes: `Tensor` representing an image binary of arbitrary size.
        is_training: select the training path (random crop/flip) when True.
        use_bfloat16: `bool` for whether to use bfloat16.
        image_size: output image size.
        interpolation: image interpolation method.

    Returns:
        A preprocessed image `Tensor`.
    """
    handler = preprocess_for_train if is_training else preprocess_for_eval
    return handler(image_bytes, use_bfloat16, image_size, interpolation)
|
class TfPreprocessTransform():
    """Transform applying TF (v1) ImageNet preprocessing to raw image bytes.

    The TF graph is built once at construction; a Session is opened lazily on
    first use. Output is a CHW uint8 numpy array.
    """

    def __init__(self, is_training=False, size=224, interpolation='bicubic'):
        self.is_training = is_training
        # TF preprocessing works on a single square size
        self.size = size[0] if isinstance(size, tuple) else size
        self.interpolation = interpolation
        self._image_bytes = None  # placeholder, assigned in _build_tf_graph
        self.process_image = self._build_tf_graph()
        self.sess = None  # created lazily on the first __call__

    def _build_tf_graph(self):
        # build on CPU; the graph input is raw encoded image bytes
        with tf.device('/cpu:0'):
            self._image_bytes = tf.placeholder(shape=[], dtype=tf.string)
            img = preprocess_image(self._image_bytes, self.is_training, False,
                                   self.size, self.interpolation)
            return img

    def __call__(self, image_bytes):
        if self.sess is None:
            self.sess = tf.Session()
        img = self.sess.run(self.process_image,
                            feed_dict={self._image_bytes: image_bytes})
        img = img.round().clip(0, 255).astype(np.uint8)
        if img.ndim < 3:
            img = np.expand_dims(img, axis=-1)
        return np.rollaxis(img, 2)
|
def resolve_data_config(model, args, default_cfg=None, verbose=True):
    """Resolve the data preprocessing config from args, model defaults, globals.

    Precedence for each field: explicit ``args`` value > ``default_cfg`` (or
    ``model.default_cfg``) > library default.

    Args:
        model: optional model object; its ``default_cfg`` attribute is used
            when ``default_cfg`` is not supplied.
        args: parsed namespace with img_size/interpolation/mean/std/crop_pct.
        default_cfg: optional explicit config dict. BUGFIX: default is now
            None instead of a shared mutable ``{}``.
        verbose: print the resolved configuration.

    Returns:
        dict with keys input_size, interpolation, mean, std, crop_pct.
    """
    new_config = {}
    if not default_cfg:
        default_cfg = {}
        if (model is not None) and hasattr(model, 'default_cfg'):
            default_cfg = model.default_cfg
    in_chans = 3
    # input size: explicit square img_size wins over the model default
    input_size = (in_chans, 224, 224)
    if args.img_size is not None:
        assert isinstance(args.img_size, int)
        input_size = (in_chans, args.img_size, args.img_size)
    elif 'input_size' in default_cfg:
        input_size = default_cfg['input_size']
    new_config['input_size'] = input_size

    # interpolation: args > default_cfg > 'bicubic'
    if args.interpolation:
        new_config['interpolation'] = args.interpolation
    elif 'interpolation' in default_cfg:
        new_config['interpolation'] = default_cfg['interpolation']
    else:
        new_config['interpolation'] = 'bicubic'

    # mean: args (single value broadcast to all channels) > default_cfg > ImageNet
    if args.mean is not None:
        mean = tuple(args.mean)
        if len(mean) == 1:
            mean = tuple(list(mean) * in_chans)
        else:
            assert len(mean) == in_chans
        new_config['mean'] = mean
    elif 'mean' in default_cfg:
        new_config['mean'] = default_cfg['mean']
    else:
        new_config['mean'] = IMAGENET_DEFAULT_MEAN

    # std: same precedence and broadcasting rules as mean
    if args.std is not None:
        std = tuple(args.std)
        if len(std) == 1:
            std = tuple(list(std) * in_chans)
        else:
            assert len(std) == in_chans
        new_config['std'] = std
    elif 'std' in default_cfg:
        new_config['std'] = default_cfg['std']
    else:
        new_config['std'] = IMAGENET_DEFAULT_STD

    # crop pct: args > default_cfg > library default
    if args.crop_pct is not None:
        new_config['crop_pct'] = args.crop_pct
    elif 'crop_pct' in default_cfg:
        new_config['crop_pct'] = default_cfg['crop_pct']
    else:
        new_config['crop_pct'] = DEFAULT_CROP_PCT

    if verbose:
        print('Data processing configuration for current model + dataset:')
        for (n, v) in new_config.items():
            print(('\t%s: %s' % (n, str(v))))
    return new_config
|
class ToNumpy():
    """Convert an image (PIL or array-like) to a channels-first uint8 array."""

    def __call__(self, pil_img):
        arr = np.array(pil_img, dtype=np.uint8)
        if arr.ndim < 3:
            arr = np.expand_dims(arr, axis=-1)  # give 2-D inputs a channel axis
        arr = np.rollaxis(arr, 2)  # move the channel axis to the front
        return arr
|
class ToTensor():
    """Convert an image to a channels-first torch tensor of the given dtype."""

    def __init__(self, dtype=torch.float32):
        self.dtype = dtype

    def __call__(self, pil_img):
        arr = np.array(pil_img, dtype=np.uint8)
        if arr.ndim < 3:
            arr = np.expand_dims(arr, axis=-1)  # give 2-D inputs a channel axis
        arr = np.rollaxis(arr, 2)  # move the channel axis to the front
        return torch.from_numpy(arr).to(dtype=self.dtype)
|
def _pil_interp(method):
    """Map an interpolation name to the matching PIL resampling constant.

    Unrecognized names fall back to bilinear.
    """
    lookup = {
        'bicubic': Image.BICUBIC,
        'lanczos': Image.LANCZOS,
        'hamming': Image.HAMMING,
    }
    return lookup.get(method, Image.BILINEAR)
|
def transforms_imagenet_eval(img_size=224, crop_pct=None, interpolation='bilinear', use_prefetcher=False, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD):
    """Standard ImageNet eval transform: resize by 1/crop_pct then center crop.

    With ``use_prefetcher`` the pipeline ends in ToNumpy (normalization
    happens later on GPU); otherwise it ends in ToTensor + Normalize.
    """
    crop_pct = crop_pct or DEFAULT_CROP_PCT
    if isinstance(img_size, tuple):
        assert len(img_size) == 2
        if img_size[-1] == img_size[-2]:
            # square target: a single scale size suffices
            scale_size = int(math.floor(img_size[0] / crop_pct))
        else:
            # non-square target: scale each dimension explicitly
            scale_size = tuple(int(dim / crop_pct) for dim in img_size)
    else:
        scale_size = int(math.floor(img_size / crop_pct))
    pipeline = [
        transforms.Resize(scale_size, _pil_interp(interpolation)),
        transforms.CenterCrop(img_size),
    ]
    if use_prefetcher:
        pipeline.append(ToNumpy())
    else:
        pipeline.extend([
            transforms.ToTensor(),
            transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)),
        ])
    return transforms.Compose(pipeline)
|
def add_override_act_fn(name, fn):
    """Register ``fn`` as the override activation function for ``name``."""
    # mutating the module-level dict needs no ``global`` declaration
    _OVERRIDE_FN[name] = fn
|
def update_override_act_fn(overrides):
    """Merge a dict of name -> activation fn into the override table."""
    assert isinstance(overrides, dict)
    # in-place update of the module-level dict; no ``global`` needed
    _OVERRIDE_FN.update(overrides)
|
def clear_override_act_fn():
    """Drop all registered override activation functions."""
    global _OVERRIDE_FN  # rebinding the module-level name requires global
    _OVERRIDE_FN = {}
|
def add_override_act_layer(name, fn):
    """Register ``fn`` as the override activation layer class for ``name``."""
    _OVERRIDE_LAYER[name] = fn
|
def update_override_act_layer(overrides):
    """Merge a dict of name -> activation layer class into the override table."""
    assert isinstance(overrides, dict)
    # in-place update of the module-level dict; no ``global`` needed
    _OVERRIDE_LAYER.update(overrides)
|
def clear_override_act_layer():
    """Drop all registered override activation layer classes."""
    global _OVERRIDE_LAYER  # rebinding the module-level name requires global
    _OVERRIDE_LAYER = {}
|
def get_act_fn(name='relu'):
    """Activation function factory.

    Resolution order: explicit overrides, memory-efficient custom-autograd
    versions, jit-scripted versions, then plain defaults — each gated by the
    exportable/scriptable/no-jit config flags so export- and script-friendly
    functions are returned dynamically.
    """
    if name in _OVERRIDE_FN:
        return _OVERRIDE_FN[name]
    plain_only = config.is_exportable() or config.is_scriptable() or config.is_no_jit()
    if not plain_only and name in _ACT_FN_ME:
        return _ACT_FN_ME[name]
    if config.is_exportable() and name in ('silu', 'swish'):
        # exportable graphs fall back to the plain python swish
        return swish
    jit_ok = not (config.is_exportable() or config.is_no_jit())
    if jit_ok and name in _ACT_FN_JIT:
        return _ACT_FN_JIT[name]
    return _ACT_FN_DEFAULT[name]
|
def get_act_layer(name='relu'):
    """Activation layer (nn.Module class) factory.

    Mirrors get_act_fn: overrides, then memory-efficient, then jit-scripted,
    then default layers, gated by the export/script/no-jit config flags.
    """
    if (name in _OVERRIDE_LAYER):
        return _OVERRIDE_LAYER[name]
    use_me = (not (config.is_exportable() or config.is_scriptable() or config.is_no_jit()))
    if (use_me and (name in _ACT_LAYER_ME)):
        return _ACT_LAYER_ME[name]
    if (config.is_exportable() and (name in ('silu', 'swish'))):
        return Swish
    use_jit = (not (config.is_exportable() or config.is_no_jit()))
    # BUGFIX: membership must be tested against the *layer* table before
    # indexing it; previously this checked _ACT_FN_JIT but indexed
    # _ACT_LAYER_JIT, risking a KeyError when the tables diverge
    if (use_jit and (name in _ACT_LAYER_JIT)):
        return _ACT_LAYER_JIT[name]
    return _ACT_LAYER_DEFAULT[name]
|
def swish(x, inplace: bool = False):
    """Swish / SiLU activation: x * sigmoid(x).

    Described originally as SiLU (https://arxiv.org/abs/1702.03118v3) and
    also as Swish (https://arxiv.org/abs/1710.05941).
    TODO Rename to SiLU with addition to PyTorch
    """
    if inplace:
        return x.mul_(x.sigmoid())
    return x.mul(x.sigmoid())
|
class Swish(nn.Module):
    """Module wrapper around the ``swish`` activation function."""

    def __init__(self, inplace: bool = False):
        super(Swish, self).__init__()
        self.inplace = inplace  # forwarded to swish()

    def forward(self, x):
        return swish(x, self.inplace)
|
def mish(x, inplace: bool = False):
    """Mish activation: x * tanh(softplus(x)).

    https://arxiv.org/abs/1908.08681 — note ``inplace`` is accepted for API
    uniformity but not used by the implementation.
    """
    return x.mul(F.softplus(x).tanh())
|
class Mish(nn.Module):
    """Module wrapper around the ``mish`` activation function."""

    def __init__(self, inplace: bool = False):
        super(Mish, self).__init__()
        self.inplace = inplace  # forwarded to mish() (which ignores it)

    def forward(self, x):
        return mish(x, self.inplace)
|
def sigmoid(x, inplace: bool = False):
    """Sigmoid activation with an optional in-place variant."""
    if inplace:
        return x.sigmoid_()
    return x.sigmoid()
|
class Sigmoid(nn.Module):
    """Sigmoid activation module with an optional in-place mode."""

    def __init__(self, inplace: bool = False):
        super(Sigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        if self.inplace:
            return x.sigmoid_()
        return x.sigmoid()
|
def tanh(x, inplace: bool = False):
    """Tanh activation with an optional in-place variant."""
    if inplace:
        return x.tanh_()
    return x.tanh()
|
class Tanh(nn.Module):
    """Tanh activation module with an optional in-place mode."""

    def __init__(self, inplace: bool = False):
        super(Tanh, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        if self.inplace:
            return x.tanh_()
        return x.tanh()
|
def hard_swish(x, inplace: bool = False):
    """HardSwish: x * relu6(x + 3) / 6."""
    gate = F.relu6(x + 3.0).div_(6.0)
    return x.mul_(gate) if inplace else x.mul(gate)
|
class HardSwish(nn.Module):
    """Module wrapper around the ``hard_swish`` activation function."""

    def __init__(self, inplace: bool = False):
        super(HardSwish, self).__init__()
        self.inplace = inplace  # forwarded to hard_swish()

    def forward(self, x):
        return hard_swish(x, self.inplace)
|
def hard_sigmoid(x, inplace: bool = False):
    """Piecewise-linear sigmoid approximation: relu6(x + 3) / 6."""
    if inplace:
        return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
    return F.relu6(x + 3.0) / 6.0
|
class HardSigmoid(nn.Module):
    """Module wrapper around the ``hard_sigmoid`` activation function."""

    def __init__(self, inplace: bool = False):
        super(HardSigmoid, self).__init__()
        self.inplace = inplace  # forwarded to hard_sigmoid()

    def forward(self, x):
        return hard_sigmoid(x, self.inplace)
|
@torch.jit.script
def swish_jit(x, inplace: bool = False):
    """Jit-scripted Swish/SiLU: x * sigmoid(x); ``inplace`` is ignored.

    SiLU: https://arxiv.org/abs/1702.03118v3, Swish:
    https://arxiv.org/abs/1710.05941.
    TODO Rename to SiLU with addition to PyTorch
    """
    return x.mul(x.sigmoid())
|
@torch.jit.script
def mish_jit(x, _inplace: bool = False):
    """Jit-scripted Mish: x * tanh(softplus(x)); ``_inplace`` is ignored.

    https://arxiv.org/abs/1908.08681
    """
    return x.mul(F.softplus(x).tanh())
|
class SwishJit(nn.Module):
    """Module wrapper around the jit-scripted swish; ``inplace`` is ignored."""

    def __init__(self, inplace: bool = False):
        super(SwishJit, self).__init__()

    def forward(self, x):
        return swish_jit(x)
|
class MishJit(nn.Module):
    """Module wrapper around the jit-scripted mish; ``inplace`` is ignored."""

    def __init__(self, inplace: bool = False):
        super(MishJit, self).__init__()

    def forward(self, x):
        return mish_jit(x)
|
@torch.jit.script
def hard_sigmoid_jit(x, inplace: bool = False):
    """Jit-scripted hard sigmoid: clamp(x + 3, 0, 6) / 6; ``inplace`` ignored."""
    return (x + 3).clamp(min=0, max=6).div(6.0)
|
class HardSigmoidJit(nn.Module):
    """Module wrapper around the jit-scripted hard sigmoid; ``inplace`` ignored."""

    def __init__(self, inplace: bool = False):
        super(HardSigmoidJit, self).__init__()

    def forward(self, x):
        return hard_sigmoid_jit(x)
|
@torch.jit.script
def hard_swish_jit(x, inplace: bool = False):
    """Jit-scripted hard swish: x * clamp(x + 3, 0, 6) / 6; ``inplace`` ignored."""
    return (x * (x + 3).clamp(min=0, max=6).div(6.0))
|
class HardSwishJit(nn.Module):
    """Module wrapper around the jit-scripted hard swish; ``inplace`` ignored."""

    def __init__(self, inplace: bool = False):
        super(HardSwishJit, self).__init__()

    def forward(self, x):
        return hard_swish_jit(x)
|
@torch.jit.script
def swish_jit_fwd(x):
    """Forward pass for the memory-efficient swish: x * sigmoid(x)."""
    return x.mul(torch.sigmoid(x))
|
@torch.jit.script
def swish_jit_bwd(x, grad_output):
    """Backward for swish: grad * (sig(x) * (1 + x * (1 - sig(x))))."""
    sig = torch.sigmoid(x)
    return grad_output * (sig * (1 + x * (1 - sig)))
|
class SwishJitAutoFn(torch.autograd.Function):
    """torch.jit.script-optimised Swish with a memory-efficient backward.

    Saves only the input and recomputes the sigmoid in backward. Inspired by
    conversation btw Jeremy Howard & Adam Pazske:
    https://twitter.com/jeremyphoward/status/1188251041835315200

    Swish - described originally as SiLU (https://arxiv.org/abs/1702.03118v3)
    and also as Swish (https://arxiv.org/abs/1710.05941).
    TODO Rename to SiLU with addition to PyTorch
    """

    @staticmethod
    def forward(ctx, x):
        # keep only the input; the backward pass recomputes everything else
        ctx.save_for_backward(x)
        return swish_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        return swish_jit_bwd(x, grad_output)
|
def swish_me(x, inplace=False):
    """Memory-efficient swish via the custom autograd fn; ``inplace`` ignored."""
    return SwishJitAutoFn.apply(x)
|
class SwishMe(nn.Module):
    """Module wrapper around the memory-efficient swish autograd function."""

    def __init__(self, inplace: bool = False):
        super(SwishMe, self).__init__()

    def forward(self, x):
        return SwishJitAutoFn.apply(x)
|
@torch.jit.script
def mish_jit_fwd(x):
    """Forward pass for the memory-efficient mish: x * tanh(softplus(x))."""
    return x.mul(torch.tanh(F.softplus(x)))
|
@torch.jit.script
def mish_jit_bwd(x, grad_output):
    """Backward for mish: grad * (tanh_sp + x * sig(x) * (1 - tanh_sp^2))."""
    sig = torch.sigmoid(x)
    tanh_sp = F.softplus(x).tanh()
    return grad_output.mul(tanh_sp + x * sig * (1 - tanh_sp * tanh_sp))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.