code stringlengths 17 6.64M |
|---|
def _child(new, *args, **kwargs):
    'Stub child for the `delegates` tests; its *args/**kwargs are expected to be replaced by the parent signature.'
    ...
|
class TestDelegates():
    def test_default(self):
        'The delegated child signature should expose the parent parameters.'
        delegated = delegates(_parent)(_child)
        params = list(inspect.signature(delegated).parameters)
        assert params == ['new', 'a', 'b', 'c'], 'Incorrect delegated signature.'
|
def _stringify(x, suffix=''):
'Helper.'
return f'{x}{suffix}'
|
class TestMapContainer():
    def test_single(self):
        'Test with a single input, i.e. equivalent to the original function.'
        value = 2
        mapped = map_container(_stringify)
        assert (mapped(value) == _stringify(value)), "Error in 'map_apply' single input"
        assert (mapped(value, suffix='***') == _stringify(value, suffix='***')), "Error in 'map_apply' single input"

    def test_multi(self):
        'Test using nested sequences/dicts.'
        nested = [2, {'a': 'test', 'b': {1}, 'c': (1, 2)}]
        expected_plain = ['2', {'a': 'test', 'b': {'1'}, 'c': ('1', '2')}]
        expected_suffixed = ['2***', {'a': 'test***', 'b': {'1***'}, 'c': ('1***', '2***')}]
        mapped = map_container(_stringify)
        assert (mapped(nested) == expected_plain), "Error in 'map_apply' sequence input"
        assert (mapped(nested, suffix='***') == expected_suffixed), "Error in 'map_apply' sequence input"
|
class Dataset():
    # Minimal dataset base for the retry tests: subclasses pass retry
    # configuration as class keywords (captured in __init_subclass__), and
    # instantiation wraps `getitem` with `retry_new_on_error` so failing items
    # are retried with a different index.
    def __init__(self, n):
        # `n`: number of items exposed via __len__.
        self.n = n
        self.log_time = True
        self.timer = MultiLevelTimer()
        # Replace the *class-level* __getitem__ with a retry-wrapped version
        # of the raw `getitem`, configured from the subclass keywords below.
        # NOTE(review): this re-wraps on every instantiation; it always wraps
        # the raw `getitem`, so wrappers do not stack -- confirm that
        # `retry_new_on_error` keeps no cross-instance state.
        self.__class__.__getitem__ = retry_new_on_error(self.__class__.getitem, exc=self.retry_exc, silent=self.silent, max=self.max, use_blacklist=self.use_blacklist)
    def __init_subclass__(cls, retry_exc, silent, max_retries, use_blacklist, **kwargs):
        # Capture the retry configuration passed as class keywords, e.g.
        # `class Sub(Dataset, retry_exc=ValueError, silent=False, ...)`.
        super().__init_subclass__(**kwargs)
        cls.retry_exc = retry_exc
        cls.silent = silent
        cls.max = max_retries
        cls.use_blacklist = use_blacklist
    def __len__(self):
        return self.n
    def getitem(self, item):
        # Subclasses implement the actual item loading here.
        raise NotImplementedError
    def __getitem__(self, item):
        # Fallback indexing (pre-instantiation); replaced in __init__.
        return self.getitem(item)
|
class TestRetryDifferentOnError():
    'Tests for datasets whose `getitem` is wrapped by `retry_new_on_error`.'
    def test_default(self):
        'Test default parameters, catching any exception and logging.'
        class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                if ((item % 2) == 0):
                    raise ValueError
                return ({'item': item}, {}, {'item': str(item)})
        dataset = TmpData(10)
        (x, y, meta) = dataset[1]
        assert (x['item'] == 1), 'Loading of item (without exception) failed.'
        # Fixed message: the logged key is 'errors' (plural).
        assert ('errors' in meta), "Missing 'errors' key when logging errors"
        (x, y, meta) = dataset[2]
        assert (x['item'] != 2), 'Loading of item (with exception) failed.'
    def test_exc_single(self):
        'Test that we can catch a specific exception.'
        class TmpData(Dataset, retry_exc=ValueError, silent=False, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                if ((item % 2) == 0):
                    raise ValueError
                return ({'item': item}, {}, {'item': str(item)})
        (x, y, meta) = TmpData(10)[2]
        assert (x['item'] != 2), 'Loading of item (with exception) failed.'
    def test_exc_ignore(self):
        'Test that we ignore non-specified exceptions.'
        class TmpData(Dataset, retry_exc=ValueError, silent=False, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                if ((item % 2) == 0):
                    raise TypeError
                return ({'item': item}, {}, {'item': str(item)})
        dataset = TmpData(10)
        _ = dataset[1]
        with pytest.raises(TypeError):
            _ = dataset[2]
    def test_exc_multiple(self):
        'Test that we can catch multiple specific exceptions.'
        class TmpData(Dataset, retry_exc=[ValueError, TypeError], silent=False, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                if ((item % 2) == 0):
                    raise ValueError
                if ((item % 3) == 0):
                    raise TypeError
                return ({'item': item}, {}, {'item': str(item)})
        dataset = TmpData(10)
        (x, y, meta) = dataset[2]
        assert (x['item'] != 2), 'Loading of item (with exception) failed.'
        (x, y, meta) = dataset[3]
        # Bug fix: previously asserted `!= 2` after loading item 3.
        assert (x['item'] != 3), 'Loading of item (with exception) failed.'
    def test_exc_none(self):
        'Test that we can disable all exceptions.'
        class TmpData(Dataset, retry_exc=None, silent=False, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                if ((item % 2) == 0):
                    raise ValueError
                if ((item % 3) == 0):
                    raise TypeError
                return ({'item': item}, {}, {'item': str(item)})
        dataset = TmpData(10)
        _ = dataset[5]
        with pytest.raises(ValueError):
            _ = dataset[2]
        with pytest.raises(TypeError):
            _ = dataset[3]
    def test_silent(self):
        'Test that silent mode suppresses error logging into `meta`.'
        class TmpData(Dataset, retry_exc=Exception, silent=True, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                if ((item % 2) == 0):
                    raise ValueError
                return ({'item': item}, {}, {'item': str(item)})
        dataset = TmpData(10)
        (x, y, meta) = dataset[2]
        assert (x['item'] != 2), 'Loading of item (with exception) failed.'
        assert ('errors' not in meta), 'Error when disabling exception catching.'
    def test_max_retries(self):
        'Test behaviour when every item fails: unbounded retries recurse, bounded retries raise.'
        class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                raise ValueError
        with pytest.raises(RecursionError):
            _ = TmpData(10)[0]
        class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=5, use_blacklist=False):
            def getitem(self, item):
                raise ValueError
        with pytest.raises(RuntimeError):
            _ = TmpData(10)[0]
    def test_blacklist(self):
        'Test that we can add items to a blacklist, which are excluded from reloading.'
        class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=None, use_blacklist=True):
            def getitem(self, item):
                if (item != 0):
                    raise ValueError
                return ({'item': item}, {}, {'item': str(item)})
        dataset = TmpData(10)
        # First full pass populates the blacklist with every failing index.
        _ = [dataset[i] for i in range(10)]
        for i in range(10):
            (x, y, meta) = dataset[i]
            assert (x['item'] == 0)
            for j in range(10):
                if (i != j):
                    assert (str(j) not in meta['errors']), 'Error including item in blacklist.'
    def test_blacklist_none(self):
        'Test that items causing exceptions can be repeated when not creating a blacklist.'
        class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=None, use_blacklist=False):
            def getitem(self, item):
                if (item != 0):
                    raise ValueError
                return ({'item': item}, {}, {'item': str(item)})
        dataset = TmpData(10)
        num_errors = []
        for i in range(10):
            (x, y, meta) = dataset[i]
            assert (x['item'] == 0), 'Error loading correct item.'
            num_errors.append(meta['errors'].count('ValueError'))
        assert (max(num_errors) > 1), 'Error when repeating exception items.'
|
def _random_pil(shape):
    'Create a random 8-bit PIL image with the given (H, W, C) shape.'
    data = np.random.randint(0, 255, size=shape, dtype=np.uint8)
    return Image.fromarray(data)
|
def _random_np(shape):
return np.random.rand(*shape).astype(np.float32)
|
def _random_torch(shape):
return torch.rand(shape, dtype=torch.float32)
|
def test_all():
    'Check all expected symbols are imported.'
    expected = {'readlines', 'pil2np', 'np2pil', 'write_yaml', 'load_yaml', 'load_merge_yaml'}
    assert (set(io.__all__) == expected), 'Incorrect keys in `__all__`.'
|
def test_pil2np():
    'Test conversion from PIL to numpy.'
    shape = (100, 200, 3)
    converted = pil2np(_random_pil(shape))
    assert isinstance(converted, np.ndarray), 'Output should be a numpy array.'
    assert (converted.dtype == np.float32), 'Output should be float32.'
    assert (converted.shape == shape), 'Output should be same size as input.'
    assert ((converted.max() <= 1) and (converted.min() >= 0)), 'Output should be normalized [0, 1].'
|
def test_np2pil():
    'Test conversion from numpy to PIL.'
    shape = (h, w, _) = (100, 200, 3)
    image = _random_np(shape)
    out = np2pil(image)
    assert isinstance(out, Image.Image), 'Output should be a PIL Image.'
    assert (out.size == (w, h)), 'Output should be same size as input.'
    # Bug fix: `getextrema()` returns (min, max) per band -- the unpack
    # previously assigned them swapped as (vmax, vmin).
    (vmin, vmax) = out.getextrema()[0]
    assert ((vmax <= 255) and (vmin >= 0)), 'Output should be [0, 255].'
|
def test_all():
    'Check all expected symbols are imported.'
    # NOTE(review): other `test_all` definitions exist in this file; pytest
    # keeps only the last definition per module -- presumably these live in
    # separate test modules. Confirm.
    expected = {'get_logger', 'flatten_dict', 'sort_dict', 'apply_cmap'}
    assert (set(misc.__all__) == expected), 'Incorrect keys in `__all__`.'
|
class TestGetLogger():
    # Bug fix: renamed from `TensorGetLogger` -- pytest only collects classes
    # whose name starts with `Test`, so this suite was silently never run.
    def test_default(self):
        'Test that `get_logger` registers a configured, non-propagating logger.'
        key = 'test1234'
        logger = get_logger(key)
        assert (key in logging.root.manager.loggerDict), 'Logger not created.'
        assert (logger == logging.root.manager.loggerDict[key]), 'Incorrect logger created.'
        assert (not logger.propagate), 'Logger propagate should be disabled.'
        assert (len(logger.handlers) == 1), 'Incorrect number of handlers.'
    def test_duplicate(self):
        'Test that we do not duplicate logger StreamHandlers.'
        name = 'test_duplicate'
        logger = logging.getLogger(name)
        logger.addHandler(logging.StreamHandler())
        assert (len(logger.handlers) == 1), 'Logger should only have one handler.'
        # Plain `logging` happily stacks duplicate handlers...
        logger.addHandler(logging.StreamHandler())
        assert (len(logger.handlers) == 2), 'Logger should have two handlers.'
        # ...while `get_logger` must be idempotent.
        name2 = 'test_duplicate_v2'
        logger2 = get_logger(name2)
        assert (len(logger2.handlers) == 1), 'Custom Logger should only have one handler.'
        logger2 = get_logger(name2)
        assert (len(logger2.handlers) == 1), 'Custom Logger should only have one handler (after second time).'
|
class TestFlatten():
    def test_default(self):
        'Test basic nesting & default separator.'
        source = {'a': 1, 'b': 2, 'c': dict(a=1, b=2)}
        expected = {'a': 1, 'b': 2, 'c/a': 1, 'c/b': 2}
        assert flatten_dict(source) == expected, 'Incorrect flattened keys.'
        assert flatten_dict(source, sep='/') == expected, 'Incorrect separator keys.'
    def test_separator(self):
        'Test custom separator.'
        source = {'a': 1, 'b': 2, 'c': dict(a=1, b=2)}
        expected = {'a': 1, 'b': 2, 'c.a': 1, 'c.b': 2}
        assert flatten_dict(source, sep='.') == expected, 'Incorrect flattened keys.'
    def test_nesting(self):
        'Test multiple nestings.'
        source = {'a': [0, 1, {}], 'b': {'a': 1, 'b': 2, 'c': []}, 'c': {'a': {'a': 0}, 'b': {'b': 1}, 'c': {'c': 2}}}
        expected = {'a': [0, 1, {}], 'b/a': 1, 'b/b': 2, 'b/c': [], 'c/a/a': 0, 'c/b/b': 1, 'c/c/c': 2}
        assert flatten_dict(source) == expected, 'Incorrect flattened keys.'
|
class TestSortedDict():
    def test_sorted_dict(self):
        'Test sorting dict keys.'
        unsorted = dict(b=2, c=1, a=10)
        expected = dict(a=10, b=2, c=1)
        assert sort_dict(unsorted) == expected, 'Incorrect sorted order'
        # Mixed (unorderable) key types must raise.
        with pytest.raises(TypeError):
            sort_dict({'a': 1, 1: 0, 'test': None})
|
class TestApplyCmap():
    def test_default(self):
        'Test applying a cmap with default parameters.'
        values = np.array([0, 0, 0.5, 0.5, 1, 1])
        mapped = apply_cmap(values)
        # Equal inputs must map to equal colours; distinct inputs must differ.
        assert np.allclose(mapped[0], mapped[1]), 'Incorrect 0 mapping.'
        assert np.allclose(mapped[2], mapped[3]), 'Incorrect 0.5 mapping.'
        assert np.allclose(mapped[4], mapped[5]), 'Incorrect 1 mapping.'
        assert not np.allclose(mapped[0], mapped[2]), 'Incorrect 0 vs. 0.5'
        assert not np.allclose(mapped[0], mapped[4]), 'Incorrect 0 vs. 1'
        assert not np.allclose(mapped[2], mapped[4]), 'Incorrect 0.5 vs. 1'
        explicit_cmap = apply_cmap(values, cmap='turbo')
        assert np.allclose(mapped, explicit_cmap), 'Incorrect default colormap, expected "turbo".'
        explicit_range = apply_cmap(values, vmin=values.min(), vmax=values.max())
        assert np.allclose(mapped, explicit_range), 'Incorrect default range.'
    def test_range(self):
        'Test applying a cmap with custom normalization ranges.'
        values = np.array([0, 0, 0.5, 0.5, 1, 1])
        baseline = apply_cmap(values)
        clipped_min = apply_cmap(values, vmin=0.5)
        assert np.allclose(clipped_min[2], clipped_min[3]), 'Incorrect sanity check for same value.'
        assert not np.allclose(clipped_min[3], clipped_min[4]), 'Incorrect sanity check for different value.'
        assert np.allclose(clipped_min[0], clipped_min[2]), 'Incorrect clipping to min value.'
        assert np.allclose(baseline[0], clipped_min[0]), 'Inconsistent min value.'
        assert not np.allclose(clipped_min[2], baseline[2]), 'Incorrect clipping to min value.'
        clipped_max = apply_cmap(values, vmax=0.5)
        assert np.allclose(clipped_max[2], clipped_max[3]), 'Incorrect sanity check for same value.'
        assert not np.allclose(clipped_max[2], clipped_max[0]), 'Incorrect sanity check for different value.'
        assert np.allclose(clipped_max[2], clipped_max[4]), 'Incorrect clipping to max value.'
        assert np.allclose(baseline[5], clipped_max[5]), 'Inconsistent max value.'
        assert not np.allclose(clipped_max[2], baseline[2]), 'Incorrect clipping to max value.'
|
def test_all():
    'Check all expected symbols are imported.'
    expected = {'Timer', 'MultiLevelTimer'}
    assert (set(timers.__all__) == expected), 'Incorrect keys in `__all__`.'
|
class TestTimer():
    def test_options(self):
        'Test that formatting options are set correctly.'
        name, precision = 'Test', 4
        timer = Timer(name=name, as_ms=True, precision=precision)
        assert timer.name == name, 'Incorrect Timer name'
        assert repr(timer) == f'Timer(name={name}, as_ms=True, sync_gpu=False, precision={precision})'
        with timer as timed:
            ...
        fields = str(timed).split(' ')
        assert len(fields) == 3, "Incorrect Timer formatting, expected 'name: elapsed units'"
        assert fields[2] == 'ms', 'Incorrect Timer units.'
        assert len(fields[1].split('.')[-1]) <= precision, 'Incorrect Timer precision.'
    def test_accuracy(self):
        'Test that timing is accurate (within 2ms).'
        expected = 0.3
        with Timer() as timed:
            time.sleep(expected)
        assert np.allclose(timed.elapsed, expected, atol=0.02), 'Timer off by more than 2ms.'
    @mock.patch('torch.cuda.synchronize')
    def test_sync_gpu(self, sync_fn):
        'Test that torch synchronize is called correctly.'
        # One synchronize on enter, one on exit.
        with Timer(sync_gpu=True):
            ...
        assert sync_fn.call_count == 2, 'Incorrect number of calls to synchronize.'
|
class TestMultiLevelTimer():
    # Suite for `MultiLevelTimer`: labelled, nestable timing contexts.
    def test_options(self):
        'Test that formatting options are set correctly.'
        (name, precision) = ('Test', 4)
        timer = MultiLevelTimer(name=name, as_ms=True, sync_gpu=False, precision=precision)
        assert (repr(timer) == f'MultiLevelTimer(name={name}, as_ms=True, sync_gpu=False, precision={precision})')
        # Smoke-check: formatting an (empty) timer must not raise.
        _ = str(timer)
    def test_keys(self):
        'Test that timer keys are set correctly.'
        timer = MultiLevelTimer()
        with timer('Label'):
            assert ('Label' in timer._data), 'Error setting label name.'
            # Re-entering an already-active label must be rejected.
            with pytest.raises(KeyError):
                _ = timer('Label').__enter__()
        timer = MultiLevelTimer()
        with timer:
            ...
        # Entering without an explicit label falls back to 'Level<depth>'.
        assert ('Level1' in timer._data), 'Error setting default label name.'
    def test_accuracy(self):
        'Test accuracy with single label.'
        target = 0.3
        timer = MultiLevelTimer()
        with timer('Test'):
            time.sleep(target)
        assert np.allclose(timer['Test']['elapsed'], target, atol=0.02), 'Timer off by more than 2ms.'
    def test_nesting(self):
        'Test accuracy with nesting.'
        target = 0.3
        timer = MultiLevelTimer()
        with timer('Test1'):
            time.sleep(target)
            with timer('Test2'):
                time.sleep((target * 2))
        assert (timer['Test2']['depth'] == 2), "Incorrect 'inner' depth level."
        assert (timer['Test1']['depth'] == 1), "Incorrect 'outer' depth level."
        # The outer label covers its own sleep plus the whole nested block.
        target2 = (target * 2)
        target1 = (target + target2)
        assert np.allclose(timer['Test2']['elapsed'], target2, atol=0.02), "'Inner' off by more than 2ms."
        assert np.allclose(timer['Test1']['elapsed'], target1, atol=0.02), "'Outer' off by more than 2ms."
    @mock.patch('torch.cuda.synchronize')
    def test_sync_gpu(self, sync_fn):
        'Test that torch synchronize is called correctly.'
        timer = MultiLevelTimer(sync_gpu=True)
        with timer:
            ...
        # One synchronize on enter, one on exit.
        assert (sync_fn.call_count == 2), 'Incorrect number of calls to synchronize.'
    def test_copy(self):
        'Test that timing data is copied correctly and is independent of original timer.'
        timer = MultiLevelTimer(name='Test', as_ms=False, precision=4)
        with timer:
            time.sleep(0.1)
        data = timer.copy()._data
        assert (data == timer._data), 'Incorrect data copied.'
        assert (data is not timer._data), 'Incorrect deep copy of data.'
        # Mutating the copy must not leak back into the original timer.
        data['test'] = 0
        assert ('test' not in timer._data), 'Incorrect deep copy of data.'
    def test_reset(self):
        'Test that timer data can be reset.'
        timer = MultiLevelTimer()
        with timer('Label'):
            ...
        assert ('Label' in timer._data)
        timer.reset()
        assert (timer._data == {}), 'Incorrect data deletion.'
        # Resetting while a timing context is still active must fail.
        with timer('Label'):
            with pytest.raises(RuntimeError):
                timer.reset()
    def test_mean_elapsed(self):
        'Test that timers elapsed time get averaged correctly.'
        # Degenerate inputs pass straight through.
        assert (MultiLevelTimer.mean_elapsed([]) == []), 'Error returning empty list.'
        assert (MultiLevelTimer.mean_elapsed(None) is None), 'Error returning None.'
        (sleep_time1, sleep_time2) = (0.3, 0.9)
        target = ((sleep_time1 + sleep_time2) / 2)
        timer1 = MultiLevelTimer()
        with timer1('Test'):
            time.sleep(sleep_time1)
        timer2 = MultiLevelTimer()
        with timer2('Test'):
            time.sleep(sleep_time2)
        data = MultiLevelTimer.mean_elapsed([timer1, timer2])
        assert np.allclose(data['Test'], target, atol=0.02), "'mean_elapsed' off by more than 2ms."
|
class FontDataLoader():
    'Thin iterable wrapper around a `torch.utils.data.DataLoader`.'
    def __init__(self, dataset, sampler, batch_size):
        loader = torch.utils.data.DataLoader(dataset, sampler=sampler, batch_size=batch_size)
        self.data_loader = loader
    def __iter__(self):
        # Restart from the beginning each time iteration begins.
        self.data_loader_iterator = iter(self.data_loader)
        return self
    def __next__(self):
        return next(self.data_loader_iterator)
|
class FontData():
    'Record for a font image that is loaded lazily and cached.'
    def __init__(self, font_name, font_path, image=None):
        self.font_name = font_name
        self.font_path = font_path
        # Bug fix: the `image` argument used to be ignored (always reset to
        # None), making the parameter dead.
        self.image = image
    def load_data(self, loader):
        # Load and cache the image on first access (identity check vs. None).
        if self.image is None:
            self.image = loader(self.font_path)
        return self.image
    def __repr__(self):
        return ('<FontData font_name: %s>' % self.font_name)
|
class FontDataset(Dataset):
    'The Font Dataset.'
    def __init__(self, root_dir, glyph_size=(64, 64), glyphs_per_image=26):
        self.fonts = self.load_font_filenames(root_dir)
        self.root_dir = root_dir
        self.glyph_size = glyph_size
        self.glyphs_per_image = glyphs_per_image
    def __len__(self):
        return len(self.fonts)
    def __getitem__(self, index):
        # Accept both plain ints and tensor indices.
        idx = index.tolist() if torch.is_tensor(index) else index
        font_data = self.fonts[idx].load_data(image_loader)
        transform = transforms.Compose([
            transforms.Resize(self.glyph_size[0]),
            transforms.Grayscale(num_output_channels=1),
            transforms.ToTensor(),
        ])
        return transform(font_data)
    def load_font_filenames(self, root_dir):
        'Walk `root_dir` and build a FontData entry per file found.'
        assert os.path.isdir(root_dir), ('%s is not a valid directory!' % root_dir)
        fonts = []
        for (root, _, filenames) in sorted(os.walk(root_dir)):
            fonts.extend(FontData(name, os.path.join(root, name)) for name in filenames)
        return fonts
|
def image_loader(path):
    'Open an image file and return it as an RGB PIL image.'
    image = Image.open(path)
    return image.convert('RGB')
|
def l1_and_adversarial_loss(D, G, real_data, generated_data, losses, options):
    'Combined adversarial (min-max) loss plus a weighted L1 reconstruction term.'
    l1_weight = 10
    adversarial = min_max_loss(D, G, real_data, generated_data, losses, options)
    reconstruction = l1_loss(D, G, real_data, generated_data, losses, options)
    return adversarial + (l1_weight * reconstruction)
|
def wasserstein_loss(D, G, real_data, generated_data, losses, options):
    # WGAN-GP critic loss: E[D(generated)] - E[D(real)] + gradient penalty.
    real_loss = D(real_data)
    generated_loss = D(generated_data)
    (batch_size, data_type) = itemgetter('batch_size', 'data_type')(options)
    gradient_penalty_weight = 10
    gradient_penalty = calculate_gradient_penalty(D, real_data, generated_data, batch_size, gradient_penalty_weight, losses, data_type)
    losses['GP'].append(gradient_penalty.data)
    loss = ((generated_loss.mean() - real_loss.mean()) + gradient_penalty)
    # Record per-term scalars for monitoring. NOTE(review): `.data` detaches
    # without autograd tracking; `.detach()` is the modern equivalent.
    losses['Generated'].append(generated_loss.mean().data)
    losses['Real'].append(real_loss.mean().data)
    losses['D'].append(loss.data)
    return loss
|
def min_max_loss(D, G, real_data, generated_data, losses, options):
    'Generator-side min-max loss: maximise the critic score of generated data.'
    return -(D(generated_data).mean())
|
def l1_loss(D, G, real_data, generated_data, losses, options):
    """Mean absolute error between generated and real data.

    Both `real_data` and `generated_data` are expected to share a shape.
    """
    criterion = torch.nn.L1Loss()
    return criterion(generated_data, real_data)
|
def calculate_gradient_penalty(D, real_data, generated_data, batch_size, gradient_penalty_weight, losses, data_type):
    'WGAN-GP penalty: push critic gradient norms on random interpolates towards 1.'
    # Per-sample mixing coefficient, broadcast over the data dimensions.
    mix = torch.rand(batch_size, 1, 1, 1).expand_as(real_data).type(data_type)
    interpolated = ((mix * real_data.data) + ((1 - mix) * generated_data.data)).type(data_type)
    interpolated.requires_grad = True
    critic_out = D(interpolated)
    ones = torch.ones(critic_out.size()).type(data_type)
    gradients = torch_grad(outputs=critic_out, inputs=interpolated, grad_outputs=ones, create_graph=True, retain_graph=True)[0]
    gradients = gradients.view(batch_size, -1)
    losses['gradient_norm'].append(gradients.norm(2, dim=1).mean().data)
    # Epsilon avoids a zero-gradient sqrt singularity.
    gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)
    return gradient_penalty_weight * ((gradients_norm - 1) ** 2).mean()
|
def build_font_shape_generator(glyph_size=(64, 64, 1), glyph_count=26, dimension=16):
    """Generator model for our GAN.

    Architecture is similar to DC-GAN except that the input is an image.

    Inputs:
    - `glyph_size`: (W, H, C) size of the input glyph images.
    - `glyph_count`: number of characters generated per alphabet.
    - `dimension`: base feature depth.
    """
    return intermediate_generator_alt(glyph_size=glyph_size, glyph_count=glyph_count, dimension=dimension)
|
def simple_upscale_generator(dimension):
    """Upscaling generator built from stacked ConvTranspose2d layers.

    Inputs:
    - `dimension`: scales the number of intermediate feature channels.

    Output: a sigmoid-activated single-channel image, spatially upscaled
    (4x in height, 8x in width overall).
    """
    layers = [
        nn.ConvTranspose2d(in_channels=1, out_channels=(8 * dimension), kernel_size=(4, 4), stride=(2, 2), padding=(1, 1)),
        nn.ReLU(),
        nn.ConvTranspose2d(in_channels=(8 * dimension), out_channels=(4 * dimension), kernel_size=(4, 4), stride=(2, 2), padding=(1, 1)),
        nn.ReLU(),
        nn.ConvTranspose2d(in_channels=(4 * dimension), out_channels=(2 * dimension), kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        nn.ReLU(),
        nn.ConvTranspose2d(in_channels=(2 * dimension), out_channels=dimension, kernel_size=(3, 4), stride=(1, 2), padding=(1, 1)),
        nn.ReLU(),
        nn.ConvTranspose2d(in_channels=dimension, out_channels=1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        nn.Sigmoid(),
    ]
    return nn.Sequential(*layers)
|
def intermediate_generator(glyph_size=(64, 64), glyph_count=26, dimension=16):
    'DC-GAN-style generator: conv encoder, fully-connected bottleneck, deconv decoder.'
    linear_width = int((((((2 * dimension) * glyph_size[0]) / 4) * glyph_size[1]) / 4))
    hidden_width = int((glyph_size[0] * glyph_size[1]))
    final_width = int(((((4 * dimension) * glyph_count) * 2) * 2))
    encoder = [nn.Conv2d(1, dimension, 4, 2, 1), nn.LeakyReLU(0.2), nn.Conv2d(dimension, (2 * dimension), 4, 2, 1), nn.LeakyReLU(0.2)]
    bottleneck = [Flatten(), nn.Linear(in_features=linear_width, out_features=hidden_width), nn.ReLU(), nn.Linear(hidden_width, final_width), nn.ReLU()]
    decoder = [Unflatten(C=(dimension * 4), H=2, W=(2 * glyph_count)), nn.BatchNorm2d((dimension * 4)), nn.ConvTranspose2d((4 * dimension), (2 * dimension), 4, 2, 1), nn.BatchNorm2d((2 * dimension)), nn.ReLU(), nn.ConvTranspose2d((2 * dimension), dimension, 4, 2, 1), nn.BatchNorm2d(dimension), nn.ReLU(), nn.ConvTranspose2d(dimension, 1, 4, 2, 1), nn.Sigmoid()]
    return nn.Sequential(*(encoder + bottleneck + decoder))
|
def intermediate_generator_alt(glyph_size=(16, 16), glyph_count=26, dimension=512):
    'Deeper variant of `intermediate_generator`: three conv stages and mirrored deconv stages.'
    conv_dimensions = [dimension, int((dimension / 2)), int((dimension / 4))]
    fc_layer_widths = [int(((((conv_dimensions[2] * glyph_size[0]) / 8) * glyph_size[1]) / 8)), int((glyph_size[0] * glyph_size[1])), int(((((((glyph_size[0] / 8) * glyph_size[1]) / 8) * glyph_count) * dimension) / 4))]
    upconv_dimensions = [int((dimension / 4)), int((dimension / 8)), int((dimension / 16)), 1]
    encoder = [nn.Conv2d(1, conv_dimensions[0], 4, 2, 1), nn.LeakyReLU(0.2), nn.Conv2d(conv_dimensions[0], conv_dimensions[1], 4, 2, 1), nn.LeakyReLU(0.2), nn.Conv2d(conv_dimensions[1], conv_dimensions[2], 4, 2, 1), nn.LeakyReLU(0.2)]
    bottleneck = [Flatten(), nn.Linear(in_features=fc_layer_widths[0], out_features=fc_layer_widths[1]), nn.ReLU(), nn.Linear(fc_layer_widths[1], fc_layer_widths[2]), nn.ReLU()]
    decoder = [Unflatten(C=upconv_dimensions[0], H=int((glyph_size[0] / 8)), W=(int((glyph_size[1] / 8)) * glyph_count)), nn.BatchNorm2d(upconv_dimensions[0]), nn.ConvTranspose2d(upconv_dimensions[0], upconv_dimensions[1], 4, 2, 1), nn.BatchNorm2d(upconv_dimensions[1]), nn.ReLU(), nn.ConvTranspose2d(upconv_dimensions[1], upconv_dimensions[2], 4, 2, 1), nn.BatchNorm2d(upconv_dimensions[2]), nn.ReLU(), nn.ConvTranspose2d(upconv_dimensions[2], upconv_dimensions[3], 4, 2, 1), nn.Sigmoid()]
    return nn.Sequential(*(encoder + bottleneck + decoder))
|
def build_font_shape_discriminator(image_size=(64, 1664), dimension=16):
    """GlyphGAN critic.

    Inputs:
    - `image_size`: size of the entire alphabet (usually (H, W * 26)).
    - `dimension`: filter depth after the first conv; doubles per layer (1 -> 2 -> 4 -> 8).
    """
    # Four stride-2 convs shrink each spatial dimension by 16x.
    output_size = int((((8 * dimension) * (image_size[0] / 16)) * (image_size[1] / 16)))
    layers = [
        nn.Conv2d(1, dimension, 4, 2, 1), nn.LeakyReLU(0.2),
        nn.Conv2d(dimension, (2 * dimension), 4, 2, 1), nn.LeakyReLU(0.2),
        nn.Conv2d((2 * dimension), (4 * dimension), 4, 2, 1), nn.LeakyReLU(0.2),
        nn.Conv2d((4 * dimension), (8 * dimension), 4, 2, 1), nn.LeakyReLU(0.2),
        Flatten(),
        nn.Linear(output_size, 1),
        nn.Sigmoid(),
    ]
    return nn.Sequential(*layers)
|
def get_optimizer(model, learning_rate=0.0002, beta1=0.5, beta2=0.99):
    """Adam optimizer for a model.

    Input:
    - model: A PyTorch model whose parameters will be optimized.

    Returns:
    - An Adam optimizer configured with the given hyperparameters.
    """
    return optim.Adam(model.parameters(), lr=learning_rate, betas=(beta1, beta2))
|
class Flatten(nn.Module):
    'Flatten every dimension except the leading batch dimension.'
    def forward(self, x):
        # Generalized from the original 4-tuple unpacking (which only
        # accepted exactly 4-D input): works for any tensor with a batch dim.
        return x.view(x.size(0), -1)
|
class Unflatten(nn.Module):
    """Reshape an input of shape (N, C*H*W) into (N, C, H, W)."""
    def __init__(self, N=-1, C=128, H=7, W=7):
        super().__init__()
        self.N, self.C, self.H, self.W = N, C, H, W
    def forward(self, x):
        return x.view(self.N, self.C, self.H, self.W)
|
def initialize_weights(m):
    'Xavier-uniform init for linear and (transposed) convolution layers; no-op otherwise.'
    if isinstance(m, (nn.Linear, nn.ConvTranspose2d, nn.Conv2d)):
        nn.init.xavier_uniform_(m.weight.data)
|
class TestFontDatasets(unittest.TestCase):
    def test_cannot_create_invalid_font_dataset(self):
        'A non-existent root directory must be rejected.'
        with self.assertRaises(AssertionError):
            FontDataset('does_not_exist')
    def test_can_create_font_dataset(self):
        root = abspath(join(dirname(__file__), 'test_datasets/valid'))
        self.assertEqual(1, len(FontDataset(root)))
    def test_length_of_empty_folder(self):
        root = abspath(join(dirname(__file__), 'test_datasets/empty'))
        self.assertEqual(0, len(FontDataset(root)))
|
def show_grayscale_image(image):
    'Display a single-channel tensor image via matplotlib (3-channel grayscale).'
    to_displayable = transforms.Compose([transforms.ToPILImage(), transforms.Grayscale(num_output_channels=3)])
    plot.imshow(to_displayable(image))
    plot.axis('off')
    plot.show()
|
def CreateDataset(opt):
    'Instantiate the dataset class matching `opt.arch` (sampling for vae/gan, evaluation otherwise).'
    if opt.arch in ('vae', 'gan'):
        from data.grasp_sampling_data import GraspSamplingData
        return GraspSamplingData(opt)
    from data.grasp_evaluator_data import GraspEvaluatorData
    return GraspEvaluatorData(opt)
|
class DataLoader():
    'multi-threaded data loading'
    def __init__(self, opt):
        self.opt = opt
        self.dataset = CreateDataset(opt)
        # Wrap the project dataset in a torch DataLoader; batches group
        # `num_objects_per_batch` objects and use the project `collate_fn`.
        self.dataloader = torch.utils.data.DataLoader(self.dataset, batch_size=opt.num_objects_per_batch, shuffle=(not opt.serial_batches), num_workers=int(opt.num_threads), collate_fn=collate_fn)
    def __len__(self):
        # Reported length is capped by `max_dataset_size`.
        return min(len(self.dataset), self.opt.max_dataset_size)
    def __iter__(self):
        for (i, data) in enumerate(self.dataloader):
            # NOTE(review): this cap uses `opt.batch_size` while the loader
            # batches by `opt.num_objects_per_batch` -- confirm these are
            # intended to be the same quantity.
            if ((i * self.opt.batch_size) >= self.opt.max_dataset_size):
                break
            (yield data)
|
def create_model(opt):
    'Factory for the grasp network model (import deferred to avoid cycles).'
    from .grasp_net import GraspNetModel
    return GraspNetModel(opt)
|
class GraspNetModel():
' Class for training Model weights\n\n :args opt: structure containing configuration params\n e.g.,\n --dataset_mode -> sampling / evaluation)\n '
def __init__(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.is_train = opt.is_train
if (self.gpu_ids and (self.gpu_ids[0] >= torch.cuda.device_count())):
self.gpu_ids[0] = (torch.cuda.device_count() - 1)
self.device = (torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu'))
self.save_dir = join(opt.checkpoints_dir, opt.name)
self.optimizer = None
self.loss = None
self.pcs = None
self.grasps = None
self.net = networks.define_classifier(opt, self.gpu_ids, opt.arch, opt.init_type, opt.init_gain, self.device)
self.criterion = networks.define_loss(opt)
self.confidence_loss = None
if (self.opt.arch == 'vae'):
self.kl_loss = None
self.reconstruction_loss = None
elif (self.opt.arch == 'gan'):
self.reconstruction_loss = None
else:
self.classification_loss = None
if self.is_train:
self.optimizer = torch.optim.Adam(self.net.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.scheduler = networks.get_scheduler(self.optimizer, opt)
if ((not self.is_train) or opt.continue_train):
self.load_network(opt.which_epoch, self.is_train)
def set_input(self, data):
input_pcs = torch.from_numpy(data['pc']).contiguous()
input_grasps = torch.from_numpy(data['grasp_rt']).float()
if (self.opt.arch == 'evaluator'):
targets = torch.from_numpy(data['labels']).float()
else:
targets = torch.from_numpy(data['target_cps']).float()
self.pcs = input_pcs.to(self.device).requires_grad_(self.is_train)
self.grasps = input_grasps.to(self.device).requires_grad_(self.is_train)
self.targets = targets.to(self.device)
def generate_grasps(self, pcs, z=None):
with torch.no_grad():
return self.net.module.generate_grasps(pcs, z=z)
def evaluate_grasps(self, pcs, gripper_pcs):
(success, _) = self.net.module(pcs, gripper_pcs)
return torch.sigmoid(success)
def forward(self):
return self.net(self.pcs, self.grasps, train=self.is_train)
def backward(self, out):
if (self.opt.arch == 'vae'):
(predicted_cp, confidence, mu, logvar) = out
predicted_cp = utils.transform_control_points(predicted_cp, predicted_cp.shape[0], device=self.device)
(self.reconstruction_loss, self.confidence_loss) = self.criterion[1](predicted_cp, self.targets, confidence=confidence, confidence_weight=self.opt.confidence_weight, device=self.device)
self.kl_loss = (self.opt.kl_loss_weight * self.criterion[0](mu, logvar, device=self.device))
self.loss = ((self.kl_loss + self.reconstruction_loss) + self.confidence_loss)
elif (self.opt.arch == 'gan'):
(predicted_cp, confidence) = out
predicted_cp = utils.transform_control_points(predicted_cp, predicted_cp.shape[0], device=self.device)
(self.reconstruction_loss, self.confidence_loss) = self.criterion(predicted_cp, self.targets, confidence=confidence, confidence_weight=self.opt.confidence_weight, device=self.device)
self.loss = (self.reconstruction_loss + self.confidence_loss)
elif (self.opt.arch == 'evaluator'):
(grasp_classification, confidence) = out
(self.classification_loss, self.confidence_loss) = self.criterion(grasp_classification.squeeze(), self.targets, confidence, self.opt.confidence_weight, device=self.device)
self.loss = (self.classification_loss + self.confidence_loss)
self.loss.backward()
def optimize_parameters(self):
self.optimizer.zero_grad()
out = self.forward()
self.backward(out)
self.optimizer.step()
def load_network(self, which_epoch, train=True):
'load model from disk'
save_filename = ('%s_net.pth' % which_epoch)
load_path = join(self.save_dir, save_filename)
net = self.net
if isinstance(net, torch.nn.DataParallel):
net = net.module
print(('loading the model from %s' % load_path))
checkpoint = torch.load(load_path, map_location=self.device)
if hasattr(checkpoint['model_state_dict'], '_metadata'):
del checkpoint['model_state_dict']._metadata
net.load_state_dict(checkpoint['model_state_dict'])
if train:
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
self.opt.epoch_count = checkpoint['epoch']
else:
net.eval()
def save_network(self, net_name, epoch_num):
'save model to disk'
save_filename = ('%s_net.pth' % net_name)
save_path = join(self.save_dir, save_filename)
torch.save({'epoch': (epoch_num + 1), 'model_state_dict': self.net.module.cpu().state_dict(), 'optimizer_state_dict': self.optimizer.state_dict(), 'scheduler_state_dict': self.scheduler.state_dict()}, save_path)
if ((len(self.gpu_ids) > 0) and torch.cuda.is_available()):
self.net.cuda(self.gpu_ids[0])
def update_learning_rate(self):
'update learning rate (called once every epoch)'
self.scheduler.step()
lr = self.optimizer.param_groups[0]['lr']
print(('learning rate = %.7f' % lr))
def test(self):
    """Evaluate the model without gradients.

    Returns:
        (reconstruction_loss, 1) for the 'vae'/'gan' samplers, or
        (num_correct, num_examples) for the evaluator.
    """
    with torch.no_grad():
        (prediction, confidence) = self.forward()
        if (self.opt.arch in ('vae', 'gan')):
            predicted_cp = utils.transform_control_points(prediction, prediction.shape[0], device=self.device)
            # the VAE criterion is a (kl, reconstruction) pair; the GAN one is bare
            criterion = (self.criterion[1] if (self.opt.arch == 'vae') else self.criterion)
            (reconstruction_loss, _) = criterion(predicted_cp, self.targets, confidence=confidence, confidence_weight=self.opt.confidence_weight, device=self.device)
            return (reconstruction_loss, 1)
        predicted = torch.round(torch.sigmoid(prediction)).squeeze()
        correct = (predicted == self.targets).sum().item()
        return (correct, len(self.targets))
|
def control_point_l1_loss_better_than_threshold(pred_control_points, gt_control_points, confidence, confidence_threshold, device='cpu'):
    """L1 control-point loss restricted to confident predictions.

    Args:
        pred_control_points: (N, M, D) predicted gripper control points.
        gt_control_points: (N, M, D) ground-truth control points.
        confidence: (N, 1) per-grasp confidence scores.
        confidence_threshold: float; only grasps with confidence >= threshold
            contribute to the loss.

    Returns:
        (masked L1 loss, fraction of grasps above the threshold).
    """
    npoints = pred_control_points.shape[1]
    mask = torch.greater_equal(confidence, confidence_threshold)
    # BUG FIX: torch.mean() rejects bool tensors -- cast before averaging.
    mask_ratio = torch.mean(mask.float())
    # broadcast the per-grasp mask to every control point of the grasp
    mask = torch.repeat_interleave(mask, npoints, dim=1)
    p1 = pred_control_points[mask]
    p2 = gt_control_points[mask]
    return (control_point_l1_loss(p1, p2), mask_ratio)
|
def accuracy_better_than_threshold(pred_success_logits, gt, confidence, confidence_threshold, device='cpu'):
    """Class-balanced accuracy over grasps whose confidence exceeds a threshold.

    Args:
        pred_success_logits: (N, C) classification logits.
        gt: (N,) binary ground-truth labels (0/1).
        confidence: (N, 1) per-grasp confidence scores.
        confidence_threshold: float inclusion cutoff.

    Returns:
        (0.5 * (positive accuracy + negative accuracy),
         fraction of grasps above the threshold).
    """
    pred_classes = torch.argmax(pred_success_logits, (- 1))
    # BUG FIX: torch.equal() collapses the whole batch into one bool; the
    # accuracy math below needs the per-element comparison.
    correct = (pred_classes == gt).float()
    mask = torch.squeeze(torch.greater_equal(confidence, confidence_threshold), (- 1))
    # accuracies are computed separately on positives and negatives so an
    # unbalanced batch cannot dominate the score; max(., 1) avoids div-by-zero
    positive_acc = (torch.sum(((correct * mask) * gt)) / torch.max(torch.sum((mask * gt)), torch.tensor(1)))
    negative_acc = (torch.sum(((correct * mask) * (1.0 - gt))) / torch.max(torch.sum((mask * (1.0 - gt))), torch.tensor(1)))
    return ((0.5 * (positive_acc + negative_acc)), (torch.sum(mask) / gt.shape[0]))
|
def control_point_l1_loss(pred_control_points, gt_control_points, confidence=None, confidence_weight=None, device='cpu'):
    """Mean L1 distance between predicted and ground-truth gripper control points.

    When `confidence` is given, each grasp's error is scaled by its confidence
    and a regularizer that discourages zero confidence is returned alongside:
    (loss, -confidence_term). Otherwise a scalar loss is returned.
    """
    per_point = torch.abs(pred_control_points - gt_control_points).sum((- 1))
    per_grasp = per_point.mean((- 1))
    if confidence is None:
        return torch.mean(per_grasp)
    assert (confidence_weight is not None)
    per_grasp = per_grasp * confidence
    # the 1e-10 floor keeps log() finite when confidence collapses to zero
    floored = torch.max(confidence, torch.tensor(1e-10).to(device))
    confidence_term = (torch.log(floored).mean() * confidence_weight)
    return (torch.mean(per_grasp), (- confidence_term))
|
def classification_with_confidence_loss(pred_logit, gt, confidence, confidence_weight, device='cpu'):
    """Binary cross-entropy plus a regularizer penalizing zero confidence.

    Returns:
        (cross-entropy loss, negative confidence regularization term).
    """
    bce = torch.nn.functional.binary_cross_entropy_with_logits(pred_logit, gt)
    # floor at 1e-10 so log() stays finite for vanishing confidence
    floored = torch.max(confidence, torch.tensor(1e-10).to(device))
    confidence_term = (torch.log(floored).mean() * confidence_weight)
    return (bce, (- confidence_term))
|
def min_distance_loss(pred_control_points, gt_control_points, confidence=None, confidence_weight=None, threshold=None, device='cpu'):
    """Mean, over ground-truth grasps, of the L1 distance to the closest prediction.

    Args:
        pred_control_points: (N_pred, M, 4) predicted control points; N_pred is
            the number of grasps, M the number of points on the gripper.
        gt_control_points: (N_gt, M, 4) ground-truth control points (shape must
            match pred_control_points).
        confidence: optional per-prediction confidence tensor.
        confidence_weight: float weight for the confidence regularizer
            (required when confidence is given).
        threshold: unused; kept for interface compatibility.

    Returns:
        (mean minimum distance, negative confidence regularization term).
    """
    pred_shape = pred_control_points.shape
    gt_shape = gt_control_points.shape
    if (len(pred_shape) != 3):
        raise ValueError('pred_control_point should have len of 3. {}'.format(pred_shape))
    if (len(gt_shape) != 3):
        raise ValueError('gt_control_point should have len of 3. {}'.format(gt_shape))
    if (pred_shape != gt_shape):
        raise ValueError('shapes do no match {} != {}'.format(pred_shape, gt_shape))
    # (N_pred, N_gt) table of mean per-point L1 distances
    pairwise = torch.abs(pred_control_points.unsqueeze(1) - gt_control_points.unsqueeze(0)).sum((- 1)).mean((- 1))
    # for each ground-truth grasp: distance to, and index of, its nearest prediction
    (min_distance_error, closest_index) = pairwise.min(0)
    if confidence is None:
        confidence_term = 0.0
    else:
        # pick out the confidence of the prediction chosen for each gt grasp
        selected = torch.nn.functional.one_hot(closest_index, num_classes=closest_index.shape[0]).float()
        selected = selected * confidence
        min_distance_error = min_distance_error * selected.sum((- 1))
        floored = torch.max(confidence, torch.tensor(0.0001).to(device))
        confidence_term = (torch.log(floored).mean() * confidence_weight)
    return (torch.mean(min_distance_error), (- confidence_term))
|
def min_distance_better_than_threshold(pred_control_points, gt_control_points, confidence, confidence_threshold, device='cpu'):
    """Mean nearest-ground-truth distance over confident predictions.

    Args:
        pred_control_points: (N_pred, M, D) predicted control points.
        gt_control_points: (N_gt, M, D) ground-truth control points.
        confidence: (N_pred, 1) per-prediction confidence scores.
        confidence_threshold: float inclusion cutoff.

    Returns:
        (mean, over predictions above the threshold, of the distance to the
        nearest ground-truth grasp; fraction of predictions kept).
    """
    # BUG FIX: torch has no expand_dims (that is numpy/TF) -- use unsqueeze.
    error = (pred_control_points.unsqueeze(1) - gt_control_points.unsqueeze(0))
    error = torch.sum(torch.abs(error), (- 1))
    error = torch.mean(error, (- 1))
    # BUG FIX: torch.min(t, dim) returns (values, indices); keep the values.
    error = torch.min(error, (- 1)).values
    mask = torch.greater_equal(confidence, confidence_threshold)
    mask = torch.squeeze(mask, dim=(- 1))
    # BUG FIX: torch.mean rejects bool tensors -- cast before averaging.
    return (torch.mean(error[mask]), torch.mean(mask.float()))
|
def kl_divergence(mu, log_sigma, device='cpu'):
    """KL divergence between N(mu, exp(log_sigma)) and N(0, I), batch-averaged."""
    per_sample = ((- 0.5) * torch.sum((((1.0 + log_sigma) - (mu ** 2)) - torch.exp(log_sigma)), dim=(- 1)))
    return per_sample.mean()
|
def confidence_loss(confidence, confidence_weight, device='cpu'):
    """Weighted mean log-confidence; the 1e-10 floor keeps log() finite."""
    floored = torch.max(confidence, torch.tensor(1e-10).to(device))
    return (torch.log(floored).mean() * confidence_weight)
|
def get_scheduler(optimizer, opt):
    """Build the learning-rate scheduler selected by opt.lr_policy.

    Supported policies: 'lambda' (linear decay after opt.niter epochs),
    'step' (decay by 0.1 every opt.lr_decay_iters), 'plateau'.

    Raises:
        NotImplementedError: for an unrecognized policy.
    """
    if (opt.lr_policy == 'lambda'):
        def lambda_rule(epoch):
            # linear decay to zero over opt.niter_decay epochs once past opt.niter
            lr_l = (1.0 - (max(0, (((epoch + 1) + 1) - opt.niter)) / float((opt.niter_decay + 1))))
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif (opt.lr_policy == 'step'):
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif (opt.lr_policy == 'plateau'):
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        # BUG FIX: the exception was returned (and its message never formatted)
        # instead of being raised.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
|
def init_weights(net, init_type, init_gain):
    """Initialize Conv/Linear weights with the scheme named by init_type
    ('normal'|'xavier'|'kaiming'|'orthogonal'); BatchNorm layers always get
    N(1, init_gain) weights and zero bias."""
    def init_func(m):
        classname = m.__class__.__name__
        is_conv_or_linear = (hasattr(m, 'weight') and (('Conv' in classname) or ('Linear' in classname)))
        if is_conv_or_linear:
            if (init_type == 'normal'):
                init.normal_(m.weight.data, 0.0, init_gain)
            elif (init_type == 'xavier'):
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif (init_type == 'kaiming'):
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif (init_type == 'orthogonal'):
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
        elif ('BatchNorm' in classname):
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)
    net.apply(init_func)
|
def init_net(net, init_type, init_gain, gpu_ids):
    """Move net to the given GPUs (wrapping it in DataParallel) and initialize
    its weights unless init_type == 'none'. Returns the (possibly wrapped) net."""
    if gpu_ids:
        assert torch.cuda.is_available()
        net.cuda(gpu_ids[0])
        net = net.cuda()
        net = torch.nn.DataParallel(net, gpu_ids)
    if (init_type != 'none'):
        init_weights(net, init_type, init_gain)
    return net
|
def define_classifier(opt, gpu_ids, arch, init_type, init_gain, device):
    """Construct the network for `arch` ('vae'|'gan'|'evaluator'), then move it
    to the GPUs and initialize its weights via init_net."""
    if (arch == 'vae'):
        net = GraspSamplerVAE(opt.model_scale, opt.pointnet_radius, opt.pointnet_nclusters, opt.latent_size, device)
    elif (arch == 'gan'):
        net = GraspSamplerGAN(opt.model_scale, opt.pointnet_radius, opt.pointnet_nclusters, opt.latent_size, device)
    elif (arch == 'evaluator'):
        net = GraspEvaluator(opt.model_scale, opt.pointnet_radius, opt.pointnet_nclusters, device)
    else:
        raise NotImplementedError('model name [%s] is not recognized' % arch)
    return init_net(net, init_type, init_gain, gpu_ids)
|
def define_loss(opt):
    """Return the loss callable(s) matching opt.arch.

    vae -> (kl_divergence, control_point_l1_loss); gan -> min_distance_loss;
    evaluator -> classification_with_confidence_loss.
    """
    arch = opt.arch
    if (arch == 'vae'):
        return (losses.kl_divergence, losses.control_point_l1_loss)
    if (arch == 'gan'):
        return losses.min_distance_loss
    if (arch == 'evaluator'):
        return losses.classification_with_confidence_loss
    raise NotImplementedError('Loss not found')
|
class GraspSampler(nn.Module):
    """Common base for grasp samplers: owns the decoder that maps a point cloud
    plus a latent vector to a grasp pose (unit quaternion + translation) and a
    confidence score."""

    def __init__(self, latent_size, device):
        super(GraspSampler, self).__init__()
        self.latent_size = latent_size
        self.device = device

    def create_decoder(self, model_scale, pointnet_radius, pointnet_nclusters, num_input_features):
        # PointNet++ backbone followed by three linear heads:
        # rotation (quaternion), translation, and confidence.
        self.decoder = base_network(pointnet_radius, pointnet_nclusters, model_scale, num_input_features)
        feat_dim = (model_scale * 1024)
        self.q = nn.Linear(feat_dim, 4)
        self.t = nn.Linear(feat_dim, 3)
        self.confidence = nn.Linear(feat_dim, 1)

    def decode(self, xyz, z):
        xyz_features = self.concatenate_z_with_pc(xyz, z).transpose((- 1), 1).contiguous()
        for module in self.decoder[0]:
            (xyz, xyz_features) = module(xyz, xyz_features)
        x = self.decoder[1](xyz_features.squeeze((- 1)))
        quat = F.normalize(self.q(x), p=2, dim=(- 1))
        predicted_qt = torch.cat((quat, self.t(x)), (- 1))
        return (predicted_qt, torch.sigmoid(self.confidence(x)).squeeze())

    def concatenate_z_with_pc(self, pc, z):
        # NOTE: unsqueeze_ mutates the caller's z in place (it becomes (B, 1, L));
        # callers rely on this, so it must stay in-place.
        z.unsqueeze_(1)
        broadcast_z = z.expand((- 1), pc.shape[1], (- 1))
        return torch.cat((pc, broadcast_z), (- 1))

    def get_latent_size(self):
        return self.latent_size
|
class GraspSamplerVAE(GraspSampler):
    """Generative VAE grasp sampler: encodes (point cloud, grasp) pairs into a
    latent Gaussian and decodes latents back into grasp poses."""

    def __init__(self, model_scale, pointnet_radius=0.02, pointnet_nclusters=128, latent_size=2, device='cpu'):
        super(GraspSamplerVAE, self).__init__(latent_size, device)
        self.create_encoder(model_scale, pointnet_radius, pointnet_nclusters)
        self.create_decoder(model_scale, pointnet_radius, pointnet_nclusters, (latent_size + 3))
        self.create_bottleneck((model_scale * 1024), latent_size)

    def create_encoder(self, model_scale, pointnet_radius, pointnet_nclusters):
        # 19 per-point input features: xyz plus the broadcast grasp vector
        # (see _encoder_input) -- confirm against the dataset's grasp encoding
        self.encoder = base_network(pointnet_radius, pointnet_nclusters, model_scale, 19)

    def create_bottleneck(self, input_size, latent_size):
        # latent_space[0] predicts mu, latent_space[1] predicts log-variance
        self.latent_space = nn.ModuleList([nn.Linear(input_size, latent_size), nn.Linear(input_size, latent_size)])

    def encode(self, xyz, xyz_features):
        for module in self.encoder[0]:
            (xyz, xyz_features) = module(xyz, xyz_features)
        return self.encoder[1](xyz_features.squeeze((- 1)))

    def bottleneck(self, z):
        return (self.latent_space[0](z), self.latent_space[1](z))

    def reparameterize(self, mu, logvar):
        std = torch.exp((0.5 * logvar))
        return (mu + (torch.randn_like(std) * std))

    def forward(self, pc, grasp=None, train=True):
        return (self.forward_train(pc, grasp) if train else self.forward_test(pc, grasp))

    def _encoder_input(self, pc, grasp):
        # tile the grasp vector onto every point and move features to channel dim
        grasp_per_point = grasp.unsqueeze(1).expand((- 1), pc.shape[1], (- 1))
        return torch.cat((pc, grasp_per_point), (- 1)).transpose((- 1), 1).contiguous()

    def forward_train(self, pc, grasp):
        z = self.encode(pc, self._encoder_input(pc, grasp))
        (mu, logvar) = self.bottleneck(z)
        (qt, confidence) = self.decode(pc, self.reparameterize(mu, logvar))
        return (qt, confidence, mu, logvar)

    def forward_test(self, pc, grasp):
        # at test time decode the posterior mean instead of a sample
        z = self.encode(pc, self._encoder_input(pc, grasp))
        (mu, _) = self.bottleneck(z)
        (qt, confidence) = self.decode(pc, mu)
        return (qt, confidence)

    def sample_latent(self, batch_size):
        return torch.randn(batch_size, self.latent_size).to(self.device)

    def generate_grasps(self, pc, z=None):
        if (z is None):
            z = self.sample_latent(pc.shape[0])
        (qt, confidence) = self.decode(pc, z)
        return (qt, confidence, z.squeeze())

    def generate_dense_latents(self, resolution):
        """Regular grid of latents covering [-2, 2] along every latent dimension."""
        axes = [torch.linspace((- 2), 2, resolution) for _ in range(self.latent_size)]
        grid = torch.meshgrid(*axes)
        return torch.stack([axis.flatten() for axis in grid], dim=(- 1)).to(self.device)
|
class GraspSamplerGAN(GraspSampler):
    """Grasp sampler with a GAN-flavored name; it actually follows Implicit
    Maximum Likelihood Estimation (https://arxiv.org/pdf/1809.09087.pdf), which
    resembles the GAN formulation while avoiding issues such as mode collapse."""

    def __init__(self, model_scale, pointnet_radius, pointnet_nclusters, latent_size=2, device='cpu'):
        super(GraspSamplerGAN, self).__init__(latent_size, device)
        self.create_decoder(model_scale, pointnet_radius, pointnet_nclusters, (latent_size + 3))

    def sample_latent(self, batch_size):
        # uniform latents in [0, 1), unlike the VAE's Gaussian latents
        return torch.rand(batch_size, self.latent_size).to(self.device)

    def forward(self, pc, grasps=None, train=True):
        return self.decode(pc, self.sample_latent(pc.shape[0]))

    def generate_grasps(self, pc, z=None):
        if (z is None):
            z = self.sample_latent(pc.shape[0])
        (qt, confidence) = self.decode(pc, z)
        return (qt, confidence, z.squeeze())

    def generate_dense_latents(self, resolution):
        """Regular grid of latents covering [0, 1] along every latent dimension."""
        axes = [torch.linspace(0, 1, resolution) for _ in range(self.latent_size)]
        grid = torch.meshgrid(*axes)
        return torch.stack([axis.flatten() for axis in grid], dim=(- 1)).to(self.device)
|
class GraspEvaluator(nn.Module):
    """Scores grasps: given an object point cloud and a gripper point cloud it
    predicts a success logit and a confidence value."""

    def __init__(self, model_scale=1, pointnet_radius=0.02, pointnet_nclusters=128, device='cpu'):
        super(GraspEvaluator, self).__init__()
        self.create_evaluator(pointnet_radius, model_scale, pointnet_nclusters)
        self.device = device

    def create_evaluator(self, pointnet_radius, model_scale, pointnet_nclusters):
        # 4 per-point input features: xyz + object/gripper indicator bit
        self.evaluator = base_network(pointnet_radius, pointnet_nclusters, model_scale, 4)
        feat_dim = (1024 * model_scale)
        self.predictions_logits = nn.Linear(feat_dim, 1)
        self.confidence = nn.Linear(feat_dim, 1)

    def evaluate(self, xyz, xyz_features):
        for module in self.evaluator[0]:
            (xyz, xyz_features) = module(xyz, xyz_features)
        return self.evaluator[1](xyz_features.squeeze((- 1)))

    def forward(self, pc, gripper_pc, train=True):
        (merged_xyz, merged_features) = self.merge_pc_and_gripper_pc(pc, gripper_pc)
        x = self.evaluate(merged_xyz, merged_features.contiguous())
        return (self.predictions_logits(x), torch.sigmoid(self.confidence(x)))

    def merge_pc_and_gripper_pc(self, pc, gripper_pc):
        """Concatenate object and gripper clouds, tagging each point with a
        binary feature (1 = object point, 0 = gripper point)."""
        assert (len(pc.shape) == 3)
        assert (len(gripper_pc.shape) == 3)
        assert (pc.shape[0] == gripper_pc.shape[0])
        batch_size = pc.shape[0]
        l0_xyz = torch.cat((pc, gripper_pc), 1)
        labels = torch.cat([
            torch.ones(pc.shape[1], 1, dtype=torch.float32),
            torch.zeros(gripper_pc.shape[1], 1, dtype=torch.float32),
        ], 0)
        labels = labels.unsqueeze(0).repeat(batch_size, 1, 1)
        l0_points = torch.cat([l0_xyz, labels.to(self.device)], (- 1)).transpose((- 1), 1)
        return (l0_xyz, l0_points)
|
def base_network(pointnet_radius, pointnet_nclusters, scale, in_features):
    """Three PointNet++ set-abstraction layers plus a two-layer MLP head.

    Returns:
        nn.ModuleList([set_abstraction_modules, fc_head]); callers iterate the
        first entry over (xyz, features) and feed the result to the second.
    """
    sa_modules = nn.ModuleList([
        pointnet2.PointnetSAModule(npoint=pointnet_nclusters, radius=pointnet_radius, nsample=64, mlp=[in_features, (64 * scale), (64 * scale), (128 * scale)]),
        pointnet2.PointnetSAModule(npoint=32, radius=0.04, nsample=128, mlp=[(128 * scale), (128 * scale), (128 * scale), (256 * scale)]),
        pointnet2.PointnetSAModule(mlp=[(256 * scale), (256 * scale), (256 * scale), (512 * scale)]),
    ])
    hidden = (1024 * scale)
    fc_layer = nn.Sequential(
        nn.Linear((512 * scale), hidden),
        nn.BatchNorm1d(hidden),
        nn.ReLU(True),
        nn.Linear(hidden, hidden),
        nn.BatchNorm1d(hidden),
        nn.ReLU(True),
    )
    return nn.ModuleList([sa_modules, fc_layer])
|
class BaseOptions():
    """Command-line options shared by training and testing.

    Subclasses (TrainOptions/TestOptions) extend initialize() and must set
    self.is_train there before parse() is called.
    """

    def __init__(self):
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        # NOTE(review): initialize() never flips this to True, so calling
        # parse() twice would re-register every argument and crash argparse --
        # confirm parse() is only invoked once per options object.
        self.initialized = False

    def initialize(self):
        """Register every option common to all architectures and phases."""
        # dataset location and batching
        self.parser.add_argument('--dataset_root_folder', type=str, default='/home/jens/Documents/datasets/grasping/unified_grasp_data/', help='path to root directory of the dataset.')
        self.parser.add_argument('--num_objects_per_batch', type=int, default=1, help='data batch size.')
        self.parser.add_argument('--num_grasps_per_object', type=int, default=64)
        self.parser.add_argument('--npoints', type=int, default=1024, help='number of points in each batch')
        # synthetic occlusion / sensor-noise augmentation
        self.parser.add_argument('--occlusion_nclusters', type=int, default=0, help='clusters the points to nclusters to be selected for simulating the dropout')
        self.parser.add_argument('--occlusion_dropout_rate', type=float, default=0, help='probability at which the clusters are removed from point cloud.')
        self.parser.add_argument('--depth_noise', type=float, default=0.0)
        self.parser.add_argument('--num_grasp_clusters', type=int, default=32)
        # model selection and runtime
        self.parser.add_argument('--arch', choices={'vae', 'gan', 'evaluator'}, default='vae')
        self.parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples per epoch')
        self.parser.add_argument('--num_threads', default=3, type=int, help='# threads for loading data')
        self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes meshes in order, otherwise takes them randomly')
        self.parser.add_argument('--seed', type=int, help='if specified, uses seed')
        # gripper / latent configuration
        self.parser.add_argument('--gripper', type=str, default='panda', help='type of the gripper. Leave it to panda if you want to use it for franka robot')
        self.parser.add_argument('--latent_size', type=int, default=2)
        self.parser.add_argument('--gripper_pc_npoints', type=int, default=(- 1), help='number of points representing the gripper. -1 just uses the points on the finger and also the base. other values use subsampling of the gripper mesh')
        self.parser.add_argument('--merge_pcs_in_vae_encoder', type=int, default=0, help='whether to create unified pc in encoder by coloring the points (similar to evaluator')
        # dataset filtering
        self.parser.add_argument('--allowed_categories', type=str, default='', help='if left blank uses all the categories in the <DATASET_ROOT_PATH>/splits/<category>.json, otherwise only chooses the categories that are set.')
        self.parser.add_argument('--blacklisted_categories', type=str, default='', help='The opposite of allowed categories')
        self.parser.add_argument('--use_uniform_quaternions', type=int, default=0)
        # network capacity and PointNet++ hyper-parameters
        self.parser.add_argument('--model_scale', type=int, default=1, help='the scale of the parameters. Use scale >= 1. Scale=2 increases the number of parameters in model by 4x.')
        self.parser.add_argument('--splits_folder_name', type=str, default='splits', help='Folder name for the directory that has all the jsons for train/test splits.')
        self.parser.add_argument('--grasps_folder_name', type=str, default='grasps', help='Directory that contains the grasps. Will be joined with the dataset_root_folder and the file names as defined in the splits.')
        self.parser.add_argument('--pointnet_radius', help='Radius for ball query for PointNet++, just the first layer', type=float, default=0.02)
        self.parser.add_argument('--pointnet_nclusters', help='Number of cluster centroids for PointNet++, just the first layer', type=int, default=128)
        # weight initialization and loss weighting
        self.parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
        self.parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
        self.parser.add_argument('--grasps_ratio', type=float, default=1.0, help='used for checking the effect of number of grasps per object on the success of the model.')
        self.parser.add_argument('--skip_error', action='store_true', help='Will not fill the dataset with a new grasp if it raises NoPositiveGraspsException')
        self.parser.add_argument('--balanced_data', action='store_true', default=False)
        self.parser.add_argument('--confidence_weight', type=float, default=1.0, help='initially I wanted to compute confidence for vae and evaluator outputs, setting the confidence weight to 1. immediately pushes the confidence to 1.0.')

    def parse(self):
        """Parse known CLI args, derive dependent options, seed RNGs, and (for
        training) prepare the experiment directory.

        Returns:
            The parsed options namespace, or None if the user declines to
            overwrite an existing (non-continued) training directory.
        """
        if (not self.initialized):
            self.initialize()
        # unknown args are tolerated (parse_known_args) so wrappers can pass extras
        (self.opt, unknown) = self.parser.parse_known_args()
        # self.is_train is set by the subclass' initialize()
        self.opt.is_train = self.is_train
        if self.opt.is_train:
            self.opt.dataset_split = 'train'
        else:
            self.opt.dataset_split = 'test'
        self.opt.batch_size = (self.opt.num_objects_per_batch * self.opt.num_grasps_per_object)
        # convert the comma-separated gpu string into a list of non-negative ints
        str_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if (id >= 0):
                self.opt.gpu_ids.append(id)
        if (len(self.opt.gpu_ids) > 0):
            torch.cuda.set_device(self.opt.gpu_ids[0])
        args = vars(self.opt)
        if (self.opt.seed is not None):
            import numpy as np
            import random
            torch.manual_seed(self.opt.seed)
            np.random.seed(self.opt.seed)
            random.seed(self.opt.seed)
        if self.is_train:
            print('------------ Options -------------')
            for (k, v) in sorted(args.items()):
                print(('%s: %s' % (str(k), str(v))))
            print('-------------- End ----------------')
            # derive a unique experiment name from the main hyper-parameters
            # (self.opt.lr is registered by TrainOptions)
            name = self.opt.arch
            name += ((('_lr_' + str(self.opt.lr).split('.')[(- 1)]) + '_bs_') + str(self.opt.batch_size))
            name += ((((('_scale_' + str(self.opt.model_scale)) + '_npoints_') + str(self.opt.pointnet_nclusters)) + '_radius_') + str(self.opt.pointnet_radius).split('.')[(- 1)])
            if ((self.opt.arch == 'vae') or (self.opt.arch == 'gan')):
                name += ('_latent_size_' + str(self.opt.latent_size))
            self.opt.name = name
            expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
            if (os.path.isdir(expr_dir) and (not self.opt.continue_train)):
                # interactive guard against clobbering an existing run
                option = (('Directory ' + expr_dir) + ' already exists and you have not chosen to continue to train.\nDo you want to override that training instance with a new one the press (Y/N).')
                print(option)
                while True:
                    choice = input()
                    if (choice.upper() == 'Y'):
                        print(('Overriding directory ' + expr_dir))
                        shutil.rmtree(expr_dir)
                        utils.mkdir(expr_dir)
                        break
                    elif (choice.upper() == 'N'):
                        print('Terminating. Remember, if you want to continue to train from a saved instance then run the script with the flag --continue_train')
                        return None
            else:
                utils.mkdir(expr_dir)
            # persist the options both as machine-readable yaml and readable txt
            yaml_path = os.path.join(expr_dir, 'opt.yaml')
            with open(yaml_path, 'w') as yaml_file:
                yaml.dump(args, yaml_file)
            file_name = os.path.join(expr_dir, 'opt.txt')
            with open(file_name, 'wt') as opt_file:
                opt_file.write('------------ Options -------------\n')
                for (k, v) in sorted(args.items()):
                    opt_file.write(('%s: %s\n' % (str(k), str(v))))
                opt_file.write('-------------- End ----------------\n')
        return self.opt
|
class TestOptions(BaseOptions):
    """Options for evaluation runs (sets is_train = False)."""

    def initialize(self):
        BaseOptions.initialize(self)
        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        self.is_train = False
|
class TrainOptions(BaseOptions):
    """Options for training runs (sets is_train = True)."""

    def initialize(self):
        BaseOptions.initialize(self)
        # logging / checkpoint cadence
        self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        self.parser.add_argument('--save_latest_freq', type=int, default=250, help='frequency of saving the latest results')
        self.parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs')
        self.parser.add_argument('--run_test_freq', type=int, default=1, help='frequency of running test in training script')
        # resuming
        self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        self.parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
        self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        # optimization schedule
        self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
        self.parser.add_argument('--niter_decay', type=int, default=2000, help='# of iter to linearly decay learning rate to zero')
        self.parser.add_argument('--beta1', type=float, default=0.9, help='momentum term of adam')
        self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        self.parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau')
        self.parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
        self.parser.add_argument('--kl_loss_weight', type=float, default=0.01)
        # visualization
        self.parser.add_argument('--no_vis', action='store_true', help='will not use tensorboard')
        self.parser.add_argument('--verbose_plot', action='store_true', help='plots network weights, etc.')
        self.is_train = True
|
class OnlineObjectRenderer():
    """Renders object meshes with pyrender and back-projects the depth images
    into point clouds; loaded meshes can be cached per (path, scale)."""

    def __init__(self, fov=(np.pi / 6), caching=True):
        """
        Args:
            fov: float, vertical field of view of the camera in radians.
            caching: when True, loaded meshes are kept in a (path, scale) cache.
        """
        self._fov = fov
        # focal lengths in normalized image coordinates derived from the fov
        self._fy = self._fx = (1 / (0.5 / np.tan((self._fov * 0.5))))
        self.mesh = None
        self._scene = None
        self.tmesh = None
        self._init_scene()
        self._current_context = None
        self._cache = ({} if caching else None)
        self._caching = caching

    def _init_scene(self):
        """Create the pyrender scene with a fixed perspective camera."""
        self._scene = pyrender.Scene()
        camera = pyrender.PerspectiveCamera(yfov=self._fov, aspectRatio=1.0, znear=0.001)
        # camera rotated by pi about x -- presumably so it looks along +z in
        # the scene frame; confirm against pyrender's camera convention
        camera_pose = tra.euler_matrix(np.pi, 0, 0)
        self._scene.add(camera, pose=camera_pose, name='camera')
        # the offscreen renderer is created lazily on the first render() call
        self.renderer = None

    def _load_object(self, path, scale):
        """Load (or fetch from cache) a mesh, centered at its vertex mean.

        Returns a context dict with the trimesh copy, a canonical viewing
        distance (5x the largest extent), the pyrender node and the mean used
        for centering.
        """
        if ((path, scale) in self._cache):
            return self._cache[(path, scale)]
        obj = sample.Object(path)
        obj.rescale(scale)
        tmesh = obj.mesh
        # center the mesh so the object rotates about its own centroid
        tmesh_mean = np.mean(tmesh.vertices, 0)
        tmesh.vertices -= np.expand_dims(tmesh_mean, 0)
        lbs = np.min(tmesh.vertices, 0)
        ubs = np.max(tmesh.vertices, 0)
        object_distance = (np.max((ubs - lbs)) * 5)
        mesh = pyrender.Mesh.from_trimesh(tmesh)
        context = {'tmesh': copy.deepcopy(tmesh), 'distance': object_distance, 'node': pyrender.Node(mesh=mesh), 'mesh_mean': np.expand_dims(tmesh_mean, 0)}
        self._cache[(path, scale)] = context
        return self._cache[(path, scale)]

    def change_object(self, path, scale):
        """Swap the currently rendered object for the mesh at (path, scale)."""
        if (self._current_context is not None):
            self._scene.remove_node(self._current_context['node'])
        if (not self._caching):
            # without caching, keep at most the object being loaded right now
            self._cache = {}
        self._current_context = self._load_object(path, scale)
        self._scene.add_node(self._current_context['node'])

    def current_context(self):
        return self._current_context

    def _to_pointcloud(self, depth):
        """Back-project a depth image into homogeneous camera-frame points.

        Returns an (N, 4) array of [x, y, z, 1] rows, one per valid
        (depth > 0) pixel.
        """
        height = depth.shape[0]
        width = depth.shape[1]
        mask = np.where((depth > 0))
        x = mask[1]
        y = mask[0]
        # pixel coordinates normalized to [-0.5, 0.5] around the image center
        normalized_x = ((x.astype(np.float32) - (width * 0.5)) / width)
        normalized_y = ((y.astype(np.float32) - (height * 0.5)) / height)
        world_x = ((self._fx * normalized_x) * depth[(y, x)])
        world_y = ((self._fy * normalized_y) * depth[(y, x)])
        world_z = depth[(y, x)]
        ones = np.ones(world_z.shape[0], dtype=np.float32)
        return np.vstack((world_x, world_y, world_z, ones)).T

    def change_and_render(self, cad_path, cad_scale, pose, render_pc=True):
        """Convenience wrapper: change_object() followed by render()."""
        self.change_object(cad_path, cad_scale)
        (color, depth, pc, transferred_pose) = self.render(pose)
        return (color, depth, pc, transferred_pose)

    def render(self, pose, render_pc=True):
        """Render the current object under `pose`.

        The pose's z-translation is overridden with the object's canonical
        viewing distance. Returns (color, depth, point cloud or None, the
        actually-used pose).

        Raises:
            ValueError: if change_object() has not been called yet.
        """
        if (self.renderer is None):
            self.renderer = pyrender.OffscreenRenderer(400, 400)
        if (self._current_context is None):
            raise ValueError('invoke change_object first')
        transferred_pose = pose.copy()
        transferred_pose[(2, 3)] = self._current_context['distance']
        self._scene.set_pose(self._current_context['node'], transferred_pose)
        (color, depth) = self.renderer.render(self._scene)
        if render_pc:
            pc = self._to_pointcloud(depth)
        else:
            pc = None
        return (color, depth, pc, transferred_pose)

    def render_canonical_pc(self, poses):
        """Render the object under several poses and merge the resulting clouds
        in the object's canonical frame (each cloud is mapped back through the
        inverse of its render pose)."""
        all_pcs = []
        for pose in poses:
            (_, _, pc, pose) = self.render(pose)
            pc = pc.dot(utils.inverse_transform(pose).T)
            all_pcs.append(pc)
        all_pcs = np.concatenate(all_pcs, 0)
        return all_pcs
|
def run_test(epoch=(- 1), name=''):
    """Evaluate the named model over the test split.

    Returns the accumulated accuracy (evaluator) or reconstruction loss
    (samplers) as reported by Writer.acc.
    """
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True
    opt.name = name
    dataset = DataLoader(opt)
    model = create_model(opt)
    writer = Writer(opt)
    writer.reset_counter()
    for data in dataset:
        model.set_input(data)
        writer.update_counter(*model.test())
    writer.print_acc(epoch, writer.acc)
    return writer.acc
|
def main():
    """Training entry point: parse options, then run the epoch/iteration loop
    with periodic logging, checkpointing and test evaluation."""
    opt = TrainOptions().parse()
    if (opt == None):
        # parse() returns None when the user declines to overwrite an
        # existing experiment directory
        return
    dataset = DataLoader(opt)
    dataset_size = (len(dataset) * opt.num_grasps_per_object)
    model = create_model(opt)
    writer = Writer(opt)
    total_steps = 0
    for epoch in range(opt.epoch_count, ((opt.niter + opt.niter_decay) + 1)):
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0
        for (i, data) in enumerate(dataset):
            iter_start_time = time.time()
            if ((total_steps % opt.print_freq) == 0):
                # time spent waiting on the data loader for this iteration
                t_data = (iter_start_time - iter_data_time)
            total_steps += opt.batch_size
            epoch_iter += opt.batch_size
            model.set_input(data)
            model.optimize_parameters()
            if ((total_steps % opt.print_freq) == 0):
                # collect the architecture-specific loss terms for logging
                loss_types = []
                if (opt.arch == 'vae'):
                    loss = [model.loss, model.kl_loss, model.reconstruction_loss, model.confidence_loss]
                    loss_types = ['total_loss', 'kl_loss', 'reconstruction_loss', 'confidence loss']
                elif (opt.arch == 'gan'):
                    loss = [model.loss, model.reconstruction_loss, model.confidence_loss]
                    loss_types = ['total_loss', 'reconstruction_loss', 'confidence_loss']
                else:
                    loss = [model.loss, model.classification_loss, model.confidence_loss]
                    loss_types = ['total_loss', 'classification_loss', 'confidence_loss']
                # per-sample wall-clock time for this optimization step
                t = ((time.time() - iter_start_time) / opt.batch_size)
                writer.print_current_losses(epoch, epoch_iter, loss, t, t_data, loss_types)
                writer.plot_loss(loss, epoch, epoch_iter, dataset_size, loss_types)
            if ((i % opt.save_latest_freq) == 0):
                print(('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps)))
                model.save_network('latest', epoch)
            iter_data_time = time.time()
        if ((epoch % opt.save_epoch_freq) == 0):
            # checkpoint both under 'latest' and under the epoch number
            print(('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps)))
            model.save_network('latest', epoch)
            model.save_network(str(epoch), epoch)
        print(('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, (opt.niter + opt.niter_decay), (time.time() - epoch_start_time))))
        model.update_learning_rate()
        if opt.verbose_plot:
            writer.plot_model_wts(model, epoch)
        if ((epoch % opt.run_test_freq) == 0):
            acc = run_test(epoch, name=opt.name)
            writer.plot_acc(acc, epoch)
    writer.close()
|
class Writer():
    """Logging helper: appends train/test results to text logs and, when
    enabled, mirrors losses, accuracy and weight histograms to tensorboard."""

    def __init__(self, opt):
        self.name = opt.name
        self.opt = opt
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        self.log_name = os.path.join(self.save_dir, 'loss_log.txt')
        self.testacc_log = os.path.join(self.save_dir, 'testacc_log.txt')
        self.start_logs()
        # running counters for test accuracy
        self.nexamples = 0
        self.confidence_acc = 0
        self.ncorrect = 0
        use_tensorboard = (self.opt.is_train and (not self.opt.no_vis) and (SummaryWriter is not None))
        if use_tensorboard:
            self.display = SummaryWriter(logdir=(os.path.join(self.opt.checkpoints_dir, self.opt.name) + '/tensorboard'))
        else:
            self.display = None

    def start_logs(self):
        """Append a timestamped session header to the appropriate log file."""
        if self.opt.is_train:
            target = self.log_name
            header = '================ Training Loss (%s) ================\n'
        else:
            target = self.testacc_log
            header = '================ Testing Acc (%s) ================\n'
        with open(target, 'a') as log_file:
            log_file.write((header % time.strftime('%c')))

    def print_current_losses(self, epoch, i, losses, t, t_data, loss_types='total_loss'):
        """Print one training-loss line and append it to the loss log."""
        if isinstance(losses, list):
            message = ('(epoch: %d, iters: %d, time: %.3f, data: %.3f)' % (epoch, i, t, t_data))
            for (loss_type, loss_value) in zip(loss_types, losses):
                message += (' %s: %.3f' % (loss_type, loss_value.item()))
        else:
            message = ('(epoch: %d, iters: %d, time: %.3f, data: %.3f) loss: %.3f ' % (epoch, i, t, t_data, losses.item()))
        print(message)
        with open(self.log_name, 'a') as log_file:
            log_file.write(('%s\n' % message))

    def plot_loss(self, losses, epoch, i, n, loss_types):
        """Send the loss value(s) to tensorboard (no-op when display is off)."""
        if not self.display:
            return
        iters = (i + ((epoch - 1) * n))
        if isinstance(losses, list):
            for (loss_type, loss_value) in zip(loss_types, losses):
                self.display.add_scalar(('data/train_loss/' + loss_type), loss_value, iters)
        else:
            self.display.add_scalar('data/train_loss', losses, iters)

    def plot_model_wts(self, model, epoch):
        """Dump per-parameter weight histograms to tensorboard."""
        if (self.opt.is_train and self.display):
            for (name, param) in model.net.named_parameters():
                self.display.add_histogram(name, param.clone().cpu().data.numpy(), epoch)

    def print_acc(self, epoch, acc):
        """Print one test-result line and append it to the test log."""
        if (self.opt.arch == 'evaluator'):
            message = 'epoch: {}, TEST ACC: [{:.5} %]\n'.format(epoch, (acc * 100))
        else:
            # for the samplers, `acc` actually holds the reconstruction loss
            message = 'epoch: {}, TEST REC LOSS: [{:.5}]\n'.format(epoch, acc)
        print(message)
        with open(self.testacc_log, 'a') as log_file:
            log_file.write(('%s\n' % message))

    def plot_acc(self, acc, epoch):
        """Send the test metric to tensorboard (no-op when display is off)."""
        if not self.display:
            return
        if (self.opt.arch == 'evaluator'):
            self.display.add_scalar('data/test_acc/grasp_prediction', acc, epoch)
        else:
            self.display.add_scalar('data/test_loss/grasp_reconstruction', acc, epoch)

    def reset_counter(self):
        """Reset the correct/total example counters."""
        self.ncorrect = 0
        self.nexamples = 0

    def update_counter(self, ncorrect, nexamples):
        self.nexamples += nexamples
        self.ncorrect += ncorrect

    @property
    def acc(self):
        return (float(self.ncorrect) / self.nexamples)

    def close(self):
        if (self.display is not None):
            self.display.close()
|
def define_model(x, is_training, model, num_classes):
    """Build the requested tagging architecture and return its output tensors.

    Raises ValueError for an unknown model name.
    """
    # Builders are wrapped in lambdas so only the selected graph is built.
    builders = {
        'MTT_musicnn': lambda: build_musicnn(x, is_training, num_classes, num_filt_midend=64, num_units_backend=200),
        'MTT_vgg': lambda: vgg(x, is_training, num_classes, 128),
        'MSD_musicnn': lambda: build_musicnn(x, is_training, num_classes, num_filt_midend=64, num_units_backend=200),
        'MSD_musicnn_big': lambda: build_musicnn(x, is_training, num_classes, num_filt_midend=512, num_units_backend=500),
        'MSD_vgg': lambda: vgg(x, is_training, num_classes, 128),
    }
    if model not in builders:
        raise ValueError('Model not implemented!')
    return builders[model]()
|
def build_musicnn(x, is_training, num_classes, num_filt_frontend=1.6, num_filt_midend=64, num_units_backend=200):
    """Assemble the full musicnn graph: frontend -> midend -> backend.

    Returns logits plus taps at every stage (timbral/temporal frontend
    groups, the three midend conv maps, pooled features and the penultimate
    dense layer).

    NOTE(review): ``num_filt_frontend`` is accepted but ignored — 1.6 is
    hard-coded below, as in the original; confirm before relying on it.
    """
    front_list = frontend(x, is_training, config.N_MELS, num_filt=1.6, type='7774timbraltemporal')
    front = tf.concat(front_list, 2)
    mid_list = midend(front, is_training, num_filt_midend)
    mid = tf.concat(mid_list, 2)
    (logits, penultimate, mean_pool, max_pool) = backend(mid, is_training, num_classes, num_units_backend, type='globalpool_dense')
    # Frontend taps: first two entries are timbral, last three temporal.
    timbral = tf.concat(front_list[:2], 2)
    temporal = tf.concat(front_list[2:], 2)
    (cnn1, cnn2, cnn3) = (mid_list[1], mid_list[2], mid_list[3])
    # Drop the singleton axis left by the backend pooling.
    mean_pool = tf.squeeze(mean_pool, [2])
    max_pool = tf.squeeze(max_pool, [2])
    return (logits, timbral, temporal, cnn1, cnn2, cnn3, mean_pool, max_pool, penultimate)
|
def frontend(x, is_training, yInput, num_filt, type):
    """musicnn frontend: timbral (frequency) and temporal (time) conv branches.

    NOTE(review): only ``type == '7774timbraltemporal'`` reaches a return
    statement; any other value returns None implicitly, and some branch
    variables would be unbound — confirm callers never pass other types.
    """
    expanded = tf.expand_dims(x, 3)
    normalized_input = tf.compat.v1.layers.batch_normalization(expanded, training=is_training)
    if ('timbral' in type):
        # Pad 3 frames per side so 7-frame kernels preserve the time length.
        input_pad_7 = tf.pad(normalized_input, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
        if ('74' in type):
            # Kernel spans 40% of the mel axis.
            f74 = timbral_block(inputs=input_pad_7, filters=int(num_filt * 128), kernel_size=[7, int(0.4 * yInput)], is_training=is_training)
        if ('77' in type):
            # Kernel spans 70% of the mel axis.
            f77 = timbral_block(inputs=input_pad_7, filters=int(num_filt * 128), kernel_size=[7, int(0.7 * yInput)], is_training=is_training)
    if ('temporal' in type):
        # Three temporal resolutions: 128, 64 and 32 frames.
        s1 = tempo_block(inputs=normalized_input, filters=int(num_filt * 32), kernel_size=[128, 1], is_training=is_training)
        s2 = tempo_block(inputs=normalized_input, filters=int(num_filt * 32), kernel_size=[64, 1], is_training=is_training)
        s3 = tempo_block(inputs=normalized_input, filters=int(num_filt * 32), kernel_size=[32, 1], is_training=is_training)
    if (type == '7774timbraltemporal'):
        return [f74, f77, s1, s2, s3]
|
def timbral_block(inputs, filters, kernel_size, is_training, padding='valid', activation=tf.nn.relu):
    """Conv2D + batch-norm, then max-pool over the whole frequency axis."""
    conv = tf.compat.v1.layers.conv2d(inputs=inputs, filters=filters, kernel_size=kernel_size, padding=padding, activation=activation)
    normed = tf.compat.v1.layers.batch_normalization(conv, training=is_training)
    # Pool across the entire remaining frequency dimension, then drop it.
    freq_dim = normed.shape[2]
    pooled = tf.compat.v1.layers.max_pooling2d(inputs=normed, pool_size=[1, freq_dim], strides=[1, freq_dim])
    return tf.squeeze(pooled, [2])
|
def tempo_block(inputs, filters, kernel_size, is_training, padding='same', activation=tf.nn.relu):
    """Conv2D + batch-norm (same-padded), then max-pool over the frequency axis."""
    conv = tf.compat.v1.layers.conv2d(inputs=inputs, filters=filters, kernel_size=kernel_size, padding=padding, activation=activation)
    normed = tf.compat.v1.layers.batch_normalization(conv, training=is_training)
    # Collapse the frequency dimension completely and squeeze it away.
    freq_dim = normed.shape[2]
    pooled = tf.compat.v1.layers.max_pooling2d(inputs=normed, pool_size=[1, freq_dim], strides=[1, freq_dim])
    return tf.squeeze(pooled, [2])
|
def midend(front_end_output, is_training, num_filt):
    """musicnn midend: three 7-frame conv layers with residual connections.

    Returns the (expanded) input plus each intermediate feature map so the
    caller can tap them individually.
    """
    front_end_output = tf.expand_dims(front_end_output, 3)
    # Pad 3 frames on each side of the time axis so a 7-frame 'valid' conv
    # preserves the temporal length.
    front_end_pad = tf.pad(front_end_output, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
    conv1 = tf.compat.v1.layers.conv2d(inputs=front_end_pad, filters=num_filt, kernel_size=[7, front_end_pad.shape[2]], padding='valid', activation=tf.nn.relu)
    bn_conv1 = tf.compat.v1.layers.batch_normalization(conv1, training=is_training)
    # Swap the filter and (collapsed) feature axes so the next conv spans
    # the filters of the previous layer.
    bn_conv1_t = tf.transpose(bn_conv1, [0, 1, 3, 2])
    bn_conv1_pad = tf.pad(bn_conv1_t, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
    conv2 = tf.compat.v1.layers.conv2d(inputs=bn_conv1_pad, filters=num_filt, kernel_size=[7, bn_conv1_pad.shape[2]], padding='valid', activation=tf.nn.relu)
    bn_conv2 = tf.compat.v1.layers.batch_normalization(conv2, training=is_training)
    conv2 = tf.transpose(bn_conv2, [0, 1, 3, 2])
    # First residual connection: layer-2 output + layer-1 output.
    res_conv2 = tf.add(conv2, bn_conv1_t)
    bn_conv2_pad = tf.pad(res_conv2, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
    conv3 = tf.compat.v1.layers.conv2d(inputs=bn_conv2_pad, filters=num_filt, kernel_size=[7, bn_conv2_pad.shape[2]], padding='valid', activation=tf.nn.relu)
    bn_conv3 = tf.compat.v1.layers.batch_normalization(conv3, training=is_training)
    conv3 = tf.transpose(bn_conv3, [0, 1, 3, 2])
    # Second residual connection: layer-3 output + previous residual sum.
    res_conv3 = tf.add(conv3, res_conv2)
    return [front_end_output, bn_conv1_t, res_conv2, res_conv3]
|
def backend(feature_map, is_training, num_classes, output_units, type):
    """musicnn backend: global max+mean pooling over time, then two dense layers.

    Returns (logits, penultimate dense features, mean pool, max pool).
    """
    # Global temporal statistics.
    max_pool = tf.reduce_max(feature_map, axis=1)
    (mean_pool, var_pool) = tf.nn.moments(feature_map, axes=[1])
    pooled = tf.concat([max_pool, mean_pool], 2)
    # Flatten -> BN -> dropout -> dense -> BN -> dropout -> logits.
    features = tf.compat.v1.layers.flatten(pooled)
    features = tf.compat.v1.layers.batch_normalization(features, training=is_training)
    features = tf.compat.v1.layers.dropout(features, rate=0.5, training=is_training)
    dense = tf.compat.v1.layers.dense(inputs=features, units=output_units, activation=tf.nn.relu)
    bn_dense = tf.compat.v1.layers.batch_normalization(dense, training=is_training)
    dense_dropout = tf.compat.v1.layers.dropout(bn_dense, rate=0.5, training=is_training)
    logits = tf.compat.v1.layers.dense(inputs=dense_dropout, activation=None, units=num_classes)
    return (logits, bn_dense, mean_pool, max_pool)
|
def vgg(x, is_training, num_classes, num_filters=32):
    """VGG-style tagger: five conv/BN/max-pool(/dropout) stages + dense head.

    The five stages were previously five near-identical copy-pasted blocks;
    they now share one helper, which differs only in layer name, pool
    geometry, and whether dropout is applied (the last stage has none).

    Returns (output logits, pool1..pool5 intermediate feature maps).
    """
    def _stage(inputs, name, pool_size, pool_strides, drop_rate):
        """One VGG stage: 3x3 conv -> batch-norm -> max-pool (-> dropout).

        Returns (pool output, stage output); the two differ only when
        dropout is applied.
        """
        conv = tf.compat.v1.layers.conv2d(inputs=inputs, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.relu, name=name)
        bn = tf.compat.v1.layers.batch_normalization(conv, training=is_training)
        pool = tf.compat.v1.layers.max_pooling2d(inputs=bn, pool_size=pool_size, strides=pool_strides)
        if drop_rate is None:
            return (pool, pool)
        return (pool, tf.compat.v1.layers.dropout(pool, rate=drop_rate, training=is_training))

    input_layer = tf.expand_dims(x, 3)
    bn_input = tf.compat.v1.layers.batch_normalization(input_layer, training=is_training)
    # Stage 1 pools only along time ([4, 1]); stage 5 uses a larger [4, 4]
    # pool and no dropout, exactly as in the original layer-by-layer code.
    (pool1, out1) = _stage(bn_input, '1CNN', [4, 1], [2, 2], 0.25)
    (pool2, out2) = _stage(out1, '2CNN', [2, 2], [2, 2], 0.25)
    (pool3, out3) = _stage(out2, '3CNN', [2, 2], [2, 2], 0.25)
    (pool4, out4) = _stage(out3, '4CNN', [2, 2], [2, 2], 0.25)
    (pool5, _) = _stage(out4, '5CNN', [4, 4], [4, 4], None)
    flat_pool5 = tf.compat.v1.layers.flatten(pool5)
    do_pool5 = tf.compat.v1.layers.dropout(flat_pool5, rate=0.5, training=is_training)
    output = tf.compat.v1.layers.dense(inputs=do_pool5, activation=None, units=num_classes)
    return (output, pool1, pool2, pool3, pool4, pool5)
|
def top_tags(file_name, model='MTT_musicnn', topN=3, input_length=3, input_overlap=False, print_tags=True, save_tags=False):
    """Predict the topN tags of the music-clip in ``file_name``.

    Parameters
    ----------
    file_name : str
        Path to the music file to tag.
    model : str
        One of 'MTT_musicnn', 'MTT_vgg', 'MSD_musicnn', 'MSD_musicnn_big',
        'MSD_vgg'. MTT models are trained on MagnaTagATune, MSD models on the
        Million Song Dataset.
    topN : int
        Number of most likely tags to extract.
    input_length : float
        Length (in seconds) of the input spectrogram patches. The vgg models
        only support 3; musicnn models accept other lengths.
    input_overlap : float or False
        Overlap (in seconds) of the input spectrogram patches.
    print_tags : bool
        Whether to print the tags to stdout (they are returned regardless).
    save_tags : str or False
        Path of a log file to append the tags to, or False to skip saving.

    Returns
    -------
    list of str
        The topN most likely tags for the clip.

    Raises
    ------
    ValueError
        If a vgg model is combined with ``input_length != 3``.
    """
    if ('vgg' in model) and (input_length != 3):
        raise ValueError('Set input_length=3, the VGG models cannot handle different input lengths.')
    (taggram, tags) = extractor(file_name, model=model, input_length=input_length, input_overlap=input_overlap, extract_features=False)
    # Average tag likelihood over time, then take the N highest.
    tags_likelihood_mean = np.mean(taggram, axis=0)
    top_indices = tags_likelihood_mean.argsort()[-topN:][::-1]
    topN_tags = [tags[i] for i in top_indices]
    if print_tags:
        print(f'[{file_name}] Top{topN} tags: ')
        for tag in topN_tags:
            print(' - ' + tag)
    if save_tags:
        # 'with' guarantees the log is closed even if a write fails (the
        # previous open()/close() pair leaked the handle on exceptions).
        with open(save_tags, 'a') as out:
            out.write(f'{file_name},{model},input_length={input_length},input_overlap={input_overlap}')
            for tag in topN_tags:
                out.write(',' + tag)
            out.write('\n')
    return topN_tags
|
def parse_args(argv=None):
    """Build and parse command-line arguments for the tagger CLI.

    Parameters
    ----------
    argv : list of str, optional
        Explicit argument list. Defaults to None, in which case argparse
        reads ``sys.argv[1:]`` — identical to the previous behavior. The
        parameter makes the parser testable and usable programmatically.

    Returns
    -------
    argparse.Namespace
        The parsed arguments (file_name, model, topN, length, overlap,
        print, save).
    """
    parser = argparse.ArgumentParser(description='Predict the topN tags of the music-clip in file_name with the selected model')
    parser.add_argument('file_name', type=str, help='audio file to process')
    parser.add_argument('-mod', '--model', metavar='', type=str, default='MTT_musicnn', help='select the music audio tagging model to employ (python -m musicnn.tagger music.mp3 --model MTT_musicnn)', required=False)
    parser.add_argument('-n', '--topN', metavar='', type=int, default=3, help='extract N most likely tags according to the selected model (python -m musicnn.tagger music.mp3 --topN 10)', required=False)
    parser.add_argument('-len', '--length', metavar='', type=float, default=3.0, help='length (in seconds) of the input spectrogram patches (python -m musicnn.tagger music.mp3 -len 3.1)', required=False)
    parser.add_argument('-ov', '--overlap', metavar='', type=float, default=False, help='ammount of overlap (in seconds) of the input spectrogram patches (python -m musicnn.tagger music.mp3 -ov 1.0)', required=False)
    parser.add_argument('-p', '--print', default=False, action='store_true', help='employ --print flag for printing the tags (python -m musicnn.tagger music.mp3 --print)', required=False)
    parser.add_argument('-s', '--save', metavar='', type=str, default=False, help='path where to store/save the tags (python -m musicnn.tagger music.mp3 --save out.tags)', required=False)
    return parser.parse_args(argv)
|
class SubSectionTitleOrder():
    """Sort example gallery by title of subsection.

    Assumes README.txt exists for all subsections and uses the subsection
    with dashes, '---', as the adornment.
    """

    def __init__(self, src_dir):
        self.src_dir = src_dir
        # First line of "word" characters/spaces that is underlined with '-'.
        self.regex = re.compile('^([\\w ]+)\\n-', re.MULTILINE)

    def __repr__(self):
        return f'<{self.__class__.__name__}>'

    def __call__(self, directory):
        src_path = os.path.normpath(os.path.join(self.src_dir, directory))
        # Release highlights always sort first.
        if os.path.basename(src_path) == 'release_highlights':
            return '0'
        try:
            with open(os.path.join(src_path, 'README.txt')) as fh:
                match = self.regex.search(fh.read())
        except FileNotFoundError:
            # No README: fall back to the directory name itself.
            return directory
        return directory if match is None else match.group(1)
|
def gh_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Link to a GitHub issue.

    Numeric ``text`` becomes a link to issues/<n> labelled '#<n>'; any other
    text is used verbatim as the URL slug and label.
    """
    # Fix: the previous mutable defaults (options={}, content=[]) were shared
    # across calls, and set_classes() mutates ``options`` in place.
    if options is None:
        options = {}
    if content is None:
        content = []
    try:
        int(text)
    except ValueError:
        # Not a number: treat the text as a raw repository path.
        slug = text
    else:
        slug = ('issues/' + text)
        text = ('#' + text)
    ref = ('https://github.com/juaml/julearn/' + slug)
    set_classes(options)
    node = reference(rawtext, text, refuri=ref, **options)
    return ([node], [])
|
def setup(app):
    """Sphinx extension entry point: register the ``gh`` role."""
    app.add_role('gh', gh_role)
    return None
|
def pearson_scorer(y_true, y_pred):
    """Pearson correlation coefficient between flattened true/predicted values."""
    result = scipy.stats.pearsonr(y_true.squeeze(), y_pred.squeeze())
    # Index 0 is the correlation statistic (index 1 would be the p-value).
    return result[0]
|
def change_column_type(column: str, new_type: str):
    """Change the type of a column.

    Parameters
    ----------
    column : str
        The column to change the type of.
    new_type : str
        The new type of the column.

    Returns
    -------
    str
        The new column name with the type changed.
    """
    # Keep everything before the first type marker (or the whole name when
    # no marker is present) and append the new type.
    base, _sep, _old_type = column.partition('__:type:__')
    return f'{base}__:type:__{new_type}'
|
def get_column_type(column):
    """Get the type of a column.

    Parameters
    ----------
    column : str
        The column to get the type of.

    Returns
    -------
    str
        The type of the column.
    """
    parts = column.split('__:type:__')
    # Index 1 (not partition) is deliberate: it raises IndexError when the
    # column carries no type marker, matching the established behavior.
    return parts[1]
|
def get_renamer(X_df):
    """Get the dictionary that will rename the columns to add the type.

    Columns that already carry a '__:type:__' marker map to themselves;
    unmarked columns are tagged as 'continuous'.

    Parameters
    ----------
    X_df : pd.DataFrame
        The dataframe to rename the columns of.

    Returns
    -------
    dict
        The dictionary that will rename the columns.
    """
    renamed = {}
    for col in X_df.columns:
        if '__:type:__' in col:
            renamed[col] = col
        else:
            renamed[col] = f'{col}__:type:__continuous'
    return renamed
|
class make_type_selector():
    """Make a type selector.

    This type selector is to be used with
    :class:`sklearn.compose.ColumnTransformer`.

    Parameters
    ----------
    pattern : str
        The pattern to select the columns.
    """

    def __init__(self, pattern):
        self.pattern = pattern

    def __call__(self, X_df):
        """Select the columns based on the pattern.

        Parameters
        ----------
        X_df : pd.DataFrame
            The dataframe to select the columns of.

        Returns
        -------
        list
            The list of selected columns (original, un-typed names where
            a renaming applied).
        """
        renamer = get_renamer(X_df)
        _X_df = X_df.rename(columns=renamer)
        # Invert the renaming so matched (typed) names map back to originals.
        reverse_renamer = {typed: original for (original, typed) in renamer.items()}
        selected_columns = make_column_selector(self.pattern)(_X_df)
        if not selected_columns:
            raise_error(f'No columns selected with pattern {self.pattern} in {_X_df.columns.to_list()}')
        return [reverse_renamer.get(col, col) for col in selected_columns]
|
class ColumnTypes():
    """Class to hold types in regards to a pd.DataFrame Column.

    Parameters
    ----------
    column_types : ColumnTypes or str or list of str or set of str
        One str representing one type of columns, or an iterable of these.
        Instead of a str you can also provide a ColumnTypes itself.
    """

    def __init__(self, column_types: 'ColumnTypesLike'):
        if isinstance(column_types, ColumnTypes):
            _types = column_types._column_types.copy()
        elif isinstance(column_types, str):
            _types = {column_types}
        else:
            # Always build a *fresh* set. The previous implementation kept a
            # reference to a caller-supplied set, so a later ``add`` mutated
            # the caller's object. ``set()`` also raises TypeError for
            # non-iterable input, replacing a previously unreachable error
            # branch.
            _types = set(column_types)
        self._column_types = _types

    def add(self, column_types: 'ColumnTypesLike') -> 'ColumnTypes':
        """Add more column_types to the column_types.

        Parameters
        ----------
        column_types : ColumnTypes or str or list of str
            One str representing one type of columns or a list of these.
            Instead of a str you can also provide a ColumnTypes itself.

        Returns
        -------
        ColumnTypes
            The updated ColumnTypes (self).
        """
        if not isinstance(column_types, ColumnTypes):
            column_types = ColumnTypes(column_types)
        self._column_types.update(column_types)
        return self

    @property
    def pattern(self) -> str:
        """Get the pattern/regex that matches all the column types."""
        return self._to_pattern()

    def to_type_selector(self) -> Callable:
        """Create a type selector (usable by ColumnTransformer) from this type."""
        return make_type_selector(self.pattern)

    def _to_pattern(self):
        """Convert column_types to a pattern/regex usable by a column selector.

        Returns
        -------
        str
            The pattern/regex that matches all the column types.
        """
        # A wildcard type matches everything.
        if ('*' in self._column_types) or ('.*' in self._column_types):
            return '.*'
        types_patterns = []
        for t_type in self._column_types:
            # Already-qualified names and the special 'target' column are used
            # verbatim; plain type names get the type prefix.
            if ('__:type:__' in t_type) or (t_type == 'target'):
                types_patterns.append(t_type)
            else:
                types_patterns.append(f'__:type:__{t_type}')
        return f"(?:{'|'.join(types_patterns)})"

    def __eq__(self, other: Union['ColumnTypes', str]):
        """Check equality against another ColumnTypes (or convertible value)."""
        other = other if isinstance(other, ColumnTypes) else ColumnTypes(other)
        return self._column_types == other._column_types

    def __iter__(self):
        """Iterate over the column_types."""
        return iter(self._column_types)

    def __repr__(self):
        """Get the representation of the ColumnTypes."""
        return f'ColumnTypes<types={self._column_types}; pattern={self.pattern}>'

    def copy(self) -> 'ColumnTypes':
        """Get a copy of the ColumnTypes (the underlying set is copied)."""
        return ColumnTypes(self)
|
def ensure_column_types(attr: ColumnTypesLike) -> ColumnTypes:
    """Ensure that the attribute is a ColumnTypes.

    Parameters
    ----------
    attr : ColumnTypes or str
        The attribute to check.

    Returns
    -------
    ColumnTypes
        The attribute as a ColumnTypes.
    """
    if isinstance(attr, ColumnTypes):
        return attr
    return ColumnTypes(attr)
|
def set_config(key: str, value: Any) -> None:
    """Set a global config value.

    Unknown keys are reported through the project's ``raise_error`` helper.

    Parameters
    ----------
    key : str
        The key to set.
    value : Any
        The value to set.
    """
    if key not in _global_config:
        raise_error(f'Global config {key} does not exist')
    logger.info(f'Setting global config {key} to {value}')
    _global_config[key] = value
|
def get_config(key: str) -> Any:
    """Get a global config value.

    Parameters
    ----------
    key : str
        The key to get.

    Returns
    -------
    Any
        The stored value, or None when the key is absent.
    """
    # dict.get already defaults to None for missing keys.
    return _global_config.get(key)
|
class PipelineInspector():
    """Inspect a fitted pipeline (or a fitted search object wrapping one)."""

    def __init__(self, model):
        # Fail fast if the model has not been fitted yet.
        check_is_fitted(model)
        self._model = model

    def get_step_names(self):
        """Return the names of the pipeline steps."""
        return list(self._model.named_steps.keys())

    def get_step(self, name, as_estimator=False):
        """Return step ``name``, wrapped in an inspector unless ``as_estimator``."""
        step = self._model.named_steps[name]
        if not as_estimator:
            step = _EstimatorInspector(step)
        return step

    def get_params(self):
        """Return the model's hyper-parameters.

        For a fitted search object (one exposing ``best_estimator_``) the
        parameters of the best estimator are returned.
        """
        # BUG FIX: the previous version computed the best estimator's params
        # but discarded the result and always returned the search object's
        # own parameters.
        if hasattr(self._model, 'best_estimator_'):
            return self._model.best_estimator_.get_params()
        return self._model.get_params()

    def get_fitted_params(self):
        """Collect fitted parameters from every pipeline step, prefixed by step name."""
        fitted_params = {}
        model = (self._model.best_estimator_ if hasattr(self._model, 'best_estimator_') else self._model)
        for (name, step) in model.steps:
            params = _EstimatorInspector(step).get_fitted_params()
            fitted_params = {**fitted_params, **{f'{name}__{param}': val for (param, val) in params.items()}}
        return fitted_params
|
class _EstimatorInspector():
    """Thin wrapper exposing the (fitted) parameters of a single estimator."""

    def __init__(self, estimator):
        self._estimator = estimator

    def get_params(self):
        """Return the estimator's hyper-parameters."""
        return self._estimator.get_params()

    def get_fitted_params(self):
        """Return attributes following sklearn's fitted convention (trailing '_')."""
        all_params = vars(self._estimator)
        if isinstance(self._estimator, JuColumnTransformer):
            # Also surface the params of the wrapped column transformer.
            all_params = {**all_params, **vars(self._estimator.column_transformer_.transformers_[0][1])}
        fitted = {}
        for (param, val) in all_params.items():
            if re.match('^[a-zA-Z].*[a-zA-Z0-9]*_$', param):
                fitted[param] = val
        return fitted

    @property
    def estimator(self):
        """The wrapped estimator."""
        return self._estimator
|
def preprocess(pipeline: Pipeline, X: List[str], data: pd.DataFrame, until: Optional[str]=None, with_column_types: bool=False) -> pd.DataFrame:
    """Preprocess data with a pipeline until a certain step (inclusive).

    Parameters
    ----------
    pipeline : Pipeline
        The pipeline to use.
    X : list of str
        The features to use.
    data : pd.DataFrame
        The data to preprocess.
    until : str, optional
        The name of the step to preprocess until (inclusive). If None, will
        preprocess all transform steps (default is None).
    with_column_types : bool, optional
        Whether to keep the '__:type:__' suffixes in the output column names
        (default is False).

    Returns
    -------
    pd.DataFrame
        The preprocessed data.
    """
    _X = data[X]
    # Compute the slice end ``i`` so that pipeline[:i] includes the requested
    # step: -1 means "all steps except the final model"; otherwise i is one
    # past the position of ``until`` (i starts at 1, so matching the first
    # step keeps i == 1, i.e. steps[0] inclusive).
    if (until is None):
        i = (- 1)
    else:
        i = 1
        for (name, _) in pipeline.steps[:(- 1)]:
            if (name == until):
                break
            i += 1
        else:
            # for/else: the loop ran to completion without finding the step.
            raise_error(f'No step named {until} found.')
    df_out = pipeline[:i].transform(_X)
    # Column-type suffixes can only be stripped from a DataFrame output.
    if ((not isinstance(df_out, pd.DataFrame)) and (with_column_types is False)):
        raise_error('The output of the pipeline is not a DataFrame. Cannot remove column types.')
    if (not with_column_types):
        rename_dict = {col: col.split('__:type:__')[0] for col in df_out.columns}
        df_out.rename(columns=rename_dict, inplace=True)
    return df_out
|
class Inspector():
    """Base class for inspector.

    Parameters
    ----------
    scores : pd.DataFrame
        The scores as dataframe.
    model : str, optional
        The model to inspect (default None).
    X : list of str, optional
        The features as list (default None).
    y : str, optional
        The target (default None).
    groups : str, optional
        The grouping labels in case a group CV is used (default None).
    cv : int, optional
        The number of folds for cross-validation (default None).
    """

    def __init__(self, scores: 'pd.DataFrame', model: Union[(str, 'PipelineCreator', List['PipelineCreator'], 'BaseEstimator', None)]=None, X: Optional[List[str]]=None, y: Optional[str]=None, groups: Optional[str]=None, cv: Optional[int]=None) -> None:
        self._scores = scores
        self._model = model
        self._X = X
        self._y = y
        self._groups = groups
        self._cv = cv

    @property
    def model(self) -> PipelineInspector:
        """Return the model wrapped in a PipelineInspector.

        Raises
        ------
        ValueError
            If no ``model`` is provided.
        """
        if self._model is None:
            raise_error('No model was provided. Cannot inspect the model.')
        return PipelineInspector(model=self._model)

    @property
    def folds(self) -> FoldsInspector:
        """Return a FoldsInspector over the cross-validation folds.

        Raises
        ------
        ValueError
            If no ``cv``, ``X`` or ``y`` is provided.
        """
        # Validate in a fixed order so the error message matches whichever
        # required attribute is missing first (cv, then X, then y).
        for attr_name, value in (('cv', self._cv), ('X', self._X), ('y', self._y)):
            if value is None:
                raise_error(f'No {attr_name} was provided. Cannot inspect the folds.')
        return FoldsInspector(scores=self._scores, X=self._X, y=self._y, groups=self._groups, cv=self._cv)
|
def list_searchers() -> List[str]:
    """List all available searching algorithms.

    Returns
    -------
    list of str
        A list of all available searcher names, in registration order.
    """
    return [searcher_name for searcher_name in _available_searchers]
|
def get_searcher(name: str) -> object:
    """Get a searcher by name.

    Parameters
    ----------
    name : str
        The searcher's name.

    Returns
    -------
    object
        A scikit-learn compatible searcher class.

    Raises
    ------
    ValueError
        If the specified searcher is not available.
    """
    if name not in _available_searchers:
        raise_error(f'The specified searcher ({name}) is not available. Valid options are: {list(_available_searchers.keys())}')
    return _available_searchers[name]
|
def register_searcher(searcher_name: str, searcher: object, overwrite: Optional[bool]=None) -> None:
    """Register searcher to julearn.

    This function allows you to add a scikit-learn compatible searching
    algorithm to julearn. Afterwards, you can call it like all other
    searchers in julearn.

    Parameters
    ----------
    searcher_name : str
        Name by which the searcher will be referenced.
    searcher : object
        The searcher class by which the searcher can be initialized.
    overwrite : bool or None, optional
        Whether overwriting an existing entry is allowed (default None):

        * None : overwrite is possible, but warns the user
        * True : overwrite is possible without any warning
        * False : overwrite is not possible, an error is raised instead

    Raises
    ------
    ValueError
        If the specified searcher is already available and overwrite is set
        to False.
    """
    already_registered = searcher_name in list_searchers()
    if already_registered and (overwrite is None):
        warn_with_log(f'searcher named {searcher_name} already exists. Therefore, {searcher_name} will be overwritten. To remove this warn_with_loging set `overwrite=True`. ')
    if already_registered and (overwrite is False):
        raise_error(f'searcher named {searcher_name} already exists and overwrite is set to False, therefore you cannot overwrite existing searchers. Set `overwrite=True` in case you want to overwrite existing searchers.')
    logger.info(f'Registering new searcher: {searcher_name}')
    _available_searchers[searcher_name] = searcher
|
def reset_searcher_register() -> None:
    'Reset the searcher register to its initial state.'
    # Rebind the module-level registry to a deep copy of the pristine default
    # mapping, discarding any searchers registered at runtime. A deep copy
    # keeps the reset template itself immune to later mutation.
    global _available_searchers
    _available_searchers = deepcopy(_available_searchers_reset)
|
def _discretize_y(method: str, y: np.ndarray, n_bins: int) -> np.ndarray:
    """Discretize a continuous target into ``n_bins`` integer labels.

    Parameters
    ----------
    method : str
        Either 'binning' (equal-width bins) or 'quantile'
        (equal-frequency bins).
    y : np.ndarray
        The continuous target values.
    n_bins : int
        The number of bins.

    Returns
    -------
    np.ndarray
        The 1-based bin index for every element of ``y``.
    """
    if (method == 'binning'):
        # Equal-width edges over the observed range of y.
        bins = np.histogram_bin_edges(y, bins=n_bins)
    elif (method == 'quantile'):
        # Edges at evenly spaced quantiles -> roughly equal-sized bins.
        bins = np.quantile(y, np.linspace(0, 1, (n_bins + 1)))
    else:
        # Typo fixed in the message ('discreatization' -> 'discretization').
        raise_error(f'Unknown y discretization method {method}. ', ValueError)
    # Drop the last edge so the maximum value falls into the top bin
    # instead of an extra overflow bin.
    return np.digitize(y, bins=bins[:(- 1)])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.