# NOTE(review): dataset-viewer table header ("code stringlengths 17 6.64M")
# — a scraping artifact, not part of the original source; safe to delete.
def xavier_uniform_init(module, gain=1.0):
    """Xavier-uniform-initialize a Linear/Conv2d module in place.

    Intended for use with ``nn.Module.apply``: matching modules get
    Xavier-uniform weights and zero bias; all other modules pass through
    untouched.

    Args:
        module: any ``nn.Module``; only ``nn.Linear``/``nn.Conv2d`` are touched.
        gain: scaling factor forwarded to ``nn.init.xavier_uniform_``.

    Returns:
        The same module, for chaining.
    """
    if isinstance(module, (nn.Linear, nn.Conv2d)):
        nn.init.xavier_uniform_(module.weight.data, gain)
        # The bias can legitimately be absent (layers built with bias=False);
        # the old code crashed on None here.
        if module.bias is not None:
            nn.init.constant_(module.bias.data, 0)
    return module
|
def adjust_lr(optimizer, init_lr, timesteps, max_timesteps):
    """Linearly anneal the learning rate from ``init_lr`` down to 0.

    The rate is ``init_lr * (1 - timesteps / max_timesteps)`` and is written
    into every parameter group of the optimizer.

    Returns:
        The same optimizer, for chaining.
    """
    decayed = init_lr * (1.0 - timesteps / max_timesteps)
    for group in optimizer.param_groups:
        group['lr'] = decayed
    return optimizer
|
def get_n_params(model):
    """Return the model's parameter count in millions, e.g. '1.001 M params'."""
    total = sum(p.numel() for p in model.parameters())
    return str(np.round(total / 1000000.0, 3)) + ' M params'
|
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
|
class MlpModel(nn.Module):
    """Simple ReLU MLP feature extractor.

    Args:
        input_dims: (int) number of input dimensions.
        hidden_dims: (list) sizes of the hidden layers; defaults to [64, 64].
            (The old signature used a mutable list as the default argument.)
        **kwargs: accepted and ignored, for interface compatibility.
    """

    def __init__(self, input_dims=4, hidden_dims=None, **kwargs):
        super(MlpModel, self).__init__()
        if hidden_dims is None:
            hidden_dims = [64, 64]
        dims = [input_dims] + list(hidden_dims)
        layers = []
        # one Linear+ReLU pair per consecutive (in, out) size pair
        for in_features, out_features in zip(dims[:-1], dims[1:]):
            layers.append(nn.Linear(in_features, out_features))
            layers.append(nn.ReLU())
        self.layers = nn.Sequential(*layers)
        # feature size exposed to downstream heads
        self.output_dim = dims[-1]
        # NOTE(review): orthogonal_init is defined elsewhere in this project
        self.apply(orthogonal_init)

    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return x
|
class NatureModel(nn.Module):
    """Nature-DQN CNN trunk producing a 512-d feature vector.

    The conv stack is sized for 84x84 inputs: the final Linear expects
    64 * 7 * 7 flattened activations.

    Args:
        in_channels: number of channels in the input image.
        **kwargs: accepted and ignored, for interface compatibility.
    """

    def __init__(self, in_channels, **kwargs):
        super(NatureModel, self).__init__()
        stack = [
            nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1),
            nn.ReLU(),
            Flatten(),
            nn.Linear(in_features=(64 * 7 * 7), out_features=512),
            nn.ReLU(),
        ]
        self.layers = nn.Sequential(*stack)
        self.output_dim = 512
        # NOTE(review): orthogonal_init is defined elsewhere in this project
        self.apply(orthogonal_init)

    def forward(self, x):
        return self.layers(x)
|
class ResidualBlock(nn.Module):
    """Pre-activation residual block: ReLU→3x3 conv→ReLU→3x3 conv + skip.

    Channel count and spatial size are preserved (stride 1, padding 1).
    """

    def __init__(self, in_channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels,
                               kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels,
                               kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        # F.relu instead of allocating a fresh nn.ReLU module on every call
        out = F.relu(x)
        out = self.conv1(out)
        out = F.relu(out)
        out = self.conv2(out)
        return out + x
|
class ImpalaBlock(nn.Module):
    """IMPALA CNN block: 3x3 conv → stride-2 max-pool → two residual blocks.

    Halves the spatial resolution and maps ``in_channels`` → ``out_channels``.
    """

    def __init__(self, in_channels, out_channels):
        super(ImpalaBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                              kernel_size=3, stride=1, padding=1)
        # hoisted out of forward(): the pool is stateless (no parameters),
        # so building it once avoids a per-call module allocation
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.res1 = ResidualBlock(out_channels)
        self.res2 = ResidualBlock(out_channels)

    def forward(self, x):
        x = self.conv(x)
        x = self.pool(x)
        x = self.res1(x)
        x = self.res2(x)
        return x
|
class ImpalaModel(nn.Module):
    """IMPALA CNN trunk: three ImpalaBlocks → ReLU → flatten → 256-d features.

    The final Linear expects 32 * 8 * 8 flattened activations, i.e. 64x64
    spatial inputs (each block halves the resolution).

    Args:
        in_channels: number of channels in the input image.
        **kwargs: accepted and ignored, for interface compatibility.
    """

    def __init__(self, in_channels, **kwargs):
        super(ImpalaModel, self).__init__()
        self.block1 = ImpalaBlock(in_channels=in_channels, out_channels=16)
        self.block2 = ImpalaBlock(in_channels=16, out_channels=32)
        self.block3 = ImpalaBlock(in_channels=32, out_channels=32)
        self.fc = nn.Linear(in_features=(32 * 8 * 8), out_features=256)
        self.output_dim = 256
        self.apply(xavier_uniform_init)

    def forward(self, x):
        h = self.block3(self.block2(self.block1(x)))
        h = nn.ReLU()(h)
        h = self.fc(Flatten()(h))
        return nn.ReLU()(h)
|
class GRU(nn.Module):
    """Single-layer GRU wrapper for recurrent actor-critic rollouts.

    ``forward`` supports the two call shapes used by on-policy RL code:
    one-step acting (x has one row per environment) and multi-step training
    (x is a flattened T*N rollout). ``masks`` is 0 at episode boundaries and
    zeroes the hidden state there.
    """

    def __init__(self, input_size, hidden_size):
        super(GRU, self).__init__()
        # NOTE(review): orthogonal_init is defined elsewhere in this project
        self.gru = orthogonal_init(nn.GRU(input_size, hidden_size), gain=1.0)

    def forward(self, x, hxs, masks):
        # Case 1: acting — one timestep per env, x is (N, input_size).
        if (x.size(0) == hxs.size(0)):
            masks = masks.unsqueeze((- 1))
            # add the length-1 time dimension nn.GRU expects; mask resets hxs
            (x, hxs) = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))
            x = x.squeeze(0)
            hxs = hxs.squeeze(0)
        else:
            # Case 2: training — x is a flattened (T*N, input_size) rollout.
            N = hxs.size(0)
            T = int((x.size(0) / N))
            x = x.view(T, N, x.size(1))
            masks = masks.view(T, N)
            # timesteps where any env restarted (mask == 0); the GRU must be
            # re-run from a masked hidden state at each such boundary
            has_zeros = (masks[1:] == 0.0).any(dim=(- 1)).nonzero().squeeze().cpu()
            if (has_zeros.dim() == 0):
                # 0-d tensor → exactly one boundary; +1 converts the masks[1:]
                # offset back to an index into x
                has_zeros = [(has_zeros.item() + 1)]
            else:
                has_zeros = (has_zeros + 1).numpy().tolist()
            # segment endpoints: [0, b1), [b1, b2), ..., [bk, T)
            has_zeros = (([0] + has_zeros) + [T])
            hxs = hxs.unsqueeze(0)
            outputs = []
            for i in range((len(has_zeros) - 1)):
                start_idx = has_zeros[i]
                end_idx = has_zeros[(i + 1)]
                # run one contiguous segment; the mask at its first step zeroes
                # the hidden state of envs that just restarted
                (rnn_scores, hxs) = self.gru(x[start_idx:end_idx], (hxs * masks[start_idx].view(1, (- 1), 1)))
                outputs.append(rnn_scores)
            x = torch.cat(outputs, dim=0)
            # flatten back to (T*N, hidden_size) to match the input layout
            x = x.view((T * N), (- 1))
            hxs = hxs.squeeze(0)
        return (x, hxs)
|
class CategoricalPolicy(nn.Module):
    """Actor-critic head over a feature embedder.

    Args:
        embedder: module that maps observations to a feature vector and
            exposes ``output_dim``.
        recurrent: whether to thread features through a GRU.
        action_size: number of discrete actions.
    """

    def __init__(self, embedder, recurrent, action_size):
        super(CategoricalPolicy, self).__init__()
        self.embedder = embedder
        feat_dim = self.embedder.output_dim
        # small gain for the policy head, standard gain for the value head
        self.fc_policy = orthogonal_init(nn.Linear(feat_dim, action_size), gain=0.01)
        self.fc_value = orthogonal_init(nn.Linear(feat_dim, 1), gain=1.0)
        self.recurrent = recurrent
        if self.recurrent:
            self.gru = GRU(feat_dim, feat_dim)

    def is_recurrent(self):
        return self.recurrent

    def forward(self, x, hx, masks):
        hidden = self.embedder(x)
        if self.recurrent:
            hidden, hx = self.gru(hidden, hx, masks)
        log_probs = F.log_softmax(self.fc_policy(hidden), dim=1)
        p = Categorical(logits=log_probs)
        v = self.fc_value(hidden).reshape(-1)
        return p, v, hx
|
def load_model(args):
    """Build the requested CLIP-based model and prepare it for evaluation.

    'clip_vis' restores a trained checkpoint; 'clip_zero' needs no weights
    and only ensures its experiment directory exists. Any other ``args.model``
    raises ValueError. The model is returned in eval mode.
    """
    if args.model == 'clip_vis':
        model = CLIP_Visual(classes=classes, device=device,
                            inet=(args.dataset == 'imagenet')).to(device)
        # trained model: restore the requested epoch's checkpoint
        model.load_state_dict(torch.load(os.path.join(
            save_dir, args.dataset, args.exp_name, f'epoch_{args.epoch}.pth')))
    elif args.model == 'clip_zero':
        model = CLIP_Zero_Shot(classes=classes, prompt=prompt, device=device).to(device)
        # zero-shot: nothing to load, just make sure the output dir exists
        os.makedirs(os.path.join(save_dir, args.dataset, args.exp_name), exist_ok=True)
    else:
        raise ValueError(f'model = {args.model}, is not supported at the moment')
    model.eval()
    return model
|
def predict(image):
    """Classify one RGB frame with both the trained and zero-shot models.

    Args:
        image: HxWx3 array-like, convertible to uint8 RGB.

    Returns:
        (clippr_pred, clip_pred): rounded regression output of the trained
        model and the argmax class of the zero-shot model.
    """
    global model, zero_shot_model, preprocess, device
    pil_img = Image.fromarray(image.astype('uint8'), 'RGB')
    batch = preprocess(pil_img).unsqueeze(0).to(device)
    model = model.to(device)
    zero_shot_model = zero_shot_model.to(device)
    with torch.no_grad():
        clippr_pred = int(np.round(model(batch)[0].item()))
        clip_pred = zero_shot_model(batch).argmax(dim=1, keepdim=True)[0].item()
    return clippr_pred, clip_pred
|
def sample_assumed_distribution(dist_parameters, num_samples):
    """Draw ``num_samples`` values from an assumed distribution.

    Supported ``dist_parameters['dist_type']`` values:
      - 'gaussian': Normal(mean, std) samples clipped to [min, max];
      - 'costum': resample ``dist_parameters['example']`` with replacement.

    Returns:
        torch.Tensor of shape (num_samples,).

    Raises:
        ValueError: for any other dist_type.
    """
    kind = dist_parameters['dist_type']
    if kind == 'gaussian':
        normal = torch.distributions.Normal(loc=dist_parameters['mean'],
                                            scale=dist_parameters['std'])
        drawn = normal.sample([num_samples])
        return torch.clip(drawn, min=dist_parameters['min'], max=dist_parameters['max'])
    if kind == 'costum':
        drawn = np.random.choice(dist_parameters['example'], size=num_samples, replace=True)
        return torch.tensor(drawn)
    raise ValueError(f'No such supported assumed distribution type as {kind}')
|
class DictX(dict):
    """A dict whose keys are also readable/writable as attributes.

    Adapted from
    https://dev.to/0xbf/use-dot-syntax-to-access-dictionary-key-python-tips-10ec
    """

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError as missing:
            # attribute protocol expects AttributeError, not KeyError
            raise AttributeError(missing)

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError as missing:
            raise AttributeError(missing)

    def __repr__(self):
        return '<DictX ' + dict.__repr__(self) + '>'
|
def save_experiment_hyper_params(args, exp_dir, verbose=True):
    """Write the experiment arguments to ``<exp_dir>/args.txt``.

    One indented "key: value" line per argument. (The old code built each
    line with a mangled multi-line f-string that leaked source indentation
    into the output file.)

    Args:
        args: mapping of argument name → value (iterated by key).
        exp_dir: directory that receives args.txt (must exist).
        verbose: if True, echo the written file line by line.
    """
    args_path = join(exp_dir, 'args.txt')
    with open(args_path, 'w+') as f:
        f.write('\n\n\n')
        f.write('Experiment Args:\n\n')
        for k in args:
            f.write(f'    {k}: {args[k]}\n')
        f.write('\n\n\n')
    if verbose:
        with open(args_path, 'r') as f:
            for line in f:
                print(line)
    return
|
def verify_token(headers, path):
    """Authorize a request from its Bearer token.

    The system token grants access to every path; the user token grants
    access to everything except ``/upload_video`` routes.

    Args:
        headers: mapping with an 'authorization' header of the form
            'Bearer <token>' (the first 7 characters are stripped).
        path: request path being accessed.

    Returns:
        True if the token authorizes the path, else False.
    """
    import hmac  # local import: constant-time comparison for secrets

    token = headers.get('authorization', '')[7:]  # strip 'Bearer ' prefix
    # compare_digest instead of == to avoid leaking the token via timing
    if hmac.compare_digest(os.environ['SYSTEM_TOKEN'], token):
        return True
    if (not path.startswith('/upload_video')) and hmac.compare_digest(os.environ['USER_TOKEN'], token):
        return True
    return False
|
@app.get('/jobid/{task_id}')
def check_job(task_id: str) -> dict:
    """Report the state of a Celery task.

    PENDING (which also covers unknown ids) reports the current reserved-queue
    depth; FAILURE reports the stringified exception; any other state reports
    the task's result.

    Note: the return annotation was ``-> str`` although a dict is always
    returned — FastAPI validates responses against the annotation, so that
    was a live bug. The old code also left ``result`` unbound when a PENDING
    task id was not found in the reserved queue.
    """
    res = celery_workers.AsyncResult(task_id)
    if res.state == states.PENDING:
        reserved_tasks = celery_workers.control.inspect().reserved()
        tasks = []
        if reserved_tasks:
            # flatten the per-worker lists of reserved tasks
            tasks = [item for sublist in reserved_tasks.values() for item in sublist]
        # always assigned, whether or not task_id appears in the queue
        result = {'jobs_in_queue': len(tasks)}
    elif res.state == states.FAILURE:
        result = str(res.result)
    else:
        result = res.result
    return {'state': res.state, 'result': result}
|
def fix_obj(parent_obj):
    """Recursively normalize a Blender object tree for rendering.

    Depth-first: children are fixed before the parent. Resets the X euler
    rotation, pins specific 'pCube*' objects to hard-coded Y positions, and
    forces the global ``mat`` material onto any object that supports
    materials.
    """
    for child in parent_obj.children:
        fix_obj(child)
    parent_obj.rotation_euler.x = 0
    name = parent_obj.name
    if name in ['pCube0', 'pCube1', 'pCube2']:
        parent_obj.location.y = -13
    if name == 'pCube3':
        parent_obj.location.y = -10
    if name == 'pCube5':
        parent_obj.location.y = -9.5
    if 'materials' in dir(parent_obj.data):
        if parent_obj.data.materials:
            parent_obj.data.materials[0] = mat
        else:
            parent_obj.data.materials.append(mat)
|
class TaskFailure(Exception):
    """Raised when a render or validation task cannot complete."""
|
def validate_bvh_file(bvh_file):
    """Validate raw BVH bytes against env-configured limits.

    Checks that the declared Frames count matches the actual number of
    motion rows, that the frame count does not exceed MAX_NUMBER_FRAMES
    (-1 disables the check), and that the frame time matches 1/RENDER_FPS.

    Raises:
        TaskFailure: on any mismatch.
    """
    MAX_NUMBER_FRAMES = int(os.environ['MAX_NUMBER_FRAMES'])
    FRAME_TIME = (1.0 / float(os.environ['RENDER_FPS']))
    file_content = bvh_file.decode('utf-8')
    mocap = Bvh(file_content)
    # Count motion rows ourselves. counter stays None until the MOTION line,
    # then starts at -2 so the non-empty 'Frames:' and 'Frame Time:' header
    # lines cancel out; the increment runs before the MOTION check, so the
    # MOTION line itself is never counted. Final value = motion data rows.
    counter = None
    for line in file_content.split('\n'):
        if ((counter is not None) and line.strip()):
            counter += 1
        if (line.strip() == 'MOTION'):
            counter = (- 2)
    if (mocap.nframes != counter):
        raise TaskFailure(f'The number of rows with motion data ({counter}) does not match the Frames field ({mocap.nframes})')
    if ((MAX_NUMBER_FRAMES != (- 1)) and (mocap.nframes > MAX_NUMBER_FRAMES)):
        raise TaskFailure(f'The supplied number of frames ({mocap.nframes}) is bigger than {MAX_NUMBER_FRAMES}')
    if (mocap.frame_time != FRAME_TIME):
        raise TaskFailure(f'The supplied frame time ({mocap.frame_time}) differs from the required {FRAME_TIME}')
|
@celery.task(name='tasks.render', bind=True, hard_time_limit=WORKER_TIMEOUT)
def render(self, bvh_file_uri: str) -> str:
    """Celery task: fetch a BVH file, render it with Blender, upload the video.

    Parses Blender's stdout to publish RENDERING progress; on the
    'output_file' line the rendered file is uploaded and its response text
    returned. If Blender exits non-zero without producing output, raises
    TaskFailure with its stderr.
    """
    HEADERS = {'Authorization': (f'Bearer ' + os.environ['SYSTEM_TOKEN'])}
    API_SERVER = os.environ['API_SERVER']
    logger.info('rendering..')
    self.update_state(state='PROCESSING')
    bvh_file = requests.get((API_SERVER + bvh_file_uri), headers=HEADERS).content
    validate_bvh_file(bvh_file)
    with tempfile.NamedTemporaryFile(suffix='.bhv') as tmpf:
        tmpf.write(bvh_file)
        tmpf.seek(0)
        # NOTE(review): stderr=PIPE can deadlock if Blender floods stderr
        # while we only drain stdout — confirm Blender's stderr volume.
        process = subprocess.Popen(
            ['/blender/blender-2.83.0-linux64/blender', '-noaudio', '-b',
             '--python', 'blender_render.py', '--', tmpf.name],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        total = None
        current_frame = None
        for line in process.stdout:
            line = line.decode('utf-8').strip()
            if line.startswith('total_frames '):
                (_, total) = line.split(' ')
                total = int(float(total))
            elif line.startswith('Append frame '):
                (*_, current_frame) = line.split(' ')
                current_frame = int(current_frame)
            elif line.startswith('output_file'):
                (_, file_name) = line.split(' ')
                # context manager closes the upload handle (old code leaked it)
                with open(file_name, 'rb') as video_f:
                    files = {'file': (os.path.basename(file_name), video_f)}
                    return requests.post((API_SERVER + '/upload_video'),
                                         files=files, headers=HEADERS).text
            if (total and current_frame):
                self.update_state(state='RENDERING',
                                  meta={'current': current_frame, 'total': total})
        # reap the child: returncode is None until the process is wait()ed,
        # so the old check could misfire regardless of exit status
        process.wait()
        if (process.returncode != 0):
            raise TaskFailure(process.stderr.read().decode('utf-8'))
|
# ---------------------------------------------------------------------------
# Auto-generated protobuf message stubs (Caffe-style `caffe.proto` output).
# Each class binds a protoc-generated descriptor (_BLOBPROTO, ...) defined
# elsewhere in the generated module. Do not edit by hand — regenerate with
# protoc instead.
# NOTE(review): `__metaclass__` is Python-2 metaclass syntax; under Python 3
# it is an ordinary, ignored class attribute — confirm which interpreter
# consumes this module.
# ---------------------------------------------------------------------------

class BlobProto(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTO

class BlobProtoVector(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTOVECTOR

class Datum(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATUM

class FillerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _FILLERPARAMETER

class NetParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETPARAMETER

class SolverParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERPARAMETER

class SolverState(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERSTATE

class NetState(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETSTATE

class NetStateRule(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETSTATERULE

class LayerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERPARAMETER

class TransformationParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _TRANSFORMATIONPARAMETER

class AccuracyParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ACCURACYPARAMETER

class ArgMaxParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ARGMAXPARAMETER

class ConcatParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONCATPARAMETER

class ContrastiveLossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONTRASTIVELOSSPARAMETER

class ConvolutionParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONVOLUTIONPARAMETER

class DataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATAPARAMETER

class DropoutParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DROPOUTPARAMETER

class DummyDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DUMMYDATAPARAMETER

class EltwiseParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ELTWISEPARAMETER

class ThresholdParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _THRESHOLDPARAMETER

class HDF5DataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HDF5DATAPARAMETER

class HDF5OutputParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HDF5OUTPUTPARAMETER

class HingeLossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HINGELOSSPARAMETER

class ImageDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _IMAGEDATAPARAMETER

class InfogainLossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INFOGAINLOSSPARAMETER

class InnerProductParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INNERPRODUCTPARAMETER

class LRNParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LRNPARAMETER

class MemoryDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MEMORYDATAPARAMETER

class MVNParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MVNPARAMETER

class PoolingParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POOLINGPARAMETER

class PowerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POWERPARAMETER

class ReLUParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _RELUPARAMETER

class SigmoidParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SIGMOIDPARAMETER

class SliceParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SLICEPARAMETER

class SoftmaxParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOFTMAXPARAMETER

class TanHParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _TANHPARAMETER

class WindowDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _WINDOWDATAPARAMETER

class V0LayerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _V0LAYERPARAMETER

class BlobShape(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBSHAPE

# NOTE(review): everything below re-declares most of the classes above (plus
# ParamSpec, LossParameter, EmbedParameter, ExpParameter, FlattenParameter,
# LogParameter, PythonParameter). It looks like a second generated protobuf
# dump was concatenated onto the first; at import time the later definitions
# shadow the earlier ones. Confirm which generated version is intended and
# drop the other.

class BlobProto(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTO

class BlobProtoVector(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTOVECTOR

class Datum(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATUM

class FillerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _FILLERPARAMETER

class NetParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETPARAMETER

class SolverParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERPARAMETER

class SolverState(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERSTATE

class NetState(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETSTATE

class NetStateRule(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETSTATERULE

class ParamSpec(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _PARAMSPEC

class LayerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERPARAMETER

class TransformationParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _TRANSFORMATIONPARAMETER

class LossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LOSSPARAMETER

class AccuracyParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ACCURACYPARAMETER

class ArgMaxParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ARGMAXPARAMETER

class ConcatParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONCATPARAMETER

class ContrastiveLossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONTRASTIVELOSSPARAMETER

class ConvolutionParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONVOLUTIONPARAMETER

class DataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATAPARAMETER

class DropoutParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DROPOUTPARAMETER

class DummyDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DUMMYDATAPARAMETER

class EltwiseParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ELTWISEPARAMETER

class EmbedParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _EMBEDPARAMETER

class ExpParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _EXPPARAMETER

class FlattenParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _FLATTENPARAMETER

class HDF5DataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HDF5DATAPARAMETER

class HDF5OutputParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HDF5OUTPUTPARAMETER

class HingeLossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HINGELOSSPARAMETER

class ImageDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _IMAGEDATAPARAMETER

class InfogainLossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INFOGAINLOSSPARAMETER

class InnerProductParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INNERPRODUCTPARAMETER

class LogParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LOGPARAMETER

class LRNParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LRNPARAMETER

class MemoryDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MEMORYDATAPARAMETER

class MVNParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MVNPARAMETER

class PoolingParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POOLINGPARAMETER

class PowerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POWERPARAMETER

class PythonParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _PYTHONPARAMETER
|
# NOTE(review): trailing dataset-viewer UI text ("Subsets and Splits", "The top
# public SQL queries...") — a scraping artifact, not part of the original
# source; safe to delete.