code stringlengths 101 5.91M |
|---|
def simple_while(A: dace.int32[10]):
    # DaCe program: add twice its index to each of the 10 entries of A.
    # NOTE(review): written as a while-loop (not a range for-loop), presumably
    # to exercise the DaCe frontend's while-loop lowering — confirm before
    # restructuring, since the frontend parses this AST shape.
    i = 0
    while (i < 10):
        A[i] += (2 * i)
        i += 1
class GrailEntityDisambFeature():
    """Value object for one entity-disambiguation candidate feature.

    Holds the example id, the candidate's encoded model inputs, and the
    index of the target (gold) candidate.
    """

    def __init__(self, pid, input_ids, token_type_ids, target_idx):
        # Attribute names are part of the public contract: consumers read
        # `candidate_input_ids` / `candidate_token_type_ids` directly.
        self.pid = pid
        self.target_idx = target_idx
        self.candidate_input_ids = input_ids
        self.candidate_token_type_ids = token_type_ids
def save_summary(epoch: int, global_step: int, accuracies: List[utils.AverageMeter], duration: timedelta, tracking_file: str, mode: str, top=(1,)):
    """Append one evaluation-summary row to `tracking_file`.

    Records a timestamp plus the run coordinates (mode/epoch/step/duration)
    and one `top{k}_accuracy` entry per k in `top`, then hands the record
    to `utils.save_result` for persistence.
    """
    summary: Dict[str, Any] = OrderedDict(
        timestamp=datetime.now(),
        mode=mode,
        epoch=epoch,
        global_step=global_step,
        duration=duration,
    )
    for k, meter in zip(top, accuracies):
        summary[f'top{k}_accuracy'] = meter.avg
    utils.save_result(summary, tracking_file)
class ROCExplanation(ExplanationBase):
    """Explanation object holding ROC curves (FPR/TPR/AUC) for a classifier.

    Each metric is a dict keyed by integer class index, plus the special
    keys 'micro' and 'macro' for the averaged curves.
    """

    def __init__(self):
        super().__init__()
        self.explanations = {}

    def add(self, fpr: Dict, tpr: Dict, auc: Dict):
        """Store the ROC metrics, replacing any previously stored ones."""
        self.explanations = {'fpr': fpr, 'tpr': tpr, 'auc': auc}

    def get_explanations(self):
        """Return the stored {'fpr', 'tpr', 'auc'} dict."""
        return self.explanations

    def plot(self, class_names=None, linewidth=2, **kwargs):
        """Plot all ROC curves with matplotlib and return the figure."""
        import matplotlib.pyplot as plt
        import matplotlib.colors as mcolors
        fpr = self.explanations['fpr']
        tpr = self.explanations['tpr']
        auc = self.explanations['auc']
        colors = list(mcolors.TABLEAU_COLORS.values())
        fig = plt.figure()
        plt.plot(fpr['micro'], tpr['micro'], label='Micro-average ROC curve (area = {:0.2f})'.format(auc['micro']), color='deeppink', linestyle=':', linewidth=linewidth)
        plt.plot(fpr['macro'], tpr['macro'], label='Macro-average ROC curve (area = {:0.2f})'.format(auc['macro']), color='navy', linestyle=':', linewidth=linewidth)
        # 'micro' and 'macro' are the only non-class keys, hence len - 2
        # per-class curves.
        for i in range(len(fpr) - 2):
            label = class_names[i] if class_names is not None else i
            plt.plot(fpr[i], tpr[i], color=colors[i % len(colors)], linewidth=linewidth, label='ROC curve of class {} (area = {:0.2f})'.format(label, auc[i]))
        plt.plot([0, 1], [0, 1], 'k--', linewidth=linewidth)  # chance diagonal
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('ROC Curves')
        plt.legend(loc='lower right')
        plt.grid()
        return fig

    def _plotly_figure(self, class_names=None, linewidth=2, **kwargs):
        """Build and return the equivalent plotly figure."""
        import plotly.graph_objects as go
        fpr = self.explanations['fpr']
        tpr = self.explanations['tpr']
        auc = self.explanations['auc']
        fig = go.Figure()
        fig.add_trace(go.Scatter(x=fpr['micro'], y=tpr['micro'], name='Micro-average ROC curve (area = {:0.2f})'.format(auc['micro']), line=dict(width=linewidth)))
        fig.add_trace(go.Scatter(x=fpr['macro'], y=tpr['macro'], name='Macro-average ROC curve (area = {:0.2f})'.format(auc['macro']), line=dict(width=linewidth)))
        for i in range(len(fpr) - 2):
            label = class_names[i] if class_names is not None else i
            fig.add_trace(go.Scatter(x=fpr[i], y=tpr[i], name='ROC curve of class {} (area = {:0.2f})'.format(label, auc[i]), line=dict(width=linewidth)))
        fig.add_trace(go.Scatter(x=[0, 1], y=[0, 1], line=dict(color='black', dash='dash', width=linewidth), name='Baseline'))
        fig.update_layout(xaxis_title='False Positive Rate', yaxis_title='True Positive Rate', title={'text': 'ROC Curves'})
        return fig

    def plotly_plot(self, class_names=None, **kwargs):
        """Return the plotly figure wrapped for Dash embedding."""
        return DashFigure(self._plotly_figure(class_names, **kwargs))

    def ipython_plot(self, class_names=None, **kwargs):
        """Render the plotly figure inline in a notebook."""
        import plotly
        plotly.offline.iplot(self._plotly_figure(class_names, **kwargs))

    @classmethod
    def from_dict(cls, d):
        """Rebuild a ROCExplanation from a serialized dict.

        Fixed: restored the @classmethod decorator (apparently lost in
        extraction) and narrowed the bare `except:` — JSON round-trips turn
        integer class keys into strings, so numeric keys are converted back
        to int; 'micro'/'macro' keys legitimately fail and stay strings.
        """
        e = d['explanations']
        for metric in ['fpr', 'tpr', 'auc']:
            converted = {}
            for key, value in e[metric].items():
                try:
                    key = int(key)
                except (ValueError, TypeError):
                    pass  # non-numeric key such as 'micro'/'macro'
                converted[key] = value
            e[metric] = converted
        exp = ROCExplanation()
        exp.explanations = e
        return exp
def numpy_to_hls_code(ndarray, dtype, hls_var_name, pack_innermost_dim=True, no_decl=False):
    """Return C/HLS source declaring and initializing `hls_var_name` from `ndarray`.

    :param ndarray: array-like of values; normalized to a float32 ndarray.
    :param dtype: datatype object (project type, not a numpy dtype) providing
        get_hls_datatype_str(), bitwidth() and is_integer().
    :param hls_var_name: name of the emitted C variable.
    :param pack_innermost_dim: if True, pack the innermost dimension into one
        ap_uint whose width is rounded up to a multiple of 4 bits (whole hex
        digits), emitting hex-string initializers.
    :param no_decl: if True, emit only the initializer ("{...};") without
        the declaration.
    """
    hls_dtype = dtype.get_hls_datatype_str()
    if not isinstance(ndarray, np.ndarray) or ndarray.dtype != np.float32:
        # ensure we are working on a float32 ndarray
        ndarray = np.asarray(ndarray, dtype=np.float32)
    if pack_innermost_dim:
        idimlen = ndarray.shape[-1]
        idimbits = idimlen * dtype.bitwidth()
        # round up to whole hex digits so the emitted literal is well-formed
        idimbits = roundup_to_integer_multiple(idimbits, 4)
        ndarray = pack_innermost_dim_as_hex_string(ndarray, dtype, idimbits)
        hls_dtype = 'ap_uint<%d>' % idimbits
    ndims = ndarray.ndim
    ret = '%s %s' % (hls_dtype, hls_var_name)
    for d in range(ndims):
        ret += '[%d]' % ndarray.shape[d]
    orig_printops = np.get_printoptions()
    np.set_printoptions(threshold=sys.maxsize)

    def elem2str(x):
        # Fixed: isinstance(x, str) also covers np.str_ (a str subclass);
        # the alias np.str was removed in NumPy 1.24 and raised
        # AttributeError here.
        if isinstance(x, str):
            # hex string produced by pack_innermost_dim_as_hex_string
            return '%s("%s", 16)' % (hls_dtype, x)
        elif isinstance(x, np.float32):
            if dtype.is_integer():
                return str(int(x))
            else:
                return str(x)
        else:
            raise Exception('Unsupported type for numpy_to_hls_code')

    try:
        strarr = np.array2string(ndarray, separator=', ', formatter={'all': elem2str})
    finally:
        # always restore the global print options, even if formatting fails
        np.set_printoptions(**orig_printops)
    strarr = strarr.replace('[', '{').replace(']', '}')
    if no_decl:
        ret = strarr + ';'
    else:
        ret = ret + ' = \n' + strarr + ';'
    return ret
def plot_avg_clustering(G_times, fname):
    """Plot the average clustering coefficient over a sequence of graphs.

    Marks rarity-based outlier timesteps with dotted vertical lines, saves
    the figure to `<fname>clustering.pdf`, and returns the sorted outliers.
    """
    timesteps = list(range(len(G_times)))
    coefficients = [nx.average_clustering(G) for G in G_times]

    plt.rcParams.update({'figure.autolayout': True})
    plt.rc('xtick', labelsize='x-small')
    plt.rc('ytick', labelsize='x-small')
    fig = plt.figure(figsize=(4, 2))
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(timesteps, coefficients, marker='o', color='#78f542', ls='solid', linewidth=0.5, markersize=1)
    ax.set_xlabel('time', fontsize=8)

    # Highlight anomalous timesteps detected over a sliding window.
    outliers = find_rarity_windowed_outlier(coefficients, percent_ranked=0.05, window=5, initial_period=10)
    outliers.sort()
    for timestep in outliers:
        plt.axvline(x=timestep, color='k', linestyle=':', linewidth=0.5)

    ax.set_ylabel('average clustering coefficient', fontsize=8)
    plt.title('plotting temporal average clustering coefficient ', fontsize='x-small')
    plt.savefig(fname + 'clustering.pdf', pad_inches=0)
    return outliers
class FiniteWordPath_all_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_all, FiniteWord_class):
    """Finite word path backed by a cached-iterator word datatype.

    Behaviour comes entirely from the parent classes; this class only
    fixes their combination (MRO).
    """
    pass
def BetsyRoss():
    """Return the Betsy Ross matroid on the ground set 'a'..'k'.

    The matroid is defined by its circuit closures: ten rank-2 closures
    plus the whole ground set in rank 3.
    """
    groundset = 'abcdefghijk'
    rank2_closures = ['acfg', 'bdgh', 'cehi', 'befj', 'adij', 'dfk', 'egk', 'ahk', 'bik', 'cjk']
    closures = {2: rank2_closures, 3: [groundset]}
    matroid = CircuitClosuresMatroid(groundset=groundset, circuit_closures=closures)
    matroid.rename('BetsyRoss: ' + repr(matroid))
    return matroid
def test_Detector_init():
    """Timeline initialization for a dark-count detector schedules exactly two events."""
    detector, parent, timeline = create_detector(dark_count=10)
    timeline.init()
    assert len(timeline.events) == 2
def Q_calc(TP, TN, FP, FN):
    """Yule's Q association coefficient for a 2x2 confusion table.

    Computed as (OR - 1) / (OR + 1) where OR = (TP*TN)/(FP*FN). Returns
    the string 'None' when the value is undefined (division by zero or
    non-numeric input) — kept as a string for backward compatibility with
    callers that print/format the result.
    """
    try:
        odds_ratio = (TP * TN) / (FP * FN)
        return (odds_ratio - 1) / (odds_ratio + 1)
    except (ZeroDivisionError, TypeError):
        return 'None'
class DateTimeField(fields.DateTimeField):
    """Form DateTimeField that honours Django's timezone settings.

    Naive datetimes parsed from submitted form data are made aware in the
    currently active timezone; aware datetimes are rendered in local time.
    """

    def __init__(self, *args, **kwargs):
        if not has_timezone:
            raise ImportError('DateTimeField requires Django >= 1.5')
        super(DateTimeField, self).__init__(*args, **kwargs)

    def process_formdata(self, valuelist):
        super(DateTimeField, self).process_formdata(valuelist)
        parsed = self.data
        if settings.USE_TZ and parsed is not None and timezone.is_naive(parsed):
            # Interpret the naive form value in the active timezone.
            self.data = timezone.make_aware(parsed, timezone.get_current_timezone())

    def _value(self):
        current = self.data
        if settings.USE_TZ and isinstance(current, datetime.datetime) and timezone.is_aware(current):
            # Render aware datetimes in the user's local timezone.
            self.data = timezone.localtime(current)
        return super(DateTimeField, self)._value()
def get_params(argv='1'):
    """Return the SELD hyper-parameter dict for the given preset id.

    `argv` selects one of the numbered experiment presets ('1'-'7', '999').
    Derived fields (feature sequence length, temporal pooling, patience and
    the dataset's class count) are filled in before returning.
    """
    print('SET: {}'.format(argv))
    params = dict(quick_test=True, finetune_mode=False, pretrained_model_weights='models/1_1_foa_dev_split6_model.h5', dataset_dir='/scratch/asignal/partha/DCASE2022_SELD_dataset', feat_label_dir='/scratch/asignal/partha/DCASE2022_SELD_dataset/seld_feat_label', model_dir='models/', dcase_output_dir='results/', mode='dev', dataset='foa', fs=24000, hop_len_s=0.02, label_hop_len_s=0.1, max_audio_len_s=60, nb_mel_bins=64, use_salsalite=False, fmin_doa_salsalite=50, fmax_doa_salsalite=2000, fmax_spectra_salsalite=9000, multi_accdoa=False, thresh_unify=15, label_sequence_length=50, batch_size=128, dropout_rate=0.05, nb_cnn2d_filt=64, f_pool_size=[4, 4, 2], nb_rnn_layers=2, rnn_size=128, self_attn=False, nb_heads=4, nb_fnn_layers=1, fnn_size=128, nb_epochs=100, lr=0.001, average='macro', lad_doa_thresh=20)

    # Each preset: (banner message, parameter overrides).
    presets = {
        '1': ('USING DEFAULT PARAMETERS\n', {}),
        '2': ('FOA + ACCDOA\n', dict(quick_test=False, dataset='foa', multi_accdoa=False)),
        '3': ('FOA + multi ACCDOA\n', dict(quick_test=False, dataset='foa', multi_accdoa=True)),
        '4': ('MIC + GCC + ACCDOA\n', dict(quick_test=False, dataset='mic', use_salsalite=False, multi_accdoa=False)),
        '5': ('MIC + SALSA + ACCDOA\n', dict(quick_test=False, dataset='mic', use_salsalite=True, multi_accdoa=False)),
        '6': ('MIC + GCC + multi ACCDOA\n', dict(quick_test=False, dataset='mic', use_salsalite=False, multi_accdoa=True)),
        '7': ('MIC + SALSA + multi ACCDOA\n', dict(quick_test=False, dataset='mic', use_salsalite=True, multi_accdoa=True)),
        '999': ('QUICK TEST MODE\n', dict(quick_test=True)),
    }
    if argv not in presets:
        print('ERROR: unknown argument {}'.format(argv))
        exit()
    message, overrides = presets[argv]
    print(message)
    params.update(overrides)

    # Derived parameters.
    feature_label_resolution = int(params['label_hop_len_s'] // params['hop_len_s'])
    params['feature_sequence_length'] = params['label_sequence_length'] * feature_label_resolution
    params['t_pool_size'] = [feature_label_resolution, 1, 1]
    params['patience'] = int(params['nb_epochs'])
    # Number of sound-event classes depends on the DCASE dataset year.
    for year, n_classes in (('2020', 14), ('2021', 12), ('2022', 13)):
        if year in params['dataset_dir']:
            params['unique_classes'] = n_classes
            break

    for key, value in params.items():
        print('\t{}: {}'.format(key, value))
    return params
class Decoder(Network):
    """Deconvolutional decoder: maps a latent vector to an image.

    Two fully-connected layers project the latent code to a 4x4 feature
    map, followed by three transposed convolutions up to the requested
    output size; the output passes through tanh.
    """

    def __init__(self, output_width, output_height, output_depth, stride=2, kernel=5, final_dim=64, scope_name='decoder', *args, **kwargs):
        super(Decoder, self).__init__(*args, scope_name=scope_name, **kwargs)
        self.output_width = output_width
        self.output_height = output_height
        self.output_depth = output_depth
        self.stride = stride  # deconv stride (both height and width)
        self.kernel = kernel  # square deconv kernel size
        self.final_dim = final_dim  # channel count of the last deconv stage

    def build(self, z, train):
        """Build the graph for latent batch `z`; `train` toggles batch-norm mode.

        Returns (final output tensor, list of every intermediate tensor).
        """
        # AUTO_REUSE shares variables across repeated graph builds.
        with tf.variable_scope(self.scope_name, reuse=tf.AUTO_REUSE):
            batch_size = tf.shape(z)[0]
            layers = [z]
            with tf.variable_scope('layer0'):
                layers.append(linear(layers[(- 1)], 1024))
                layers.append(batch_norm()(layers[(- 1)], train=train))
                layers.append(tf.nn.relu(layers[(- 1)]))
            with tf.variable_scope('layer1'):
                # Project to a 4x4 spatial map with final_dim*4 channels.
                layers.append(linear(layers[(- 1)], (((self.final_dim * 4) * 4) * 4)))
                layers.append(batch_norm()(layers[(- 1)], train=train))
                layers.append(tf.nn.relu(layers[(- 1)]))
                layers.append(tf.reshape(layers[(- 1)], ((- 1), 4, 4, (self.final_dim * 4))))
            with tf.variable_scope('layer2'):
                # 4x4 -> 8x8 upsampling.
                layers.append(deconv2d(layers[(- 1)], [batch_size, 8, 8, (self.final_dim * 2)], d_h=self.stride, d_w=self.stride, k_h=self.kernel, k_w=self.kernel))
                layers.append(batch_norm()(layers[(- 1)], train=train))
                layers.append(tf.nn.relu(layers[(- 1)]))
            with tf.variable_scope('layer3'):
                # 8x8 -> 16x16 upsampling.
                layers.append(deconv2d(layers[(- 1)], [batch_size, 16, 16, self.final_dim], d_h=self.stride, d_w=self.stride, k_h=self.kernel, k_w=self.kernel))
                layers.append(batch_norm()(layers[(- 1)], train=train))
                layers.append(tf.nn.relu(layers[(- 1)]))
            with tf.variable_scope('layer4'):
                # Final upsampling to the requested size; tanh bounds pixels
                # to [-1, 1].
                layers.append(deconv2d(layers[(- 1)], [batch_size, self.output_height, self.output_width, self.output_depth], d_h=self.stride, d_w=self.stride, k_h=self.kernel, k_w=self.kernel))
                layers.append(tf.nn.tanh(layers[(- 1)]))
            return (layers[(- 1)], layers)
def _launch_worker(exp_key, worker_id, host, port, result_db_name):
command = 'hyperopt-mongo-worker --mongo={h}:{p}/{db} --poll-interval=10 --exp-key={key} > hyperopt_worker{id}.log 2>&1'
command = command.format(h=host, p=port, db=result_db_name, key=exp_key, id=worker_id)
fail = os.system(command)
if fail:
raise RuntimeError('Problem starting hyperopt-mongo-worker.') |
def arg_str2bool(v):
    """argparse-compatible converter mapping common truthy/falsy strings to bool.

    Bools pass through unchanged; anything unrecognized raises
    argparse.ArgumentTypeError so argparse reports a clean usage error.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
class KaHFM_model(keras.Model):
    """Matrix-factorization recommender trained with BPR pairwise loss.

    Holds item biases (Bi) and user/item latent factors (Gu/Gi), the latter
    initialized from precomputed factor matrices passed to the constructor.
    """

    def __init__(self, user_factors, item_factors, learning_rate=0.001, l_w=0, l_b=0, name='NNBPRMF', **kwargs):
        super().__init__(name=name, **kwargs)
        tf.random.set_seed(42)  # reproducible initialization
        self._learning_rate = learning_rate
        self.l_w = l_w  # regularization weight for latent factors
        self.l_b = l_b  # regularization weight for item biases
        self.initializer = tf.initializers.GlorotUniform()
        self.Bi = tf.Variable(tf.zeros(item_factors.shape[0]), name='Bi', dtype=tf.float32)
        self.Gu = tf.Variable(user_factors, name='Gu', dtype=tf.float32)
        self.Gi = tf.Variable(item_factors, name='Gi', dtype=tf.float32)
        self.optimizer = tf.optimizers.Adam(self._learning_rate)

    def call(self, inputs, training=None, **kwargs):
        """Score (user, item) pairs.

        Returns (score, item bias, user factors, item factors).
        """
        (user, item) = inputs
        beta_i = tf.squeeze(tf.nn.embedding_lookup(self.Bi, item))
        gamma_u = tf.squeeze(tf.nn.embedding_lookup(self.Gu, user))
        gamma_i = tf.squeeze(tf.nn.embedding_lookup(self.Gi, item))
        xui = beta_i + tf.reduce_sum(gamma_u * gamma_i, 1)
        return (xui, beta_i, gamma_u, gamma_i)

    def train_step(self, batch):
        """One BPR step on a (user, positive item, negative item) batch; returns the loss."""
        with tf.GradientTape() as tape:
            (user, pos, neg) = batch
            (xu_pos, beta_pos, gamma_u, gamma_pos) = self.call(inputs=(user, pos), training=True)
            (xu_neg, beta_neg, gamma_u, gamma_neg) = self.call(inputs=(user, neg), training=True)
            # NOTE(review): clipping to an upper bound of 0.0 (rather than a
            # large positive value) is unusual for BPR — confirm intended.
            difference = tf.clip_by_value(xu_pos - xu_neg, (- 80.0), .0)
            loss = tf.reduce_sum(tf.nn.softplus(-difference))
            # L2 regularization on factors and biases (negative bias down-weighted).
            reg_loss = (((self.l_w * tf.reduce_sum([tf.nn.l2_loss(gamma_u), tf.nn.l2_loss(gamma_pos), tf.nn.l2_loss(gamma_neg)])) + (self.l_b * tf.nn.l2_loss(beta_pos))) + ((self.l_b * tf.nn.l2_loss(beta_neg)) / 10))
            loss += reg_loss
        grads = tape.gradient(loss, [self.Bi, self.Gu, self.Gi])
        self.optimizer.apply_gradients(zip(grads, [self.Bi, self.Gu, self.Gi]))
        return loss

    def predict_all(self):
        """Dense score matrix for all users x all items."""
        return self.Bi + tf.matmul(self.Gu, self.Gi, transpose_b=True)

    def predict_batch(self, start, stop):
        """Score matrix for the user slice [start, stop)."""
        return self.Bi + tf.matmul(self.Gu[start:stop], self.Gi, transpose_b=True)

    def predict(self, inputs, training=False, **kwargs):
        """Score the given (user, item) pairs.

        Fixed: `call` returns a 4-tuple, so the previous 2-element unpacking
        raised ValueError at runtime; keep only the score component. The
        `training` flag is now forwarded instead of hard-coded to True.
        """
        (logits, _, _, _) = self.call(inputs=inputs, training=training)
        return logits

    def get_top_k(self, preds, train_mask, k=100):
        """Top-k items per user, masking already-seen items to -inf."""
        return tf.nn.top_k(tf.where(train_mask, preds, (- np.inf)), k=k, sorted=True)

    def get_config(self):
        raise NotImplementedError
def exec_command(command, execute_in='', use_shell=None, use_tee=None, _with_python=1, **env):
    """Run `command` with a temporary working directory and environment.

    Deprecated numpy.distutils helper: chdirs into `execute_in`, overlays
    the extra keyword args onto the environment, delegates to
    _exec_command, and restores both cwd and environment afterwards.
    `use_shell`/`use_tee` default to True on POSIX.
    """
    warnings.warn('exec_command is deprecated since NumPy v1.17, use subprocess.Popen instead', DeprecationWarning, stacklevel=1)
    log.debug(('exec_command(%r,%s)' % (command, ','.join([('%s=%r' % kv) for kv in env.items()]))))
    if (use_tee is None):
        use_tee = (os.name == 'posix')
    if (use_shell is None):
        use_shell = (os.name == 'posix')
    execute_in = os.path.abspath(execute_in)
    oldcwd = os.path.abspath(os.getcwd())
    # Determine the directory containing this module (kept from the original
    # implementation; exec_dir is not used further in this excerpt).
    if (__name__[(- 12):] == 'exec_command'):
        exec_dir = os.path.dirname(os.path.abspath(__file__))
    elif os.path.isfile('exec_command.py'):
        exec_dir = os.path.abspath('.')
    else:
        exec_dir = os.path.abspath(sys.argv[0])
        if os.path.isfile(exec_dir):
            exec_dir = os.path.dirname(exec_dir)
    if (oldcwd != execute_in):
        os.chdir(execute_in)
        log.debug(('New cwd: %s' % execute_in))
    else:
        log.debug(('Retaining cwd: %s' % oldcwd))
    # Snapshot the variables we are about to override, then apply overrides.
    oldenv = _preserve_environment(list(env.keys()))
    _update_environment(**env)
    try:
        st = _exec_command(command, use_shell=use_shell, use_tee=use_tee, **env)
    finally:
        # Always restore cwd and environment, even if the command failed.
        if (oldcwd != execute_in):
            os.chdir(oldcwd)
            log.debug(('Restored cwd to %s' % oldcwd))
        _update_environment(**oldenv)
    return st
class AffineGroupElement(MultiplicativeGroupElement):
    """Element of an affine group: the map x |-> A*x + b.

    Stored as the linear part `_A` (a d x d matrix) and the translation
    part `_b` (a length-d vector) over the parent's base ring. Can also be
    constructed from a single (d+1) x (d+1) augmented matrix.
    """

    def __init__(self, parent, A, b=0, convert=True, check=True):
        # Accept objects exposing a .matrix() view (e.g. other group elements).
        try:
            A = A.matrix()
        except AttributeError:
            pass
        # A (d+1)x(d+1) matrix encodes [A|b; 0|1]; split it apart.
        if (is_Matrix(A) and (A.nrows() == A.ncols() == (parent.degree() + 1))):
            g = A
            d = parent.degree()
            A = g.submatrix(0, 0, d, d)
            b = [g[(i, d)] for i in range(d)]
            convert = True
        if convert:
            A = parent.matrix_space()(A)
            b = parent.vector_space()(b)
        if check:
            if (not is_Matrix(A)):
                raise TypeError('A must be a matrix')
            if (not (A.parent() is parent.matrix_space())):
                raise TypeError(('A must be an element of ' + str(parent.matrix_space())))
            if (not (b.parent() is parent.vector_space())):
                raise TypeError(('b must be an element of ' + str(parent.vector_space())))
            parent._element_constructor_check(A, b)
        super().__init__(parent)
        self._A = A
        self._b = b

    def A(self):
        """Return the linear (matrix) part of the affine map."""
        return self._A

    def b(self):
        """Return the translation (vector) part of the affine map."""
        return self._b

    # NOTE(review): the bare name below looks like a decorator whose '@' was
    # lost in extraction (e.g. '@..._method' on `matrix`) — confirm against
    # the upstream source.
    _method
    def matrix(self):
        """Return the (d+1)x(d+1) augmented matrix [A|b; 0|1] (immutable)."""
        A = self._A
        b = self._b
        parent = self.parent()
        d = parent.degree()
        from sage.matrix.constructor import matrix, zero_matrix, block_matrix
        zero = zero_matrix(parent.base_ring(), 1, d)
        one = matrix(parent.base_ring(), [[1]])
        m = block_matrix(2, 2, [A, b.column(), zero, one])
        m.set_immutable()
        return m
    # Sage coercion hook: lets matrix(g) work on group elements.
    _matrix_ = matrix

    def _repr_(self):
        # Render A and b side by side, with 'x |-> ... x + ...' on the
        # middle line.
        A = str(self._A)
        b = str(self._b.column())
        deg = self.parent().degree()
        indices = range(deg)
        s = []
        for (Ai, bi, i) in zip(A.splitlines(), b.splitlines(), indices):
            if (i == (deg // 2)):
                s.append(((('x |-> ' + Ai) + ' x + ') + bi))
            else:
                s.append((((' ' + Ai) + ' ') + bi))
        return '\n'.join(s)

    def _latex_(self):
        return ((('\\vec{x}\\mapsto ' + self.A()._latex_()) + '\\vec{x} + ') + self.b().column()._latex_())

    def _ascii_art_(self):
        from sage.typeset.ascii_art import ascii_art
        deg = self.parent().degree()
        A = ascii_art(self._A, baseline=(deg // 2))
        b = ascii_art(self._b.column(), baseline=(deg // 2))
        return (((ascii_art('x |-> ') + A) + ascii_art(' x + ')) + b)

    def _unicode_art_(self):
        from sage.typeset.unicode_art import unicode_art
        deg = self.parent().degree()
        A = unicode_art(self._A, baseline=(deg // 2))
        b = unicode_art(self._b.column(), baseline=(deg // 2))
        return (((unicode_art('x ') + A) + unicode_art(' x + ')) + b)

    def _mul_(self, other):
        # Composition of affine maps: (A1,b1)*(A2,b2) = (A1*A2, b1 + A1*b2).
        parent = self.parent()
        A = (self._A * other._A)
        b = (self._b + (self._A * other._b))
        return parent.element_class(parent, A, b, check=False)

    def __call__(self, v):
        """Apply the affine map to `v` (vector, polynomial, or polyhedron)."""
        parent = self.parent()
        if (v in parent.vector_space()):
            return ((self._A * v) + self._b)
        from sage.rings.polynomial.polynomial_element import Polynomial
        if (isinstance(v, Polynomial) and (parent.degree() == 1)):
            ring = v.parent()
            return ring([self._A[(0, 0)], self._b[0]])
        from sage.rings.polynomial.multi_polynomial import MPolynomial
        if (isinstance(v, MPolynomial) and (parent.degree() == v.parent().ngens())):
            # Substitute A*x + b for the generators of the polynomial ring.
            ring = v.parent()
            from sage.modules.free_module_element import vector
            image_coords = ((self._A * vector(ring, ring.gens())) + self._b)
            return v(*image_coords)
        import sage.geometry.abc
        if isinstance(v, sage.geometry.abc.Polyhedron):
            return ((self._A * v) + self._b)
        # Fall back to coercing v into the vector space.
        v = parent.vector_space()(v)
        return ((self._A * v) + self._b)

    def _act_on_(self, x, self_on_left):
        # Left action only: g * x applies the map, x * g does not.
        if self_on_left:
            return self(x)

    def __invert__(self):
        # Inverse map: x |-> A^-1 * x - A^-1 * b.
        parent = self.parent()
        A = parent.matrix_space()((~ self._A))
        b = ((- A) * self.b())
        return parent.element_class(parent, A, b, check=False)

    def _richcmp_(self, other, op):
        # Compare by linear part first, then by translation part.
        lx = self._A
        rx = other._A
        if (lx != rx):
            return richcmp_not_equal(lx, rx, op)
        return richcmp(self._b, other._b, op)

    def list(self):
        """Return the augmented matrix as a list of row lists."""
        return [r.list() for r in self.matrix().rows()]
def gen_expr_simps(simps: LeanExprSimps, at_var: Optional[str]=None, indent: int=0) -> List[str]:
    """Render Lean `simp only [...]` tactic lines for the given rewrite sets.

    Emits one guarded `try { simp only [...] },` line per non-empty rewrite
    list (const_div_rw first, then add_comm), optionally targeted with
    `at at_var` and indented by `indent` spaces.
    """
    pad = ' ' * indent
    target = f' at {at_var}' if at_var is not None else ''
    rendered = []
    for rewrites in (simps.const_div_rw, simps.add_comm):
        if len(rewrites) > 0:
            rendered.append(pad + f"try {{ simp only [{', '.join(rewrites)}]{target} }},")
    return rendered
def test_sum_add_bad_node_raise_type_error():
    """Adding a non-function node to a Sum must raise a TypeError."""
    first = optplan.Parameter()
    second = optplan.Parameter()
    total = optplan.Sum(functions=[first, second])
    with pytest.raises(TypeError, match='add a node'):
        total + optplan.SimulationSpace()
def get_key(paragraphs, question, reasoningType):
    """Normalize (paragraphs, question, reasoning type) into a hashable key.

    Paragraph text is lower-cased with all spaces and newlines removed so
    formatting-only differences map to the same key.
    """
    normalized = paragraphs.replace('\n', '').replace(' ', '').lower()
    return (normalized, question.lower(), reasoningType)
class Partition5(nn.Module):
    """Final pipeline-parallel partition of a split T5 model.

    Wraps decoder blocks 3-5, the final layer norm/dropout, the LM head
    and the cross-entropy loss, pinned to a single device (default cuda:5).
    """
    # Fully-qualified scopes of the original T5 modules owned by this stage.
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/T5Block[3]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[4]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5LayerNorm[final_layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]', 'T5ForConditionalGeneration/Linear[lm_head]', 'T5ForConditionalGeneration/CrossEntropyLoss[lm_loss]']
    # Shared tensors owned by this stage (none for this partition).
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:5'):
        super().__init__()
        # Register this partition's layers under stable names l_0..l_6.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Re-register shared tensors as parameters or buffers as appropriate.
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Flattened-argument structure consumed by unflatten() in forward().
        self.input_structure = [1, 1, 1, 1, 1, 1, 1]
        # Maps partition-local module names back to original T5 state-dict names.
        self.lookup = {'l_0': 'decoder.3', 'l_1': 'decoder.4', 'l_2': 'decoder.5', 'l_3': 'decoder.final_layer_norm', 'l_4': 'decoder.dropout', 'l_5': 'lm_head', 'l_6': 'lm_loss'}
        self.to(self.device)

    def forward(self, *args):
        """Run the stage on flattened pipeline inputs; returns a 1-tuple (loss,)."""
        (decoder_attention_mask, inverted_encoder_attention_mask, lm_labels, x0, x1, x2, x3) = unflatten(args, self.input_structure)
        t_0 = self.l_0(x3, attention_mask=decoder_attention_mask, position_bias=x1, encoder_hidden_states=x0, encoder_attention_mask=inverted_encoder_attention_mask, encoder_decoder_position_bias=x2)
        t_0 = self.l_1(t_0, attention_mask=decoder_attention_mask, position_bias=x1, encoder_hidden_states=x0, encoder_attention_mask=inverted_encoder_attention_mask, encoder_decoder_position_bias=x2)
        t_0 = self.l_2(t_0, attention_mask=decoder_attention_mask, position_bias=x1, encoder_hidden_states=x0, encoder_attention_mask=inverted_encoder_attention_mask, encoder_decoder_position_bias=x2)
        t_0 = self.l_3(t_0)
        t_0 = self.l_4(t_0)
        # NOTE(review): multiplying activations by 0. zeroes the LM-head input,
        # destroying model output — plausibly deliberate for a partitioning/
        # profiling benchmark, but confirm against the generator that produced
        # this code.
        t_0 = (t_0 * 0.)
        t_0 = self.l_5(t_0)
        # Flatten logits to (N*T, vocab) and labels to (N*T,) for the loss.
        t_1 = t_0.size((- 1))
        t_1 = t_0.view((- 1), t_1)
        t_0 = lm_labels.view((- 1))
        t_0 = self.l_6(t_1, t_0)
        return (t_0,)

    # The following delegate to module-level helpers so state-dict keys use
    # the original T5 names (via self.lookup) rather than l_0..l_6.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, state):
        return load_state_dict(self, state)

    def named_parameters(self, recurse=True):
        return named_parameters(self, recurse=recurse)

    def named_buffers(self, recurse=True):
        return named_buffers(self, recurse=recurse)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def dump_example(dataset_name):
    """Convert one HDF5 training set into per-sequence dump folders.

    For every sequence in <path>/traindata/<dataset_name>.h5, writes under
    <path>/../train/<dataset_name>_<seq idx>/: per-frame depth maps (.npy),
    images (.jpg), camera intrinsics (cam.txt) and 3x4 poses (poses.txt).
    Sequences known to be broken in the source data are skipped, and dump
    folders that end up with fewer than two images are removed.
    """
    print('Converting {:}.h5 ...'.format(dataset_name))
    # renamed from `file` to avoid shadowing the builtin
    h5file = h5py.File(os.path.join(path, 'traindata', '{:}.h5'.format(dataset_name)), 'r')
    for (seq_idx, seq_name) in enumerate(h5file):
        # scenes11 is synthetic; its translations/depths use a different scale
        scale = 0.4 if dataset_name == 'scenes11_train' else 1
        # skip sequences known to be corrupted in the source data
        if (((dataset_name == 'sun3d_train_1.6m_to_infm') and (seq_idx == 7)) or ((dataset_name == 'sun3d_train_0.4m_to_0.8m') and (seq_idx == 15)) or ((dataset_name == 'scenes11_train') and (seq_idx in (2758, 4691, 7023, 11157, 17168, 19595)))):
            continue
        print('Processing sequence {:d}/{:d}'.format(seq_idx, len(h5file)))
        dump_dir = os.path.join(path, '../train', (dataset_name + '_') + '{:05d}'.format(seq_idx))
        if not os.path.isdir(dump_dir):
            os.mkdir(dump_dir)
        # NOTE: path.py-style Path (has .files()/.rmtree()), not pathlib
        dump_dir = Path(dump_dir)
        sequence = h5file[seq_name]['frames']['t0']
        poses = []
        for (f_idx, f_name) in enumerate(sequence):
            frame = sequence[f_name]
            for dt_type in frame:
                dataset = frame[dt_type]
                img = dataset[...]
                if dt_type == 'camera':
                    if f_idx == 0:
                        # intrinsics are constant over the sequence
                        intrinsics = np.array([[img[0], 0, img[3]], [0, img[1], img[4]], [0, 0, 1]])
                    pose = np.array([[img[5], img[8], img[11], (img[14] * scale)], [img[6], img[9], img[12], (img[15] * scale)], [img[7], img[10], img[13], (img[16] * scale)]])
                    poses.append(pose.tolist())
                elif dt_type == 'depth':
                    dimension = dataset.attrs['extents']
                    # depth maps are stored compressed as float16 (2 bytes/px)
                    depth = np.array(np.frombuffer(decompress(img.tobytes(), ((dimension[0] * dimension[1]) * 2)), dtype=np.float16)).astype(np.float32)
                    depth = depth.reshape(dimension[0], dimension[1]) * scale
                    dump_depth_file = (dump_dir / '{:04d}.npy'.format(f_idx))
                    np.save(dump_depth_file, depth)
                elif dt_type == 'image':
                    decoded = imageio.imread(img.tobytes())
                    dump_img_file = (dump_dir / '{:04d}.jpg'.format(f_idx))
                    # Fixed: scipy.misc.imsave was removed in SciPy 1.2;
                    # imageio (already used for reading above) replaces it.
                    imageio.imwrite(dump_img_file, decoded)
        dump_cam_file = (dump_dir / 'cam.txt')
        np.savetxt(dump_cam_file, intrinsics)
        poses_file = (dump_dir / 'poses.txt')
        np.savetxt(poses_file, np.array(poses).reshape((- 1), 12), fmt='%.6e')
        if len(dump_dir.files('*.jpg')) < 2:
            dump_dir.rmtree()
_mapper()
# NOTE(review): the bare call above looks like a decorator whose '@' was
# lost in extraction (`@_mapper()`); as written its return value is discarded.
def modify_in_place(x: DataPoint) -> DataPoint:
    """Mutate the datapoint's dict in place and echo it back as a Row.

    Note: `d` and `d_new` in the returned Row reference the SAME dict
    object, so later mutation through one is visible through the other.
    """
    x.d['my_key'] = 0
    return Row(num=x.num, d=x.d, d_new=x.d)
class _Encoder(nn.Module):
    """Convolutional encoder with two parallel 4x4 conv output heads.

    Downsamples the input through a pyramid of stride-2 convolutions
    (channels doubling each level), then maps the final feature map to two
    nz-channel outputs via conv1/conv2 (presumably the mean/log-variance
    heads of a VAE posterior — not verifiable from this excerpt).
    """

    def __init__(self, imageSize):
        super(_Encoder, self).__init__()
        depth = math.log2(imageSize)
        assert (depth == round(depth)), 'imageSize must be a power of 2'
        assert (depth >= 3), 'imageSize must be at least 8'
        depth = int(depth)
        # Channel count at the top of the pyramid feeds both output heads.
        top_channels = ngf * (2 ** (depth - 3))
        self.conv1 = nn.Conv2d(top_channels, nz, 4)
        self.conv2 = nn.Conv2d(top_channels, nz, 4)
        self.encoder = nn.Sequential()
        self.encoder.add_module('input-conv', nn.Conv2d(nc, ngf, 4, 2, 1, bias=False))
        self.encoder.add_module('input-relu', nn.LeakyReLU(0.2, inplace=True))
        for level in range(depth - 3):
            c_in = ngf * (2 ** level)
            c_out = ngf * (2 ** (level + 1))
            self.encoder.add_module('pyramid.{0}-{1}.conv'.format(c_in, c_out), nn.Conv2d(c_in, c_out, 4, 2, 1, bias=False))
            self.encoder.add_module('pyramid.{0}.batchnorm'.format(c_out), nn.BatchNorm2d(c_out))
            self.encoder.add_module('pyramid.{0}.relu'.format(c_out), nn.LeakyReLU(0.2, inplace=True))

    def forward(self, input):
        features = self.encoder(input)
        return [self.conv1(features), self.conv2(features)]
class TestParameters(unittest.TestCase):
    """The gdb parameter cy_colorize_code must toggle libcython's flag."""

    def test_parameters(self):
        for setting, expected in (('on', True), ('off', False)):
            gdb.execute('set cy_colorize_code ' + setting)
            assert bool(libcython.parameters.colorize_code) == expected
_ENCODERS.register_module()
# NOTE(review): the call above looks like a decorator whose '@' was lost in
# extraction (`@_ENCODERS.register_module()`); as a bare statement it
# registers nothing.
class DarkNet53(nn.Module):
    """DarkNet-53 visual backbone returning multi-scale feature maps.

    :param freeze_layer: number of trailing module groups left trainable;
        everything before them is frozen. None trains the whole backbone.
    :param pretrained: path to a darknet weight file, or None to skip loading.
    :param out_layer: indices of modules whose outputs are returned.
    """

    def __init__(self, freeze_layer=2, pretrained='./data/weights/darknet.weights', out_layer=(6, 8, 13)):
        super(DarkNet53, self).__init__()
        self.fp16_enabled = False
        assert isinstance(out_layer, tuple)
        self.out_layer = out_layer
        # Conv stem + residual-block stages with stride-2 downsampling between
        # them, ending in the extra conv head.
        self.darknet = nn.ModuleList([*darknet_conv((3, 32), (32, 64), (3, 3), (1, 2)), DarknetBlock(64), *darknet_conv((64,), (128,), (3,), (2,)), DarknetBlock(128, num_block=2), *darknet_conv((128,), (256,), (3,), (2,)), DarknetBlock(256, num_block=8), *darknet_conv((256,), (512,), (3,), (2,)), DarknetBlock(512, num_block=8), *darknet_conv((512,), (1024,), (3,), (2,)), DarknetBlock(1024, num_block=4), DarknetBlock(1024, num_block=2, shortcut=False), *darknet_conv((1024, 512), (512, 1024), (1, 3), (1, 1))])
        if (pretrained is not None):
            parse_yolo_weights(self, pretrained, len(self.darknet))
            if is_main():
                logger = get_root_logger()
                logger.info(f'load pretrained visual backbone from {pretrained}')
        self.do_train = False
        if (freeze_layer is not None):
            # Freeze everything except the last `freeze_layer` module groups.
            freeze_params(self.darknet[:(- freeze_layer)])
        else:
            self.do_train = True

    _fp32(apply_to=('img',))
    # NOTE(review): the line above also appears to be a stripped decorator
    # (`@_fp32(apply_to=('img',))`, forcing fp32 inputs) — confirm upstream.
    def forward(self, img, y):
        # `y` is accepted for interface compatibility but unused here.
        x = []
        for (i, mod) in enumerate(self.darknet):
            img = mod(img)
            if (i in self.out_layer):
                x.append(img)
        # Single requested scale is returned bare, multiple as a list.
        if (len(self.out_layer) == 1):
            return x[0]
        else:
            return x
def batch_normalization(x, beta, gamma, mean, variance, axes=[1], decay_rate=0.9, eps=1e-05, batch_stat=True, output_stat=False, n_outputs=None):
    """Batch normalization over the given axes (NNabla function wrapper).

    :param x: input variable.
    :param beta: shift variable, or None for no bias.
    :param gamma: scale variable, or None for no scale.
    :param mean: running-mean variable (required when batch_stat is False).
    :param variance: running-variance variable (required when batch_stat is False).
    :param axes: axes to treat as channel dimensions; negative indices are
        normalized to positive ones.
    :param batch_stat: use batch statistics (training) instead of the running
        mean/variance (inference).
    :param output_stat: also return the computed mean and variance.
    :returns: normalized output, or (output, mean, variance) if output_stat.

    Multiple axes are handled by transposing them to the trailing
    dimensions, applying the single-axis base function, and transposing back.
    """
    from .function_bases import batch_normalization as batch_normalization_base
    n_outputs = 3 if output_stat else 1
    axes = _force_list(axes)
    # normalize negative axis indices to positive ones
    axes = [(a + (len(x.shape) * (a < 0))) for a in axes]
    assert (batch_stat or (not output_stat))
    if (not batch_stat) and ((mean is None) or (variance is None)):
        raise ValueError('If batch_stat is False, mean and variance must not be None.')
    (_, _, mean, variance) = _create_bn_dummy_vars(x, axes, beta, gamma, mean, variance)
    # Fixed: the original tested `(mean.parent or variance.parent) is not None`,
    # which skips a falsy-but-present parent on `mean`; check each explicitly.
    # (Also fixed the 'variable' -> 'variance' typo in both error messages.)
    if batch_stat and ((mean.parent is not None) or (variance.parent is not None)):
        raise ValueError('if batch_stat is True, mean and variance must not have a parent function.')
    no_scale = gamma is None
    no_bias = beta is None
    if len(axes) == 1:
        # fast path: the base function handles a single axis directly
        return batch_normalization_base(x, beta, gamma, mean, variance, axes=axes, decay_rate=decay_rate, eps=eps, batch_stat=batch_stat, no_scale=no_scale, no_bias=no_bias, n_outputs=n_outputs)
    # Multi-axis case: transpose normalized axes to the end for x and params.
    in_adapter = BatchNormalizationInOutAdapter(x.ndim, axes)
    param_adapter = BatchNormalizationInOutAdapter(x.ndim, axes)
    inp = in_adapter(x)
    if beta is not None:
        beta = param_adapter(beta)
    if gamma is not None:
        gamma = param_adapter(gamma)
    mean = param_adapter(mean)
    variance = param_adapter(variance)
    # After transposition the normalized axes occupy the trailing dims.
    axis = x.ndim - len(axes)
    if n_outputs == 1:
        out = batch_normalization_base(inp, beta, gamma, mean, variance, axes=[axis], decay_rate=decay_rate, eps=eps, batch_stat=batch_stat, no_scale=no_scale, no_bias=no_bias, n_outputs=n_outputs)
        return in_adapter.inv(out)
    (out, mean, variance) = batch_normalization_base(inp, beta, gamma, mean, variance, axes=[axis], decay_rate=decay_rate, eps=eps, batch_stat=batch_stat, no_scale=no_scale, no_bias=no_bias, n_outputs=n_outputs)
    out = in_adapter.inv(out)
    mean = param_adapter.inv(mean)
    variance = param_adapter.inv(variance)
    return (out, mean, variance)
def supersample(clip, d, nframes):
    """Temporal anti-aliasing: each frame becomes the mean of `nframes`
    frames sampled uniformly in [t - d, t + d]."""
    def time_avg(gf, t):
        sample_times = np.linspace(t - d, t + d, nframes)
        # uint16 intermediate avoids overflow when summing uint8 frames
        frames = np.array([gf(ts) for ts in sample_times], dtype='uint16')
        return np.mean(1.0 * frames, axis=0).astype('uint8')
    return clip.fl(time_avg)
def get_layers(in_index, in_channels, embed_dims, channels, embed_neck_cfg, embed_cfg, fusion_cfg):
    """Build per-scale embedding layers plus one fusion layer.

    The last index in `in_index` gets the 'neck' embedding config; every
    other scale uses the plain embedding config. Returns
    (ModuleDict keyed by str(index), fusion layer).
    """
    last_index = in_index[-1]
    embed_layers = nn.ModuleDict({
        str(idx): build_layer(chans, dim, **(embed_neck_cfg if idx == last_index else embed_cfg))
        for idx, chans, dim in zip(in_index, in_channels, embed_dims)
    })
    fuse_layer = build_layer(sum(embed_dims), channels, **fusion_cfg)
    return (embed_layers, fuse_layer)
def cleaner_mimic(text, spacy=True):
    """Normalize a clinical note for tokenized modeling.

    Collapses whitespace, lower-cases (via spaCy tokens when `spacy` is
    True, plain split otherwise), collapses de-identification brackets to
    '<DE>', squeezes repeated punctuation, and masks every token containing
    a digit as 'qqq'.
    """
    text = re.sub('\\s+', ' ', text.strip())
    if spacy:
        tokens = [tok.text.lower() for tok in nlp(text)]
    else:
        tokens = [tok.lower() for tok in text.split()]
    text = ' '.join(tokens)
    # De-identified spans look like [** ... **]; collapse them to a marker.
    text = re.sub('\\[\\s*\\*\\s*\\*(.*?)\\*\\s*\\*\\s*\\]', ' <DE> ', text)
    # Squeeze runs of the same non-alphanumeric character into a single one.
    text = re.sub('([^a-zA-Z0-9])(\\s*\\1\\s*)+', '\\1 ', text)
    text = re.sub('\\s+', ' ', text.strip())
    masked = ['qqq' if any(ch.isdigit() for ch in word) else word for word in text.split(' ')]
    return ' '.join(masked)
class _ReBenchDB(_ConcretePersistence):
    """Persistence backend that batches data points and sends them to ReBenchDB.

    Data points are grouped per run in `_cache` and shipped at most once
    every `_cache_for_seconds` seconds; the cache is kept on send failure
    so data is retried later.
    """

    def __init__(self, configurator, data_store, ui):
        super(_ReBenchDB, self).__init__(data_store, ui)
        self._configurator = configurator
        self._rebench_db = configurator.get_rebench_db_connector()
        # protects _cache, which is filled from persist_data_point callers
        self._lock = Lock()
        self._cache_for_seconds = 30
        self._cache = {}
        self._last_send = time()

    def set_start_time(self, start_time):
        # NOTE(review): assumes `_start_time` is initialized (to None) by
        # _ConcretePersistence — confirm in the base class.
        assert (self._start_time is None)
        self._start_time = start_time

    def load_data(self, runs, discard_run_data):
        raise RuntimeError('Does not yet support data loading from ReBenchDB')

    def persist_data_point(self, data_point):
        # Group data points by their run; actual sending is deferred/batched.
        with self._lock:
            if (data_point.run_id not in self._cache):
                self._cache[data_point.run_id] = []
            self._cache[data_point.run_id].append(data_point)

    def send_data(self):
        """Flush the cache if the batching interval has elapsed."""
        current_time = time()
        time_past = (current_time - self._last_send)
        self.ui.debug_output_info('ReBenchDB: data last send {seconds}s ago\n', seconds=round(time_past, 2))
        if (time_past >= self._cache_for_seconds):
            self._send_data_and_empty_cache()
            self._last_send = time()

    def _send_data_and_empty_cache(self):
        # Only clear the cache when sending reported success, so failed
        # batches are retried on the next flush.
        if self._cache:
            if self._send_data(self._cache):
                self._cache = {}

    def _send_data(self, cache):
        """Serialize cached data points and submit them; returns truthy on success."""
        self.ui.debug_output_info('ReBenchDB: Prepare data for sending\n')
        num_measurements = 0
        all_data = []
        criteria = {}
        for (run_id, data_points) in cache.items():
            dp_data = []
            for dp in data_points:
                # measurements_as_dict also fills `criteria` with any new
                # (criterion, unit) -> index mappings it encounters
                measurements = dp.measurements_as_dict(criteria)
                num_measurements += len(measurements['m'])
                dp_data.append(measurements)
            all_data.append({'runId': run_id.as_dict(), 'd': dp_data})
        criteria_index = []
        for (c, idx) in criteria.items():
            criteria_index.append({'c': c[0], 'u': c[1], 'i': idx})
        self.ui.debug_output_info('ReBenchDB: Sending {num_m} measures. startTime: {st}\n', num_m=num_measurements, st=self._start_time)
        return self._rebench_db.send_results({'data': all_data, 'criteria': criteria_index, 'env': determine_environment(), 'startTime': self._start_time, 'source': determine_source_details(self._configurator)}, num_measurements)

    def close(self):
        # Flush anything still cached on shutdown.
        with self._lock:
            self._send_data_and_empty_cache()
class Experiment():
    """Configuration and (eventually) results of one scheduler experiment.

    Required params: 'cluster_spec', 'policy', 'seed'. Optional workload
    parameters default to -1 when absent; result fields start as None and
    are filled in by update_results().
    """

    def __init__(self, experiment_id, params):
        self._experiment_id = experiment_id
        self._cluster_spec = params['cluster_spec']
        self._policy = params['policy']
        self._seed = int(params['seed'])
        # Optional workload parameters; -1 marks "not configured".
        self._lam = float(params['lam']) if 'lam' in params else -1
        self._num_total_jobs = (int(params['num_total_jobs'])
                                if 'num_total_jobs' in params else -1)
        self._profiling_percentage = (float(params['profiling_percentage'])
                                      if 'profiling_percentage' in params else -1)
        self._num_reference_models = (int(params['num_reference_models'])
                                      if 'num_reference_models' in params else -1)
        # Result fields, populated later by update_results().
        self._average_jct = None
        self._utilization = None
        self._makespan = None
        self._total_cost = None
        self._num_SLO_violations = None

    def update_results(self, results):
        """Parse result strings into numeric fields; optional keys may be absent."""
        self._average_jct = float(results['average JCT'])
        self._utilization = float(results['utilization'])
        if 'makespan' in results:
            self._makespan = float(results['makespan'])
        if 'total_cost' in results:
            # The value carries a leading currency symbol (e.g. '$12.3').
            self._total_cost = float(results['total_cost'][1:])
        if 'num_SLO_violations' in results:
            self._num_SLO_violations = int(results['num_SLO_violations'])
('data.dmlab', 'class')  # NOTE(review): orphaned tuple — looks like the argument list of a registry decorator stripped during extraction; confirm against the original file.
class DmlabData(base.ImageTfdsData):
    """Dmlab dataset (tfds `dmlab:2.0.1`) wrapped for the ImageTfdsData API."""

    def __init__(self, data_dir=None):
        dataset_builder = tfds.builder('dmlab:2.0.1', data_dir=data_dir)
        # Logical split names -> TFDS split specs, including fixed-size
        # low-data subsets (train800, val200, train800val200).
        tfds_splits = {'train': 'train', 'val': 'validation', 'trainval': 'train+validation', 'test': 'test', 'train800': 'train[:800]', 'val200': 'validation[:200]', 'train800val200': 'train[:800]+validation[:200]'}
        # Example counts are taken from the TFDS builder metadata; the small
        # subsets have fixed sizes matching the slice specs above.
        train_count = dataset_builder.info.splits['train'].num_examples
        val_count = dataset_builder.info.splits['validation'].num_examples
        test_count = dataset_builder.info.splits['test'].num_examples
        num_samples_splits = {'train': train_count, 'val': val_count, 'trainval': (train_count + val_count), 'test': test_count, 'train800': 800, 'val200': 200, 'train800val200': 1000}
        super(DmlabData, self).__init__(dataset_builder=dataset_builder, tfds_splits=tfds_splits, num_samples_splits=num_samples_splits, num_preprocessing_threads=400, shuffle_buffer_size=10000, base_preprocess_fn=base.make_get_and_cast_tensors_fn({'image': ('image', None), 'label': ('label', None)}), num_classes=dataset_builder.info.features['label'].num_classes, image_key='image')
def _load_split_txt(path):
    """Read a split file and return the first whitespace-separated token of
    each non-blank line (typically a list of sample ids).

    Fix: blank lines (e.g. a trailing newline) are now skipped; the original
    raised IndexError on them. Also drops the redundant str() conversion and
    the readlines()/lambda indirection.
    """
    with open(path, 'r') as f:
        return [line.split()[0] for line in f if line.strip()]
class BlackBodySimpleSourceRelativistic(BlackBodySimpleSource):
    """Black-body packet source with special-relativistic corrections to the
    packet direction and energy distributions (moving inner boundary)."""

    @classmethod
    def from_model(cls, model, *args, **kwargs):
        # Alternate constructor pulling state from a model object.
        # Fix: restored the @classmethod decorator implied by the `cls`
        # parameter and the `cls(...)` call (lost in extraction).
        # NOTE(review): cls() receives three positional args while __init__
        # below only accepts `time_explosion` positionally — presumably the
        # parent-class signature absorbs the rest; confirm upstream.
        return cls(model.time_explosion, model.r_inner[0], model.t_inner.value, *args, **kwargs)

    def __init__(self, time_explosion=None, **kwargs):
        # Time since explosion; needed to derive beta = r / (t * c).
        self.time_explosion = time_explosion
        super().__init__(**kwargs)

    def set_state_from_model(self, model):
        """Refresh time_explosion from the model, then defer to the parent."""
        self.time_explosion = model.time_explosion
        super().set_state_from_model(model)

    def create_packets(self, no_of_packets):
        """Compute the boundary velocity beta, then create packets as usual."""
        if ((self.radius is None) or (self.time_explosion is None)):
            raise ValueError("Black body Radius or Time of Explosion isn't set")
        # Dimensionless boundary velocity v/c.
        self.beta = ((self.radius / self.time_explosion) / const.c).to('')
        return super().create_packets(no_of_packets)

    def create_packet_mus(self, no_of_packets):
        """Sample direction cosines adjusted for the moving boundary."""
        z = self.rng.random(no_of_packets)
        beta = self.beta
        return ((- beta) + np.sqrt((((beta ** 2) + ((2 * beta) * z)) + z)))

    def create_packet_energies(self, no_of_packets):
        """Uniform packet energies transformed to the comoving frame."""
        beta = self.beta
        gamma = (1.0 / np.sqrt((1 - (beta ** 2))))
        static_inner_boundary2cmf_factor = (((2 * beta) + 1) / (1 - (beta ** 2)))
        energies = (np.ones(no_of_packets) / no_of_packets)
        return ((energies * static_inner_boundary2cmf_factor) / gamma)
def repo_list(recipe_folder='tests/recipes', field='HF_repo'):
    """Collect the set of HuggingFace repo ids listed under `field` in all
    recipe CSV files of `recipe_folder` (files in __skip_list are skipped).

    Idiom cleanup: accumulate into a set directly instead of building a list
    and converting at the end; empty tokens from consecutive spaces are
    filtered by truthiness, matching the original len() > 0 checks.
    """
    HF_repos = set()
    for recipe_csvfile in os.listdir(recipe_folder):
        if recipe_csvfile in __skip_list:
            continue
        with open(os.path.join(recipe_folder, recipe_csvfile), newline='') as csvf:
            reader = csv.DictReader(csvf, delimiter=',', skipinitialspace=True)
            for row in reader:
                # A cell may hold several space-separated repo ids.
                HF_repos.update(repo for repo in row[field].split(' ') if repo)
    return HF_repos
def test_case57():
    # Batch-upsert entities against the NGSI-LD broker; the broker is
    # expected to reject the request with 404.
    url = (brokerIp + '/ngsi-ld/v1/entityOperations/upsert')
    # NOTE(review): this Link header value looks garbled — the rel="..."
    # attribute is unterminated and '{{link}}' appears to be an unrendered
    # template placeholder; confirm against the NGSI-LD context-link format.
    headers = {'Content-Type': 'application/json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
    r = requests.post(url, data=json.dumps(ld_data.subdata48), headers=headers)
    print(r.content)
    assert (r.status_code == 404)
def gaussian_measure_full(mean, cov, f):
    """Numerically compute E[f(y)] for y ~ N(mean, cov) with full covariance.

    A standard-normal variable x is mapped through y = L @ x + mean with
    L = cholesky(cov), and the expectation is integrated with nquad over
    [-10, 10] per dimension (effectively the whole support).
    """
    if not is_pos_def(cov):
        logger.warn(f'cov={cov} not positive definite')
    L = cholesky(cov)

    def integrand(x):
        # Fix: restored the matrix-multiply operator; the source read
        # '(L x)', with '@' evidently lost in extraction.
        y = (L @ x) + mean
        return norm_pdf(x) * f(y)

    K = mean.shape[0]
    lim = [[-10, 10]] * K
    integral = nquad(integrand, lim)[0]
    return integral
def all_reduce_losses(losses):
    """Average a dict of scalar loss tensors across all distributed workers.

    Returns an OrderedDict with the same keys mapping to the world-averaged
    scalar tensors.
    """
    names = list(losses.keys())
    # Pack all scalars into one tensor so a single all_reduce suffices.
    packed = torch.cat([losses[name].view(1) for name in names], dim=0)
    dist.all_reduce(packed, dist.ReduceOp.SUM)
    packed.div_(dist.get_world_size())
    # Unpack back into per-loss scalar tensors.
    chunks = torch.chunk(packed, packed.size(0), dim=0)
    return OrderedDict((name, chunk.view(())) for name, chunk in zip(names, chunks))
def test_ticket_701():
    """Regression test: generic_filter accepts size as a tuple or a scalar."""
    data = numpy.arange(4).reshape((2, 2))

    def window_min(window):
        return numpy.min(window)

    with_tuple = ndimage.generic_filter(data, window_min, size=(1, 1))
    with_scalar = ndimage.generic_filter(data, window_min, size=1)
    assert_equal(with_tuple, with_scalar)
def test_KMaxPooling():
    """Smoke-test the KMaxPooling layer through the Keras layer_test harness."""
    custom_objects = {'KMaxPooling': sequence.KMaxPooling}
    with CustomObjectScope(custom_objects):
        layer_test(
            sequence.KMaxPooling,
            kwargs={'k': 3, 'axis': 1},
            input_shape=(BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE, 2),
        )
class BSNSNmat(SpectralMatrix):
    """Mass matrix for Shen's Neumann (SN) basis.

    The assembled matrix is banded: only diagonals 0 and +/-2 are non-zero,
    and it is symmetric (d[-2] is a copy of d[2]).
    """

    def assemble(self, method):
        (test, trial) = (self.testfunction, self.trialfunction)
        assert isinstance(test[0], SN)
        assert isinstance(trial[0], SN)
        N = test[0].N
        k = np.arange((N - 2), dtype=float)
        # Neumann-basis recursion coefficient: alpha_k = k(k+1) / ((k+2)(k+3)).
        alpha = (((k * (k + 1)) / (k + 2)) / (k + 3))
        # Squared norms of the underlying orthogonal basis; depends on the
        # quadrature method.
        d0 = get_norm_sq(test[0], trial[0], method)
        # Main diagonal combines both basis components; +2 diagonal couples
        # modes k and k+2.
        d = {0: (d0[:(- 2)] + ((alpha ** 2) * d0[2:])), 2: ((- alpha[:(- 2)]) * d0[2:(- 2)])}
        d[(- 2)] = d[2].copy()
        return d
def seed_worker(worker_id):
    """DataLoader worker init hook: reset logging and seed all RNGs.

    torch hands every worker a distinct initial seed; it is reduced modulo
    2**32 so 32-bit consumers (e.g. numpy) accept it.
    """
    clear_logging()
    derived_seed = torch.initial_seed() % (2 ** 32)
    seed_everything(derived_seed)
def load_weight(sess, data, include=None):
    """Assign stored values to matching TF global variables.

    Args:
        sess: active TF session used to run the assign ops.
        data: mapping from variable name to stored array (must expose .shape).
        include: iterable of scope substrings; only variables whose name
            contains one of them are restored. Defaults to no scopes
            (nothing loaded), matching the original `include=[]` behavior.

    Fix: replaced the mutable default argument with None, and dropped the
    redundant .keys() membership lookup.
    """
    if include is None:
        include = []
    for scope in include:
        for v in tf.compat.v1.global_variables():
            # Restore only on name match; skip silently on shape mismatch.
            if v.name in data and scope in v.name:
                if v.shape == data[v.name].shape:
                    sess.run(v.assign(data[v.name]))
                    print('load weight: ', v.name)
def get_training_roidb(imdb):
    """Return the imdb's roidb prepared for training.

    Optionally doubles the dataset with horizontally flipped copies
    (cfg.TRAIN.USE_FLIPPED), then lets the roidb handler attach derived
    metadata to each entry. Mutates `imdb` in place.
    """
    if cfg.TRAIN.USE_FLIPPED:
        print('Appending horizontally-flipped training examples...')
        imdb.append_flipped_images()
        print('done')
    print('Preparing training data...')
    rdl_roidb.prepare_roidb(imdb)
    print('done')
    return imdb.roidb
class TFGroupViTPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder for the real class: instantiation raises an
    informative error unless the TensorFlow backend is installed."""
    # Backends whose absence triggers the explanatory ImportError.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def _is_exception(obj):
    """Return True iff *obj* is a class deriving from Exception."""
    return inspect.isclass(obj) and issubclass(obj, Exception)
class NormalizedClassifier(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.Tensor(1501, 2048))
self.weight.data.uniform_((- 1), 1).renorm_(2, 0, 1e-05).mul_(100000.0)
def forward(self, x):
w = self.weight
x = nn.functional.normalize(x, p=2, dim=1)
w = nn.functional.normalize(w, p=2, dim=1)
return nn.functional.linear(x, w) |
_test()  # NOTE(review): module-level call executed at import time — confirm this is intended rather than guarded by __main__.
def test_matmul_np():
    """FPGA-transform a NumPy-syntax matmul dace program and validate the
    hardware result against a host-side NumPy reference.

    Fixes: restored the `@` matrix-multiply operators (the source read
    '(A B)') and the @dace.program decorator required for .to_sdfg().
    """
    @dace.program
    def matmul_np(A: dace.float64[(128, 64)], B: dace.float64[(64, 32)], C: dace.float64[(128, 32)]):
        C[:] = A @ B
    A = np.random.rand(128, 64).astype(np.float64)
    B = np.random.rand(64, 32).astype(np.float64)
    C = np.random.rand(128, 32).astype(np.float64)
    sdfg = matmul_np.to_sdfg()
    sdfg.apply_transformations([FPGATransformSDFG])
    from dace.libraries.blas import Gemm
    # Use the 1D-systolic GEMM expansion for the FPGA backend.
    Gemm.default_implementation = 'FPGA1DSystolic'
    sdfg.expand_library_nodes()
    sdfg.apply_transformations_repeated([InlineSDFG])
    C_regression = A @ B
    sdfg(A=A, B=B, C=C)
    assert np.allclose(C, C_regression, atol=1e-06)
    return sdfg
class Optimizer(object):
    """Abstract base for optimizers.

    Subclasses must implement _updates(); the update list is computed
    eagerly at construction time, so instantiating this base class raises
    NotImplementedError.
    """

    def __init__(self, cost, params):
        self.cost = cost
        self.params = params
        # Eagerly build the update rules (subclass-provided).
        self.updates = self._updates()

    def _updates(self):
        # Subclass responsibility: return the list of parameter updates.
        raise NotImplementedError()
def read_posetrack_keypoints(output_folder):
    """Collect OpenPose-style 2D keypoints per person from a folder of
    per-frame JSON files.

    Frame index is the position of the JSON file in sorted filename order.
    Returns a dict: person_id -> {'joints2d': (F, J, 3) float array of
    [x, y, confidence] triples, 'frames': (F,) int array of frame indices}.

    Fix: the original leaked one file handle per frame via
    `json.load(open(...))`; files are now opened with a context manager.
    """
    people = dict()
    for idx, result_file in enumerate(sorted(os.listdir(output_folder))):
        json_file = osp.join(output_folder, result_file)
        with open(json_file) as f:
            data = json.load(f)
        for person in data['people']:
            person_id = person['person_id'][0]
            joints2d = person['pose_keypoints_2d']
            # setdefault collapses the original insert-or-append branches.
            entry = people.setdefault(person_id, {'joints2d': [], 'frames': []})
            entry['joints2d'].append(joints2d)
            entry['frames'].append(idx)
    for k in people.keys():
        # Flat (F, J*3) keypoint lists -> (F, J, 3) arrays.
        people[k]['joints2d'] = np.array(people[k]['joints2d']).reshape((len(people[k]['joints2d']), (- 1), 3))
        people[k]['frames'] = np.array(people[k]['frames'])
    return people
def log_likelihood(mu, var, x, muq, varq, a, mask_flat, config):
    """Build masked log-likelihood terms for a (possibly VAE-wrapped) decoder.

    Returns (LL, log_px_given_a, log_qa_given_x); the first two are averaged
    over the number of unmasked entries, and log_qa_given_x is 0 when
    config.use_vae is false.
    """
    # NOTE(review): config.out_distr must be 'bernoulli' or 'gaussian';
    # any other value leaves log_lik unbound and raises NameError below.
    if (config.out_distr == 'bernoulli'):
        log_lik = log_bernoulli(x, mu, eps=1e-06)
    elif (config.out_distr == 'gaussian'):
        log_lik = log_gaussian(x, mu, var)
    # Sum over the feature dimension, then zero out masked (padded) entries.
    log_lik = tf.reduce_sum(log_lik, 1)
    log_lik = tf.multiply(mask_flat, log_lik)
    if (config.ll_keep_prob < 1.0):
        # Randomly drop per-sample likelihood terms as regularization.
        log_lik = tf.layers.dropout(log_lik, config.ll_keep_prob)
    # Normalize by the number of unmasked entries.
    num_el = tf.reduce_sum(mask_flat)
    log_px_given_a = tf.truediv(tf.reduce_sum(log_lik), num_el)
    if config.use_vae:
        # log q(a|x) under the recognition model, masked and averaged the same way.
        log_qa_given_x = tf.reduce_sum(log_gaussian(a, muq, varq), 1)
        log_qa_given_x = tf.multiply(mask_flat, log_qa_given_x)
        log_qa_given_x = tf.truediv(tf.reduce_sum(log_qa_given_x), num_el)
    else:
        log_qa_given_x = tf.constant(0.0, dtype=tf.float32, shape=())
    LL = (log_px_given_a - log_qa_given_x)
    return (LL, log_px_given_a, log_qa_given_x)
def write_list(out_filename, dataset):
    """Serialize a sequence of namedtuples as a JSON array, one record per
    line (each prefixed by a single space), without escaping non-ASCII."""
    records = [record._asdict() for record in dataset]
    last = len(records) - 1
    with open(out_filename, 'w') as fout:
        fout.write('[\n')
        for pos, record in enumerate(records):
            fout.write(' ')
            json.dump(record, fout, ensure_ascii=False)
            # Comma-separate every record except the last.
            fout.write(',\n' if pos < last else '\n')
        fout.write(']\n')
class ConcatChannel(SOFactor):
    """Factor concatenating several signals Z_k along a given axis.

    X = concat(Z_1, ..., Z_K); each Z_k has size Ns[k] along `axis` and the
    output has size N = sum(Ns). Scalar quantities (variances, second
    moments) are combined as size-weighted averages.
    """
    n_next = 1

    def __init__(self, Ns, axis=0):
        self.Ns = Ns
        self.axis = axis
        self.repr_init()
        self.n_prev = len(Ns)
        self.N = sum(Ns)

    def sample(self, *Zs):
        """Concatenate sampled inputs after validating count and sizes."""
        if (len(Zs) != self.n_prev):
            raise ValueError(f'expect {self.n_prev} arrays')
        for (k, Z) in enumerate(Zs):
            if (Z.shape[self.axis] != self.Ns[k]):
                raise ValueError(f'expect Z k={k} array of dimension {self.Ns[k]} along axis {self.axis} but got array of dimension {Z.shape[self.axis]}')
        X = np.concatenate(Zs, axis=self.axis)
        assert (X.shape[self.axis] == self.N)
        return X

    def math(self):
        return '$\\oplus$'

    def second_moment(self, *tau_zs):
        """Size-weighted average of the input second moments."""
        if (len(tau_zs) != self.n_prev):
            raise ValueError(f'expect {self.n_prev} tau_zs')
        tau_x = (sum(((N * tau_z) for (N, tau_z) in zip(self.Ns, tau_zs))) / self.N)
        return tau_x

    def compute_forward_posterior(self, az, bz, ax, bx):
        """Forward posterior: concatenated means, size-averaged variance."""
        (rz, vz) = self.compute_backward_posterior(az, bz, ax, bx)
        rx = np.concatenate(rz, axis=self.axis)
        vx = (sum(((N * v) for (N, v) in zip(self.Ns, vz))) / self.N)
        return (rx, vx)

    def _compute_ak_bk(self, az, bz, ax, bx):
        """Split bx into per-input slices and combine natural parameters."""
        for (N, Z) in zip(self.Ns, bz):
            # Fix: validate each slice Z; the original asserted on
            # `bz.shape`, but bz is a list and has no .shape attribute.
            assert (Z.shape[self.axis] == N)
        assert (bx.shape[self.axis] == self.N)
        # Cumulative offsets delimiting each input's slice of bx.
        idx = ([0] + list(np.cumsum(self.Ns)))
        bx_subs = [np.take(bx, range(idx_min, idx_max), axis=self.axis) for (idx_min, idx_max) in zip(idx[:(- 1)], idx[1:])]
        ak = [(a + ax) for a in az]
        bk = [(b + bx_sub) for (b, bx_sub) in zip(bz, bx_subs)]
        return (ak, bk)

    def compute_backward_posterior(self, az, bz, ax, bx):
        (ak, bk) = self._compute_ak_bk(az, bz, ax, bx)
        vz = [(1 / a) for a in ak]
        rz = [(b / a) for (a, b) in zip(ak, bk)]
        return (rz, vz)

    def compute_forward_error(self, az, ax, tau_z):
        vz = self.compute_backward_error(az, ax, tau_z)
        vx = (sum(((N * v) for (N, v) in zip(self.Ns, vz))) / self.N)
        return vx

    def compute_backward_error(self, az, ax, tau_z):
        ak = [(a + ax) for a in az]
        # Fix: invert each element `a`; the original wrote `1 / ak`, dividing
        # by the whole list (TypeError). Mirrors compute_backward_posterior.
        vz = [(1 / a) for a in ak]
        return vz

    def compute_log_partition(self, az, bz, ax, bx):
        (ak, bk) = self._compute_ak_bk(az, bz, ax, bx)
        logZ = sum([(0.5 * np.sum((((b ** 2) / a) + np.log(((2 * np.pi) / a))))) for (a, b) in zip(ak, bk)])
        return logZ

    def compute_free_energy(self, az, ax, tau_z):
        raise NotImplementedError
def write_label_file(labels_to_class_names, dataset_dir, filename=LABELS_FILENAME):
    """Write one `label:class_name` line per mapping entry to
    dataset_dir/filename via tf.gfile."""
    labels_filename = os.path.join(dataset_dir, filename)
    with tf.gfile.Open(labels_filename, 'w') as f:
        for label, class_name in labels_to_class_names.items():
            f.write(('%d:%s\n' % (label, class_name)))
def main():
    """Entry point: parse CLI args, set up distributed training, build the 2D
    diffusion model and synthetic data loader, then run the training loop."""
    args = create_argparser().parse_args()
    logger.log(f'args: {args}')
    dist_util.setup_dist()
    logger.configure()
    logger.log('creating 2d model and diffusion...')
    # Only the model/diffusion-related arg subset is forwarded.
    (model, diffusion) = create_model_and_diffusion_2d(**args_to_dict(args, model_and_diffusion_defaults_2d().keys()))
    model.to(dist_util.dev())
    # Timestep sampler (e.g. uniform / loss-aware) selected by CLI flag.
    schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)
    logger.log('creating 2d data loader...')
    # args.task indexes into the Synthetic2DType enum members in order.
    data = load_2d_data(batch_size=args.batch_size, shape=list(Synthetic2DType)[args.task])
    logger.log('training 2d model...')
    TrainLoop(model=model, diffusion=diffusion, data=data, batch_size=args.batch_size, microbatch=args.microbatch, lr=args.lr, ema_rate=args.ema_rate, log_interval=args.log_interval, save_interval=args.save_interval, resume_checkpoint=args.resume_checkpoint, use_fp16=args.use_fp16, fp16_scale_growth=args.fp16_scale_growth, schedule_sampler=schedule_sampler, weight_decay=args.weight_decay, lr_anneal_steps=args.lr_anneal_steps).run_loop()
class Partition0(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[word_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[position_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[token_type_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/Linear[dense]']
TENSORS = []
    def __init__(self, layers, tensors, device='cuda:0'):
        """Assemble this pipeline-partition module from a traced model.

        Args:
            layers: mapping from traced layer scope name -> nn.Module; every
                scope in ``self.LAYER_SCOPES`` must be present.
            tensors: mapping from tensor scope name -> tensor for the scopes
                in ``self.TENSORS`` (empty for this partition).
            device: device the partition is moved to (default 'cuda:0').
        """
        super().__init__()
        # Register each traced layer under a positional alias l_<idx>; the
        # ``lookup`` table below maps these aliases back to original names.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layer_scope and layers[layer_scope] if False else layers[layer_scope])
        # Register free-standing tensors as parameters (p_<i>) or buffers
        # (b_<i>) depending on their type.
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Expected nesting of the flattened forward() inputs
        # (attention_mask, input_ids, token_type_ids).
        self.input_structure = [1, 1, 1]
        # Alias -> original (state-dict style) module name, used by the
        # module-level state_dict/load_state_dict helpers.
        self.lookup = {'l_0': 'bert.embeddings.word_embeddings', 'l_1': 'bert.embeddings.position_embeddings', 'l_2': 'bert.embeddings.token_type_embeddings', 'l_3': 'bert.embeddings.LayerNorm', 'l_4': 'bert.embeddings.dropout', 'l_5': 'bert.encoder.0.attention.self.query', 'l_6': 'bert.encoder.0.attention.self.key', 'l_7': 'bert.encoder.0.attention.self.value', 'l_8': 'bert.encoder.0.attention.self.softmax', 'l_9': 'bert.encoder.0.attention.self.dropout', 'l_10': 'bert.encoder.0.attention.output.dense', 'l_11': 'bert.encoder.0.attention.output.dropout', 'l_12': 'bert.encoder.0.attention.output.LayerNorm', 'l_13': 'bert.encoder.0.intermediate.dense', 'l_14': 'bert.encoder.0.output.dense', 'l_15': 'bert.encoder.0.output.dropout', 'l_16': 'bert.encoder.0.output.LayerNorm', 'l_17': 'bert.encoder.1.attention.self.query', 'l_18': 'bert.encoder.1.attention.self.key', 'l_19': 'bert.encoder.1.attention.self.value', 'l_20': 'bert.encoder.1.attention.self.softmax', 'l_21': 'bert.encoder.1.attention.self.dropout', 'l_22': 'bert.encoder.1.attention.output.dense', 'l_23': 'bert.encoder.1.attention.output.dropout', 'l_24': 'bert.encoder.1.attention.output.LayerNorm', 'l_25': 'bert.encoder.1.intermediate.dense', 'l_26': 'bert.encoder.1.output.dense', 'l_27': 'bert.encoder.1.output.dropout', 'l_28': 'bert.encoder.1.output.LayerNorm', 'l_29': 'bert.encoder.2.attention.self.query', 'l_30': 'bert.encoder.2.attention.self.key', 'l_31': 'bert.encoder.2.attention.self.value', 'l_32': 'bert.encoder.2.attention.self.softmax', 'l_33': 'bert.encoder.2.attention.self.dropout', 'l_34': 'bert.encoder.2.attention.output.dense', 'l_35': 'bert.encoder.2.attention.output.dropout', 'l_36': 'bert.encoder.2.attention.output.LayerNorm', 'l_37': 'bert.encoder.2.intermediate.dense', 'l_38': 'bert.encoder.2.output.dense', 'l_39': 'bert.encoder.2.output.dropout', 'l_40': 'bert.encoder.2.output.LayerNorm', 'l_41': 'bert.encoder.3.attention.self.query', 'l_42': 'bert.encoder.3.attention.self.key', 'l_43': 'bert.encoder.3.attention.self.value', 'l_44': 'bert.encoder.3.attention.self.softmax', 'l_45': 'bert.encoder.3.attention.self.dropout', 'l_46': 'bert.encoder.3.attention.output.dense', 'l_47': 'bert.encoder.3.attention.output.dropout', 'l_48': 'bert.encoder.3.attention.output.LayerNorm', 'l_49': 'bert.encoder.3.intermediate.dense', 'l_50': 'bert.encoder.3.output.dense', 'l_51': 'bert.encoder.3.output.dropout', 'l_52': 'bert.encoder.3.output.LayerNorm', 'l_53': 'bert.encoder.4.attention.self.query', 'l_54': 'bert.encoder.4.attention.self.key', 'l_55': 'bert.encoder.4.attention.self.value', 'l_56': 'bert.encoder.4.attention.self.softmax', 'l_57': 'bert.encoder.4.attention.self.dropout', 'l_58': 'bert.encoder.4.attention.output.dense', 'l_59': 'bert.encoder.4.attention.output.dropout', 'l_60': 'bert.encoder.4.attention.output.LayerNorm', 'l_61': 'bert.encoder.4.intermediate.dense', 'l_62': 'bert.encoder.4.output.dense', 'l_63': 'bert.encoder.4.output.dropout', 'l_64': 'bert.encoder.4.output.LayerNorm', 'l_65': 'bert.encoder.5.attention.self.query', 'l_66': 'bert.encoder.5.attention.self.key', 'l_67': 'bert.encoder.5.attention.self.value', 'l_68': 'bert.encoder.5.attention.self.softmax', 'l_69': 'bert.encoder.5.attention.self.dropout', 'l_70': 'bert.encoder.5.attention.output.dense', 'l_71': 'bert.encoder.5.attention.output.dropout', 'l_72': 'bert.encoder.5.attention.output.LayerNorm', 'l_73': 'bert.encoder.5.intermediate.dense', 'l_74': 'bert.encoder.5.output.dense', 'l_75': 'bert.encoder.5.output.dropout', 'l_76': 'bert.encoder.5.output.LayerNorm', 'l_77': 'bert.encoder.6.attention.self.query', 'l_78': 'bert.encoder.6.attention.self.key', 'l_79': 'bert.encoder.6.attention.self.value', 'l_80': 'bert.encoder.6.attention.self.softmax', 'l_81': 'bert.encoder.6.attention.self.dropout', 'l_82': 'bert.encoder.6.attention.output.dense', 'l_83': 'bert.encoder.6.attention.output.dropout', 'l_84': 'bert.encoder.6.attention.output.LayerNorm', 'l_85': 'bert.encoder.6.intermediate.dense', 'l_86': 'bert.encoder.6.output.dense', 'l_87': 'bert.encoder.6.output.dropout', 'l_88': 'bert.encoder.6.output.LayerNorm', 'l_89': 'bert.encoder.7.attention.self.query', 'l_90': 'bert.encoder.7.attention.self.key', 'l_91': 'bert.encoder.7.attention.self.value', 'l_92': 'bert.encoder.7.attention.self.softmax', 'l_93': 'bert.encoder.7.attention.self.dropout', 'l_94': 'bert.encoder.7.attention.output.dense', 'l_95': 'bert.encoder.7.attention.output.dropout', 'l_96': 'bert.encoder.7.attention.output.LayerNorm', 'l_97': 'bert.encoder.7.intermediate.dense', 'l_98': 'bert.encoder.7.output.dense', 'l_99': 'bert.encoder.7.output.dropout', 'l_100': 'bert.encoder.7.output.LayerNorm', 'l_101': 'bert.encoder.8.attention.self.query', 'l_102': 'bert.encoder.8.attention.self.key', 'l_103': 'bert.encoder.8.attention.self.value', 'l_104': 'bert.encoder.8.attention.self.softmax', 'l_105': 'bert.encoder.8.attention.self.dropout', 'l_106': 'bert.encoder.8.attention.output.dense', 'l_107': 'bert.encoder.8.attention.output.dropout', 'l_108': 'bert.encoder.8.attention.output.LayerNorm', 'l_109': 'bert.encoder.8.intermediate.dense', 'l_110': 'bert.encoder.8.output.dense', 'l_111': 'bert.encoder.8.output.dropout', 'l_112': 'bert.encoder.8.output.LayerNorm', 'l_113': 'bert.encoder.9.attention.self.query', 'l_114': 'bert.encoder.9.attention.self.key', 'l_115': 'bert.encoder.9.attention.self.value', 'l_116': 'bert.encoder.9.attention.self.softmax', 'l_117': 'bert.encoder.9.attention.self.dropout', 'l_118': 'bert.encoder.9.attention.output.dense', 'l_119': 'bert.encoder.9.attention.output.dropout', 'l_120': 'bert.encoder.9.attention.output.LayerNorm', 'l_121': 'bert.encoder.9.intermediate.dense', 'l_122': 'bert.encoder.9.output.dense', 'l_123': 'bert.encoder.9.output.dropout', 'l_124': 'bert.encoder.9.output.LayerNorm', 'l_125': 'bert.encoder.10.attention.self.query', 'l_126': 'bert.encoder.10.attention.self.key', 'l_127': 'bert.encoder.10.attention.self.value', 'l_128': 'bert.encoder.10.attention.self.softmax', 'l_129': 'bert.encoder.10.attention.self.dropout', 'l_130': 'bert.encoder.10.attention.output.dense', 'l_131': 'bert.encoder.10.attention.output.dropout', 'l_132': 'bert.encoder.10.attention.output.LayerNorm', 'l_133': 'bert.encoder.10.intermediate.dense', 'l_134': 'bert.encoder.10.output.dense', 'l_135': 'bert.encoder.10.output.dropout', 'l_136': 'bert.encoder.10.output.LayerNorm', 'l_137': 'bert.encoder.11.attention.self.query', 'l_138': 'bert.encoder.11.attention.self.key', 'l_139': 'bert.encoder.11.attention.self.value', 'l_140': 'bert.encoder.11.attention.self.softmax', 'l_141': 'bert.encoder.11.attention.self.dropout', 'l_142': 'bert.encoder.11.attention.output.dense', 'l_143': 'bert.encoder.11.attention.output.dropout', 'l_144': 'bert.encoder.11.attention.output.LayerNorm', 'l_145': 'bert.encoder.11.intermediate.dense'}
        self.to(self.device)
    def forward(self, *args):
        """Run this pipeline partition of BertForQuestionAnswering.

        Machine-generated (traced) dataflow: BERT embeddings plus encoder
        layers 0-10 in full, then layer 11 up to and including the
        intermediate GELU — the partition boundary.

        Args:
            *args: flattened (attention_mask, input_ids, token_type_ids),
                recovered via ``unflatten`` with ``self.input_structure``.

        Returns:
            Flattened list of (extended additive attention mask, hidden
            states after layer 11's attention LayerNorm, layer-11
            intermediate GELU activations) for the next pipeline stage.

        NOTE(review): the repeated view(..., 16, 64) / (1024,) reshapes
        imply a 1024-dim hidden size split into 16 heads of 64 dims —
        consistent with BERT-large; confirm against the traced config.
        """
        (attention_mask, input_ids, token_type_ids) = unflatten(args, self.input_structure)
        # --- embeddings ---
        t_0 = self.l_0(input_ids)
        t_1 = self.l_2(token_type_ids)
        # Build the extended additive attention mask: 0.0 for kept tokens,
        # -10000.0 for padded ones, broadcastable over (batch, heads, q, k).
        t_2 = attention_mask.unsqueeze(1)
        t_2 = t_2.unsqueeze(2)
        t_2 = t_2.to(dtype=torch.float32)
        t_2 = (1.0 - t_2)
        t_2 = (t_2 * (- 10000.0))
        # Position ids 0..seq_len-1, expanded to the batch, then embedded.
        t_3 = input_ids.size(1)
        t_3 = torch.arange(t_3, dtype=torch.int64, device=self.device)
        t_3 = t_3.unsqueeze(0)
        t_3 = t_3.expand_as(input_ids)
        t_3 = self.l_1(t_3)
        # Sum word + position + token-type embeddings; LayerNorm; dropout.
        t_3 = (t_0 + t_3)
        t_1 = (t_3 + t_1)
        t_1 = self.l_3(t_1)
        t_1 = self.l_4(t_1)
        # --- encoder layer 0: self-attention (q/k/v -> scores -> context) ---
        t_3 = self.l_5(t_1)
        t_0 = self.l_6(t_1)
        t_4 = self.l_7(t_1)
        t_5 = t_3.size()
        t_6 = t_0.size()
        t_7 = t_4.size()
        # Reshape (batch, seq, 1024) -> (batch, 16, seq, 64) per head.
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_8 = t_5[0]
        t_9 = t_5[1]
        t_10 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_3.view(t_8, t_9, t_10, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_10 = t_6[0]
        t_9 = t_6[1]
        t_8 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_0.view(t_10, t_9, t_8, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_8 = t_7[0]
        t_9 = t_7[1]
        t_10 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_4.view(t_8, t_9, t_10, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        # Scaled dot-product: QK^T / sqrt(head_dim) + mask, softmax, dropout.
        t_6 = t_6.transpose((- 1), (- 2))
        t_6 = torch.matmul(t_5, t_6)
        t_5 = math.sqrt(64)
        t_5 = (t_6 / t_5)
        t_5 = (t_5 + t_2)
        t_5 = self.l_8(t_5)
        t_5 = self.l_9(t_5)
        # Weighted sum over values; merge heads back to (batch, seq, 1024).
        t_7 = torch.matmul(t_5, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_7 = t_7.contiguous()
        t_5 = t_7.size()
        t_5 = t_5[slice(None, (- 2), None)]
        t_5 = (t_5 + (1024,))
        t_6 = t_5[0]
        t_10 = t_5[1]
        t_5 = t_5[2]
        t_5 = t_7.view(t_6, t_10, t_5)
        # Attention output projection + dropout + residual + LayerNorm.
        t_5 = self.l_10(t_5)
        t_5 = self.l_11(t_5)
        t_1 = (t_5 + t_1)
        t_1 = self.l_12(t_1)
        # Feed-forward: intermediate dense + GELU, output dense + dropout,
        # residual + LayerNorm.
        t_5 = self.l_13(t_1)
        t_5 = torch.nn.functional.gelu(t_5)
        t_5 = self.l_14(t_5)
        t_5 = self.l_15(t_5)
        t_1 = (t_5 + t_1)
        t_1 = self.l_16(t_1)
        # --- encoder layer 1 (same structure as layer 0) ---
        t_5 = self.l_17(t_1)
        t_10 = self.l_18(t_1)
        t_6 = self.l_19(t_1)
        t_7 = t_5.size()
        t_9 = t_10.size()
        t_8 = t_6.size()
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_4 = t_7[0]
        t_0 = t_7[1]
        t_3 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_5.view(t_4, t_0, t_3, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (16, 64))
        t_3 = t_9[0]
        t_0 = t_9[1]
        t_4 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_10.view(t_3, t_0, t_4, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_4 = t_8[0]
        t_0 = t_8[1]
        t_3 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_6.view(t_4, t_0, t_3, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_9 = t_9.transpose((- 1), (- 2))
        t_9 = torch.matmul(t_7, t_9)
        t_7 = math.sqrt(64)
        t_7 = (t_9 / t_7)
        t_7 = (t_7 + t_2)
        t_7 = self.l_20(t_7)
        t_7 = self.l_21(t_7)
        t_8 = torch.matmul(t_7, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_8 = t_8.contiguous()
        t_7 = t_8.size()
        t_7 = t_7[slice(None, (- 2), None)]
        t_7 = (t_7 + (1024,))
        t_9 = t_7[0]
        t_3 = t_7[1]
        t_7 = t_7[2]
        t_7 = t_8.view(t_9, t_3, t_7)
        t_7 = self.l_22(t_7)
        t_7 = self.l_23(t_7)
        t_1 = (t_7 + t_1)
        t_1 = self.l_24(t_1)
        t_7 = self.l_25(t_1)
        t_7 = torch.nn.functional.gelu(t_7)
        t_7 = self.l_26(t_7)
        t_7 = self.l_27(t_7)
        t_1 = (t_7 + t_1)
        t_1 = self.l_28(t_1)
        # --- encoder layer 2 ---
        t_7 = self.l_29(t_1)
        t_3 = self.l_30(t_1)
        t_9 = self.l_31(t_1)
        t_8 = t_7.size()
        t_0 = t_3.size()
        t_4 = t_9.size()
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_6 = t_8[0]
        t_10 = t_8[1]
        t_5 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_7.view(t_6, t_10, t_5, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (16, 64))
        t_5 = t_0[0]
        t_10 = t_0[1]
        t_6 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_3.view(t_5, t_10, t_6, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_6 = t_4[0]
        t_10 = t_4[1]
        t_5 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_9.view(t_6, t_10, t_5, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_0 = t_0.transpose((- 1), (- 2))
        t_0 = torch.matmul(t_8, t_0)
        t_8 = math.sqrt(64)
        t_8 = (t_0 / t_8)
        t_8 = (t_8 + t_2)
        t_8 = self.l_32(t_8)
        t_8 = self.l_33(t_8)
        t_4 = torch.matmul(t_8, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_4 = t_4.contiguous()
        t_8 = t_4.size()
        t_8 = t_8[slice(None, (- 2), None)]
        t_8 = (t_8 + (1024,))
        t_0 = t_8[0]
        t_5 = t_8[1]
        t_8 = t_8[2]
        t_8 = t_4.view(t_0, t_5, t_8)
        t_8 = self.l_34(t_8)
        t_8 = self.l_35(t_8)
        t_1 = (t_8 + t_1)
        t_1 = self.l_36(t_1)
        t_8 = self.l_37(t_1)
        t_8 = torch.nn.functional.gelu(t_8)
        t_8 = self.l_38(t_8)
        t_8 = self.l_39(t_8)
        t_1 = (t_8 + t_1)
        t_1 = self.l_40(t_1)
        # --- encoder layer 3 ---
        t_8 = self.l_41(t_1)
        t_5 = self.l_42(t_1)
        t_0 = self.l_43(t_1)
        t_4 = t_8.size()
        t_10 = t_5.size()
        t_6 = t_0.size()
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_9 = t_4[0]
        t_3 = t_4[1]
        t_7 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_8.view(t_9, t_3, t_7, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_10 = t_10[slice(None, (- 1), None)]
        t_10 = (t_10 + (16, 64))
        t_7 = t_10[0]
        t_3 = t_10[1]
        t_9 = t_10[2]
        t_10 = t_10[3]
        t_10 = t_5.view(t_7, t_3, t_9, t_10)
        t_10 = t_10.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_9 = t_6[0]
        t_3 = t_6[1]
        t_7 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_0.view(t_9, t_3, t_7, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_10 = t_10.transpose((- 1), (- 2))
        t_10 = torch.matmul(t_4, t_10)
        t_4 = math.sqrt(64)
        t_4 = (t_10 / t_4)
        t_4 = (t_4 + t_2)
        t_4 = self.l_44(t_4)
        t_4 = self.l_45(t_4)
        t_6 = torch.matmul(t_4, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_6 = t_6.contiguous()
        t_4 = t_6.size()
        t_4 = t_4[slice(None, (- 2), None)]
        t_4 = (t_4 + (1024,))
        t_10 = t_4[0]
        t_7 = t_4[1]
        t_4 = t_4[2]
        t_4 = t_6.view(t_10, t_7, t_4)
        t_4 = self.l_46(t_4)
        t_4 = self.l_47(t_4)
        t_1 = (t_4 + t_1)
        t_1 = self.l_48(t_1)
        t_4 = self.l_49(t_1)
        t_4 = torch.nn.functional.gelu(t_4)
        t_4 = self.l_50(t_4)
        t_4 = self.l_51(t_4)
        t_1 = (t_4 + t_1)
        t_1 = self.l_52(t_1)
        # --- encoder layer 4 ---
        t_4 = self.l_53(t_1)
        t_7 = self.l_54(t_1)
        t_10 = self.l_55(t_1)
        t_6 = t_4.size()
        t_3 = t_7.size()
        t_9 = t_10.size()
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_0 = t_6[0]
        t_5 = t_6[1]
        t_8 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_4.view(t_0, t_5, t_8, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_8 = t_3[0]
        t_5 = t_3[1]
        t_0 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_7.view(t_8, t_5, t_0, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (16, 64))
        t_0 = t_9[0]
        t_5 = t_9[1]
        t_8 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_10.view(t_0, t_5, t_8, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_3 = t_3.transpose((- 1), (- 2))
        t_3 = torch.matmul(t_6, t_3)
        t_6 = math.sqrt(64)
        t_6 = (t_3 / t_6)
        t_6 = (t_6 + t_2)
        t_6 = self.l_56(t_6)
        t_6 = self.l_57(t_6)
        t_9 = torch.matmul(t_6, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_9 = t_9.contiguous()
        t_6 = t_9.size()
        t_6 = t_6[slice(None, (- 2), None)]
        t_6 = (t_6 + (1024,))
        t_3 = t_6[0]
        t_8 = t_6[1]
        t_6 = t_6[2]
        t_6 = t_9.view(t_3, t_8, t_6)
        t_6 = self.l_58(t_6)
        t_6 = self.l_59(t_6)
        t_1 = (t_6 + t_1)
        t_1 = self.l_60(t_1)
        t_6 = self.l_61(t_1)
        t_6 = torch.nn.functional.gelu(t_6)
        t_6 = self.l_62(t_6)
        t_6 = self.l_63(t_6)
        t_1 = (t_6 + t_1)
        t_1 = self.l_64(t_1)
        # --- encoder layer 5 ---
        t_6 = self.l_65(t_1)
        t_8 = self.l_66(t_1)
        t_3 = self.l_67(t_1)
        t_9 = t_6.size()
        t_5 = t_8.size()
        t_0 = t_3.size()
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (16, 64))
        t_10 = t_9[0]
        t_7 = t_9[1]
        t_4 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_6.view(t_10, t_7, t_4, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_4 = t_5[0]
        t_7 = t_5[1]
        t_10 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_8.view(t_4, t_7, t_10, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (16, 64))
        t_10 = t_0[0]
        t_7 = t_0[1]
        t_4 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_3.view(t_10, t_7, t_4, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_5 = t_5.transpose((- 1), (- 2))
        t_5 = torch.matmul(t_9, t_5)
        t_9 = math.sqrt(64)
        t_9 = (t_5 / t_9)
        t_9 = (t_9 + t_2)
        t_9 = self.l_68(t_9)
        t_9 = self.l_69(t_9)
        t_0 = torch.matmul(t_9, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_0 = t_0.contiguous()
        t_9 = t_0.size()
        t_9 = t_9[slice(None, (- 2), None)]
        t_9 = (t_9 + (1024,))
        t_5 = t_9[0]
        t_4 = t_9[1]
        t_9 = t_9[2]
        t_9 = t_0.view(t_5, t_4, t_9)
        t_9 = self.l_70(t_9)
        t_9 = self.l_71(t_9)
        t_1 = (t_9 + t_1)
        t_1 = self.l_72(t_1)
        t_9 = self.l_73(t_1)
        t_9 = torch.nn.functional.gelu(t_9)
        t_9 = self.l_74(t_9)
        t_9 = self.l_75(t_9)
        t_1 = (t_9 + t_1)
        t_1 = self.l_76(t_1)
        # --- encoder layer 6 ---
        t_9 = self.l_77(t_1)
        t_4 = self.l_78(t_1)
        t_5 = self.l_79(t_1)
        t_0 = t_9.size()
        t_7 = t_4.size()
        t_10 = t_5.size()
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (16, 64))
        t_3 = t_0[0]
        t_8 = t_0[1]
        t_6 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_9.view(t_3, t_8, t_6, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_6 = t_7[0]
        t_8 = t_7[1]
        t_3 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_4.view(t_6, t_8, t_3, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_10 = t_10[slice(None, (- 1), None)]
        t_10 = (t_10 + (16, 64))
        t_3 = t_10[0]
        t_8 = t_10[1]
        t_6 = t_10[2]
        t_10 = t_10[3]
        t_10 = t_5.view(t_3, t_8, t_6, t_10)
        t_10 = t_10.permute(0, 2, 1, 3)
        t_7 = t_7.transpose((- 1), (- 2))
        t_7 = torch.matmul(t_0, t_7)
        t_0 = math.sqrt(64)
        t_0 = (t_7 / t_0)
        t_0 = (t_0 + t_2)
        t_0 = self.l_80(t_0)
        t_0 = self.l_81(t_0)
        t_10 = torch.matmul(t_0, t_10)
        t_10 = t_10.permute(0, 2, 1, 3)
        t_10 = t_10.contiguous()
        t_0 = t_10.size()
        t_0 = t_0[slice(None, (- 2), None)]
        t_0 = (t_0 + (1024,))
        t_7 = t_0[0]
        t_6 = t_0[1]
        t_0 = t_0[2]
        t_0 = t_10.view(t_7, t_6, t_0)
        t_0 = self.l_82(t_0)
        t_0 = self.l_83(t_0)
        t_1 = (t_0 + t_1)
        t_1 = self.l_84(t_1)
        t_0 = self.l_85(t_1)
        t_0 = torch.nn.functional.gelu(t_0)
        t_0 = self.l_86(t_0)
        t_0 = self.l_87(t_0)
        t_1 = (t_0 + t_1)
        t_1 = self.l_88(t_1)
        # --- encoder layer 7 ---
        t_0 = self.l_89(t_1)
        t_6 = self.l_90(t_1)
        t_7 = self.l_91(t_1)
        t_10 = t_0.size()
        t_8 = t_6.size()
        t_3 = t_7.size()
        t_10 = t_10[slice(None, (- 1), None)]
        t_10 = (t_10 + (16, 64))
        t_5 = t_10[0]
        t_4 = t_10[1]
        t_9 = t_10[2]
        t_10 = t_10[3]
        t_10 = t_0.view(t_5, t_4, t_9, t_10)
        t_10 = t_10.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_9 = t_8[0]
        t_4 = t_8[1]
        t_5 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_6.view(t_9, t_4, t_5, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_5 = t_3[0]
        t_4 = t_3[1]
        t_9 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_7.view(t_5, t_4, t_9, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_8 = t_8.transpose((- 1), (- 2))
        t_8 = torch.matmul(t_10, t_8)
        t_10 = math.sqrt(64)
        t_10 = (t_8 / t_10)
        t_10 = (t_10 + t_2)
        t_10 = self.l_92(t_10)
        t_10 = self.l_93(t_10)
        t_3 = torch.matmul(t_10, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_3 = t_3.contiguous()
        t_10 = t_3.size()
        t_10 = t_10[slice(None, (- 2), None)]
        t_10 = (t_10 + (1024,))
        t_8 = t_10[0]
        t_9 = t_10[1]
        t_10 = t_10[2]
        t_10 = t_3.view(t_8, t_9, t_10)
        t_10 = self.l_94(t_10)
        t_10 = self.l_95(t_10)
        t_1 = (t_10 + t_1)
        t_1 = self.l_96(t_1)
        t_10 = self.l_97(t_1)
        t_10 = torch.nn.functional.gelu(t_10)
        t_10 = self.l_98(t_10)
        t_10 = self.l_99(t_10)
        t_1 = (t_10 + t_1)
        t_1 = self.l_100(t_1)
        # --- encoder layer 8 ---
        t_10 = self.l_101(t_1)
        t_9 = self.l_102(t_1)
        t_8 = self.l_103(t_1)
        t_3 = t_10.size()
        t_4 = t_9.size()
        t_5 = t_8.size()
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_7 = t_3[0]
        t_6 = t_3[1]
        t_0 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_10.view(t_7, t_6, t_0, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_0 = t_4[0]
        t_6 = t_4[1]
        t_7 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_9.view(t_0, t_6, t_7, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_7 = t_5[0]
        t_6 = t_5[1]
        t_0 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_8.view(t_7, t_6, t_0, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_4 = t_4.transpose((- 1), (- 2))
        t_4 = torch.matmul(t_3, t_4)
        t_3 = math.sqrt(64)
        t_3 = (t_4 / t_3)
        t_3 = (t_3 + t_2)
        t_3 = self.l_104(t_3)
        t_3 = self.l_105(t_3)
        t_5 = torch.matmul(t_3, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_5 = t_5.contiguous()
        t_3 = t_5.size()
        t_3 = t_3[slice(None, (- 2), None)]
        t_3 = (t_3 + (1024,))
        t_4 = t_3[0]
        t_0 = t_3[1]
        t_3 = t_3[2]
        t_3 = t_5.view(t_4, t_0, t_3)
        t_3 = self.l_106(t_3)
        t_3 = self.l_107(t_3)
        t_1 = (t_3 + t_1)
        t_1 = self.l_108(t_1)
        t_3 = self.l_109(t_1)
        t_3 = torch.nn.functional.gelu(t_3)
        t_3 = self.l_110(t_3)
        t_3 = self.l_111(t_3)
        t_1 = (t_3 + t_1)
        t_1 = self.l_112(t_1)
        # --- encoder layer 9 ---
        t_3 = self.l_113(t_1)
        t_0 = self.l_114(t_1)
        t_4 = self.l_115(t_1)
        t_5 = t_3.size()
        t_6 = t_0.size()
        t_7 = t_4.size()
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_8 = t_5[0]
        t_9 = t_5[1]
        t_10 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_3.view(t_8, t_9, t_10, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_10 = t_6[0]
        t_9 = t_6[1]
        t_8 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_0.view(t_10, t_9, t_8, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_8 = t_7[0]
        t_9 = t_7[1]
        t_10 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_4.view(t_8, t_9, t_10, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_6 = t_6.transpose((- 1), (- 2))
        t_6 = torch.matmul(t_5, t_6)
        t_5 = math.sqrt(64)
        t_5 = (t_6 / t_5)
        t_5 = (t_5 + t_2)
        t_5 = self.l_116(t_5)
        t_5 = self.l_117(t_5)
        t_7 = torch.matmul(t_5, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_7 = t_7.contiguous()
        t_5 = t_7.size()
        t_5 = t_5[slice(None, (- 2), None)]
        t_5 = (t_5 + (1024,))
        t_6 = t_5[0]
        t_10 = t_5[1]
        t_5 = t_5[2]
        t_5 = t_7.view(t_6, t_10, t_5)
        t_5 = self.l_118(t_5)
        t_5 = self.l_119(t_5)
        t_1 = (t_5 + t_1)
        t_1 = self.l_120(t_1)
        t_5 = self.l_121(t_1)
        t_5 = torch.nn.functional.gelu(t_5)
        t_5 = self.l_122(t_5)
        t_5 = self.l_123(t_5)
        t_1 = (t_5 + t_1)
        t_1 = self.l_124(t_1)
        # --- encoder layer 10 ---
        t_5 = self.l_125(t_1)
        t_10 = self.l_126(t_1)
        t_6 = self.l_127(t_1)
        t_7 = t_5.size()
        t_9 = t_10.size()
        t_8 = t_6.size()
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_4 = t_7[0]
        t_0 = t_7[1]
        t_3 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_5.view(t_4, t_0, t_3, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (16, 64))
        t_3 = t_9[0]
        t_0 = t_9[1]
        t_4 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_10.view(t_3, t_0, t_4, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_4 = t_8[0]
        t_0 = t_8[1]
        t_3 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_6.view(t_4, t_0, t_3, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_9 = t_9.transpose((- 1), (- 2))
        t_9 = torch.matmul(t_7, t_9)
        t_7 = math.sqrt(64)
        t_7 = (t_9 / t_7)
        t_7 = (t_7 + t_2)
        t_7 = self.l_128(t_7)
        t_7 = self.l_129(t_7)
        t_8 = torch.matmul(t_7, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_8 = t_8.contiguous()
        t_7 = t_8.size()
        t_7 = t_7[slice(None, (- 2), None)]
        t_7 = (t_7 + (1024,))
        t_9 = t_7[0]
        t_3 = t_7[1]
        t_7 = t_7[2]
        t_7 = t_8.view(t_9, t_3, t_7)
        t_7 = self.l_130(t_7)
        t_7 = self.l_131(t_7)
        t_1 = (t_7 + t_1)
        t_1 = self.l_132(t_1)
        t_7 = self.l_133(t_1)
        t_7 = torch.nn.functional.gelu(t_7)
        t_7 = self.l_134(t_7)
        t_7 = self.l_135(t_7)
        t_1 = (t_7 + t_1)
        t_1 = self.l_136(t_1)
        # --- encoder layer 11 (attention + intermediate only; the rest of
        # the layer lives in the next partition) ---
        t_7 = self.l_137(t_1)
        t_3 = self.l_138(t_1)
        t_9 = self.l_139(t_1)
        t_8 = t_7.size()
        t_0 = t_3.size()
        t_4 = t_9.size()
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_6 = t_8[0]
        t_10 = t_8[1]
        t_5 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_7.view(t_6, t_10, t_5, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (16, 64))
        t_5 = t_0[0]
        t_10 = t_0[1]
        t_6 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_3.view(t_5, t_10, t_6, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_6 = t_4[0]
        t_10 = t_4[1]
        t_5 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_9.view(t_6, t_10, t_5, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_0 = t_0.transpose((- 1), (- 2))
        t_0 = torch.matmul(t_8, t_0)
        t_8 = math.sqrt(64)
        t_8 = (t_0 / t_8)
        t_8 = (t_8 + t_2)
        t_8 = self.l_140(t_8)
        t_8 = self.l_141(t_8)
        t_4 = torch.matmul(t_8, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_4 = t_4.contiguous()
        t_8 = t_4.size()
        t_8 = t_8[slice(None, (- 2), None)]
        t_8 = (t_8 + (1024,))
        t_0 = t_8[0]
        t_5 = t_8[1]
        t_8 = t_8[2]
        t_8 = t_4.view(t_0, t_5, t_8)
        t_8 = self.l_142(t_8)
        t_8 = self.l_143(t_8)
        t_1 = (t_8 + t_1)
        t_1 = self.l_144(t_1)
        t_8 = self.l_145(t_1)
        t_8 = torch.nn.functional.gelu(t_8)
        # Hand off (mask, attention-LayerNorm output, intermediate GELU
        # activations) flattened for the next pipeline stage.
        return list(flatten((t_2, t_1, t_8)))
    def state_dict(self, *args, **kwargs):
        # Delegate to the module-level helper, which remaps the partition's
        # l_<idx> aliases back to the original model names via self.lookup.
        return state_dict(self, *args, **kwargs)
    def load_state_dict(self, *args, **kwargs):
        # Delegate to the module-level partition-aware helper.
        return load_state_dict(self, *args, **kwargs)
    def named_parameters(self, *args, **kwargs):
        # Delegate to the module-level partition-aware helper.
        return named_parameters(self, *args, **kwargs)
    def named_buffers(self, *args, **kwargs):
        # Delegate to the module-level partition-aware helper.
        return named_buffers(self, *args, **kwargs)
    def cpu(self):
        # Delegate to the module-level helper (also updates self.device).
        return cpu(self)
    def cuda(self, device=None):
        # Delegate to the module-level helper (also updates self.device).
        return cuda(self, device=device)
    def to(self, *args, **kwargs):
        # Delegate to the module-level helper (also updates self.device).
        return to(self, *args, **kwargs)
class UCBVI(abc.ABC):
    """Upper-Confidence-Bound Value Iteration base class for finite-horizon
    tabular MDPs with a function-approximation confidence ellipsoid.

    Subclasses are expected to provide ``approximator_dim``,
    ``confidence_multiplier``, ``update_output_gradient``, ``train``,
    ``predict`` and ``reset`` (the hook methods below are no-op
    placeholders).
    """

    def __init__(self, mdp, n_episodes=1, init_state=None, reg_factor=1.0, confidence_scaling_factor=(- 1.0), delta=0.05, train_every=1, throttle=int(100.0)):
        """
        Args:
            mdp: environment exposing H, n_states, n_actions, states,
                actions, iteration, rewards, new_state, evaluate_policy,
                optimal_policy and reset_iteration.
            n_episodes: number of episodes to run.
            init_state: fixed initial state; random if None.
            reg_factor: ridge regularization for the design matrix.
            confidence_scaling_factor: bonus scale; -1.0 means "use the
                MDP's noise std".
            delta: confidence level parameter.
            train_every: train the approximator every k-th episode.
            throttle: progress-bar update period (episodes).
        """
        self.mdp = mdp
        self.n_episodes = n_episodes
        self.init_state = init_state
        self.reg_factor = reg_factor
        # -1.0 is a sentinel meaning "default to the MDP's noise level".
        if (confidence_scaling_factor == (- 1.0)):
            confidence_scaling_factor = mdp.noise_std
        self.confidence_scaling_factor = confidence_scaling_factor
        self.delta = delta
        self.train_every = train_every
        self.throttle = throttle
        self.reset()

    def reset_upper_confidence_bounds(self):
        """Allocate Q estimates, bonuses and UCBs; Q_hat gets an extra
        all-zero slice at index H for the terminal step."""
        self.Q_hat = np.concatenate((np.empty((self.mdp.H, self.mdp.n_states, self.mdp.n_actions)), np.zeros((1, self.mdp.n_states, self.mdp.n_actions))))
        self.exploration_bonus = np.empty((self.mdp.H, self.mdp.n_states, self.mdp.n_actions))
        self.upper_confidence_bounds = np.ones((self.mdp.H, self.mdp.n_states, self.mdp.n_actions))

    def reset_regrets(self):
        """Allocate the per-episode regret array and cache the optimal
        value/policy used as the regret baseline."""
        self.regrets = np.empty(self.n_episodes)
        (self.V_star, self.pi_star) = self.mdp.optimal_policy()

    def reset_policy(self):
        """Allocate the greedy policy table (one action per (h, state))."""
        self.policy = np.empty((self.mdp.H, self.mdp.n_states)).astype('int')

    def reset_state_action_reward_buffer(self):
        """Allocate episode trajectory buffers and pick the initial state."""
        self.buffer_states = np.empty((self.mdp.H + 1)).astype('int')
        self.buffer_actions = np.empty(self.mdp.H).astype('int')
        self.buffer_rewards = np.empty(self.mdp.H)
        if (self.init_state is not None):
            self.state = self.init_state
        else:
            self.state = np.random.choice(self.mdp.states)

    def reset_A_inv(self):
        """Initialize each step's inverse design matrix to (1/lambda) * I."""
        self.A_inv = np.array([(np.eye(self.approximator_dim) / self.reg_factor) for _ in range(self.mdp.H)]).reshape(self.mdp.H, self.approximator_dim, self.approximator_dim)

    def reset_grad_approx(self):
        """Zero the per-(state, action) approximator gradient features."""
        self.grad_approx = np.zeros((self.mdp.n_states, self.mdp.n_actions, self.approximator_dim))

    def take_action(self):
        """Act greedily w.r.t. the current UCBs at the current step."""
        self.policy[self.mdp.iteration] = np.argmax(self.upper_confidence_bounds[self.mdp.iteration], axis=1).astype('int')
        self.action = self.policy[(self.mdp.iteration, self.state)]

    # --- subclass hooks -------------------------------------------------
    def reset(self):
        pass

    def approximator_dim(self):
        pass

    def confidence_multiplier(self):
        pass

    def update_output_gradient(self):
        pass

    def train(self):
        pass

    def predict(self):
        pass

    def update_confidence_bounds(self):
        """Recompute exploration bonuses and UCBs for the current step.

        Bonus(s, a) = multiplier * sqrt(g(s,a)^T A_inv g(s,a)) — the width
        of the confidence ellipsoid along the feature gradient.
        """
        self.update_output_gradient()
        # BUGFIX: the quadratic form was missing the '@' matrix-multiply
        # operators (g.T A_inv g was a syntax error).
        self.exploration_bonus[self.mdp.iteration] = (np.array([np.sqrt(((self.grad_approx[(s, a)].T @ self.A_inv[self.mdp.iteration]) @ self.grad_approx[(s, a)])) for (s, a) in itertools.product(self.mdp.states, self.mdp.actions)]).reshape(self.mdp.n_states, self.mdp.n_actions) * self.confidence_multiplier)
        self.predict()
        # UCBs are clipped above by H (max achievable return per step).
        self.upper_confidence_bounds[self.mdp.iteration] = np.clip((self.Q_hat[self.mdp.iteration] + self.exploration_bonus[self.mdp.iteration]), None, self.mdp.H)

    def update_A_inv(self):
        """Rank-one update of the inverse design matrix (Sherman-Morrison)."""
        self.A_inv[self.mdp.iteration] = inv_sherman_morrison(self.grad_approx[(self.state, self.action)], self.A_inv[self.mdp.iteration])

    def run(self):
        """Run n_episodes of backward value-update + forward rollout,
        accumulating per-episode regret against the optimal policy."""
        postfix = {'total regret': 0.0}
        with tqdm(total=self.n_episodes, postfix=postfix) as pbar:
            for k in range(self.n_episodes):
                # Backward pass: refit and refresh confidence bounds h = H-1..0.
                self.mdp.reset_iteration('backward')
                for h in reversed(range(self.mdp.H)):
                    if (k > 0):
                        # Replay last episode's transition at step h.
                        self.action = self.buffer_actions[h]
                        self.reward = self.buffer_rewards[h]
                        self.state = self.buffer_states[h]
                        self.update_A_inv()
                        if ((k % self.train_every) == 0):
                            self.train()
                    self.update_confidence_bounds()
                    self.mdp.iteration -= 1
                # Forward pass: roll out the greedy policy from init_state.
                self.mdp.reset_iteration('forward')
                self.state = self.init_state
                self.buffer_states[0] = self.state
                for h in range(self.mdp.H):
                    self.take_action()
                    self.reward = self.mdp.rewards[(h, self.state, self.action)]
                    self.state = self.mdp.new_state(self.state, self.action)
                    self.buffer_actions[h] = self.action
                    self.buffer_rewards[h] = self.reward
                    self.buffer_states[(h + 1)] = self.state
                    self.mdp.iteration += 1
                V = self.mdp.evaluate_policy(self.policy)
                self.regrets[k] = (self.V_star[(0, self.init_state)] - V[(0, self.init_state)])
                postfix['total regret'] += self.regrets[k]
                if ((k % self.throttle) == 0):
                    pbar.set_postfix(postfix)
                    pbar.update(self.throttle)
def get_kitchen_benchmark_goals():
    """Enumerate the kitchen benchmark goal set.

    Builds the six single-object tasks followed by every pair drawn from
    {light_switch, slide_cabinet, hinge_cabinet, kettle}.

    Returns:
        (obs_element_goals, obs_element_indices, goal_configs): per-config
        target joint values, the matching observation indices, and the
        list of task-name lists.
    """
    target_values = {'bottom_burner': [(- 0.88), (- 0.01)], 'light_switch': [(- 0.69), (- 0.05)], 'slide_cabinet': [0.37], 'hinge_cabinet': [0.0, 0.5], 'microwave': [(- 0.5)], 'kettle': [(- 0.23), 0.75, 1.62]}
    target_indices = {'bottom_burner': [9, 10], 'light_switch': [17, 18], 'slide_cabinet': [19], 'hinge_cabinet': [20, 21], 'microwave': [22], 'kettle': [23, 24, 25]}
    tasks = ['bottom_burner', 'light_switch', 'slide_cabinet', 'hinge_cabinet', 'microwave', 'kettle']
    # Single-task configs first, then the two-task combinations.
    goal_configs = [[name] for name in tasks]
    goal_configs += [[tasks[i], tasks[j]] for (i, j) in combinations([1, 2, 3, 5], 2)]
    # Concatenate per-object targets/indices in config order.
    obs_element_goals = [np.concatenate([target_values[obj] for obj in cfg]) for cfg in goal_configs]
    obs_element_indices = [np.concatenate([target_indices[obj] for obj in cfg]) for cfg in goal_configs]
    return (obs_element_goals, obs_element_indices, goal_configs)
def PreActResNet18(num_channels=3):
    """Build a PreActResNet-18 (two pre-activation blocks per stage).

    Args:
        num_channels: number of input image channels (default 3 = RGB).
    """
    return PreActResNet(PreActBlock, [2, 2, 2, 2], num_channels=num_channels)
def line_search(f, x0, dx, g0, alpha, condition, max_steps=10, c1=0.1):
    """Backtracking line search along step ``dx`` from ``x0``.

    Accepts the first shrunken step whose objective exceeds the
    slope-based threshold f(x0) + c1 * g0.T @ dx and satisfies
    ``condition`` (note the '>' — this is an ascent-style acceptance
    test as written).

    Args:
        f: objective callable.
        x0: starting point (array).
        dx: initial step; NOTE: shrunk in place via ``dx *= alpha`` when
            dx is a mutable array.
        g0: gradient at x0, used for the acceptance threshold.
        alpha: backtracking shrink factor, must be in (0, 1).
        condition: extra feasibility predicate on the candidate point.
        max_steps: maximum number of backtracking iterations.
        c1: sufficient-increase coefficient.

    Returns:
        The accepted point, or ``x0`` if no step is accepted.
    """
    assert (0 < alpha < 1)
    f0 = f(x0)
    for _ in range(max_steps):
        x = (x0 + dx)
        # BUGFIX: the threshold term was missing the '@' operator
        # ((c1 * g0.T) dx was a syntax error).
        if ((f(x) > (f0 + ((c1 * g0.T) @ dx))) and condition(x)):
            return x
        dx *= alpha
    print('Line search failed, returning x0')
    return x0
class R1_mAP(Metric):
    """Accumulating metric that computes CMC curve and mAP for re-id
    evaluation: the first ``num_query`` accumulated features are queries,
    the rest form the gallery."""

    def __init__(self, num_query, max_rank=50, feat_norm='yes'):
        super(R1_mAP, self).__init__()
        self.num_query = num_query
        self.max_rank = max_rank
        self.feat_norm = feat_norm

    def reset(self):
        """Clear accumulated features, person ids and camera ids."""
        self.feats = []
        self.pids = []
        self.camids = []

    def update(self, output):
        """Accumulate one batch of (features, pids, camids)."""
        (feat, pid, camid) = output
        self.feats.append(feat)
        self.pids.extend(np.asarray(pid))
        self.camids.extend(np.asarray(camid))

    def compute(self):
        """Compute (cmc, mAP) over everything accumulated so far."""
        all_feats = torch.cat(self.feats, dim=0)
        if (self.feat_norm == 'yes'):
            print('The test feature is normalized')
            all_feats = torch.nn.functional.normalize(all_feats, dim=1, p=2)
        # Split query vs. gallery by accumulation order.
        query_feats = all_feats[:self.num_query]
        query_pids = np.asarray(self.pids[:self.num_query])
        query_camids = np.asarray(self.camids[:self.num_query])
        gallery_feats = all_feats[self.num_query:]
        gallery_pids = np.asarray(self.pids[self.num_query:])
        gallery_camids = np.asarray(self.camids[self.num_query:])
        (num_q, num_g) = (query_feats.shape[0], gallery_feats.shape[0])
        # Squared euclidean distances via ||a||^2 + ||b||^2 - 2 a.b,
        # with the cross term added in place by addmm_.
        distmat = (torch.pow(query_feats, 2).sum(dim=1, keepdim=True).expand(num_q, num_g) + torch.pow(gallery_feats, 2).sum(dim=1, keepdim=True).expand(num_g, num_q).t())
        distmat.addmm_(query_feats, gallery_feats.t(), beta=1, alpha=(- 2))
        distmat = distmat.cpu().numpy()
        (cmc, mAP) = eval_func(distmat, query_pids, gallery_pids, query_camids, gallery_camids)
        return (cmc, mAP)
class NovelViewSynthesizeModel(object):
    """Abstract base for novel-view-synthesis models: subclasses implement
    imitation, model construction and teardown."""

    def __init__(self, output_dir):
        # mkdir creates the directory if needed and returns its path.
        self.output_dir = mkdir(output_dir)
        self.si_out_dir = self.output_dir
        self.num_preds_si = 0

    def imitate(self, src_infos: Dict[str, Any], ref_infos: Dict[str, Any]) -> List[str]:
        """Synthesize outputs for ref_infos conditioned on src_infos."""
        raise NotImplementedError

    def build_model(self):
        """Construct the underlying network(s)."""
        raise NotImplementedError

    def terminate(self):
        """Release resources held by the model."""
        raise NotImplementedError

    def personalization(self, src_infos):
        """Default personalization: pass source info through unchanged."""
        return src_infos
class BinaryMorphology2D():
    """ASV-style benchmark of binary erosion over footprint shapes, radii
    and footprint decomposition modes."""

    param_names = ['shape', 'footprint', 'radius', 'decomposition']
    params = [((512, 512),), ('square', 'diamond', 'octagon', 'disk', 'ellipse', 'star'), (1, 3, 5, 15, 25, 40), (None, 'sequence', 'separable', 'crosses')]

    def setup(self, shape, footprint, radius, decomposition):
        """Build the random binary image and the requested footprint;
        raise NotImplementedError to skip unsupported parameter combos."""
        rng = np.random.default_rng(123)
        # ~sparse foreground: True where the normal sample is below 3.5.
        self.image = (rng.standard_normal(shape) < 3.5)
        fp_func = getattr(morphology, footprint)
        # Which footprint shapes support each decomposition mode.
        allow_sequence = ('rectangle', 'square', 'diamond', 'octagon', 'disk')
        allow_separable = ('rectangle', 'square')
        allow_crosses = ('disk', 'ellipse')
        allow_decomp = tuple(((set(allow_sequence) | set(allow_separable)) | set(allow_crosses)))
        footprint_kwargs = {}
        if ((decomposition == 'sequence') and (footprint not in allow_sequence)):
            raise NotImplementedError('decomposition unimplemented')
        elif ((decomposition == 'separable') and (footprint not in allow_separable)):
            raise NotImplementedError('separable decomposition unavailable')
        elif ((decomposition == 'crosses') and (footprint not in allow_crosses)):
            # BUGFIX: message previously said 'separable' (copy-paste).
            raise NotImplementedError('crosses decomposition unavailable')
        if (footprint in allow_decomp):
            footprint_kwargs['decomposition'] = decomposition
        # Map the benchmark's "radius" onto each footprint's size args.
        if (footprint in ['rectangle', 'square']):
            size = ((2 * radius) + 1)
            self.footprint = fp_func(size, **footprint_kwargs)
        elif (footprint in ['diamond', 'disk']):
            self.footprint = fp_func(radius, **footprint_kwargs)
        elif (footprint == 'star'):
            a = max(((2 * radius) // 3), 1)
            self.footprint = fp_func(a, **footprint_kwargs)
        elif (footprint == 'octagon'):
            m = n = max(((2 * radius) // 3), 1)
            self.footprint = fp_func(m, n, **footprint_kwargs)
        elif (footprint == 'ellipse'):
            if (radius > 1):
                self.footprint = fp_func((radius - 1), (radius + 1), **footprint_kwargs)
            else:
                self.footprint = fp_func(radius, radius, **footprint_kwargs)

    def time_erosion(self, shape, footprint, radius, *args):
        """Timed target: binary erosion with the prepared footprint."""
        morphology.binary_erosion(self.image, self.footprint)
def get_args():
    """Build the command-line parser and parse ``sys.argv``.

    Argument groups are contributed by the intensity-training module and
    the hyperopt utilities.
    """
    arg_parser = argparse.ArgumentParser()
    train_inten.add_inten_train_args(arg_parser)
    nn_utils.add_hyperopt_args(arg_parser)
    return arg_parser.parse_args()
class Cipher(Element):
    """A cipher element attached to a cryptosystem (its parent) and a key."""

    def __init__(self, parent, key):
        """Attach this element to its cryptosystem and record the key."""
        Element.__init__(self, parent)
        self._key = key

    def __eq__(self, right):
        """Equal iff same concrete type, same cryptosystem, and same key."""
        if type(self) is not type(right):
            return False
        return ((self.parent() == right.parent()) and (self._key == right._key))

    def _repr_(self):
        """Return a short description naming the cipher's domain."""
        return ('Cipher on %s' % self.parent().cipher_domain())

    def key(self):
        """Return the key this cipher was constructed with."""
        return self._key

    def domain(self):
        """Return the domain of the underlying cryptosystem."""
        return self.parent().cipher_domain()

    def codomain(self):
        """Return the codomain of the underlying cryptosystem."""
        return self.parent().cipher_codomain()
def test_data_dependency_5():
    """Check that slicing the fixture module keeps the full expected
    bytecode: the module-level block (class creation, attribute store,
    return of the instance) plus the class body's trivial block.
    """
    # Expected module-level instructions: build class Foo, instantiate it,
    # store an attribute on the instance, and return it.
    module_block = BasicBlock([Instr('LOAD_BUILD_CLASS'), Instr('LOAD_CONST', arg=dummy_code_object), Instr('LOAD_CONST', arg='Foo'), Instr('MAKE_FUNCTION', arg=0), Instr('LOAD_CONST', arg='Foo'), Instr('CALL_FUNCTION', arg=2), Instr('STORE_NAME', arg='Foo'), Instr('LOAD_GLOBAL', arg='Foo'), Instr('CALL_FUNCTION', arg=0), Instr('STORE_FAST', arg='ob'), Instr('LOAD_CONST', arg=1), Instr('LOAD_FAST', arg='ob'), Instr('STORE_ATTR', arg='attr1'), Instr('LOAD_FAST', arg='ob'), Instr('STORE_FAST', arg='result'), Instr('LOAD_FAST', arg='result'), Instr('RETURN_VALUE')])
    # The class body itself compiles to a bare `return None`.
    class_attr_block = BasicBlock([Instr('LOAD_CONST', arg=None), Instr('RETURN_VALUE')])
    expected_instructions = []
    expected_instructions.extend(module_block)
    expected_instructions.extend(class_attr_block)
    module = 'tests.fixtures.slicer.partial_cover_dependency'
    sliced_instructions = slice_module_at_return(module)
    # Length check first gives a cheaper failure signal than the deep compare.
    assert (len(sliced_instructions) == len(expected_instructions))
    assert compare(sliced_instructions, expected_instructions)
class InputFeatures_eval(object):
    """Container for one evaluation example's encoded features and labels."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id, label_disf_id, label_sing_id):
        """Store token ids, attention mask, segment ids and the three labels."""
        (self.input_ids, self.input_mask, self.segment_ids) = (input_ids, input_mask, segment_ids)
        (self.label_id, self.label_disf_id, self.label_sing_id) = (label_id, label_disf_id, label_sing_id)
def decode_pose(root_score, root_id, root_image_coord, scores, offsets, output_stride, displacements_fwd, displacements_bwd):
    """Greedily decode one pose instance starting from a root keypoint.

    Seeds the root keypoint, then walks the part tree backwards (child to
    parent) and forwards (parent to child) along PARENT_CHILD_TUPLES,
    filling in every keypoint not yet scored.

    Returns (keypoint_scores, keypoint_coords) as numpy arrays of shape
    (num_parts,) and (num_parts, 2).
    """
    num_parts = scores.shape[2]
    num_edges = len(PARENT_CHILD_TUPLES)
    kp_scores = np.zeros(num_parts)
    kp_coords = np.zeros((num_parts, 2))
    # Seed the instance with the detected root keypoint.
    kp_scores[root_id] = root_score
    kp_coords[root_id] = root_image_coord
    # Backward pass: edges are traversed child -> parent, so the tuple is
    # deliberately unpacked as (target, source) here.
    for edge_id in reversed(range(num_edges)):
        target_id, source_id = PARENT_CHILD_TUPLES[edge_id]
        if kp_scores[source_id] > 0.0 and kp_scores[target_id] == 0.0:
            score, coord = traverse_to_targ_keypoint(edge_id, kp_coords[source_id], target_id, scores, offsets, output_stride, displacements_bwd)
            kp_scores[target_id] = score
            kp_coords[target_id] = coord
    # Forward pass: edges are traversed parent -> child.
    for edge_id in range(num_edges):
        source_id, target_id = PARENT_CHILD_TUPLES[edge_id]
        if kp_scores[source_id] > 0.0 and kp_scores[target_id] == 0.0:
            score, coord = traverse_to_targ_keypoint(edge_id, kp_coords[source_id], target_id, scores, offsets, output_stride, displacements_fwd)
            kp_scores[target_id] = score
            kp_coords[target_id] = coord
    return (kp_scores, kp_coords)
def xla_available() -> bool:
    """Return True when the ``torch_xla`` package can be located."""
    try:
        spec = find_spec('torch_xla')
    except ModuleNotFoundError:
        # find_spec raises when a parent package of the dotted name is absent.
        return False
    return spec is not None
class Extractor(ModelBase):
    """Model that builds a token-level hidden representation by embedding
    and concatenating whichever feature groups the config attached.

    Which sub-modules exist (``ohots``, ``mhots``, ``nested_ohots``,
    pretrained encoders, ``intermediate1/2``) is decided by the config;
    this class only probes for their presence with ``hasattr``.
    """

    def __init__(self, config: ExtractorConfig):
        super().__init__(config)

    def _get_full_embedded(self, batch: Batch):
        """Embed every available one-hot / multi-hot / nested feature group
        and concatenate the results along the last dimension.
        """
        embedded = []
        if hasattr(self, 'ohots'):
            ohots_embedded = [self.ohots[f](batch.ohots[f]) for f in self.ohots]
            embedded.extend(ohots_embedded)
        if hasattr(self, 'mhots'):
            mhots_embedded = [self.mhots[f](batch.mhots[f]) for f in self.mhots]
            embedded.extend(mhots_embedded)
        if hasattr(self, 'nested_ohots'):
            # Nested fields additionally need sequence lengths for pooling.
            nested_ohots_embedded = [self.nested_ohots[f](**batch.nested_ohots[f], seq_lens=batch.seq_lens) for f in self.nested_ohots]
            embedded.extend(nested_ohots_embedded)
        return torch.cat(embedded, dim=(- 1))

    def _get_full_hidden(self, batch: Batch):
        """Combine embedded features and pretrained encoder outputs into one
        hidden tensor, optionally passing through intermediate layers.
        """
        full_hidden = []
        if any([hasattr(self, name) for name in ExtractorConfig._embedder_names]):
            embedded = self._get_full_embedded(batch)
            if hasattr(self, 'intermediate1'):
                full_hidden.append(self.intermediate1(embedded, batch.mask))
            else:
                full_hidden.append(embedded)
        # Each pretrained encoder consumes its matching batch sub-structure.
        for name in ExtractorConfig._pretrained_names:
            if hasattr(self, name):
                full_hidden.append(getattr(self, name)(**getattr(batch, name)))
        full_hidden = torch.cat(full_hidden, dim=(- 1))
        if hasattr(self, 'intermediate2'):
            return self.intermediate2(full_hidden, batch.mask)
        else:
            return full_hidden

    def pretrained_parameters(self):
        """Collect the parameters of all attached pretrained language models
        (for e.g. a separate learning rate / freezing policy).
        """
        params = []
        if hasattr(self, 'elmo'):
            params.extend(self.elmo.elmo._elmo_lstm.parameters())
        if hasattr(self, 'bert_like'):
            params.extend(self.bert_like.bert_like.parameters())
        if hasattr(self, 'flair_fw'):
            params.extend(self.flair_fw.flair_lm.parameters())
        if hasattr(self, 'flair_bw'):
            params.extend(self.flair_bw.flair_lm.parameters())
        return params

    def forward2states(self, batch: Batch):
        """Return the model states dict expected by downstream decoders."""
        return {'full_hidden': self._get_full_hidden(batch)}
class MBv3LatencyTable(LatencyTable):
    """Latency lookup table for MobileNetV3-style networks.

    Layer latencies are read from ``self.lut`` (loaded by the base class),
    keyed by a string encoding of layer type, shapes and block options.
    """

    def query(self, l_type: str, input_shape, output_shape, mid=None, ks=None, stride=None, id_skip=None, se=None, h_swish=None):
        """Look up the mean latency of one layer.

        For 'expanded_conv' blocks all block options (expansion width,
        kernel size, stride, identity skip, SE, h-swish) must be given,
        as they are part of the lookup key.
        """
        infos = [l_type, ('input:%s' % self.repr_shape(input_shape)), ('output:%s' % self.repr_shape(output_shape))]
        if (l_type in ('expanded_conv',)):
            assert (None not in (mid, ks, stride, id_skip, se, h_swish))
            infos += [('expand:%d' % mid), ('kernel:%d' % ks), ('stride:%d' % stride), ('idskip:%d' % id_skip), ('se:%d' % se), ('hs:%d' % h_swish)]
        key = '-'.join(infos)
        return self.lut[key]['mean']

    def predict_network_latency(self, net, image_size=224):
        """Sum the table latencies of every layer in a network *object*."""
        predicted_latency = 0
        # Stem conv halves the spatial resolution (stride 2).
        predicted_latency += self.query('Conv', [image_size, image_size, 3], [((image_size + 1) // 2), ((image_size + 1) // 2), net.first_conv.out_channels])
        fsize = ((image_size + 1) // 2)
        for block in net.blocks:
            mb_conv = block.conv
            shortcut = block.shortcut
            if (mb_conv is None):
                continue
            if (shortcut is None):
                idskip = 0
            else:
                idskip = 1
            # Output feature size after this block's stride.
            out_fz = int((((fsize - 1) / mb_conv.stride) + 1))
            block_latency = self.query('expanded_conv', [fsize, fsize, mb_conv.in_channels], [out_fz, out_fz, mb_conv.out_channels], mid=mb_conv.depth_conv.conv.in_channels, ks=mb_conv.kernel_size, stride=mb_conv.stride, id_skip=idskip, se=(1 if mb_conv.use_se else 0), h_swish=(1 if (mb_conv.act_func == 'h_swish') else 0))
            predicted_latency += block_latency
            fsize = out_fz
        # Head: final expand conv, global pooling, feature mix, classifier.
        predicted_latency += self.query('Conv_1', [fsize, fsize, net.final_expand_layer.in_channels], [fsize, fsize, net.final_expand_layer.out_channels])
        predicted_latency += self.query('AvgPool2D', [fsize, fsize, net.final_expand_layer.out_channels], [1, 1, net.final_expand_layer.out_channels])
        predicted_latency += self.query('Conv_2', [1, 1, net.feature_mix_layer.in_channels], [1, 1, net.feature_mix_layer.out_channels])
        predicted_latency += self.query('Logits', [1, 1, net.classifier.in_features], [net.classifier.out_features])
        return predicted_latency

    def predict_network_latency_given_config(self, net_config, image_size=224):
        """Same as predict_network_latency, but from a config *dict*."""
        predicted_latency = 0
        predicted_latency += self.query('Conv', [image_size, image_size, 3], [((image_size + 1) // 2), ((image_size + 1) // 2), net_config['first_conv']['out_channels']])
        fsize = ((image_size + 1) // 2)
        for block in net_config['blocks']:
            # Older configs use the key 'mobile_inverted_conv', newer 'conv'.
            mb_conv = (block['mobile_inverted_conv'] if ('mobile_inverted_conv' in block) else block['conv'])
            shortcut = block['shortcut']
            if (mb_conv is None):
                continue
            if (shortcut is None):
                idskip = 0
            else:
                idskip = 1
            out_fz = int((((fsize - 1) / mb_conv['stride']) + 1))
            if (mb_conv['mid_channels'] is None):
                # NOTE(review): fills the config in place as a side effect.
                mb_conv['mid_channels'] = round((mb_conv['in_channels'] * mb_conv['expand_ratio']))
            block_latency = self.query('expanded_conv', [fsize, fsize, mb_conv['in_channels']], [out_fz, out_fz, mb_conv['out_channels']], mid=mb_conv['mid_channels'], ks=mb_conv['kernel_size'], stride=mb_conv['stride'], id_skip=idskip, se=(1 if mb_conv['use_se'] else 0), h_swish=(1 if (mb_conv['act_func'] == 'h_swish') else 0))
            predicted_latency += block_latency
            fsize = out_fz
        predicted_latency += self.query('Conv_1', [fsize, fsize, net_config['final_expand_layer']['in_channels']], [fsize, fsize, net_config['final_expand_layer']['out_channels']])
        predicted_latency += self.query('AvgPool2D', [fsize, fsize, net_config['final_expand_layer']['out_channels']], [1, 1, net_config['final_expand_layer']['out_channels']])
        predicted_latency += self.query('Conv_2', [1, 1, net_config['feature_mix_layer']['in_channels']], [1, 1, net_config['feature_mix_layer']['out_channels']])
        predicted_latency += self.query('Logits', [1, 1, net_config['classifier']['in_features']], [net_config['classifier']['out_features']])
        return predicted_latency

    # NOTE(review): this def takes no `self` — presumably a module-level
    # function (or a missing @staticmethod) fused into the class by
    # formatting; calling it as an instance method would fail. Confirm
    # against the original module layout.
    def count_flops_given_config(net_config, image_size=224):
        """Count MFLOPs of a network described by a config dict."""
        flops = 0
        flops += count_conv_flop(((image_size + 1) // 2), 3, net_config['first_conv']['out_channels'], 3, 1)
        fsize = ((image_size + 1) // 2)
        for block in net_config['blocks']:
            mb_conv = (block['mobile_inverted_conv'] if ('mobile_inverted_conv' in block) else block['conv'])
            if (mb_conv is None):
                continue
            out_fz = int((((fsize - 1) / mb_conv['stride']) + 1))
            if (mb_conv['mid_channels'] is None):
                mb_conv['mid_channels'] = round((mb_conv['in_channels'] * mb_conv['expand_ratio']))
            if (mb_conv['expand_ratio'] != 1):
                # Pointwise expansion conv.
                flops += count_conv_flop(fsize, mb_conv['in_channels'], mb_conv['mid_channels'], 1, 1)
            # Depthwise conv (groups == mid_channels).
            flops += count_conv_flop(out_fz, mb_conv['mid_channels'], mb_conv['mid_channels'], mb_conv['kernel_size'], mb_conv['mid_channels'])
            if mb_conv['use_se']:
                # Squeeze-and-excite: two 1x1 convs on the pooled feature.
                se_mid = make_divisible((mb_conv['mid_channels'] // 4), divisor=MyNetwork.CHANNEL_DIVISIBLE)
                flops += count_conv_flop(1, mb_conv['mid_channels'], se_mid, 1, 1)
                flops += count_conv_flop(1, se_mid, mb_conv['mid_channels'], 1, 1)
            # Pointwise projection conv.
            flops += count_conv_flop(out_fz, mb_conv['mid_channels'], mb_conv['out_channels'], 1, 1)
            fsize = out_fz
        flops += count_conv_flop(fsize, net_config['final_expand_layer']['in_channels'], net_config['final_expand_layer']['out_channels'], 1, 1)
        flops += count_conv_flop(1, net_config['feature_mix_layer']['in_channels'], net_config['feature_mix_layer']['out_channels'], 1, 1)
        flops += count_conv_flop(1, net_config['classifier']['in_features'], net_config['classifier']['out_features'], 1, 1)
        # Convert raw multiply-accumulate count to MFLOPs.
        return (flops / 1000000.0)
class AutoUpliftTX(BaseAutoUplift):
    """Automatic uplift modelling over T-/X-Learner metalearners.

    Trains candidate base learners for each metalearner *stage*, evaluates
    every consistent combination of trained stage learners on a holdout
    split, and keeps the best metalearner by the configured uplift metric.
    """

    # Metalearner name -> ordered list of its training stages. Level-2
    # stages (effect_*) declare the level-1 stage they depend on via
    # `prev_stage`.
    __MAP_META_TO_STAGES__: Dict[(str, List[MetaLearnerStage])] = {'TLearner': [MetaLearnerStage(name='outcome_control'), MetaLearnerStage(name='outcome_treatment')], 'XLearner': [MetaLearnerStage(name='outcome_control'), MetaLearnerStage(name='outcome_treatment'), MetaLearnerStage(name='propensity', params={'task': Task('binary')}), MetaLearnerStage(name='effect_control', params={'task': Task('reg')}, prev_stage=MetaLearnerStage(name='outcome_treatment')), MetaLearnerStage(name='effect_treatment', params={'task': Task('reg')}, prev_stage=MetaLearnerStage(name='outcome_control'))]}

    # NOTE(review): `metalearners=[]` is a mutable default argument; it is
    # never mutated here, but binding it per-call would be safer.
    def __init__(self, base_task: Task, baselearners: Optional[Union[(List[BaseLearnerWrapper], Dict[(MLStageFullName, List[BaseLearnerWrapper])])]]=None, metalearners: List[str]=[], metric: Union[(str, TUpliftMetric, Callable)]='adj_qini', increasing_metric: bool=True, test_size: float=0.2, timeout: Optional[int]=None, timeout_single_learner: Optional[int]=None, cpu_limit: int=4, gpu_ids: Optional[str]='all', random_state: int=42):
        """Configure the search: candidate base learners (global list or
        per-stage dict), which metalearners to try (default: all),
        metric, holdout size, time budget and hardware limits."""
        assert all(((ml in self.__MAP_META_TO_STAGES__) for ml in metalearners)), 'Currently available for {}.'.format(self.__MAP_META_TO_STAGES__)
        super().__init__(base_task, metric, True, increasing_metric, test_size, timeout, None, timeout_single_learner, cpu_limit, gpu_ids, random_state)
        self.baselearners = baselearners
        # Empty list means "use every known metalearner".
        self.metalearners = (metalearners if (len(metalearners) > 0) else list(self.__MAP_META_TO_STAGES__))
        self._best_metalearner: MetaLearner
        self._best_metalearner_wrap: MetaLearnerWrapper
        # Stage full name -> trained candidates for that stage.
        self._trained_stage_baselearners: Dict[(MLStageFullName, List[TrainedStageBaseLearner])] = defaultdict(list)
        # (metalearner, stage/baselearner combination) -> metric value.
        self._metalearner_metrics: Dict[(TrainedMetaLearnerFullName, float)] = {}
        # How many level-2 candidates to interleave per level-1 round.
        self._n_run_l2 = 3

    def fit(self, data: DataFrame, roles: dict, verbose: int=0):
        """Train stage candidates under the time budget, score every
        consistent metalearner combination, and select the best one."""
        (train_data, test_data, test_treatment, test_target) = self._prepare_data(data, roles)
        self._timer.start()
        for stage_info in self._generate_stage_baselearner_candidates():
            self._evaluate(stage_info, train_data, test_data, roles, verbose)
            if self._timer.time_limit_exceeded():
                logger.warning("Time of training exceeds 'timeout': {} > {}.".format(self._timer.time_spent, self.timeout))
                break
        # NOTE(review): arguments are passed as (treatment, target) but the
        # callee's signature reads (test_target, test_treatment) — the
        # names end up swapped inside. Verify against the base class's
        # calculate_metric(target, pred, treatment) ordering.
        self._calculate_metalearners_metrics(test_treatment, test_target)
        self._set_best_metalearner()

    def predict(self, data: DataFrame) -> Tuple[(np.ndarray, ...)]:
        """Predict uplift with the best metalearner found by fit()."""
        assert (self._best_metalearner is not None), "First call 'self.fit(...)', to choose best metalearner."
        return self._best_metalearner.predict(data)

    def create_best_metalearner(self, need_report: bool=True, update_metalearner_params: Dict[(str, Any)]={}, update_baselearner_params: Dict[(str, Any)]={}) -> Union[(MetaLearner, ReportDecoUplift)]:
        """Build a fresh (untrained) copy of the best metalearner, with
        optional parameter overrides and an optional report wrapper."""
        assert (len(self._trained_stage_baselearners) > 0), "First call 'self.fit(...), to choose best metalearner."
        ml_wrap = deepcopy(self._best_metalearner_wrap)
        if (len(update_metalearner_params) > 0):
            ml_wrap.update_params(update_metalearner_params)
        if (len(update_baselearner_params) > 0):
            ml_wrap.update_baselearner_params(update_baselearner_params)
        best_metalearner_raw = ml_wrap()
        if need_report:
            if isinstance(self.metric, str):
                rdu = ReportDecoUplift()
                best_metalearner_raw = rdu(best_metalearner_raw)
            else:
                logger.warning("Report doesn't work with custom metric, return just best_metalearner.")
        return best_metalearner_raw

    # NOTE(review): "ranting" is presumably a typo for "rating", but the
    # name is public API and must not be changed here.
    def get_metalearners_ranting(self) -> DataFrame:
        """Return a DataFrame ranking all evaluated metalearner combos."""
        (metalearner_names, params, metrics) = ([], [], [])
        for (ml_name, metric) in self._metalearner_metrics.items():
            metalearner_names.append(ml_name[0])
            params.append(ml_name[1])
            metrics.append(metric)
        rating_table = DataFrame({'MetaLearner': metalearner_names, 'Parameters': params, 'Metrics': metrics})
        rating_table['Rank'] = rating_table['Metrics'].rank(method='first', ascending=(not self.increasing_metric))
        rating_table.sort_values('Rank', inplace=True)
        rating_table.reset_index(drop=True, inplace=True)
        return rating_table

    def _generate_stage_baselearner_candidates(self) -> Generator[(Tuple[(Tuple[(MetaLearnerStage, BaseLearnerWrapper)], ...)], None, None)]:
        """Yield (stage, baselearner) tuples to train, interleaving level-1
        candidates with a bounded random sample of level-2 candidates whose
        level-1 prerequisite is already trained; drains remaining level-2
        candidates at the end."""
        stage_baselearners = self._set_stage_baselearners()
        stage_by_levels = defaultdict(list)
        for stage in stage_baselearners:
            # Stage full names are tuples; their length encodes the level.
            stage_by_levels[len(stage)].append(stage)
        pool_iter_levels = defaultdict(list)
        bls_level_1 = zip_longest(*(deepcopy(bls) for (stage, bls) in stage_baselearners.items() if (stage in stage_by_levels[1])))
        first_run = True
        n_runs = 0
        for bls in bls_level_1:
            for (stage_name_l1, bl_l1) in zip(stage_by_levels[1], bls):
                stage_l1 = self._extract_stage(stage_name_l1)
                # Register level-2 stages unlocked by this level-1 learner.
                for stage_name_l2 in stage_by_levels[2]:
                    if (stage_name_l2[0:1] == stage_name_l1):
                        pool_iter_levels[2].append((stage_name_l2, bl_l1, iter(deepcopy(stage_baselearners[stage_name_l2]))))
                n_runs += 1
                (yield ((stage_l1, bl_l1),))
            if (not first_run):
                n_stage_iters = len(pool_iter_levels[2])
                if (n_stage_iters == 0):
                    continue
                # Sample up to _n_run_l2 level-2 candidates this round.
                for _ in range(min(n_stage_iters, self._n_run_l2)):
                    n_stage_iters = len(pool_iter_levels[2])
                    idx = np.random.randint(0, n_stage_iters, 1)[0]
                    try:
                        (stage_name_l2, bl_l1, bls_iter) = pool_iter_levels[2][idx]
                        bl_l2 = next(bls_iter)
                        stage_l1 = self._extract_stage(stage_name_l2[0:1])
                        stage_l2 = self._extract_stage(stage_name_l2)
                        n_runs += 1
                        (yield ((stage_l1, bl_l1), (stage_l2, bl_l2)))
                    except StopIteration:
                        pool_iter_levels[2].pop(idx)
            if (n_runs >= len(stage_by_levels[1])):
                first_run = False
        # Drain whatever level-2 candidates remain.
        while (len(pool_iter_levels[2]) > 0):
            n_stage_iters = len(pool_iter_levels[2])
            try:
                idx = np.random.randint(0, n_stage_iters, 1)[0]
                (stage_name_l2, bl_l1, bls_iter) = pool_iter_levels[2][idx]
                bl_l2 = next(bls_iter)
                stage_l1 = self._extract_stage(stage_name_l2[0:1])
                stage_l2 = self._extract_stage(stage_name_l2)
                (yield ((stage_l1, bl_l1), (stage_l2, bl_l2)))
            except StopIteration:
                pool_iter_levels[2].pop(idx)

    def _extract_stages(self) -> Generator[(Tuple[(str, MetaLearnerStage)], None, None)]:
        """Yield every (metalearner name, stage) pair known to the class."""
        for (ml_name, ml_stages) in self.__MAP_META_TO_STAGES__.items():
            for stage in ml_stages:
                (yield (ml_name, stage))

    def _extract_stage(self, full_name: MLStageFullName) -> MetaLearnerStage:
        """Find a stage by its full name; raise if unknown."""
        for (ml_name, stage) in self._extract_stages():
            if (stage.full_name() == full_name):
                return stage
        raise Exception("Can't find stage {}".format(full_name))

    def _set_stage_baselearners(self) -> Dict[(MLStageFullName, List[BaseLearnerWrapper])]:
        """Resolve the per-stage candidate base learners from user input,
        filling unspecified stages with defaults filtered by task."""
        stage_baselearners = {}
        if isinstance(self.baselearners, dict):
            stage_baselearners = {k: deepcopy(self._validate_baselearners(v)) for (k, v) in self.baselearners.items()}
        all_stages_full_names = set((stage.full_name() for (ml, ml_stages) in self.__MAP_META_TO_STAGES__.items() for stage in ml_stages))
        if (len(stage_baselearners) != len(all_stages_full_names)):
            timeout = self._calculate_single_bl_timeout((len(stage_baselearners) > 0))
            if (isinstance(self.baselearners, list) and (len(self.baselearners) > 0)):
                baselearners = deepcopy(self._validate_baselearners(self.baselearners))
            elif ((self.baselearners is None) or isinstance(self.baselearners, dict)):
                baselearners = deepcopy(self.__default_learners(tab_params={'timeout': timeout}))
            remain_stages_full_names = (all_stages_full_names - set(stage_baselearners))
            # Split candidates by the task they were declared for.
            bin_baselearners: List[BaseLearnerWrapper] = []
            reg_baselearners: List[BaseLearnerWrapper] = []
            raw_baselearners: List[BaseLearnerWrapper] = []
            for bl in baselearners:
                if ('task' in bl.params):
                    if (bl.params['task'].name == 'binary'):
                        bin_baselearners.append(bl)
                    if (bl.params['task'].name == 'reg'):
                        reg_baselearners.append(bl)
                else:
                    raw_baselearners.append(bl)
            for full_name in remain_stages_full_names:
                stage = self._extract_stage(full_name)
                # A stage without an explicit task inherits the base task.
                stage_task = (stage.params['task'] if ('task' in stage.params) else self.base_task)
                filled_baselearners = deepcopy(raw_baselearners)
                for idx in range(len(filled_baselearners)):
                    filled_baselearners[idx].params['task'] = stage_task
                baselearners_on_stage = filled_baselearners
                if (stage_task.name == 'binary'):
                    baselearners_on_stage.extend(bin_baselearners)
                elif (stage_task.name == 'reg'):
                    baselearners_on_stage.extend(reg_baselearners)
                stage_baselearners[full_name] = baselearners_on_stage
        return stage_baselearners

    def _calculate_single_bl_timeout(self, specify_stages: bool) -> Optional[int]:
        """Derive a per-learner timeout from the global budget.

        The divisor weights metalearners by their stage counts
        (2 for TLearner, 5 for XLearner)."""
        timeout: Optional[int] = None
        if (not specify_stages):
            if (self.timeout is not None):
                timeout = int((self.timeout / ((2 * ('TLearner' in self.metalearners)) + (5 * ('XLearner' in self.metalearners)))))
            elif (self.timeout_single_learner is not None):
                timeout = self.timeout_single_learner
        else:
            timeout = self.timeout_single_learner
        return timeout

    def _validate_baselearners(self, baselearners: List[BaseLearnerWrapper]):
        """Ensure base learner names are unique, renaming duplicates with a
        positional suffix."""
        k2n: Dict[(str, List[int])] = {}
        for (idx, bl) in enumerate(baselearners):
            bl_name = bl.name
            k2n.setdefault(bl_name, [])
            k2n[bl_name].append(idx)
        is_name_duplicates = any(((len(idxs) > 1) for (bl_name, idxs) in k2n.items()))
        if is_name_duplicates:
            logger.warning('Naming of baselearner should be unique.')
            logger.warning("Name of baselearner would be updated with postfix '__order_idx_bl__'.")
            renaming_by_idxs: Dict[(int, str)] = {}
            for (bl_name, idxs) in k2n.items():
                if (len(idxs) > 1):
                    for idx in idxs:
                        renaming_by_idxs[idx] = '{}__#{}__'.format(bl_name, idx)
            baselearners_t = []
            for (idx, bl) in enumerate(baselearners):
                if (idx in renaming_by_idxs):
                    bl.name = renaming_by_idxs[idx]
                baselearners_t.append(bl)
            return baselearners_t
        else:
            return baselearners

    def _evaluate(self, stage_info: Tuple[(Tuple[(MetaLearnerStage, BaseLearnerWrapper)], ...)], train: DataFrame, test: DataFrame, roles: dict, verbose: int=0):
        """Train one stage candidate and store it with its holdout
        predictions for later metalearner scoring."""
        (train_data, train_roles) = self._prepare_data_for_stage(stage_info, train, roles)
        (ml_stage, bl_wrap) = stage_info[(- 1)]
        (prev_stage, prev_bl_wrap) = (None, None)
        if (len(stage_info) == 2):
            (prev_stage, prev_bl_wrap) = stage_info[0]
        bl = bl_wrap()
        bl.fit_predict(train_data, train_roles, verbose)
        test_pred = bl.predict(test).data.ravel()
        tsbl = TrainedStageBaseLearner(stage_bl=bl_wrap, prev_stage_bl=prev_bl_wrap, trained_model=bl, pred=test_pred)
        self._trained_stage_baselearners[ml_stage.full_name()].append(tsbl)

    def _prepare_data_for_stage(self, stage_info: Tuple[(Tuple[(MetaLearnerStage, BaseLearnerWrapper)], ...)], train: DataFrame, roles: dict) -> Tuple[(DataFrame, Dict)]:
        """Build the stage-specific training frame and roles.

        Level-1 stages retarget/filter the raw data; level-2 (effect)
        stages replace the target with the residual against the opposite
        group's level-1 predictions."""
        (treatment_role, treatment_col) = uplift_utils._get_treatment_role(roles)
        (target_role, target_col) = uplift_utils._get_target_role(roles)
        stage_name = stage_info[(- 1)][0].name
        if (len(stage_info) == 1):
            if (stage_name == 'propensity'):
                # Predict treatment assignment from features only.
                train_roles = deepcopy(roles)
                train_roles.pop(treatment_role)
                train_roles.pop(target_role)
                train_roles['target'] = treatment_col
                train_data = train.drop(target_col, axis=1)
            elif (stage_name == 'outcome_control'):
                train_roles = deepcopy(roles)
                train_roles.pop(treatment_role)
                train_data = train[(train[treatment_col] == 0)].drop(treatment_col, axis=1)
            elif (stage_name == 'outcome_treatment'):
                train_roles = deepcopy(roles)
                train_roles.pop(treatment_role)
                train_data = train[(train[treatment_col] == 1)].drop(treatment_col, axis=1)
            else:
                raise Exception('Wrong l1 stage name')
        elif (len(stage_info) == 2):
            train_roles = deepcopy(roles)
            train_roles.pop(treatment_role)
            (prev_ml_stage, prev_bl_wrap) = stage_info[0]
            prev_stage_bl = [bl.trained_model for bl in self._trained_stage_baselearners[prev_ml_stage.full_name()] if (bl.stage_bl.name == prev_bl_wrap.name)][0]
            if (stage_name == 'effect_control'):
                train_data = train[(train[treatment_col] == 0)].drop(treatment_col, axis=1)
                opposite_gr_pred = prev_stage_bl.predict(train_data).data.ravel()
                train_data[target_col] = (opposite_gr_pred - train_data[target_col])
            elif (stage_name == 'effect_treatment'):
                train_data = train[(train[treatment_col] == 1)].drop(treatment_col, axis=1)
                opposite_gr_pred = prev_stage_bl.predict(train_data).data.ravel()
                train_data[target_col] = (train_data[target_col] - opposite_gr_pred)
            else:
                raise Exception('Wrong l2 stage name')
        return (train_data, train_roles)

    # NOTE(review): parameter order here is (test_target, test_treatment)
    # while fit() passes (test_treatment, test_target) — see the note in
    # fit(); confirm which ordering calculate_metric expects.
    def _calculate_metalearners_metrics(self, test_target: np.ndarray, test_treatment: np.ndarray):
        """Score every buildable metalearner combination on the holdout."""
        for set_ml_stage_bls in self._bl_for_ml():
            for (ml_name, stage_bls) in set_ml_stage_bls.items():
                sbls = tuple(sorted([(stage_name, bl.stage_bl.name) for (stage_name, bl) in stage_bls.items()]))
                trained_ml_full_name = (ml_name, sbls)
                uplift_pred = self._metalearner_predict(ml_name, stage_bls)
                metric_value = self.calculate_metric(test_target, uplift_pred, test_treatment)
                self._metalearner_metrics[trained_ml_full_name] = metric_value

    def _bl_for_ml(self) -> Generator[(Dict[(str, Dict[(MLStageFullName, TrainedStageBaseLearner)])], None, None)]:
        """Yield, for each cross-product of trained stage learners, the
        metalearners that can be assembled from it (level-2 learners must
        match their level-1 prerequisite)."""
        ready_metalearners = []
        for (ml_name, ml_stages) in self.__MAP_META_TO_STAGES__.items():
            ready_metalearners.append(all(((s.full_name() in self._trained_stage_baselearners) for s in ml_stages)))
        if (not any(ready_metalearners)):
            raise Exception('No one metalearner can predict.')
        stage_baselearners = {}
        for (stage_fullname, bls) in self._trained_stage_baselearners.items():
            stage_baselearners[stage_fullname] = bls
        stage_names = list(stage_baselearners.keys())
        bls_prd = product(*(bls for (_, bls) in stage_baselearners.items()))
        for bl in bls_prd:
            set_bls = dict(zip(stage_names, bl))
            set_ml_with_sbls = {}
            for (ml_name, ml_stages) in self.__MAP_META_TO_STAGES__.items():
                ml_bls = {}
                for ml_stage in ml_stages:
                    ml_stage_full_name = ml_stage.full_name()
                    if (ml_stage_full_name in set_bls):
                        trained_sbl = set_bls[ml_stage_full_name]
                        if (trained_sbl.prev_stage_bl is None):
                            ml_bls[ml_stage_full_name] = trained_sbl
                        else:
                            if (not (ml_stage_full_name[0:1] in set_bls)):
                                continue
                            # Keep only level-2 learners trained on top of
                            # the level-1 learner present in this combo.
                            if (trained_sbl.prev_stage_bl.name == set_bls[ml_stage_full_name[0:1]].stage_bl.name):
                                ml_bls[ml_stage_full_name] = set_bls[ml_stage_full_name]
                if (len(ml_bls) == len(ml_stages)):
                    set_ml_with_sbls[ml_name] = ml_bls
            (yield set_ml_with_sbls)

    def _metalearner_predict(self, metalearner: str, baselearners: Dict[(MLStageFullName, TrainedStageBaseLearner)]) -> np.ndarray:
        """Combine stage predictions into an uplift estimate for one combo."""
        if (metalearner == 'TLearner'):
            control_pred = baselearners[('outcome_control',)].pred
            treatment_pred = baselearners[('outcome_treatment',)].pred
            uplift_pred = (treatment_pred - control_pred)
        elif (metalearner == 'XLearner'):
            control_pred = baselearners[('outcome_treatment', 'effect_control')].pred
            treatment_pred = baselearners[('outcome_control', 'effect_treatment')].pred
            propensity_pred = baselearners[('propensity',)].pred
            # Propensity-weighted blend of the two effect estimates.
            uplift_pred = ((propensity_pred * treatment_pred) + ((1 - propensity_pred) * control_pred))
        else:
            raise Exception()
        return uplift_pred.ravel()

    def _set_best_metalearner(self):
        """Pick the best-scoring combination and build the final model and
        its re-creatable wrapper."""
        best_metric_value = None
        best_candidate = None
        for (k, v) in self._metalearner_metrics.items():
            if (best_metric_value is None):
                best_candidate = k
                best_metric_value = v
            elif ((self.increasing_metric and (best_metric_value < v)) or ((not self.increasing_metric) and (best_metric_value > v))):
                best_candidate = k
                best_metric_value = v
        (ml_name, stages_params) = best_candidate
        stages_params = dict(stages_params)
        self._best_metalearner = self._init_metalearner(ml_name, stages_params)
        self._best_metalearner_wrap = self._create_metalearner_wrap(ml_name, stages_params)

    def _init_metalearner(self, metalearner_name: str, bls: Dict[(MLStageFullName, str)]) -> MetaLearner:
        """Assemble a metalearner from the already-trained stage models."""
        ml: Optional[MetaLearner] = None
        if (metalearner_name == 'TLearner'):
            ocl = self._get_trained_bl(('outcome_control',), bls[('outcome_control',)]).trained_model
            otl = self._get_trained_bl(('outcome_treatment',), bls[('outcome_treatment',)]).trained_model
            ml = TLearner(control_learner=ocl, treatment_learner=otl)
        elif (metalearner_name == 'XLearner'):
            ocl = self._get_trained_bl(('outcome_control',), bls[('outcome_control',)]).trained_model
            otl = self._get_trained_bl(('outcome_treatment',), bls[('outcome_treatment',)]).trained_model
            pl = self._get_trained_bl(('propensity',), bls[('propensity',)]).trained_model
            ecl = self._get_trained_bl(('outcome_treatment', 'effect_control'), bls[('outcome_treatment', 'effect_control')]).trained_model
            etl = self._get_trained_bl(('outcome_control', 'effect_treatment'), bls[('outcome_control', 'effect_treatment')]).trained_model
            ml = XLearner(outcome_learners=[ocl, otl], effect_learners=[ecl, etl], propensity_learner=pl)
        else:
            raise Exception()
        return ml

    def _get_trained_bl(self, metalearner_stage: MLStageFullName, baselearner_name: str):
        """Return the trained stage learner with the given name."""
        for bl in self._trained_stage_baselearners[metalearner_stage]:
            if (bl.stage_bl.name == baselearner_name):
                return bl
        raise Exception("There isn't baselearner {}".format(baselearner_name))

    def _create_metalearner_wrap(self, metalearner_name: str, bls: Dict[(MLStageFullName, str)]) -> MetaLearnerWrapper:
        """Build a wrapper able to re-create the chosen metalearner from
        scratch (untrained stage wrappers)."""
        ml_wrap_name = '__ML__{ML}'.format(ML=metalearner_name)
        ml_wrap: Optional[MetaLearnerWrapper] = None
        if (metalearner_name == 'TLearner'):
            # NOTE(review): stage is passed as the bare string
            # 'outcome_control' here, unlike the 1-tuple used everywhere
            # else — confirm whether _get_trained_bl tolerates this key.
            ocl = self._get_trained_bl('outcome_control', bls[('outcome_control',)]).stage_bl
            otl = self._get_trained_bl(('outcome_treatment',), bls[('outcome_treatment',)]).stage_bl
            ml_wrap = MetaLearnerWrapper(name=ml_wrap_name, klass=TLearner, params={'control_learner': ocl, 'treatment_learner': otl})
        elif (metalearner_name == 'XLearner'):
            ocl = self._get_trained_bl(('outcome_control',), bls[('outcome_control',)]).stage_bl
            otl = self._get_trained_bl(('outcome_treatment',), bls[('outcome_treatment',)]).stage_bl
            pl = self._get_trained_bl(('propensity',), bls[('propensity',)]).stage_bl
            ecl = self._get_trained_bl(('outcome_treatment', 'effect_control'), bls[('outcome_treatment', 'effect_control')]).stage_bl
            etl = self._get_trained_bl(('outcome_control', 'effect_treatment'), bls[('outcome_control', 'effect_treatment')]).stage_bl
            ml_wrap = MetaLearnerWrapper(name=ml_wrap_name, klass=XLearner, params={'outcome_learners': [ocl, otl], 'effect_learners': [ecl, etl], 'propensity_learner': pl})
        else:
            raise Exception()
        return ml_wrap

    def __default_learners(self, lin_params: Optional[Dict[(str, Any)]]=None, tab_params: Optional[Dict[(str, Any)]]=None) -> List[BaseLearnerWrapper]:
        """Default candidate pool: a linear AutoML and a tabular AutoML."""
        default_lin_params = {'cpu_limit': self.cpu_limit}
        default_tab_params = {'timeout': None, 'cpu_limit': self.cpu_limit, 'gpu_ids': self.gpu_ids}
        return [BaseLearnerWrapper(name='__Linear__', klass=uplift_utils.create_linear_automl, params=(default_lin_params if (lin_params is None) else lin_params)), BaseLearnerWrapper(name='__Tabular__', klass=TabularAutoML, params=(default_tab_params if (tab_params is None) else tab_params))]
class AutoContrast(DauphinTransform):
    """Augmentation that applies PIL auto-contrast to the image.

    The ``level`` argument is accepted for interface uniformity with the
    other transforms; autocontrast itself takes no magnitude here.
    """

    def __init__(self, name=None, prob=1.0, level=0):
        super().__init__(name, prob, level)

    def transform(self, pil_img, label, **kwargs):
        # Stretch the image histogram; the label passes through unchanged.
        return (ImageOps.autocontrast(pil_img), label)
class CAtlas():
def __init__(self, cdbg_directory, catlas_directory, load_domfile=True, load_sizefile=False, min_abund=0.0):
self.cdbg_dir = cdbg_directory
self.name = catlas_directory
self.parent = {}
self.children = defaultdict(set)
self.levels = {}
self._cdbg_to_catlas = {}
catlas_file = os.path.join(catlas_directory, 'catlas.csv')
self.__load_catlas(catlas_file)
if load_domfile:
domfile = os.path.join(catlas_directory, 'first_doms.txt')
self.__load_first_level(domfile)
if (load_sizefile is not None):
sizefile = os.path.join(cdbg_directory, 'contigs.info.csv')
self.__load_size_info(sizefile, min_abund)
def __load_catlas(self, catlas_file):
self.max_level = (- 1)
self.root = (- 1)
fp = open(catlas_file, 'rt')
for line in fp:
(node_id, cdbg_id, level, children) = line.strip().split(',')
node_id = int(node_id)
children = children.strip()
if children:
children = children.split(' ')
children = set(map(int, children))
self.children[node_id] = children
for child in children:
self.parent[child] = node_id
level = int(level)
self.levels[node_id] = level
if (level > self.max_level):
self.max_level = level
self.root = node_id
if (level == 1):
self._cdbg_to_catlas[int(cdbg_id)] = node_id
fp.close()
def __load_first_level(self, domfile):
self.layer1_to_cdbg = {}
self.cdbg_to_layer1 = {}
fp = open(domfile, 'rt')
for line in fp:
(dom_node, *beneath) = line.strip().split(' ')
dom_node = int(dom_node)
beneath = set(map(int, beneath))
equiv_cdbg_to_catlas = self._cdbg_to_catlas[dom_node]
self.layer1_to_cdbg[equiv_cdbg_to_catlas] = beneath
for cdbg_id in beneath:
self.cdbg_to_layer1[cdbg_id] = equiv_cdbg_to_catlas
fp.close()
def __load_size_info(self, sizefile, min_abund):
cdbg_sizes = defaultdict(int)
weighted_cdbg_sizes = defaultdict(float)
with open(sizefile, 'rt') as fp:
reader = csv.DictReader(fp)
for row in reader:
contig_id = int(row['contig_id'])
n_kmers = int(row['n_kmers'])
mean_abund = float(row['mean_abund'])
if ((not min_abund) or (mean_abund >= min_abund)):
cdbg_sizes[contig_id] = n_kmers
weighted_cdbg_sizes[contig_id] = (mean_abund * n_kmers)
self.cdbg_sizes = cdbg_sizes
self.weighted_cdbg_sizes = weighted_cdbg_sizes
self.kmer_sizes = {}
self.weighted_kmer_sizes = {}
for node_id in self:
level = self.levels[node_id]
if (level == 1):
total_kmers = 0
total_weighted_kmers = 0
for cdbg_node in self.layer1_to_cdbg.get(node_id):
total_kmers += cdbg_sizes[cdbg_node]
total_weighted_kmers += weighted_cdbg_sizes[cdbg_node]
self.kmer_sizes[node_id] = total_kmers
self.weighted_kmer_sizes[node_id] = total_weighted_kmers
else:
sub_size = 0
sub_weighted_size = 0
for child_id in self.children[node_id]:
sub_size += self.kmer_sizes[child_id]
sub_weighted_size += self.weighted_kmer_sizes[child_id]
self.kmer_sizes[node_id] = sub_size
self.weighted_kmer_sizes[node_id] = sub_weighted_size
def __iter__(self):
Q = [self.root]
pos = 0
while (pos < len(self.levels)):
v = Q[pos]
Q.extend(self.children[v])
pos += 1
for v in reversed(Q):
(yield v)
def __len__(self):
    # Number of catlas nodes recorded in the parent map.
    return len(self.parent)
def decorate_with_shadow_sizes(self):
    """Annotate each node with its shadow size (cDBG nodes beneath it)."""
    self.shadow_sizes = {}
    sizes = self.shadow_sizes
    # Bottom-up iteration: a child's size always exists before its parent's.
    for node in self:
        if self.levels[node] == 1:
            # Leaf-level node: count the cDBG nodes it covers directly.
            sizes[node] = len(self.layer1_to_cdbg[node])
        else:
            # Internal node: sum of the direct children's shadow sizes.
            sizes[node] = sum(sizes[child] for child in self.children[node])
def decorate_with_index_sizes(self, index):
    # Delegate sizing to the index object, which knows how to compute a
    # size for every catlas node.
    self.index_sizes = index.build_catlas_node_sizes(self)
def leaves(self, nodes: List[int]=None) -> Set[int]:
    """Return the set of leaf node ids reachable from ``nodes``.

    Defaults to the whole catlas (starting at the root) when ``nodes``
    is not given.
    """
    if nodes is None:
        nodes = [self.root]
    found = set()
    visited = set()
    # Explicit stack instead of recursion; each node is expanded once.
    stack = list(nodes)
    while stack:
        node_id = stack.pop()
        if node_id in visited:
            continue
        visited.add(node_id)
        children_ids = self.children[node_id]
        if children_ids:
            stack.extend(children_ids)
        else:
            # No children: this is a leaf.
            found.add(node_id)
    return found
def shadow(self, nodes: List[int]) -> Set[int]:
    """Return the union of cDBG ids beneath every leaf under ``nodes``."""
    result = set()
    for leaf in self.leaves(nodes):
        result |= self.layer1_to_cdbg[leaf]
    return result
class MultiEnvWrapper(gym.Wrapper):
    """gym.Wrapper that multiplexes several single-task environments.

    On every ``reset`` a task is picked via ``sample_strategy`` and the
    active environment is swapped in; observations are augmented with a
    one-hot task vector.  All wrapped environments must share
    observation/action space shapes.

    NOTE(review): the original text had bare ``_space.setter`` and
    accessor methods used as attributes (``self.task_space.bounds``,
    ``self.active_task_one_hot``), i.e. the ``@property`` decorators were
    lost in transcription.  They are restored here.
    """

    def __init__(self, envs, sample_strategy=uniform_random_strategy):
        self._sample_strategy = sample_strategy
        self._num_tasks = len(envs)
        self._active_task_index = None
        self._observation_space = None
        super().__init__(envs[0])
        self._task_envs = []
        for env in envs:
            if env.observation_space.shape != self.env.observation_space.shape:
                raise ValueError('Observation space of all envs should be same.')
            if env.action_space.shape != self.env.action_space.shape:
                raise ValueError('Action space of all envs should be same.')
            self._task_envs.append(env)
        self.env.spec.observation_space = self.observation_space

    @property
    def num_tasks(self):
        """int: Number of wrapped task environments."""
        return len(self._task_envs)

    @property
    def task_space(self):
        """akro.Box: One-hot space describing the active task."""
        one_hot_ub = np.ones(self.num_tasks)
        one_hot_lb = np.zeros(self.num_tasks)
        return akro.Box(one_hot_lb, one_hot_ub)

    @property
    def active_task_index(self):
        """int or None: Index of the currently active task."""
        return self._active_task_index

    @property
    def observation_space(self):
        """akro.Box: Task one-hot bounds concatenated with the env bounds."""
        (task_lb, task_ub) = self.task_space.bounds
        (env_lb, env_ub) = self._observation_space.bounds
        return akro.Box(np.concatenate([task_lb, env_lb]), np.concatenate([task_ub, env_ub]))

    @observation_space.setter
    def observation_space(self, observation_space):
        # gym.Wrapper.__init__ assigns observation_space; stash it so the
        # getter can extend it with the task one-hot bounds.
        self._observation_space = observation_space

    @property
    def active_task_one_hot(self):
        """np.ndarray: One-hot encoding of the active task (task 0 before reset)."""
        one_hot = np.zeros(self.task_space.shape)
        index = (self.active_task_index or 0)
        one_hot[index] = self.task_space.high[index]
        return one_hot

    def reset(self, **kwargs):
        """Sample a new task, reset its env, return the augmented observation."""
        self._active_task_index = self._sample_strategy(self._num_tasks, self._active_task_index)
        self.env = self._task_envs[self._active_task_index]
        obs = self.env.reset(**kwargs)
        return self._obs_with_one_hot(obs)

    def step(self, action):
        """Step the active env; returns (obs+one-hot, reward, done, info)."""
        (obs, reward, done, info) = self.env.step(action)
        oh_obs = self._obs_with_one_hot(obs)
        info['task_id'] = self._active_task_index
        return (oh_obs, reward, done, info)

    def close(self):
        """Close every wrapped environment."""
        for env in self._task_envs:
            env.close()

    def _obs_with_one_hot(self, obs):
        # Prepend the active-task one-hot vector to the raw observation.
        return np.concatenate([self.active_task_one_hot, obs])
class foldnorm_gen(rv_continuous):
    """Folded normal distribution: |X| where X ~ Normal(c, 1), with c >= 0."""

    def _argcheck(self, c):
        # Shape parameter c must be non-negative (c == 0 is the half-normal).
        return (c >= 0)

    def _shape_info(self):
        return [_ShapeInfo('c', False, (0, np.inf), (True, False))]

    def _rvs(self, c, size=None, random_state=None):
        # Sample X ~ N(c, 1) and fold at zero.
        return abs((random_state.standard_normal(size) + c))

    def _pdf(self, x, c):
        # Density is the sum of the two normal branches folded onto x >= 0.
        return (_norm_pdf((x + c)) + _norm_pdf((x - c)))

    def _cdf(self, x, c):
        sqrt_two = np.sqrt(2)
        return (0.5 * (sc.erf(((x - c) / sqrt_two)) + sc.erf(((x + c) / sqrt_two))))

    def _sf(self, x, c):
        # Survival function as the sum of the two normal tails.
        return (_norm_sf((x - c)) + _norm_sf((x + c)))

    def _stats(self, c):
        # Closed-form moments of the folded normal in terms of c.
        c2 = (c * c)
        expfac = (np.exp(((- 0.5) * c2)) / np.sqrt((2.0 * np.pi)))
        mu = ((2.0 * expfac) + (c * sc.erf((c / np.sqrt(2)))))
        mu2 = ((c2 + 1) - (mu * mu))
        g1 = (2.0 * ((((mu * mu) * mu) - (c2 * mu)) - expfac))
        g1 /= np.power(mu2, 1.5)
        g2 = (((c2 * (c2 + 6.0)) + 3) + ((8.0 * expfac) * mu))
        g2 += (((2.0 * (c2 - 3.0)) - (3.0 * (mu ** 2))) * (mu ** 2))
        g2 = ((g2 / (mu2 ** 2.0)) - 3.0)
        # (mean, variance, skewness, excess kurtosis)
        return (mu, mu2, g1, g2)
# NOTE(review): the original line was a bare `_REGISTRY.register()` call,
# which registers nothing; it is almost certainly a decorator that lost
# its '@' in transcription.  Restored as a decorator.
@_REGISTRY.register()
class PartialiLIDS(ImageDataset):
    """Partial-iLIDS person re-identification dataset (query/gallery only)."""
    dataset_name = 'partialilids'

    def __init__(self, root='datasets'):
        self.root = root
        self.query_dir = osp.join(self.root, 'PartialiLIDS/query')
        self.gallery_dir = osp.join(self.root, 'PartialiLIDS/gallery')
        (query, gallery) = process_test(self.query_dir, self.gallery_dir)
        # This dataset has no training split, hence the empty train list.
        ImageDataset.__init__(self, [], query, gallery)
class EvaluatorConfig(metaclass=AutodocABCMeta):
    """Configuration of a rolling train/evaluate loop.

    All time-based settings are normalized to pandas offsets via
    ``to_offset`` and stored in underscore-prefixed attributes.

    NOTE(review): the original text had bare ``_window.setter`` /
    ``_freq.setter`` expressions and undecorated duplicate ``def``s,
    i.e. the ``@property`` machinery was lost in transcription.  It is
    restored here; without it the class body raises NameError.
    """

    # keys holding timedeltas that need JSON-friendly serialization in to_dict()
    _timedelta_keys = ['train_window', 'retrain_freq', 'cadence']

    def __init__(self, train_window: float=None, retrain_freq: float=None, cadence: float=None):
        self.train_window = train_window
        self.retrain_freq = retrain_freq
        self.cadence = cadence

    @property
    def train_window(self) -> Union[(pd.Timedelta, pd.DateOffset, None)]:
        """Amount of history each model is trained on."""
        return self._train_window

    @train_window.setter
    def train_window(self, train_window):
        self._train_window = to_offset(train_window)

    @property
    def retrain_freq(self) -> Union[(pd.Timedelta, pd.DateOffset, None)]:
        """How often the model is retrained."""
        return self._retrain_freq

    @retrain_freq.setter
    def retrain_freq(self, retrain_freq):
        self._retrain_freq = to_offset(retrain_freq)

    @property
    def cadence(self) -> Union[(pd.Timedelta, pd.DateOffset)]:
        """Evaluation cadence; falls back to ``retrain_freq`` when unset."""
        if (self._cadence is None):
            return self.retrain_freq
        return self._cadence

    @cadence.setter
    def cadence(self, cadence):
        self._cadence = to_offset(cadence)

    @property
    def horizon(self) -> pd.DateOffset:
        """Forecast horizon; equal to the cadence."""
        return self.cadence

    def to_dict(self):
        """Return a JSON-serializable dict (Timedelta -> seconds, offset -> freqstr)."""
        config_dict = {}
        for (key, value) in self.__dict__.items():
            k_strip = key.lstrip('_')
            if ((k_strip in self._timedelta_keys) and (value is not None)):
                # NOTE(review): Timedelta.microseconds only covers the
                # sub-second component; confirm total_seconds() was not
                # the intent here.
                config_dict[k_strip] = ((value.microseconds / 1000000.0) if isinstance(value, pd.Timedelta) else value.freqstr)
            else:
                config_dict[k_strip] = value
        return config_dict
def get_joint_slot_correctness(preds, class_types, label_maps, key_class_label_id='class_label_id', key_class_prediction='class_prediction', key_start_pos='start_pos', key_start_prediction='start_prediction', key_end_pos='end_pos', key_end_prediction='end_prediction', key_refer_id='refer_id', key_refer_prediction='refer_prediction', key_slot_groundtruth='slot_groundtruth', key_slot_prediction='slot_prediction'):
    """Score predicted vs. ground-truth slot values for dialogue-state tracking.

    ``preds`` is a list of per-turn prediction dicts; the ``key_*``
    arguments name the dict fields to read.  ``label_maps`` maps a
    canonical slot value to its accepted variant spellings.

    NOTE(review): this function looks truncated relative to the
    TripPy-style original -- the return sits inside the loop so only the
    first prediction is ever scored, and ``joint_pd_slot`` is only
    initialized when ``guid`` ends in '0', so other turns would raise
    NameError.  Confirm against the upstream source.
    """
    for pred in preds:
        guid = pred['guid']
        turn_gt_class = pred[key_class_label_id]
        turn_pd_class = pred[key_class_prediction]
        gt_start_pos = pred[key_start_pos]
        pd_start_pos = pred[key_start_prediction]
        gt_end_pos = pred[key_end_pos]
        pd_end_pos = pred[key_end_prediction]
        gt_refer = pred[key_refer_id]
        pd_refer = pred[key_refer_prediction]
        gt_slot = pred[key_slot_groundtruth]
        pd_slot = pred[key_slot_prediction]
        gt_slot = tokenize(gt_slot)
        pd_slot = tokenize(pd_slot)
        joint_gt_slot = gt_slot
        # A guid ending in '0' appears to mark the first turn of a dialogue.
        if (guid[(- 1)] == '0'):
            joint_pd_slot = 'none'
            if (turn_pd_class == class_types.index('none')):
                pass
            elif (turn_pd_class == class_types.index('dontcare')):
                joint_pd_slot = 'dontcare'
            elif (turn_pd_class == class_types.index('copy_value')):
                joint_pd_slot = pd_slot
            elif (('true' in class_types) and (turn_pd_class == class_types.index('true'))):
                joint_pd_slot = 'true'
            elif (('false' in class_types) and (turn_pd_class == class_types.index('false'))):
                joint_pd_slot = 'false'
            elif (('refer' in class_types) and (turn_pd_class == class_types.index('refer'))):
                # NOTE(review): comparing a 3-char slice to ' ' (and a 2-char
                # slice to '') can never be true; the upstream code strips a
                # special marker prefix here and the marker string appears
                # lost in transcription.  Verify before relying on this path.
                if (pd_slot[0:3] == ' '):
                    if (pd_slot[3:] != 'none'):
                        joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[3:], label_maps)
                elif (pd_slot[0:2] == ''):
                    if (pd_slot[2:] != 'none'):
                        joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[2:], label_maps)
                elif (pd_slot != 'none'):
                    joint_pd_slot = pd_slot
            elif (('inform' in class_types) and (turn_pd_class == class_types.index('inform'))):
                # Same suspicious prefix comparisons as the 'refer' branch above.
                if (pd_slot[0:3] == ' '):
                    if (pd_slot[3:] != 'none'):
                        joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[3:], label_maps)
                elif (pd_slot[0:2] == ''):
                    if (pd_slot[2:] != 'none'):
                        joint_pd_slot = check_slot_inform(joint_gt_slot, pd_slot[2:], label_maps)
                else:
                    print('ERROR: Unexpected slot value format. Aborting.')
                    exit()
            else:
                print('ERROR: Unexpected class_type. Aborting.')
                exit()
        # A prediction counts as correct if it matches the ground truth
        # exactly or matches one of its accepted variants in label_maps.
        if (joint_gt_slot == joint_pd_slot):
            pred_flag = 1.0
        elif ((joint_gt_slot != 'none') and (joint_gt_slot != 'dontcare') and (joint_gt_slot != 'true') and (joint_gt_slot != 'false') and (joint_gt_slot in label_maps)):
            no_match = True
            for variant in label_maps[joint_gt_slot]:
                if (variant == joint_pd_slot):
                    no_match = False
                    pred_flag = 1.0
                    break
            if no_match:
                pred_flag = 0.0
        else:
            pred_flag = 0.0
        return (pred_flag, joint_pd_slot, joint_gt_slot)
class RawData(TypedDict):
    """Schema of one raw claim-verification example."""
    # verdict for the claim; ThreeLabels presumably encodes a 3-way
    # supports/refutes/not-enough-info decision -- confirm at its definition
    label: ThreeLabels
    # per-evidence-set lists of supporting sentence indices
    supporting_sentences: list[list[int]]
    # the natural-language claim being verified
    claim: str
    # evidence sentences as plain strings
    evidence: list[str]
    # unstructured per-example metadata
    meta: dict
class local_mem(Structure):
    """ctypes mirror of a C ``local_mem`` struct: a pool of uint32 buffers."""
    # raw_ptr: base of the raw allocation; mem_arr: array of per-slot
    # uint32 pointers; count: number of slots; size_per_mem: size per slot
    # (bytes or elements -- confirm in the C source); align_num: alignment
    # granularity; need_free: whether the raw pointer must be freed.
    _fields_ = [('raw_ptr', POINTER(ctypes.c_char)), ('mem_arr', POINTER(POINTER(ctypes.c_uint32))), ('count', ctypes.c_int32), ('size_per_mem', ctypes.c_int32), ('align_num', ctypes.c_int32), ('need_free', ctypes.c_int32)]
class GeneralEdgeAttConvv1(nn.Module):
    """Graph-batch wrapper around a single GeneralEdgeAttConvv1Layer.

    Applies the edge-attention convolution to ``batch.node_feature`` in
    place and returns the batch.
    """

    def __init__(self, dim_in, dim_out, bias=False, **kwargs):
        # Python-3 style super(), consistent with the rest of the file.
        super().__init__()
        self.model = GeneralEdgeAttConvv1Layer(dim_in, dim_out, bias=bias)

    def forward(self, batch):
        batch.node_feature = self.model(batch.node_feature, batch.edge_index, edge_feature=batch.edge_feature)
        return batch
def main(unused_argv):
    """Convert the MSCOCO captions data into sharded TFRecord files.

    Re-splits the official val images into the new train/val/test splits
    (first 85% of val -> train, next 5% -> val, last 10% -> test), builds
    the vocabulary from training captions only, and writes each split
    with its configured shard count.
    """
    def _is_valid_num_shards(num_shards):
        # Shard count must be smaller than, or a multiple of, the thread count.
        return (num_shards < FLAGS.num_threads) or (not (num_shards % FLAGS.num_threads))

    assert _is_valid_num_shards(FLAGS.train_shards), 'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards'
    assert _is_valid_num_shards(FLAGS.val_shards), 'Please make the FLAGS.num_threads commensurate with FLAGS.val_shards'
    assert _is_valid_num_shards(FLAGS.test_shards), 'Please make the FLAGS.num_threads commensurate with FLAGS.test_shards'

    if not tf.gfile.IsDirectory(FLAGS.output_dir):
        tf.gfile.MakeDirs(FLAGS.output_dir)

    # Load image/caption metadata for both official splits.
    mscoco_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file, FLAGS.train_image_dir)
    mscoco_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file, FLAGS.val_image_dir)

    # Redistribute the official val set across the new splits.
    n_val = len(mscoco_val_dataset)
    train_cutoff = int(0.85 * n_val)
    val_cutoff = int(0.9 * n_val)
    train_dataset = mscoco_train_dataset + mscoco_val_dataset[:train_cutoff]
    val_dataset = mscoco_val_dataset[train_cutoff:val_cutoff]
    test_dataset = mscoco_val_dataset[val_cutoff:]

    # Vocabulary comes from the training captions only.
    train_captions = [c for image in train_dataset for c in image.captions]
    vocab = _create_vocab(train_captions)

    _process_dataset('train', train_dataset, vocab, FLAGS.train_shards)
    _process_dataset('val', val_dataset, vocab, FLAGS.val_shards)
    _process_dataset('test', test_dataset, vocab, FLAGS.test_shards)
def dconv_flops_counter_hook(dconv_module, input, output):
    """Accumulate the FLOP count of a two-stage (conv + projection) module.

    Registered as a forward hook: reads the module's ``weight`` and
    ``projection`` shapes, multiplies the per-position cost of both
    stages by the number of output positions, and adds the total to
    ``dconv_module.__flops__``.
    """
    x = input[0]
    batch_size = x.shape[0]
    spatial_dims = list(output.shape[2:])
    (m_channels, in_channels, kernel_dim1, _) = dconv_module.weight.shape
    (out_channels, _, kernel_dim2, _) = dconv_module.projection.shape
    # Per-output-position multiply-accumulate cost of each stage.
    stage1_flops = kernel_dim1 ** 2 * in_channels * m_channels
    stage2_flops = kernel_dim2 ** 2 * out_channels * m_channels
    positions = batch_size * np.prod(spatial_dims)
    dconv_module.__flops__ += int((stage1_flops + stage2_flops) * positions)
class TypecastNode(ExprNode):
    """Compiler AST node for an explicit type cast expression.

    Resolves the target type from ``base_type``/``declarator``, coerces
    the operand where a real conversion (not just a reinterpreting C
    cast) is needed, and emits a C cast in the generated code.
    """
    subexprs = ['operand']
    base_type = declarator = type = None

    def type_dependencies(self, env):
        # The target type is written explicitly, so the result type never
        # depends on other inferred types.
        return ()

    def infer_type(self, env):
        # Lazily resolve the declared target type on first use.
        if (self.type is None):
            base_type = self.base_type.analyse(env)
            (_, self.type) = self.declarator.analyse(base_type, env)
        return self.type

    def analyse_types(self, env):
        if (self.type is None):
            base_type = self.base_type.analyse(env)
            (_, self.type) = self.declarator.analyse(base_type, env)
        if self.operand.has_constant_result():
            # Fold the cast at compile time when the operand is constant.
            self.calculate_constant_result()
        if self.type.is_cfunction:
            error(self.pos, 'Cannot cast to a function type')
            self.type = PyrexTypes.error_type
        self.operand = self.operand.analyse_types(env)
        if (self.type is PyrexTypes.c_bint_type):
            # Casting to bint is just a boolean coercion of the operand.
            return self.operand.coerce_to_boolean(env)
        to_py = self.type.is_pyobject
        from_py = self.operand.type.is_pyobject
        if (from_py and (not to_py) and self.operand.is_ephemeral()):
            # Taking a non-Python view of a temporary Python object would
            # dangle once the temporary is released.
            if ((not self.type.is_numeric) and (not self.type.is_cpp_class)):
                error(self.pos, 'Casting temporary Python object to non-numeric non-Python type')
        if (to_py and (not from_py)):
            if ((self.type is bytes_type) and self.operand.type.is_int):
                return CoerceIntToBytesNode(self.operand, env)
            elif self.operand.type.can_coerce_to_pyobject(env):
                self.result_ctype = py_object_type
                self.operand = self.operand.coerce_to(self.type, env)
            else:
                if self.operand.type.is_ptr:
                    # Only void*/struct* may be reinterpreted as object pointers.
                    if (not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct)):
                        error(self.pos, 'Python objects cannot be cast from pointers of primitive types')
                else:
                    warning(self.pos, ('No conversion from %s to %s, python object pointer used.' % (self.operand.type, self.type)))
                self.operand = self.operand.coerce_to_simple(env)
        elif (from_py and (not to_py)):
            if self.type.create_from_py_utility_code(env):
                self.operand = self.operand.coerce_to(self.type, env)
            elif self.type.is_ptr:
                if (not (self.type.base_type.is_void or self.type.base_type.is_struct)):
                    error(self.pos, 'Python objects cannot be cast to pointers of primitive types')
            else:
                warning(self.pos, ('No conversion from %s to %s, python object pointer used.' % (self.type, self.operand.type)))
        elif (from_py and to_py):
            # Python object -> Python object.
            if self.typecheck:
                # self.typecheck is presumably set by the parser for
                # checked-cast syntax -- confirm at the node's construction.
                self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
            elif isinstance(self.operand, SliceIndexNode):
                self.operand = self.operand.coerce_to(self.type, env)
        elif (self.type.is_complex and self.operand.type.is_complex):
            self.operand = self.operand.coerce_to_simple(env)
        elif self.operand.type.is_fused:
            self.operand = self.operand.coerce_to(self.type, env)
        if (self.type.is_ptr and self.type.base_type.is_cfunction and self.type.base_type.nogil):
            # Warn when a GIL-requiring function pointer is cast to a
            # nogil function-pointer type.
            op_type = self.operand.type
            if op_type.is_ptr:
                op_type = op_type.base_type
            if (op_type.is_cfunction and (not op_type.nogil)):
                warning(self.pos, 'Casting a GIL-requiring function into a nogil function circumvents GIL validation', 1)
        return self

    def is_simple(self):
        # A cast is as re-evaluation-safe as its operand.
        return self.operand.is_simple()

    def is_ephemeral(self):
        return self.operand.is_ephemeral()

    def nonlocally_immutable(self):
        return (self.is_temp or self.operand.nonlocally_immutable())

    def nogil_check(self, env):
        # Producing a temporary Python object requires the GIL.
        if (self.type and self.type.is_pyobject and self.is_temp):
            self.gil_error()

    def check_const(self):
        return self.operand.check_const()

    def calculate_constant_result(self):
        self.constant_result = self.calculate_result_code(self.operand.constant_result)

    def calculate_result_code(self, operand_result=None):
        if (operand_result is None):
            operand_result = self.operand.result()
        if self.type.is_complex:
            operand_result = self.operand.result()
            if self.operand.type.is_complex:
                # Cast real and imaginary parts separately.
                real_part = self.type.real_type.cast_code(('__Pyx_CREAL(%s)' % operand_result))
                imag_part = self.type.real_type.cast_code(('__Pyx_CIMAG(%s)' % operand_result))
            else:
                real_part = self.type.real_type.cast_code(operand_result)
                imag_part = '0'
            return ('%s(%s, %s)' % (self.type.from_parts, real_part, imag_part))
        else:
            return self.type.cast_code(operand_result)

    def get_constant_c_result_code(self):
        operand_result = self.operand.get_constant_c_result_code()
        if operand_result:
            return self.type.cast_code(operand_result)

    def result_as(self, type):
        if (self.type.is_pyobject and (not self.is_temp)):
            # Bypass this node: the operand can produce the requested
            # representation directly.
            return self.operand.result_as(type)
        else:
            return ExprNode.result_as(self, type)

    def generate_result_code(self, code):
        if self.is_temp:
            code.putln(('%s = (PyObject *)%s;' % (self.result(), self.operand.result())))
            code.put_incref(self.result(), self.ctype())
def set_working_device(device_name: str):
    """Select the device used by the (singleton-style) DeviceManager."""
    DeviceManager().set_device(device_name)
def formatannotation(annotation, base_module=None):
    """Render a type annotation as source-like text.

    Class objects print as their (module-qualified) name; the module
    prefix is dropped for builtins and for ``base_module``.  Anything
    else falls back to ``repr()``.
    """
    if not isinstance(annotation, type):
        return repr(annotation)
    module = annotation.__module__
    if module in ('builtins', '__builtin__', base_module):
        return annotation.__name__
    return f'{module}.{annotation.__name__}'
def build_model():
    """Build the MobileNet-v1 ImageNet training graph.

    Returns:
        (graph, train_tensor): the tf.Graph and the slim train op that
        computes the total loss and applies one SGD step.
    """
    g = tf.Graph()
    with g.as_default(), tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
        (inputs, labels) = imagenet_input(is_training=True)
        with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope(is_training=True)):
            (logits, _) = mobilenet_v1.mobilenet_v1(inputs, is_training=True, depth_multiplier=FLAGS.depth_multiplier, num_classes=FLAGS.num_classes)
        tf.losses.softmax_cross_entropy(labels, logits)
        if FLAGS.quantize:
            # Rewrite the graph with fake-quantization ops (delayed start).
            tf.contrib.quantize.create_training_graph(quant_delay=get_quant_delay())
        total_loss = tf.losses.get_total_loss(name='total_loss')
        # Decay the learning rate every ~2.5 epochs of ImageNet.
        num_epochs_per_decay = 2.5
        imagenet_size = 1271167
        decay_steps = int(((imagenet_size / FLAGS.batch_size) * num_epochs_per_decay))
        learning_rate = tf.train.exponential_decay(get_learning_rate(), tf.train.get_or_create_global_step(), decay_steps, _LEARNING_RATE_DECAY_FACTOR, staircase=True)
        opt = tf.train.GradientDescentOptimizer(learning_rate)
        train_tensor = slim.learning.create_train_op(total_loss, optimizer=opt)
    slim.summaries.add_scalar_summary(total_loss, 'total_loss', 'losses')
    slim.summaries.add_scalar_summary(learning_rate, 'learning_rate', 'training')
    return (g, train_tensor)
def dstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, workers=None, orthogonalize=None):
    """Multidimensional Discrete Sine Transform along the given axes.

    Thin dispatcher that forwards all arguments to the pocketfft backend
    implementation via ``_execute``.
    """
    return _execute(_pocketfft.dstn, x, type, s, axes, norm, overwrite_x, workers, orthogonalize)
def load_conv2d(state_dict, name_pth, name_tf):
    """Copy a TF InceptionV4 conv+BN layer from its HDF5 dump into state_dict.

    TF stores conv weights as HWIO; they are permuted to PyTorch's OIHW.
    The dumped batch norm has no scale (gamma), so ``bn.weight`` is set
    to ones.
    """
    # Context manager guarantees the HDF5 handle is closed even on error
    # (the original open()/close() pair leaked the handle on exceptions).
    with h5py.File((('dump/InceptionV4/' + name_tf) + '.h5'), 'r') as h5f:
        state_dict[(name_pth + '.conv.weight')] = torch.from_numpy(h5f['weights'][()]).permute(3, 2, 0, 1)
        out_planes = state_dict[(name_pth + '.conv.weight')].size(0)
        state_dict[(name_pth + '.bn.weight')] = torch.ones(out_planes)
        state_dict[(name_pth + '.bn.bias')] = torch.from_numpy(h5f['beta'][()])
        state_dict[(name_pth + '.bn.running_mean')] = torch.from_numpy(h5f['mean'][()])
        state_dict[(name_pth + '.bn.running_var')] = torch.from_numpy(h5f['var'][()])
def capture_time():
    """Generator-based stopwatch.

    Drive the generator to obtain ``fn``; while the generator is still
    running, ``fn()`` returns the elapsed time so far, and after the
    generator is exhausted it returns the final, frozen duration.

    Fixes two defects in the original: ``done`` was never set to True,
    so the duration never froze, and the end time used ``time.time()``
    while the start used ``time.perf_counter()`` -- mixing two unrelated
    clocks makes ``end - start`` meaningless.
    """
    start = time.perf_counter()
    done = False
    end = start
    def fn():
        if done:
            return (end - start)
        else:
            return (time.perf_counter() - start)
    (yield fn)
    # Same monotonic clock as `start`; freeze the reported duration.
    end = time.perf_counter()
    done = True
# NOTE(review): the original line was a bare `_module()` call -- almost
# certainly a registry decorator that lost its '@' (and likely part of
# its name, e.g. `DATASETS.register_module`) in transcription.  Restored
# as a decorator so the class is actually registered.
@_module()
class SRREDSMultipleGTDataset(BaseSRDataset):
    """REDS dataset for video super-resolution with multiple GT frames.

    Args:
        lq_folder: Path to the low-quality clip folders.
        gt_folder: Path to the ground-truth clip folders.
        num_input_frames: Number of LQ frames per sample.
        pipeline: Data-processing pipeline config.
        scale: Upsampling scale factor.
        val_partition: 'official' (clips 240-269) or 'REDS4'.
        repeat: How many times to repeat the val clips in test mode.
        test_mode: Whether this instance serves validation/testing.
    """

    def __init__(self, lq_folder, gt_folder, num_input_frames, pipeline, scale, val_partition='official', repeat=1, test_mode=False):
        # Validate before storing so a bad value never ends up on self.
        if (not isinstance(repeat, int)):
            raise TypeError(f'"repeat" must be an integer, but got {type(repeat)}.')
        self.repeat = repeat
        super().__init__(pipeline, scale, test_mode)
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.num_input_frames = num_input_frames
        self.val_partition = val_partition
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Build per-clip annotation dicts for the selected partition."""
        keys = [f'{i:03d}' for i in range(0, 270)]
        if (self.val_partition == 'REDS4'):
            val_partition = ['000', '011', '015', '020']
        elif (self.val_partition == 'official'):
            val_partition = [f'{i:03d}' for i in range(240, 270)]
        else:
            raise ValueError(f'Wrong validation partition {self.val_partition}.Supported ones are ["official", "REDS4"]')
        if self.test_mode:
            keys = [v for v in keys if (v in val_partition)]
            # Repeat the validation clips so an epoch has enough iterations.
            keys *= self.repeat
        else:
            keys = [v for v in keys if (v not in val_partition)]
        data_infos = []
        for key in keys:
            data_infos.append(dict(lq_path=self.lq_folder, gt_path=self.gt_folder, key=key, sequence_length=100, num_input_frames=self.num_input_frames))
        return data_infos
class IndicatorMin(OptimizationFunction):
    """Penalty that activates when the input drops below ``beta``.

    ``eval`` returns |beta - x|**power wherever x < beta and 0 elsewhere;
    ``grad`` is the corresponding chain-rule gradient.
    """

    def __init__(self, objective: OptimizationFunction, beta: float=0, power: float=2):
        super().__init__(objective)
        self.obj = objective
        self.beta = beta
        self.power = power

    def eval(self, input_vals: List[np.ndarray]) -> np.ndarray:
        x = input_vals[0]
        active = x < self.beta
        return active * abs(self.beta - x) ** self.power

    def grad(self, input_vals: List[np.ndarray], grad_val: np.ndarray) -> List[np.ndarray]:
        x = input_vals[0]
        diff = self.beta - x
        active = x < self.beta
        # d/dx |beta - x|**p = -p * |beta - x|**(p-1) * sign(beta - x)
        local_grad = -(active * self.power * abs(diff) ** (self.power - 1) * np.sign(diff))
        return [grad_val * local_grad]

    def __str__(self):
        return 'I_min({0}-{1})**{2}'.format(self.obj, self.beta, self.power)
class Inferencer(ABC):
    """Abstract base class for anomaly-detection inferencers.

    Subclasses implement model loading, pre/post-processing and the
    forward pass; ``predict`` ties the stages together.
    """

    def load_model(self, path: (str | Path)) -> Any:
        """Load the trained model from ``path``."""
        raise NotImplementedError

    def pre_process(self, image: np.ndarray) -> (np.ndarray | Tensor):
        """Transform a raw image into model input."""
        raise NotImplementedError

    def forward(self, image: (np.ndarray | Tensor)) -> (np.ndarray | Tensor):
        """Run the model on a pre-processed input."""
        raise NotImplementedError

    def post_process(self, predictions: (np.ndarray | Tensor), metadata: (dict[(str, Any)] | None)) -> dict[(str, Any)]:
        """Convert raw predictions into scores, labels, maps and boxes."""
        raise NotImplementedError

    def predict(self, image: ((str | Path) | np.ndarray), metadata: (dict[(str, Any)] | None)=None) -> ImageResult:
        """Full pipeline: read -> pre-process -> forward -> post-process."""
        if (metadata is None):
            # Fall back to metadata attached to the inferencer, if any.
            metadata = getattr(self, 'metadata', {})
        if isinstance(image, (str, Path)):
            image_arr: np.ndarray = read_image(image)
        else:
            image_arr = image
        metadata['image_shape'] = image_arr.shape[:2]
        processed_image = self.pre_process(image_arr)
        predictions = self.forward(processed_image)
        output = self.post_process(predictions, metadata=metadata)
        return ImageResult(image=image_arr, pred_score=output['pred_score'], pred_label=output['pred_label'], anomaly_map=output['anomaly_map'], pred_mask=output['pred_mask'], pred_boxes=output['pred_boxes'], box_labels=output['box_labels'])

    @staticmethod
    def _superimpose_segmentation_mask(metadata: dict, anomaly_map: np.ndarray, image: np.ndarray) -> np.ndarray:
        """Draw red mask boundaries onto ``image`` (modified in place).

        NOTE(review): restored @staticmethod -- the original had no
        decorator, yet the first parameter is ``metadata`` rather than
        ``self``, so an instance call would have bound the instance to
        ``metadata``.
        """
        pred_mask = compute_mask(anomaly_map, 0.5)
        image_height = metadata['image_shape'][0]
        image_width = metadata['image_shape'][1]
        pred_mask = cv2.resize(pred_mask, (image_width, image_height))
        boundaries = find_boundaries(pred_mask)
        outlines = dilation(boundaries, np.ones((7, 7)))
        image[outlines] = [255, 0, 0]
        return image

    def __call__(self, image: np.ndarray) -> ImageResult:
        """Alias for :meth:`predict`."""
        return self.predict(image)

    @staticmethod
    def _normalize(pred_scores: (Tensor | np.float32), metadata: (dict | DictConfig), anomaly_maps: ((Tensor | np.ndarray) | None)=None) -> tuple[(((np.ndarray | Tensor) | None), float)]:
        """Apply min-max and/or CDF normalization using stats in ``metadata``.

        NOTE(review): restored @staticmethod (the original signature has
        no ``self`` parameter).
        """
        if (('min' in metadata) and ('max' in metadata)):
            if (anomaly_maps is not None):
                anomaly_maps = normalize_min_max(anomaly_maps, metadata['pixel_threshold'], metadata['min'], metadata['max'])
            pred_scores = normalize_min_max(pred_scores, metadata['image_threshold'], metadata['min'], metadata['max'])
        if (('pixel_mean' in metadata.keys()) and ('pixel_std' in metadata.keys())):
            if (anomaly_maps is not None):
                anomaly_maps = standardize(anomaly_maps, metadata['pixel_mean'], metadata['pixel_std'], center_at=metadata['image_mean'])
                anomaly_maps = normalize_cdf(anomaly_maps, metadata['pixel_threshold'])
        if (('image_mean' in metadata.keys()) and ('image_std' in metadata.keys())):
            pred_scores = standardize(pred_scores, metadata['image_mean'], metadata['image_std'])
            pred_scores = normalize_cdf(pred_scores, metadata['image_threshold'])
        return (anomaly_maps, float(pred_scores))

    def _load_metadata(self, path: ((str | Path) | None)=None) -> (dict | DictConfig):
        """Load normalization metadata from a config file via OmegaConf."""
        metadata: (dict[(str, ((float | np.ndarray) | Tensor))] | DictConfig) = {}
        if (path is not None):
            config = OmegaConf.load(path)
            metadata = cast(DictConfig, config)
        return metadata
class AuxiliaryHeadCIFAR(nn.Module):
    """DARTS-style auxiliary classifier head for CIFAR.

    Expects an input feature map of spatial size 8x8: the 5x5/stride-3
    average pool followed by a 2x2 conv reduces it to 1x1, which is what
    the 768-wide linear classifier assumes.
    """

    def __init__(self, C, num_classes):
        # Python-3 style super(), consistent with the rest of the file.
        super().__init__()
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
        )
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        x = self.features(x)
        # Flatten to (batch, 768) before the linear classifier.
        x = self.classifier(x.contiguous().view(x.size(0), (- 1)))
        return x
class BipedalWalkerExperiment(QDExperiment):
    """QD experiment evaluating neural-network policies on BipedalWalker."""

    def reinit(self):
        """Re-read the config, rebuild the model, sync the search dimension."""
        super().reinit()
        self.env_name = self.config['game']['env_name']
        self.init_model()
        self.update_dimension()

    def init_model(self):
        """Instantiate the policy model from the game config."""
        self.model = Model(self.config['game'])

    def update_dimension(self):
        """Point the QD algorithm's search space at the model's parameter count."""
        self.algo.dimension = self.model.param_count

    def eval_fn(self, ind, render_mode=False):
        """Evaluate one individual: run episodes, record fitness/features."""
        env = make_env(self.env_name)
        self.model.set_model_params(ind)
        scores = simulate(self.model, env, render_mode=render_mode, num_episode=self.config['indv_eps'])
        ind.fitness.values = (scores[self.fitness_type],)
        ind.features.values = [scores[feature] for feature in self.features_list]
        return ind
def get_model(model_type: str, **kwargs: Union[(int, float)]) -> torch.nn.Module:
    """Factory returning a model instance for ``model_type``.

    Raises:
        ValueError: if ``model_type`` is not recognized.
    """
    # Guard clause: fail fast on unknown model types.
    if model_type != 'deeptime':
        raise ValueError(f'Unknown model type {model_type}')
    return deeptime(datetime_feats=kwargs['datetime_feats'])
def add_model_args(parser):
    """Attach the model-architecture option group to an argparse parser."""
    # Imported lazily to avoid a hard dependency at module import time.
    from fairseq.models import ARCH_MODEL_REGISTRY
    group = parser.add_argument_group('Model configuration')
    group.add_argument('--arch', '-a', default='fconv', metavar='ARCH', required=True, choices=ARCH_MODEL_REGISTRY.keys(), help='Model Architecture')
    return group
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.