code stringlengths 101 5.91M |
|---|
class Cifar100():
    """Loader for the CIFAR-100 python pickles, split into 5 incremental tasks.

    Each task covers 20 consecutive fine-label classes. Per task the split is
    9000 train / 1000 val / 2000 test examples, each a (32x32x3 array, label)
    pair. Expects the pickle files at 'cifar100/train' and 'cifar100/test'.
    """

    def __init__(self):
        with open('cifar100/train', 'rb') as f:
            self.train = pickle.load(f, encoding='latin1')
        with open('cifar100/test', 'rb') as f:
            self.test = pickle.load(f, encoding='latin1')
        self.train_data = self.train['data']
        self.train_labels = self.train['fine_labels']
        self.test_data = self.test['data']
        self.test_labels = self.test['fine_labels']
        (self.train_groups, self.val_groups, self.test_groups) = self.initialize()
        self.batch_num = 5

    @staticmethod
    def _to_image(flat):
        """Convert one flat 3072-value CIFAR row (RRR...GGG...BBB) to 32x32x3 HWC."""
        r = flat[:1024].reshape(32, 32)
        g = flat[1024:2048].reshape(32, 32)
        b = flat[2048:].reshape(32, 32)
        return np.dstack((r, g, b))

    @staticmethod
    def _group_by_task(data, labels, per_group):
        """Bucket (image, label) pairs into 5 groups of 20 classes each.

        Replaces the original duplicated elif-chain: fine labels 0..99 map to
        group `label // 20`. Out-of-range labels are silently dropped, exactly
        as the original chain did.
        """
        groups = [[] for _ in range(5)]
        for flat, label in zip(data, labels):
            if 0 <= label < 100:
                groups[label // 20].append((Cifar100._to_image(flat), label))
        for g in groups:
            # Same sanity check as the original's hand-written asserts.
            assert len(g) == per_group, len(g)
        return groups

    def initialize(self):
        """Group train/test data by task and carve 1000 val examples per task.

        Returns:
            (train_groups, val_groups, test_groups): three lists of 5 lists of
            (image, label) pairs, sized 9000/1000/2000 per task respectively.
        """
        train_groups = self._group_by_task(self.train_data, self.train_labels, 10000)
        val_groups = [g[9000:] for g in train_groups]
        train_groups = [g[:9000] for g in train_groups]
        test_groups = self._group_by_task(self.test_data, self.test_labels, 2000)
        return (train_groups, val_groups, test_groups)

    def getNextClasses(self, i):
        """Return the (train, val, test) example lists for task index i."""
        return (self.train_groups[i], self.val_groups[i], self.test_groups[i])
class EnvSpecMeta(ABCMeta):
    """Metaclass that rewrites an env-spec class to also inherit EnvSpecMixin.

    It reads config keys and default values off the first declared base and
    installs a `gen_config` namedtuple factory on the generated class.
    """
    def __new__(cls: Any, name: str, parents: Tuple, attrs: Dict) -> Any:
        # Only the first declared parent is kept; EnvSpecMixin is injected
        # as the second base of the class being created.
        base = parents[0]
        parents = (base, EnvSpecMixin)
        config_keys = base._config_keys
        check_key_duplication(name, 'config', config_keys)
        # Dotted keys are not valid namedtuple field names; flatten them.
        config_keys: List[str] = list(map((lambda s: s.replace('.', '_')), config_keys))
        defaults: Tuple = base._default_config_values
        # NOTE(review): namedtuple applies `defaults` to the TRAILING fields;
        # assumed that `_default_config_values` is aligned that way — confirm.
        attrs['gen_config'] = namedtuple('Config', config_keys, defaults=defaults)
        return super().__new__(cls, name, parents, attrs)
def indices_values_to_sparse_tensor(indices, values, shape, return_idx=False):
    """Build a torch sparse COO tensor from numpy index/value arrays.

    Args:
        indices: numpy int array of shape (ndim, nnz) — COO coordinates.
        values: numpy array of shape (nnz,) — the nonzero values.
        shape: iterable giving the dense shape of the tensor.
        return_idx: unsupported; see NotImplementedError below.

    Returns:
        torch sparse COO tensor with the given indices/values/shape.

    Raises:
        NotImplementedError: if return_idx is True.
    """
    indices = torch.from_numpy(indices)
    values = torch.from_numpy(values)
    shape = torch.Size(shape)
    if return_idx is True:
        # BUG in the original: it returned (tensor, row2idx, col2idx) but
        # row2idx/col2idx were never defined, so this path always raised
        # NameError. Fail loudly with a clear message instead.
        raise NotImplementedError(
            'return_idx=True is unsupported: row2idx/col2idx were never computed')
    # torch.sparse.FloatTensor is deprecated; sparse_coo_tensor is the
    # modern equivalent and also accepts non-float32 values.
    return torch.sparse_coo_tensor(indices, values, shape)
()
def random_seed(request) -> int:
    """Return the seed from the --random-seed option, or a fresh random one.

    Used as a pytest fixture body: `request.config` carries the CLI options.
    """
    manual_seed = request.config.getoption('--random-seed')
    if manual_seed is None:
        # No explicit seed given: draw one in [0, 1000).
        return np.random.RandomState().randint(0, 1000)
    return int(manual_seed)
class PapiloSolver(SCIPSolver):
    """Log parser for the PaPILO presolver, built on the SCIP solver parser.

    Extracts presolve timing, problem-size statistics, per-presolver timing
    and transaction rates, and counts Fast/Medium/Exhaustive presolve rounds
    from a PaPILO log.
    """
    solverId = 'PaPILO'
    recognition_expr = re.compile('starting presolve of problem')
    version_expr = re.compile('PaPILO version (\\S+)')
    presolving_time_expr = re.compile('presolving finished after\\s+(\\S+)')
    presolving_time_inf_expr = re.compile('presolving detected infeasible problem after\\s+(\\S+)')
    presolving_time_unb_expr = re.compile('presolving detected unbounded problem after\\s+(\\S+)')
    presolving_time_unb_inf_expr = re.compile('presolving detected unbounded or infeasible problem after\\s+(\\S+)')
    floating_point_expr = '[-+]?[0-9]*\\.?[0-9]*'
    rows_expr = re.compile('\\s+rows:\\s+(\\S+)')
    columns_expr = re.compile('\\s+columns:\\s+(\\S+)')
    int_columns_expr = re.compile('\\s+int. columns:\\s+(\\S+)')
    cont_columns_expr = re.compile('\\s+cont. columns:\\s+(\\S+)')
    non_zeros_expr = re.compile('\\s+nonzeros:\\s+(\\S+)')
    fast_rounds_expr = re.compile('^(.*?)Fast\\s+')
    medium_rounds_expr = re.compile('^(.*?)Medium\\s+')
    exhaustive_rounds_expr = re.compile('^(.*?)Exhaustive')
    final_rounds_expr = re.compile('^(.*?)Final\\s+')
    trivial_rounds_expr = re.compile('^(.*?)Trivial\\s+')
    unchanged_rounds_expr = re.compile('^(.*?)Unchanged\\s+')
    red_rows_expr = re.compile('\\s+reduced rows:\\s+(\\S+)')
    red_columns_expr = re.compile('\\s+reduced columns:\\s+(\\S+)')
    red_int_columns_expr = re.compile('\\s+reduced int. columns:\\s+(\\S+)')
    red_cont_columns_expr = re.compile('\\s+reduced cont. columns:\\s+(\\S+)')
    red_non_zeros_expr = re.compile('\\s+reduced nonzeros:\\s+(\\S+)')
    presolving_rounds = re.compile('presolved\\s+(\\S+)')
    # Each of the following anchors on the cumulative "presolved N rounds: ..."
    # summary line and captures the next field after the previously-matched ones.
    columns_deleted = re.compile('presolved \\d+ rounds:\\s+(\\S+)')
    rows_deleted = re.compile('presolved \\d+ rounds:\\s+\\d+ del cols,\\s+(\\S+)')
    bound_changes = re.compile('presolved \\d+ rounds:\\s+\\d+ del cols,\\s+\\d+ del rows,\\s+(\\S+)')
    changed_sides = re.compile('presolved \\d+ rounds:\\s+\\d+ del cols,\\s+\\d+ del rows,\\s+\\d+ chg bounds,\\s+(\\S+)')
    changed_coefficients = re.compile('presolved \\d+ rounds:\\s+\\d+ del cols,\\s+\\d+ del rows,\\s+\\d+ chg bounds,\\s+\\d+ chg sides,\\s+(\\S+)')
    transactions_applied = re.compile('presolved \\d+ rounds:\\s+\\d+ del cols,\\s+\\d+ del rows,\\s+\\d+ chg bounds,\\s+\\d+ chg sides,\\s+\\d+ chg coeffs,\\s+(\\S+)')
    transactions_conflicts = re.compile('presolved \\d+ rounds:\\s+\\d+ del cols,\\s+\\d+ del rows,\\s+\\d+ chg bounds,\\s+\\d+ chg sides,\\s+\\d+ chg coeffs,\\s+\\d+ tsx applied,\\s+(\\S+)')
    # Round counters; per-instance copies are (re)set in reset().
    fast_rounds = 0
    medium_rounds = 0
    exhaustive_rounds = 0

    def __init__(self, **kw):
        super(PapiloSolver, self).__init__(**kw)

    def reset(self):
        """Reset all extracted data fields and round counters for a new log."""
        super(PapiloSolver, self).reset()
        self.addData(PRESOLVE_TIME_NAME, DEFAULT_VALUE)
        self.addData(ROWS_NAME, DEFAULT_VALUE)
        self.addData(COLUMNS_NAME, DEFAULT_VALUE)
        self.addData(INT_COLUMNS_NAME, DEFAULT_VALUE)
        self.addData(CONT_COLUMNS_NAME, DEFAULT_VALUE)
        self.addData(NON_ZEROS_NAME, DEFAULT_VALUE)
        self.addData(RED_ROWS_NAME, DEFAULT_VALUE)
        self.addData(RED_COLUMNS_NAME, DEFAULT_VALUE)
        self.addData(RED_INT_COLUMNS_NAME, DEFAULT_VALUE)
        self.addData(RED_CONT_COLUMNS_NAME, DEFAULT_VALUE)
        self.addData(RED_NON_ZEROS_NAME, DEFAULT_VALUE)
        self.addData(PRESOLVING_ROUNDS_NAME, DEFAULT_VALUE)
        self.addData(COLUMNS_DELETED_NAME, DEFAULT_VALUE)
        self.addData(ROWS_DELETED_NAME, DEFAULT_VALUE)
        self.addData(CHANGES_SIDES_NAME, DEFAULT_VALUE)
        self.addData(CHANGED_COEFFICIENTS_NAME, DEFAULT_VALUE)
        self.addData(BOUND_CHANGES_NAME, DEFAULT_VALUE)
        self.addData(TSX_APPLIED_NAME, DEFAULT_VALUE)
        self.addData(TSX_CONFLICTS_NAME, DEFAULT_VALUE)
        # BUG FIX: these were bare local assignments (`fast_rounds = 0`) in the
        # original, so the instance counters were never actually reset between
        # logs. They must be instance attributes, as used in
        # extractOptionalInformation below.
        self.fast_rounds = 0
        self.medium_rounds = 0
        self.exhaustive_rounds = 0

    def extractPrimalboundHistory(self, line):
        """PaPILO logs carry no primal bound history; intentionally a no-op."""
        pass

    def extractDualboundHistory(self, line):
        """PaPILO logs carry no dual bound history; intentionally a no-op."""
        pass

    def extractHistory(self, line):
        """No bound history to extract from PaPILO logs; intentionally a no-op."""
        pass

    def extractOptionalInformation(self, line: str):
        """Extract all PaPILO statistics reachable from a single log line."""
        # Presolve wall time: the terminating message differs by outcome.
        if line.startswith('presolving finished'):
            self.extractByExpression(line, self.presolving_time_expr, PRESOLVE_TIME_NAME)
        elif line.startswith('presolving detected infeasible problem'):
            self.extractByExpression(line, self.presolving_time_inf_expr, PRESOLVE_TIME_NAME)
        elif line.startswith('presolving detected unbounded or infeasible problem'):
            self.extractByExpression(line, self.presolving_time_unb_inf_expr, PRESOLVE_TIME_NAME)
        elif line.startswith('presolving detected unbounded problem'):
            self.extractByExpression(line, self.presolving_time_unb_expr, PRESOLVE_TIME_NAME)
        # Problem size statistics before/after presolve.
        self.extractByExpression(line, self.rows_expr, ROWS_NAME)
        self.extractByExpression(line, self.columns_expr, COLUMNS_NAME)
        self.extractByExpression(line, self.int_columns_expr, INT_COLUMNS_NAME)
        self.extractByExpression(line, self.cont_columns_expr, CONT_COLUMNS_NAME)
        self.extractByExpression(line, self.non_zeros_expr, NON_ZEROS_NAME)
        self.extractByExpression(line, self.red_rows_expr, RED_ROWS_NAME)
        self.extractByExpression(line, self.red_columns_expr, RED_COLUMNS_NAME)
        self.extractByExpression(line, self.red_int_columns_expr, RED_INT_COLUMNS_NAME)
        self.extractByExpression(line, self.red_cont_columns_expr, RED_CONT_COLUMNS_NAME)
        self.extractByExpression(line, self.red_non_zeros_expr, RED_NON_ZEROS_NAME)
        # Aggregate reduction counts from the "presolved N rounds: ..." line.
        self.extractByExpression(line, self.presolving_rounds, PRESOLVING_ROUNDS_NAME)
        self.extractByExpression(line, self.columns_deleted, COLUMNS_DELETED_NAME)
        self.extractByExpression(line, self.rows_deleted, ROWS_DELETED_NAME)
        self.extractByExpression(line, self.bound_changes, BOUND_CHANGES_NAME)
        self.extractByExpression(line, self.changed_sides, CHANGES_SIDES_NAME)
        self.extractByExpression(line, self.changed_coefficients, CHANGED_COEFFICIENTS_NAME)
        self.extractByExpression(line, self.transactions_applied, TSX_APPLIED_NAME)
        self.extractByExpression(line, self.transactions_conflicts, TSX_CONFLICTS_NAME)
        # Per-presolver timing columns.
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_column_singleton), TIME_column_singleton)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_coeff_tightening), TIME_coeff_tightening)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_probing), TIME_probing)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_stuffing), TIME_stuffing)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_sparsify), TIME_sparsify)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_substitution), TIME_substitution)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_dual_fix), TIME_dual_fix)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_dual_infer), TIME_dual_infer)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_dom_colums), TIME_dom_colums)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_fix_continuous), TIME_fix_continuous)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_simplify_inequality), TIME_simplify_inequality)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_doubletoneq), TIME_doubletoneq)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_parallel_columns), TIME_parallel_columns)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_parallel_rows), TIME_parallel_rows)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_propagation), TIME_propagation)
        self.extractByExpression(line, self.setup_time_expr_for_solver(SOLVER_simple_probing), TIME_simple_probing)
        # Per-presolver transaction-rate columns.
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_column_singleton), TSX_RATE_column_singleton)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_coeff_tightening), TSX_RATE_coeff_tightening)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_probing), TSX_RATE_probing)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_stuffing), TSX_RATE_stuffing)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_sparsify), TSX_RATE_sparsify)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_substitution), TSX_RATE_substitution)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_dual_fix), TSX_RATE_dual_fix)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_dual_infer), TSX_RATE_dual_infer)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_dom_colums), TSX_RATE_dom_colums)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_fix_continuous), TSX_RATE_fix_continuous)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_simplify_inequality), TSX_RATE_simplify_inequality)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_doubletoneq), TSX_RATE_doubletoneq)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_parallel_columns), TSX_RATE_parallel_columns)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_parallel_rows), TSX_RATE_parallel_rows)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_propagation), TSX_RATE_propagation)
        self.extractByExpression(line, self.setup_tsx_rate_expr_for_solver(SOLVER_simple_probing), TSX_RATE_simple_probing)
        # Round counting: a Trivial line restarts the counters; Fast/Medium/
        # Exhaustive lines increment them; the Final line flushes them out.
        if (self.trivial_rounds_expr.match(line) is not None):
            self.fast_rounds = 0
            self.medium_rounds = 0
            self.exhaustive_rounds = 0
        if (self.fast_rounds_expr.match(line) is not None):
            self.fast_rounds = (self.fast_rounds + 1)
        if (self.medium_rounds_expr.match(line) is not None):
            self.medium_rounds = (self.medium_rounds + 1)
        if (self.exhaustive_rounds_expr.match(line) is not None):
            # "Unchanged" lines also match the Exhaustive pattern; skip them.
            if (self.unchanged_rounds_expr.match(line) is None):
                self.exhaustive_rounds = (self.exhaustive_rounds + 1)
        if (self.final_rounds_expr.match(line) is not None):
            self.addData(FAST_ROUNDS_NAME, self.fast_rounds)
            self.addData(MEDIUM_ROUNDS_NAME, self.medium_rounds)
            self.addData(EXHAUSTIVE_ROUNDS_NAME, self.exhaustive_rounds)

    def setup_tsx_rate_expr_for_solver(self, name):
        """Regex capturing the transaction-rate column for presolver `name`."""
        return re.compile((((('\\s+' + name) + '\\s+\\d+\\s+') + self.floating_point_expr) + '\\s+\\d+\\s+(\\S+)'))

    def setup_time_expr_for_solver(self, name):
        """Regex capturing the execution-time column for presolver `name`."""
        return re.compile((((((('\\s+' + name) + '\\s+\\d+\\s+') + self.floating_point_expr) + '\\s+\\d+\\s+') + self.floating_point_expr) + '\\s+(\\S+)'))
def normalize_tensor_image(inp):
    """Convert an image to a float32 tensor scaled from [0, 255] to [-1, 1]."""
    tensor = tf.dtypes.cast(tf.convert_to_tensor(inp), tf.float32)
    return (tensor - 127.5) / 127.5
def convert(image_folder, video_file, fps, width, height):
    """Stitch the .jpg images in image_folder (oldest-modified first) into an
    XVID-encoded video.

    Args:
        image_folder: directory containing *.jpg frames.
        video_file: output video path.
        fps: frames per second of the output video.
        width, height: frame size; every image is resized to it.

    Frames that fail to read/resize are skipped.
    """
    images = sorted(glob.glob(os.path.join(image_folder, '*.jpg')), key=os.path.getmtime)
    vw = cv2.VideoWriter(video_file, cv2.VideoWriter_fourcc(*'XVID'), fps, (width, height))
    try:
        for i in trange(len(images)):
            try:
                frame = cv2.imread(images[i])
                frame = cv2.resize(frame, (width, height))
                vw.write(frame)
                cv2.waitKey(int((1000 / fps)))
            except Exception:
                # FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit. Skip only broken frames.
                continue
    finally:
        # FIX: the writer was never released, so the output container
        # could be left unfinalized.
        vw.release()
_input(sky_area=units.sr)
def schechter_smf(redshift, m_star, phi_star, alpha, m_min, m_max, sky_area, cosmology, noise=True):
    """Sample galaxy redshifts and stellar masses from a Schechter stellar
    mass function.

    Returns:
        (z, m): sampled redshifts and corresponding stellar masses.
    """
    # Draw redshifts first; the SMF parameters may be redshift-dependent.
    z = schechter_smf_redshift(redshift, m_star, phi_star, alpha, m_min, m_max, sky_area, cosmology, noise)
    # Array-valued (non-callable) parameters are interpolated from the input
    # redshift grid onto the sampled redshifts before drawing masses.
    if ((not callable(m_star)) and (np.ndim(m_star) > 0)):
        m_star = np.interp(z, redshift, m_star)
    if ((not callable(phi_star)) and (np.ndim(phi_star) > 0)):
        phi_star = np.interp(z, redshift, phi_star)
    if ((not callable(alpha)) and (np.ndim(alpha) > 0)):
        alpha = np.interp(z, redshift, alpha)
    m = schechter_smf_mass(z, alpha, m_star, m_min, m_max)
    return (z, m)
def train_interaction_model(x_train, y_train, x_test, y_test):
    """Train the interaction model with early stopping and save two snapshots.

    Saves the randomly-initialised model to models/<dataset>_random.h5 and the
    trained model to models/<dataset>.h5.
    NOTE(review): x_test/y_test are accepted but unused here — validation uses
    a 20% split of the training data; confirm that is intentional.
    """
    print('Training interaction model')
    model = get_default_model(x_train.shape[1])
    compile_model(model)
    # Snapshot the untrained weights before any fitting happens.
    tf.keras.models.save_model(model, 'models/{}_random.h5'.format(FLAGS.dataset))
    early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, mode='min')
    model.fit(x=x_train, y=y_train, batch_size=128, epochs=FLAGS.epochs, verbose=0, validation_split=0.2, callbacks=[early_stop])
    tf.keras.models.save_model(model, 'models/{}.h5'.format(FLAGS.dataset))
.unit
.convert
def test_line_to_cols():
    """line_to_cols should lowercase the ID/RA columns and keep the rest."""
    line = ['ID', 'RA', 'dec', 'test1', 'test2']
    # FIX: the original did `expected_cols = line`, aliasing the input list;
    # if line_to_cols returned its argument the assertion compared an object
    # with itself and could never fail. Build the expectation from a copy.
    expected_cols = list(line)
    expected_cols[0] = 'id'
    expected_cols[1] = 'ra'
    actual_cols = convert.line_to_cols(line)
    assert (expected_cols == actual_cols)
class HierarchicalData(Dataset):
    """Dataset pairing dialog turn histories with dialog-act labels.

    Each item is a fixed-length list of `dialog_used` token-id tensors: the
    most recent turns, each prefixed with token id 101 (CLS) and truncated to
    64 tokens, left-padded with bare-CLS tensors for short dialogs.
    """

    def __init__(self, x, act, dialog_used=5):
        self.x = x
        self.act = act
        self.dialog_used = dialog_used

    def __getitem__(self, index):
        dialog = self.x[index]
        # Most recent `dialog_used` turns: prepend 101, keep at most 64 tokens.
        recent = [torch.tensor([101] + turn[:64]) for turn in dialog[-self.dialog_used:]]
        # Left-pad short dialogs with single-token CLS tensors (no-op when the
        # dialog already has >= dialog_used turns: the count goes negative).
        padding = [torch.tensor([101])] * (self.dialog_used - len(dialog))
        return (padding + recent, self.act[index])

    def __len__(self):
        return len(self.x)
def register_all_mapillary_vistas_panoptic(root):
    """Register every predefined Mapillary Vistas panoptic split under `root`."""
    metadata = get_metadata()
    for prefix, paths in _PREDEFINED_SPLITS_ADE20K_PANOPTIC.items():
        image_root, panoptic_root, panoptic_json, semantic_root = paths
        register_mapillary_vistas_panoptic(
            prefix,
            metadata,
            os.path.join(root, image_root),
            os.path.join(root, panoptic_root),
            os.path.join(root, semantic_root),
            os.path.join(root, panoptic_json),
        )
def create_dataframe(df, json):
    """Append every non-noise/background entry of the nested `json` mapping to
    `df`, tagging each with a per-session id, then clean the result.

    Args:
        df: base DataFrame to extend.
        json: mapping session -> sub-section -> list of row dicts.
            NOTE: entry dicts are mutated in place (session_id is added),
            matching the original behavior.

    Returns:
        The cleaned, extended DataFrame.
    """
    # Local import keeps this block self-contained; pandas was only reached
    # implicitly through df.append in the original.
    import pandas as pd
    # FIX: DataFrame.append was deprecated and removed in pandas 2.0, and
    # appending row-by-row is O(n^2). Collect one-row frames and concat once.
    frames = [df]
    session_id = 0
    for session in list(json.keys()):
        for sub in list(json[session].keys()):
            if sub != 'noises' and sub != 'background':
                for temp_dict in json[session][sub]:
                    temp_dict['session_id'] = session_id
                    # One-row frame per dict preserves the original per-append
                    # indexing (each appended row carries index 0).
                    frames.append(pd.DataFrame([temp_dict]))
        session_id += 1
    df = pd.concat(frames)
    df = clean_dataframe(df)
    return df
def main(args):
    """Align a CoNLL-U tokenization back onto the raw plaintext and emit
    per-character tokenizer labels.

    For each character of a token a digit is written: '0' inside a token,
    '1' at the end of a plain token, '3' at the end of a token that starts a
    multi-word token (MWT); at a sentence boundary the last digit is bumped
    by one (see the `else` branch below). MWT expansions are collected and
    either printed or dumped as JSON.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('plaintext_file', type=str, help='Plaintext file containing the raw input')
    parser.add_argument('conllu_file', type=str, help='CoNLL-U file containing tokens and sentence breaks')
    parser.add_argument('-o', '--output', default=None, type=str, help='Output file name; output to the console if not specified (the default)')
    parser.add_argument('-m', '--mwt_output', default=None, type=str, help='Output file name for MWT expansions; output to the console if not specified (the default)')
    args = parser.parse_args(args=args)
    with open(args.plaintext_file, 'r') as f:
        text = ''.join(f.readlines())
        textlen = len(text)
    if (args.output is None):
        # NOTE(review): when writing to stdout, output.close() at the end
        # closes sys.stdout — confirm that is acceptable for callers.
        output = sys.stdout
    else:
        outdir = os.path.split(args.output)[0]
        os.makedirs(outdir, exist_ok=True)
        output = open(args.output, 'w')
    index = 0
    mwt_expansions = []
    with open(args.conllu_file, 'r') as f:
        # `buf` holds the label digits for the most recent token; it is only
        # flushed when the next token is found (or bumped at sentence end).
        buf = ''
        mwtbegin = 0
        mwtend = (- 1)
        expanded = []
        last_comments = ''
        for line in f:
            line = line.strip()
            if len(line):
                if (line[0] == '#'):
                    # Remember the first comment of the sentence (sent_id etc.)
                    # for diagnostics below.
                    if (len(last_comments) == 0):
                        last_comments = line
                    continue
                line = line.split('\t')
                if ('.' in line[0]):
                    # Skip empty nodes (decimal ids) — they have no surface form.
                    continue
                word = line[1]
                if ('-' in line[0]):
                    # MWT range line "b-e": remember the surface token and start
                    # collecting its expansion words.
                    (mwtbegin, mwtend) = [int(x) for x in line[0].split('-')]
                    lastmwt = word
                    expanded = []
                elif (mwtbegin <= int(line[0]) < mwtend):
                    # Inside an MWT: accumulate, emit nothing for this line.
                    expanded += [word]
                    continue
                elif (int(line[0]) == mwtend):
                    # Last word of the MWT: record the (surface, expansion) pair.
                    expanded += [word]
                    expanded = [x.lower() for x in expanded]
                    mwt_expansions += [(lastmwt, tuple(expanded))]
                    if (lastmwt[0].islower() and (not expanded[0][0].islower())):
                        print('Sentence ID with potential wrong MWT expansion: ', last_comments, file=sys.stderr)
                    mwtbegin = 0
                    mwtend = (- 1)
                    lastmwt = None
                    continue
                if len(buf):
                    output.write(buf)
                # Locate the token in the raw text and label its characters:
                # zeros inside, then '1' (plain) or '3' (MWT start) at the end.
                (index, word_found) = find_next_word(index, text, word, output)
                buf = (('0' * (len(word_found) - 1)) + ('1' if ('-' not in line[0]) else '3'))
            else:
                # Blank line = sentence boundary: bump the pending token's last
                # label by one (1->2, 3->4) and flush it.
                if len(buf):
                    assert (int(buf[(- 1)]) >= 1)
                    output.write((buf[:(- 1)] + '{}'.format((int(buf[(- 1)]) + 1))))
                    buf = ''
                last_comments = ''
    output.close()
    mwts = Counter(mwt_expansions)
    if (args.mwt_output is None):
        print('MWTs:', mwts)
    else:
        with open(args.mwt_output, 'w') as f:
            json.dump(list(mwts.items()), f)
            print('{} unique MWTs found in data'.format(len(mwts)))
def optimal_epsilon_integral():
    """Fit a closed-form model for the optimal epsilon of the integrand P.

    For a grid of (a, b, x) parameters, finds the epsilon minimizing the arc
    length of the integrand over [0, pi], then fits a 6-parameter parametric
    function to the resulting optimal epsilons and returns a human-readable
    report string with the fitted coefficients.
    """
    def fp(eps, a, b, x, phi):
        # Derivative-like integrand term of the curve being measured.
        eps_a = np.power((1.0 * eps), (- a))
        return ((((eps * np.cos(phi)) - (((a * x) * eps_a) * np.cos((a * phi)))) + 1) - b)

    def arclength(eps, a, b, x, epsrel=0.01, limit=100):
        # Arc length of the curve over phi in [0, pi].
        # FIX: the original hard-coded limit=100 in the quad() call, silently
        # ignoring the `limit` parameter; pass it through.
        return quad((lambda phi: np.sqrt((1 + (fp(eps, a, b, x, phi) ** 2)))), 0, np.pi, epsrel=epsrel, limit=limit)[0]

    data_a = [0.001, 0.1, 0.5, 0.9, 1, 2, 4, 5, 6, 8]
    data_b = [0, 1, 4, 7, 10]
    data_x = [1, 1.5, 2, 4, 10, 20, 50, 100, 200, 500, 1000.0, 5000.0, 10000.0]
    (data_a, data_b, data_x) = np.meshgrid(data_a, data_b, data_x)
    (data_a, data_b, data_x) = (data_a.flatten(), data_b.flatten(), data_x.flatten())
    # Minimize the arc length in eps for every (a, b, x) combination.
    best_eps = []
    for i in range(data_x.size):
        best_eps.append(minimize_scalar((lambda eps: arclength(eps, data_a[i], data_b[i], data_x[i])), bounds=(0.001, 1000), method='Bounded', options={'xatol': 0.001}).x)
    best_eps = np.array(best_eps)
    df = {'a': data_a, 'b': data_b, 'x': data_x, 'eps': best_eps}

    def func(data, A0, A1, A2, A3, A4, A5):
        # Parametric model fitted to the optimal epsilons.
        a = data['a']
        b = data['b']
        x = data['x']
        return (((A0 * b) * np.exp(((- 0.5) * a))) + np.exp((((A1 + ((1 / (1 + a)) * np.log(x))) - (A2 * np.exp(((- A3) * a)))) + (A4 / (1 + np.exp((A5 * a)))))))

    func_params = list(curve_fit(func, df, df['eps'], method='trf')[0])
    s = 'Fit optimal eps for integrand P via minimal arc length\n'
    s += 'with parametric function:\n'
    s += 'optimal_eps = (A0 * b * exp(-a/2) + exp(A1 + 1 / (1 + a) * log(x)\n'
    s += '               - A2 * exp(-A3 * a) + A4 / (1 + exp(A5 * a)))\n\n'
    s += 'Fitted parameters A0 to A5 are:\n'
    s += ', '.join([f'{x:.5g}' for x in func_params])
    return s
def SignExt(n, a):
    """Sign-extend the Z3 bit-vector expression `a` by `n` extra bits."""
    if z3_debug():
        _z3_assert(_is_int(n), 'First argument must be an integer')
        _z3_assert(is_bv(a), 'Second argument must be a Z3 bit-vector expression')
    extended_ast = Z3_mk_sign_ext(a.ctx_ref(), n, a.as_ast())
    return BitVecRef(extended_ast, a.ctx)
def plot_results(result_path, legend=False, post_processing=None, key='AverageReturn', title=''):
    """Plot the `key` column of progress.csv for one or more experiment dirs.

    Args:
        result_path: a path/pattern or a list/tuple of them; relative paths
            are resolved two directories above this file.
        legend: when True, add a legend of experiment names.
        post_processing: optional callable applied to each returns array.
        key: CSV column to plot.
        title: plot title.
    """
    if (not isinstance(result_path, (list, tuple))):
        name_or_patterns = [result_path]
    else:
        # FIX: the original never assigned name_or_patterns in this case, so
        # passing a list/tuple raised NameError on the loop below.
        name_or_patterns = list(result_path)
    files = []
    for name_or_pattern in name_or_patterns:
        if name_or_pattern.startswith('/'):
            target_path = name_or_pattern
        else:
            target_path = osp.abspath(osp.join(osp.dirname(__file__), '../..', name_or_pattern))
        matched_files = glob((target_path + '/*'))
        files += matched_files
    files = sorted(files)
    print('plotting the following experiments:')
    for f in files:
        print(f)
    plots = []
    legends = []
    for f in files:
        # Resolve each match to its progress.csv (a directory or the csv itself).
        targetfile = ''
        if os.path.isdir(f):
            targetfile = osp.join(f, 'progress.csv')
        elif ('progress.csv' in f):
            targetfile = f
        else:
            continue
        exp_name = osp.basename(f)
        returns = []
        with open(targetfile, 'rt') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                # Skip empty cells (e.g. half-written rows).
                if row[key]:
                    returns.append(float(row[key]))
        returns = np.array(returns)
        if post_processing:
            returns = post_processing(returns)
        plots.append(plt.plot(returns)[0])
        legends.append(exp_name)
    if legend:
        plt.legend(plots, legends)
    plt.title(title)
def create_exp_name(exp_prefix, exp_id=0, seed=0):
    """Build a unique experiment name: <prefix>_<local timestamp>-s-<seed>--<id>."""
    local_now = datetime.datetime.now(dateutil.tz.tzlocal())
    stamp = local_now.strftime('%Y_%m_%d_%H_%M_%S')
    return f'{exp_prefix}_{stamp}-s-{seed}--{exp_id}'
def build(setup_kwargs):
    """Build hook: attach the Cython extension modules and numpy headers."""
    extensions = cythonize(['auto_martini/optimization.pyx'])
    setup_kwargs.update({'ext_modules': extensions, 'include_dirs': numpy.get_include()})
class Discriminator(nn.Module):
    """Placeholder discriminator: tracks a finetuning flag, computes nothing."""

    def __init__(self):
        super().__init__()
        # Toggled once finetuning starts; no parameters are created here.
        self.finetuning = False

    def enable_finetuning(self, _=None):
        """Mark the discriminator as being in finetuning mode."""
        self.finetuning = True

    def forward(self, _):
        """No-op forward pass; always returns None."""
def sample_function(session, session_id, itemnum, maxlen, neg_sample_num, neighbor_dict):
    """Build fixed-length (seq, pos, neg) training arrays for one session.

    `seq` holds the session items right-aligned (most recent last), `pos` the
    next-item targets, and `neg` negative samples drawn outside the session.

    NOTE(review): the `neg_sample_num` argument is immediately overwritten
    with 20, and `session_id`/`neighbor_dict` are never used — confirm these
    parameters are intentionally ignored.
    """
    neg_sample_num = 20
    seq = np.zeros([maxlen], dtype=np.int32)
    pos = np.zeros([maxlen], dtype=np.int32)
    neg = np.zeros([maxlen], dtype=np.int32)
    # Items already in the session are excluded from negative sampling.
    ts = set(session)
    # NOTE(review): writes neg[0..19]; assumes maxlen >= 20 — confirm.
    for i in range(neg_sample_num):
        neg[i] = random_neq(1, (itemnum + 1), ts)
    # The session's last item is the ground-truth target at the final slot.
    groudTruth = session[(- 1)]
    idx = (maxlen - 1)
    pos[idx] = groudTruth
    # Fill seq backwards with the remaining items; each item is also the
    # positive target of the position before it.
    for i in reversed(session[:(- 1)]):
        seq[idx] = i
        if (idx > 0):
            pos[(idx - 1)] = i
        idx -= 1
        if (idx == (- 1)):
            break
    return (seq, pos, neg)
def get_vocabs(vocab_path, vocab_type):
    """Load the `<vocab_type>_vocab` mapping from a pickled mappings file."""
    mappings = read_pickle(vocab_path)
    vocab = mappings['{}_vocab'.format(vocab_type)]
    print('{0} vocab size: {1}'.format(vocab_type, len(vocab)))
    return vocab
def tensor2array(image: torch.Tensor) -> np.ndarray:
    """Convert a tensor with values in [0, 1] to a uint8 array in [0, 255]."""
    arr = image.detach().cpu().numpy()
    return normalize(arr, origin_value_range=(0, 1), out_value_range=(0, 255), dtype=np.uint8)
def create(model_type_func, train=False, gpu_id=0):
    """Create a DetectionModelHelper and build it with the named model builder."""
    helper = DetectionModelHelper(name=model_type_func, train=train, num_classes=cfg.MODEL.NUM_CLASSES, init_params=train)
    helper.only_build_forward_pass = False
    helper.target_gpu_id = gpu_id
    builder = get_func(model_type_func)
    return builder(helper)
def check_empty(list_):
    """Return True when every element of `list_` is empty (or the list is)."""
    return not any(len(element) > 0 for element in list_)
def make_optimizer(cfg, model):
    """Build a torch optimizer with per-parameter lr/weight-decay groups.

    Bias parameters get lr * cfg.bias_lr_factor and cfg.weight_decay_bias;
    everything else gets cfg.lr / cfg.weight_decay. Frozen parameters are
    skipped. SGD additionally receives cfg.momentum.
    """
    param_groups = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if 'bias' in name:
            group_lr = cfg.lr * cfg.bias_lr_factor
            group_wd = cfg.weight_decay_bias
        else:
            group_lr = cfg.lr
            group_wd = cfg.weight_decay
        param_groups.append({'params': [param], 'lr': group_lr, 'weight_decay': group_wd})
    optimizer_cls = getattr(torch.optim, cfg.optimizer_name)
    if cfg.optimizer_name == 'SGD':
        return optimizer_cls(param_groups, momentum=cfg.momentum)
    return optimizer_cls(param_groups)
class SelfAttention(nn.Module):
    """Pre-norm residual self-attention block with optional positional encoding.

    `add_pe_to_qkv` selects, per projection (query, key, value), whether the
    positional encoding `pe` is added to the normalized input before attention.
    """

    def __init__(self, dim: int, nhead: int, dropout: float = 0.0, batch_first: bool = True,
                 add_pe_to_qkv: "List[bool] | None" = None):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(dim, nhead, dropout=dropout, batch_first=batch_first)
        self.norm = nn.LayerNorm(dim)
        self.dropout = nn.Dropout(dropout)
        # FIX: the original used a mutable default argument ([True, True, False]),
        # which is shared across all instances. Use None as the default instead.
        self.add_pe_to_qkv = [True, True, False] if add_pe_to_qkv is None else add_pe_to_qkv

    def forward(self, x: torch.Tensor, pe: torch.Tensor,
                attn_mask: "torch.Tensor | None" = None,
                key_padding_mask: "torch.Tensor | None" = None) -> torch.Tensor:
        """Apply pre-norm self-attention and return the residual-added output.

        FIX: the mask annotations were incorrectly `bool` in the original;
        they are optional tensors passed straight to nn.MultiheadAttention.
        """
        x = self.norm(x)
        if any(self.add_pe_to_qkv):
            x_with_pe = (x + pe)
            q = (x_with_pe if self.add_pe_to_qkv[0] else x)
            k = (x_with_pe if self.add_pe_to_qkv[1] else x)
            v = (x_with_pe if self.add_pe_to_qkv[2] else x)
        else:
            q = k = v = x
        r = x
        x = self.self_attn(q, k, v, attn_mask=attn_mask, key_padding_mask=key_padding_mask)[0]
        return (r + self.dropout(x))
.parametrize('sparse_container', ((CSC_CONTAINERS + DOK_CONTAINERS) + LIL_CONTAINERS))
def test_silhouette_reduce(sparse_container):
    """_silhouette_reduce must reject sparse inputs that are not CSR."""
    # Column of 8 one-dimensional points; its dense pairwise-distance matrix
    # is converted to the (non-CSR) sparse container under test.
    X = np.array([[0.2, 0.1, 0.1, 0.2, 0.1, 1.6, 0.2, 0.1]], dtype=np.float32).T
    pdist_dense = pairwise_distances(X)
    pdist_sparse = sparse_container(pdist_dense)
    y = [0, 0, 0, 0, 1, 1, 1, 1]
    label_freqs = np.bincount(y)
    # Non-CSR sparse formats must be rejected with this exact message.
    with pytest.raises(TypeError, match='Expected CSR matrix. Please pass sparse matrix in CSR format.'):
        _silhouette_reduce(pdist_sparse, start=0, labels=y, label_freqs=label_freqs)
def validate_vatin(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate VAT identification numbers in a string, Series, or DataFrame.

    For a DataFrame, validates only `column` when given, else every cell.
    """
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if column:
            return df[column].apply(vatin.is_valid)
        return df.applymap(vatin.is_valid)
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(vatin.is_valid)
    # Plain string (or other scalar) input.
    return vatin.is_valid(df)
class LeNetPHNTargetWrapper(PHNTarget):
    """PHNTarget variant that exposes the two logit heads as a named dict."""

    def forward(self, x, weights=None):
        logits = super().forward(x, weights)
        return {'logits_l': logits[0], 'logits_r': logits[1]}
class CaseSource():
    """Origin of a generated test case: the case plus the response and timing
    that produced it.
    """
    # NOTE(review): annotated attributes with no assignments — presumably a
    # @dataclass / attrs class whose decorator is outside this chunk; the
    # keyword construction in partial_deepcopy relies on that. Confirm.
    case: Case
    response: GenericResponse
    elapsed: float
    def partial_deepcopy(self) -> CaseSource:
        """Copy with a partially-deepcopied case; response and elapsed are
        shared by reference."""
        return self.__class__(case=self.case.partial_deepcopy(), response=self.response, elapsed=self.elapsed)
def Subsets(s, k=None, submultiset=False):
    """Dispatch to the subsets (or submultisets) parent for `s`.

    An integer `s` is interpreted as the range {1, ..., s}. When `k` is
    given, only size-`k` (sub)sets are produced.
    """
    if k is not None:
        k = Integer(k)
    if isinstance(s, (int, Integer)):
        if s < 0:
            raise ValueError('s must be non-negative')
        from sage.sets.integer_range import IntegerRange
        s = IntegerRange(1, s + 1)
    if k is None:
        return SubMultiset_s(s) if submultiset else Subsets_s(s)
    return SubMultiset_sk(s, k) if submultiset else Subsets_sk(s, k)
def start_preproc(python_args_dict=None):
    """Run data preprocessing sequentially for every rank in a single process.

    The preprocessing cache is threaded from one rank to the next so work is
    not redone; results are saved to the cache as they are produced.
    """
    args = get_basic_args(python_args_dict)
    args.world_size = args.nprocs
    cache = None
    for rank in range(args.world_size):
        print(f'-I- preprocessing data for rank {rank}/{(args.world_size - 1)} (word size is {args.world_size})...')
        # Pose as rank `rank` (single-process, so local rank == global rank).
        args.rank = rank
        args.local_rank = rank
        args.is_multiprocessing_worker = False
        cache = preproc_data(args, cache, save_cache=True)
('SoftmaxWithLoss')
def TranslateSoftmaxWithLoss(layer, pretrained_blobs, is_test, **kwargs):
    """Translate a Caffe SoftmaxWithLoss layer into three Caffe2 operators:
    Softmax -> LabelCrossEntropy -> AveragedLoss.

    Returns:
        ([softmax_op, xent_op, loss_op], []): the operator chain and an empty
        list of pretrained parameters (this layer has none).
    """
    softmax_op = core.CreateOperator('Softmax', [layer.bottom[0]], (layer.bottom[0] + '_translator_autogen_softmax'))
    # Cross-entropy between the softmax output and the label blob.
    xent_op = core.CreateOperator('LabelCrossEntropy', [softmax_op.output[0], layer.bottom[1]], (layer.bottom[0] + '_translator_autogen_xent'))
    loss_op = core.CreateOperator('AveragedLoss', xent_op.output[0], layer.top[0])
    return ([softmax_op, xent_op, loss_op], [])
def guess_pl(code):
    """Guess the programming language of a code snippet; 'unknown' if empty."""
    if not code:
        return 'unknown'
    return Guess().language_name(code.strip())
class Task():
    """A planning task: domain/problem metadata plus its logical components
    (types, objects, predicates, functions, init facts, goal, actions, axioms).
    """
    def __init__(self, domain_name, task_name, requirements, types, objects, predicates, functions, init, goal, actions, axioms, use_metric):
        self.domain_name = domain_name
        self.task_name = task_name
        self.requirements = requirements
        self.types = types
        self.objects = objects
        self.predicates = predicates
        self.functions = functions
        self.init = init
        self.goal = goal
        self.actions = actions
        self.axioms = axioms
        # Counter used to generate unique names for derived axioms.
        self.axiom_counter = 0
        self.use_min_cost_metric = use_metric
    def add_axiom(self, parameters, condition):
        """Create a fresh axiom (with a matching new predicate), register both
        on the task, and return the axiom."""
        name = ('new-%d' % self.axiom_counter)
        self.axiom_counter += 1
        axiom = axioms.Axiom(name, parameters, len(parameters), condition)
        self.predicates.append(predicates.Predicate(name, parameters))
        self.axioms.append(axiom)
        return axiom
    def dump(self):
        """Pretty-print the whole task to stdout (for debugging)."""
        print(('Problem %s: %s [%s]' % (self.domain_name, self.task_name, self.requirements)))
        print('Types:')
        for type in self.types:
            print((' %s' % type))
        print('Objects:')
        for obj in self.objects:
            print((' %s' % obj))
        print('Predicates:')
        for pred in self.predicates:
            print((' %s' % pred))
        print('Functions:')
        for func in self.functions:
            print((' %s' % func))
        print('Init:')
        for fact in self.init:
            print((' %s' % fact))
        print('Goal:')
        self.goal.dump()
        print('Actions:')
        for action in self.actions:
            action.dump()
        # Axioms section is only printed when there are axioms.
        if self.axioms:
            print('Axioms:')
            for axiom in self.axioms:
                axiom.dump()
class KRRCSimplyLacedElement(KRRiggedConfigurationElement):
    """Rigged configuration element for simply-laced KR crystals, adding the
    cocharge and charge statistics.
    """
    _method
    def cocharge(self):
        """Compute the cocharge statistic cc of this rigged configuration.

        Sums, over all partitions and rows, the riggings plus min(dim, row
        length) contributions minus vacancy numbers, halving the cc part.
        """
        cc = 0
        rigging_sum = 0
        for (a, p) in enumerate(self):
            for (pos, i) in enumerate(p._list):
                rigging_sum += p.rigging[pos]
                # Each tensor factor B^{r,s} with r == a+1 contributes
                # min(s, row length) to the cocharge.
                for dim in self.parent().dims:
                    if (dim[0] == (a + 1)):
                        cc += min(dim[1], i)
                cc -= p.vacancy_numbers[pos]
        # cc is even at this point; halve it before adding the riggings.
        return ((cc // 2) + rigging_sum)
    cc = cocharge
    _method
    def charge(self):
        """Charge = (maximal cocharge over module generators) - cocharge(self).

        The maximum is computed once and cached on the parent.
        """
        B = self.parent()
        if (not hasattr(B, '_max_charge')):
            B._max_charge = max((b.cocharge() for b in B.module_generators))
        return (B._max_charge - self.cocharge())
class HeisenbergAlgebra(HeisenbergAlgebra_fd, HeisenbergAlgebra_abstract, LieAlgebraWithGenerators):
    """Finite-rank Heisenberg Lie algebra over a ring R with generators
    p1..pn, q1..qn and central element z.
    """
    def __init__(self, R, n):
        HeisenbergAlgebra_fd.__init__(self, n)
        # Generator names: p1..pn, q1..qn, then the central element z.
        names = tuple((([('p%s' % i) for i in range(1, (n + 1))] + [('q%s' % i) for i in range(1, (n + 1))]) + ['z']))
        LieAlgebraWithGenerators.__init__(self, R, names=names, index_set=names, category=LieAlgebras(R).Nilpotent().FiniteDimensional().WithBasis())
        HeisenbergAlgebra_abstract.__init__(self, names)
    def _repr_(self):
        """Return the string representation shown by Sage."""
        return 'Heisenberg algebra of rank {0} over {1}'.format(self._n, self.base_ring())
def parse_args(argv=None):
    """Parse command-line arguments for the preprocessing script.

    Args:
        argv: optional list of argument strings; when None (the default, and
            the original behavior) sys.argv[1:] is parsed. Added as a
            backward-compatible parameter so the function is testable without
            touching sys.argv.

    Returns:
        argparse.Namespace with input_data_dir and output_data_dir.
    """
    parser = argparse.ArgumentParser(description='process parameters')
    parser.add_argument('--input_data_dir', default='../data/synthetic/drug', help='input data directory')
    parser.add_argument('--output_data_dir', default='pickles/cad_prescription_taken_by_patient.pkl', help='output data directory')
    args = parser.parse_args(argv)
    return args
def test_load_gds_diff_units():
    """GDS import must convert micrometer-unit files to the expected coords."""
    with open(os.path.join(TESTDATA, 'rect_um.gds'), 'rb') as fp:
        gds_file = gds.GDSImport(fp)
    # Layer (100, 0) holds a single rectangle.
    polygons = gds_file.get_polygons((100, 0))
    assert (len(polygons) == 1)
    np.testing.assert_almost_equal(polygons[0], [[(- 1000), 700], [(- 5000), 700], [(- 5000), 200], [(- 1000), 200]])
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet X residual block: 1x1 conv -> grouped 3x3 conv -> 1x1 conv,
    with a (possibly projected) shortcut and a final activation.
    """
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int=1, **kwargs):
        super().__init__(**kwargs)
        # A projection shortcut is needed whenever the residual shape changes.
        should_apply_shortcut = ((in_channels != out_channels) or (stride != 1))
        groups = max(1, (out_channels // config.groups_width))
        self.shortcut = (TFRegNetShortCut(out_channels, stride=stride, name='shortcut') if should_apply_shortcut else tf.keras.layers.Activation('linear', name='shortcut'))
        # Bottleneck-style stack; the middle conv carries stride and groups.
        self.layers = [TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'), TFRegNetConvLayer(out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'), TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.2')]
        self.activation = ACT2FN[config.hidden_act]
    def call(self, hidden_state):
        """Apply the conv stack, add the (projected) residual, then activate."""
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
def eval_supernet(valid_queue, model, criterion, theta):
    """Evaluate the supernet under architecture weights ``theta``.

    Copies ``theta`` into the model's architecture parameters, derives the
    genotype, runs validation, logs the metrics and returns
    (genotype, validation accuracy).
    """
    model._arch_parameters.data.copy_(theta)
    genotype = model.genotype()
    acc, loss = infer(valid_queue, model, criterion, log=False, eval=False, theta=theta)
    logging.info('valid_acc %f', acc)
    logging.info('valid_loss %f', loss)
    return (genotype, acc)
class HookBase():
    """No-op base class for trainer hooks; subclasses override what they need."""

    # Set by the trainer when the hook is registered.
    trainer: 'TrainerBase' = None

    def before_train(self):
        """Called once before the first training iteration."""
        pass

    def after_train(self):
        """Called once after the last training iteration."""
        pass

    def before_step(self):
        """Called before each training step."""
        pass

    def after_step(self):
        """Called after each training step."""
        pass

    def state_dict(self):
        """Hooks are stateless by default, so there is nothing to checkpoint."""
        return {}
class TranslationModule(SummarizationModule):
    """Fine-tuning module for translation tasks; validates with BLEU."""
    mode = 'translation'
    loss_names = ['loss']
    metric_names = ['bleu']
    default_val_metric = 'bleu'
    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        # Pass the language pair through to dataset construction.
        self.dataset_kwargs['src_lang'] = hparams.src_lang
        self.dataset_kwargs['tgt_lang'] = hparams.tgt_lang
    def calc_generative_metrics(self, preds, target) -> dict:
        """Score generated translations against references with BLEU."""
        return calculate_bleu(preds, target)
def _get_axis_wb(axis_wo_b, batch_dim_axis):
if (batch_dim_axis is None):
return axis_wo_b
if (axis_wo_b >= batch_dim_axis):
return (axis_wo_b + 1)
return axis_wo_b |
def _array_repr_dispatcher(arr, max_line_width=None, precision=None, suppress_small=None):
return (arr,) |
def create_binaural_wsj0mix3_csv(datapath, savepath, fs, version, savename='binaural_wsj0-3mix_', set_types=None):
    """Write one CSV per data split listing mixture and source wav paths.

    Args:
        datapath: root of the wsj0-3mix tree (contains ``wav8k``/``wav16k``).
        savepath: directory the CSVs are written to.
        fs: sampling rate, 8000 or 16000.
        version: dataset version subdirectory name.
        savename: CSV filename prefix.
        set_types: iterable of splits to process; defaults to ('tr', 'cv', 'tt').

    Raises:
        ValueError: if ``fs`` is neither 8000 nor 16000.
    """
    # Fix: the default used to be a mutable list shared across calls.
    if set_types is None:
        set_types = ('tr', 'cv', 'tt')
    if fs == 8000:
        sample_rate = '8k'
    elif fs == 16000:
        sample_rate = '16k'
    else:
        raise ValueError('Unsupported sampling rate')
    csv_columns = ['ID', 'duration', 'mix_wav', 'mix_wav_format', 'mix_wav_opts', 's1_wav', 's1_wav_format', 's1_wav_opts', 's2_wav', 's2_wav_format', 's2_wav_opts', 's3_wav', 's3_wav_format', 's3_wav_opts']
    for set_type in set_types:
        base = os.path.join(datapath, 'wav{}'.format(sample_rate), version, set_type)
        mix_dir = os.path.join(base, 'mix/')
        s1_dir = os.path.join(base, 's1/')
        s2_dir = os.path.join(base, 's2/')
        s3_dir = os.path.join(base, 's3/')
        # Mixture and source folders hold identically named files.
        files = os.listdir(mix_dir)
        with open(os.path.join(savepath, savename + set_type + '.csv'), 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
            writer.writeheader()
            for i, fl in enumerate(files):
                row = {
                    'ID': i,
                    # NOTE(review): duration is a placeholder constant, not the
                    # real wav length — confirm downstream consumers ignore it.
                    'duration': 1.0,
                    'mix_wav': mix_dir + fl, 'mix_wav_format': 'wav', 'mix_wav_opts': None,
                    's1_wav': s1_dir + fl, 's1_wav_format': 'wav', 's1_wav_opts': None,
                    's2_wav': s2_dir + fl, 's2_wav_format': 'wav', 's2_wav_opts': None,
                    's3_wav': s3_dir + fl, 's3_wav_format': 'wav', 's3_wav_opts': None,
                }
                writer.writerow(row)
def module_profiling(self, input, output, verbose):
    """Forward-hook body: annotate this module with profiling statistics.

    Sets ``self.n_macs`` (multiply-accumulates), ``self.n_params``,
    ``self.n_seconds`` (measured forward time) and ``self.name``. Non-leaf
    modules aggregate their children's stats instead. Relies on helpers
    (`get_params`, `run_forward`, `conv_module_name_filter`) and column-width
    globals (`name_space`, `params_space`, `macs_space`, `seconds_space`)
    defined elsewhere in this file.
    """
    # Hook convention: `input` is the args tuple, `output` the result tensor.
    ins = input[0].size()
    outs = output.size()
    t = type(self)
    if isinstance(self, nn.Conv2d):
        # MACs: in_ch * out_ch * kH * kW * outH * outW / groups, times batch.
        self.n_macs = (((((((ins[1] * outs[1]) * self.kernel_size[0]) * self.kernel_size[1]) * outs[2]) * outs[3]) // self.groups) * outs[0])
        self.n_params = get_params(self)
        self.n_seconds = run_forward(self, input)
        self.name = conv_module_name_filter(self.__repr__())
    elif isinstance(self, nn.ConvTranspose2d):
        # NOTE(review): identical formula to Conv2d, using the *output* spatial
        # size — confirm this is the intended count for transposed conv.
        self.n_macs = (((((((ins[1] * outs[1]) * self.kernel_size[0]) * self.kernel_size[1]) * outs[2]) * outs[3]) // self.groups) * outs[0])
        self.n_params = get_params(self)
        self.n_seconds = run_forward(self, input)
        self.name = conv_module_name_filter(self.__repr__())
    elif isinstance(self, nn.Linear):
        # MACs: in_features * out_features, times batch.
        self.n_macs = ((ins[1] * outs[1]) * outs[0])
        self.n_params = get_params(self)
        self.n_seconds = run_forward(self, input)
        self.name = self.__repr__()
    elif isinstance(self, nn.AvgPool2d):
        # Pooling: one accumulate per input element; no parameters.
        self.n_macs = (((ins[1] * ins[2]) * ins[3]) * ins[0])
        self.n_params = 0
        self.n_seconds = run_forward(self, input)
        self.name = self.__repr__()
    elif isinstance(self, nn.AdaptiveAvgPool2d):
        self.n_macs = (((ins[1] * ins[2]) * ins[3]) * ins[0])
        self.n_params = 0
        self.n_seconds = run_forward(self, input)
        self.name = self.__repr__()
    else:
        # Non-leaf (or unrecognized) module: sum the children's stats.
        self.n_macs = 0
        self.n_params = 0
        self.n_seconds = 0
        num_children = 0
        for m in self.children():
            self.n_macs += getattr(m, 'n_macs', 0)
            self.n_params += getattr(m, 'n_params', 0)
            self.n_seconds += getattr(m, 'n_seconds', 0)
            num_children += 1
        # Module types that legitimately contribute zero MACs.
        ignore_zeros_t = [nn.BatchNorm2d, nn.Dropout2d, nn.Dropout, nn.Sequential, nn.ReLU6, nn.ReLU, nn.MaxPool2d, nn.modules.padding.ZeroPad2d, nn.modules.activation.Sigmoid]
        if ((not getattr(self, 'ignore_model_profiling', False)) and (self.n_macs == 0) and (t not in ignore_zeros_t)):
            print('WARNING: leaf module {} has zero n_macs.'.format(type(self)))
        return
    if verbose:
        print((((self.name.ljust(name_space, ' ') + '{:,}'.format(self.n_params).rjust(params_space, ' ')) + '{:,}'.format(self.n_macs).rjust(macs_space, ' ')) + '{:,}'.format(self.n_seconds).rjust(seconds_space, ' ')))
    return
def init_process_group(rank: (int | str), world_size: (int | str), backend: Optional[str]=None) -> None:
    """Join (or create) the default torch.distributed process group.

    Picks NCCL when CUDA is available and Gloo otherwise, unless a backend is
    given explicitly. A no-op if a group is already initialized; rendezvous
    parameters come from the environment (init_method='env://').
    """
    import torch
    import torch.distributed as dist
    if backend is None:
        backend = 'nccl' if torch.cuda.is_available() else 'gloo'
    else:
        backend = str(backend)
    if not dist.is_initialized():
        dist.init_process_group(backend=backend, rank=int(rank), world_size=int(world_size), init_method='env://')
# NOTE(review): the three lines below look like pytest marker decorators
# (e.g. '@pytest.mark.skip', '@pytest.mark.mujoco', '@pytest.mark.no_cover')
# whose prefixes were lost during extraction; confirm against the original.
.skip('Temporarily skipped because of out-of-memory error')
.mujoco
.no_cover
def test_pearl_metaworld_ml45():
    # Smoke test: the example script should run to completion (exit code 0)
    # with a minimal one-epoch configuration.
    assert (subprocess.run([str((EXAMPLES_ROOT_DIR / 'torch/pearl_metaworld_ml45.py')), '--num_epochs', '1', '--num_train_tasks', '1', '--num_test_tasks', '1', '--encoder_hidden_size', '1', '--net_size', '2', '--num_steps_per_epoch', '2', '--num_initial_steps', '2', '--num_steps_prior', '1', '--num_extra_rl_steps_posterior', '1', '--batch_size', '2', '--embedding_batch_size', '1', '--embedding_mini_batch_size', '1', '--max_path_length', '1'], check=False).returncode == 0)
class ConvNet64(nn.Module):
    """Strided convolutional encoder for 64x64 images ending in a 1x1 conv head."""

    def __init__(self, in_chan=3, out_chan=64, nh=32, out_activation='linear', activation='relu', num_groups=None, use_bn=False):
        super().__init__()
        widths = (nh * 4, nh * 8, nh * 16, nh * 32)
        self.conv1 = nn.Conv2d(in_chan, widths[0], kernel_size=5, bias=True, stride=2)
        self.conv2 = nn.Conv2d(widths[0], widths[1], kernel_size=5, bias=True, stride=2)
        self.conv3 = nn.Conv2d(widths[1], widths[2], kernel_size=5, bias=True, stride=2)
        self.conv4 = nn.Conv2d(widths[2], widths[3], kernel_size=5, bias=True, stride=2)
        self.fc1 = nn.Conv2d(widths[3], out_chan, kernel_size=1, bias=True)
        self.in_chan, self.out_chan = in_chan, out_chan
        self.num_groups = num_groups
        self.use_bn = use_bn
        # Assemble conv -> (optional norm) -> activation for each stage.
        layers = []
        for conv, width in zip((self.conv1, self.conv2, self.conv3, self.conv4), widths):
            layers.append(conv)
            if num_groups is not None:
                layers.append(self.get_norm_layer(num_channels=width))
            layers.append(get_activation(activation))
        layers.append(self.fc1)
        out_act = get_activation(out_activation)
        if out_act is not None:
            layers.append(out_act)
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Encode a batch of images."""
        return self.net(x)

    def get_norm_layer(self, num_channels):
        """GroupNorm when num_groups is set, else BatchNorm when use_bn is True."""
        if self.num_groups is not None:
            return nn.GroupNorm(num_groups=self.num_groups, num_channels=num_channels)
        elif self.use_bn:
            return nn.BatchNorm2d(num_channels)
def produce_all_results():
    """Compute precision/recall results for every algorithm on every dataset.

    Returns a nested dict: dataset name -> algorithm name -> pr_from_events
    output. The 'groundtruth' entry serves as the reference and gets no
    entry of its own.
    """
    datasets, Tranges = read_all_as_events()
    results = {}
    for data_name, algos in datasets.items():
        truth = algos['groundtruth']
        results[data_name] = {
            algo_name: pr_from_events(events, truth, Tranges[data_name])
            for algo_name, events in algos.items()
            if algo_name != 'groundtruth'
        }
    return results
# NOTE(review): the call below looks like a decorator (e.g.
# '@compare_numpy_output(check_dtype=True)') that lost its '@' prefix during
# extraction; confirm against the original file.
_numpy_output(check_dtype=True)
def test_ufunc_minimum_nan_ff(A: dace.float32[10], B: dace.float32[10]):
    # Dividing by zero produces inf/nan entries to exercise minimum's
    # NaN/inf propagation.
    C = np.true_divide(A, 0)
    return np.minimum(C, B)
# NOTE(review): '.ort' looks like a pytest marker decorator (e.g.
# '@pytest.mark.ort') whose prefix was lost during extraction; confirm.
.ort
def test_bn_in_import():
    """BatchNorm without running stats should match after DaceModule conversion."""
    class Module(torch.nn.Module):
        def __init__(self):
            super(Module, self).__init__()
            # track_running_stats=False: normalize with batch statistics only.
            self.bn = nn.BatchNorm2d(3, track_running_stats=False)
        def forward(self, x):
            return self.bn(x)
    pt_module = Module()
    dace_module = Module()
    # Copy weights so both modules compute from identical parameters.
    dace_module.load_state_dict(pt_module.state_dict())
    dace_module = DaceModule(dace_module)
    X = torch.randn(8, 3, 32, 32)
    pt_result = pt_module(X)
    dace_result = dace_module(X)
    torch_tensors_close('output', pt_result, dace_result)
def test_neldermead_adaptive():
def func(x):
return np.sum((x ** 2))
p0 = [0., 0., 0., 0.4223638, 0., 0., 0.9692297, 0.4471682, 0., 0., 0., 0., 0., 0., 0.]
res = optimize.minimize(func, p0, method='Nelder-Mead')
assert_equal(res.success, False)
res = optimize.minimize(func, p0, method='Nelder-Mead', options={'adaptive': True})
assert_equal(res.success, True) |
def test_load_audio():
    """Loaded FOA audio should be a 4-channel ndarray at 24 kHz (one second)."""
    dataset = tau2020sse_nigens.Dataset(TEST_DATA_HOME)
    clip = dataset.clip('foa_dev/fold1_room1_mix001_ov1')
    audio, sr = tau2020sse_nigens.load_audio(clip.audio_path)
    assert sr == 24000
    assert type(audio) is np.ndarray
    assert audio.ndim == 2
    assert audio.shape == (4, 24000)
def get_args(parser):
    """Register logging/visualization options on ``parser`` and return it."""
    option_specs = [
        ('--iteration', dict(type=int, default=0, help='Optional iteration number to start from')),
        ('--log_frequency_loss', dict(type=int, default=1)),
        ('--log_frequency_images', dict(type=int, default=100)),
        ('--log_frequency_fixed_images', dict(type=int, default=2500)),
        ('--detailed_metrics', dict(action='store_bool', default=True)),
        ('--num_visuals_per_img', dict(default=2, type=int)),
        ('--fixed_val_ids', dict(action='append', type=int, default=[50, 100, 200, 250, 300])),
        ('--batch_size_inference', dict(default=5, type=int, help="Batch size for processing 'fixed_val_ids' during visualization. Different from 'batch_size', this number is for one GPU (visualization inference is currently done on 1 GPU anyway).")),
    ]
    for flag, options in option_specs:
        parser.add(flag, **options)
    return parser
def seed_test_case2():
    """A non-empty set passed to ``i_take_set`` should be reported as non-empty."""
    sample = {1, 2, 3}
    outcome = module0.i_take_set(sample)
    assert (outcome == 'not empty!')
def wavlm_base_plus(refresh=False, *args, **kwargs):
    # Entry point selecting the WavLM Base+ checkpoint for wavlm_url.
    # NOTE(review): the checkpoint URL literal below is an unterminated string
    # — the URL appears to have been stripped during extraction; restore it
    # from the original file before use.
    kwargs['ckpt'] = '
    return wavlm_url(*args, refresh=refresh, **kwargs)
# NOTE(review): '_sz(6)' looks like a decorator (e.g. '@support_sz(6)') whose
# '@' prefix was lost during extraction; confirm against the original file.
_sz(6)
def lanczos3(x):
    # Lanczos-3 interpolation kernel: proportional to sinc(x)*sinc(x/3) on
    # |x| < 3, zero outside; eps guards the 0/0 case at x == 0.
    (fw, to_dtype, eps) = set_framework_dependencies(x)
    return ((((fw.sin((pi * x)) * fw.sin(((pi * x) / 3))) + eps) / ((((pi ** 2) * (x ** 2)) / 3) + eps)) * to_dtype((abs(x) < 3)))
class BlobProtoVector(message.Message):
    # Generated protocol-buffer message class: the metaclass fills in the
    # message fields from the descriptor at class-creation time (Python 2
    # style '__metaclass__' protobuf codegen). Do not edit by hand.
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTOVECTOR
def loading_testset(datasetname, test_interval, mode='test'):
    """Build the test/val data loader plus the matching de-normalization transform.

    Looks up the dataset's config under ``setting`` by upper-cased name and
    returns a (test_loader, restore_transform) pair.
    """
    datasetname = datasetname.upper()
    cfg_data = getattr(setting, datasetname).cfg_data
    loader = createValTestData(datasetname, dataset.TestDataset, cfg_data, test_interval, mode=mode)
    restore = createRestore(cfg_data.MEAN_STD)
    return (loader, restore)
def save_state(model, filename):
    """Serialize ``model``'s parameters (state_dict only) to ``filename``."""
    state = model.state_dict()
    torch.save(state, filename)
    print('Model saved to:', filename)
def _load_arg_defaults(kwargs, app=None):
    """Fill in a default JSON decoder class ('cls') for json.load-style kwargs.

    Prefers the current blueprint's decoder, then the app's, and falls back
    to the stock JSONDecoder when no app context is available. Mutates
    ``kwargs`` in place via setdefault.
    """
    if app is None:
        app = current_app
    if not app:
        kwargs.setdefault('cls', JSONDecoder)
        return
    bp = app.blueprints.get(request.blueprint) if request else None
    decoder = bp.json_decoder if (bp and bp.json_decoder) else app.json_decoder
    kwargs.setdefault('cls', decoder)
# NOTE(review): the parametrize tuples '(ti.u32, )' / '(ti.u64, )' look
# truncated — the 'val' element (presumably the type's max value) is missing —
# and both leading lines appear to be decorators that lost their '@' prefix
# during extraction; confirm against the original file.
.parametrize('dt,val', [(ti.u32, ), (ti.u64, )])
_utils.test(require=ti.extension.data64)
def test_uint_max(dt, val):
    # Writing the maximum unsigned value into a field must round-trip intact.
    impl.get_runtime().default_ip = dt
    N = 16
    f = ti.field(dt, shape=N)
    def run():
        for i in f:
            f[i] = val
    run()
    fs = f.to_numpy()
    # NOTE(review): the loop variable shadows the field `f` above.
    for f in fs:
        assert (f == val)
def _upgrade_columns_and_keys(old_metadata):
new_metadata = {}
columns = {}
fields = old_metadata.get('fields')
alternate_keys = []
primary_key = old_metadata.get('primary_key')
for (field, field_meta) in fields.items():
column_meta = {}
old_type = field_meta['type']
subtype = field_meta.get('subtype')
column_meta['sdtype'] = old_type
if (old_type == 'numerical'):
if (subtype == 'float'):
column_meta['computer_representation'] = 'Float'
elif (subtype == 'integer'):
column_meta['computer_representation'] = 'Int64'
elif (old_type == 'datetime'):
datetime_format = field_meta.get('format')
if datetime_format:
column_meta['datetime_format'] = datetime_format
elif (old_type == 'id'):
column_meta['sdtype'] = 'id'
if (subtype == 'integer'):
regex_format = '\\d{30}'
else:
regex_format = field_meta.get('regex', '[A-Za-z]{5}')
if (not field_meta.get('pii')):
column_meta['regex_format'] = regex_format
if ((field != primary_key) and (field_meta.get('ref') is None)):
alternate_keys.append(field)
if field_meta.get('pii'):
column_meta['pii'] = True
pii_category = field_meta.get('pii_category')
if isinstance(pii_category, list):
column_meta['sdtype'] = pii_category[0]
elif pii_category:
column_meta['sdtype'] = pii_category
columns[field] = column_meta
new_metadata['columns'] = columns
new_metadata['primary_key'] = primary_key
if alternate_keys:
new_metadata['alternate_keys'] = alternate_keys
return new_metadata |
# NOTE(review): the registration call below looks like a decorator that lost
# its '@' prefix during extraction; confirm against the original file.
_registry.register('fake_job_postings')
class FakeJobPostings(BaseMultiModalDataset):
    """Fake-job-postings multimodal dataset for binary fraud detection.

    NOTE(review): several things here look damaged by extraction — `_SOURCE`
    is an unterminated string (the URL was stripped); the test split's
    'sha1sum' is 32 hex chars (MD5-length, not SHA-1's 40); `splits(cls)`
    takes `cls` without a visible @classmethod; `feature_columns` reads
    `self.data` / `self.label_columns` without calling them, and `data` is
    defined twice — these accessors were presumably @property-decorated.
    Confirm against the original file.
    """
    _SOURCE = '
    _INFO = {'train': {'url': (get_repo_url() + 'fake_job_postings/train.csv'), 'sha1sum': '78c37e46e844c9e268aa8eb6da6168b04a9e6556'}, 'test': {'url': (get_repo_url() + 'fake_job_postings/test.csv'), 'sha1sum': '30fb93df0a7b22ea006a812f2ae46674'}}
    def __init__(self, split='train'):
        super().__init__()
        self._split = split
        # Local cache path; note the .pq suffix although the payload is CSV.
        self._path = os.path.join(get_data_home_dir(), 'fake_job_postings', f'{split}.pq')
        download(self._INFO[split]['url'], path=self._path, sha1_hash=self._INFO[split]['sha1sum'])
        self._data = pd.read_csv(self._path)
    def data(self):
        return self._data
    def splits(cls):
        return cls._INFO.keys()
    def data(self):
        return self._data
    def label_columns(self):
        return ['fraudulent']
    def label_types(self):
        return [_CATEGORICAL]
    def feature_columns(self):
        return [col for col in list(self.data.columns) if (col not in self.label_columns)]
    def metric(self):
        return 'roc_auc'
    def problem_type(self):
        return _BINARY
# NOTE(review): '_converter_regitstry(...)' (sic) looks like a registration
# decorator that lost its '@' prefix during extraction; confirm upstream.
_converter_regitstry('DMA_matrix')
def DMA_matrix_converter(context: 'BM1688Context', reg: DMA_matrix_reg):
    """Decode a BM1688 DMA matrix-move register into (results, attr, operands).

    Rewrites shapes/strides so the global-memory side is viewed as a flat
    (1, 1, H, W) tensor while the local-memory side uses a DMAmatrix layout;
    ``cmd_special_function == 1`` selects the transposed variant.
    """
    (res0, attr, opd0) = dma_reg_fmt_base(reg)
    lane_mask = opd0['layout'].args[0]
    # [l, r) is the address range of local memory (MType.R).
    (l, r) = memmap[MType.R]
    s_addr = opd0['address']
    is_trans = (reg.cmd_special_function == 1)
    if ((s_addr >= l) and (s_addr < r) and (reg.fill_constant_en == 0)):
        # Source operand lives in local memory: flatten the destination view
        # and give the source the matrix layout.
        (_, _, H, W) = res0['shape']
        res0['shape'] = (1, 1, H, W)
        (_, _, h, _) = res0['stride']
        res0['stride'] = (0, 0, h, 1)
        if is_trans:
            (H, W) = (W, H)
        opd0['shape'] = (H, W)
        opd0['layout'] = Layout.DMAmatrix(lane_mask, reg.src_wsize)
        (n, c, _, _) = opd0['stride']
        opd0['stride'] = (n, c, 0, 1)
    else:
        # Source is global memory (or a constant fill): mirror image of the
        # branch above with source/destination roles swapped.
        (_, _, H, W) = opd0['shape']
        opd0['shape'] = (1, 1, H, W)
        (_, _, h, _) = opd0['stride']
        opd0['stride'] = (0, 0, h, 1)
        if is_trans:
            (H, W) = (W, H)
        res0['shape'] = (H, W)
        (n, c, _, _) = res0['stride']
        res0['stride'] = (n, c, 0, 1)
        res0['layout'] = Layout.DMAmatrix(lane_mask, reg.dst_wsize)
    operands = [get_value(context, **opd0)]
    results = [get_value(context, **res0)]
    return (results, attr, operands)
class BaseSubsetBatchMiner(BaseMiner):
    """Miner that selects a fixed-size subset of each input batch.

    Args:
        output_batch_size: exact number of elements every mined output must have.
    """

    def __init__(self, output_batch_size, **kwargs):
        super().__init__(**kwargs)
        self.output_batch_size = output_batch_size

    def output_assertion(self, output):
        """Verify the mined output has exactly the configured size."""
        assert (len(output) == self.output_batch_size)
def test_valid_Armenteros_Podolanski_variables():
    """Mirror-image daughter momenta should give (qT, alpha) == (2, 0)."""
    daughter1 = Vector3D(1.0, 2.0, 3.0)
    daughter2 = Vector3D(1.0, (- 2.0), 3.0)
    assert (Armenteros_Podolanski_variables(daughter1, daughter2) == (2.0, 0.0))
class BackwardDifferenceEncoder(BaseContrastEncoder):
    """Contrast encoder using backward-difference (Diff) coding."""
    def get_contrast_matrix(self, values_to_encode: np.ndarray) -> ContrastMatrix:
        """Return the backward-difference contrast matrix without an intercept column."""
        return Diff().code_without_intercept(values_to_encode)
def softsel(attn_to_input, align_scores, attn_to_mask, mask_add_head_dim_for_scores=False, input_add_multi_head_dim=False, score_add_hn_dim=False, axis=(- 2), name=None):
    """Soft attention selection: softmax the masked scores and pool the inputs.

    Optional flags insert extra head/singleton dimensions so scores, mask and
    inputs broadcast against each other before the weighted sum.
    """
    with tf.name_scope(name or 'softsel'):
        if input_add_multi_head_dim:
            attn_to_input = tf.expand_dims(attn_to_input, 1)
        # Align ranks so input and mask broadcast against the score tensor.
        attn_to_input = tf.expand_dims(attn_to_input, -3)
        attn_to_mask = tf.expand_dims(attn_to_mask, -2)
        if score_add_hn_dim:
            align_scores = tf.expand_dims(align_scores, -1)
        masked = exp_mask_v3(align_scores, attn_to_mask, multi_head=mask_add_head_dim_for_scores, high_dim=True)
        probs = tf.nn.softmax(masked, axis)
        # Attention-weighted sum over the attended-to axis.
        return tf.reduce_sum(probs * attn_to_input, axis=-2)
def get_dense_input(features, feature_columns):
    """Collect the input tensors for every DenseFeat column.

    Columns with a ``transform_fn`` are wrapped in a Keras Lambda layer first.
    Returns the list of (possibly transformed) dense input tensors.
    """
    from . import feature_column as fc_lib
    dense_columns = [fc for fc in (feature_columns or []) if isinstance(fc, fc_lib.DenseFeat)]
    inputs = []
    for column in dense_columns:
        tensor = features[column.name]
        if column.transform_fn is not None:
            tensor = Lambda(column.transform_fn)(tensor)
        inputs.append(tensor)
    return inputs
def execute(chunk: np.ndarray, size: tuple=(3, 1, 1), mode: str='reflect'):
    """Median-filter ``chunk`` and return the result wrapped in a single-element list."""
    print('median filtering of chunk...')
    filtered = median_filter(chunk, size=size, mode=mode)
    return [filtered]
# NOTE(review): '_model_architecture(...)' looks like a fairseq
# '@register_model_architecture' decorator whose '@' prefix was lost during
# extraction; confirm against the original file.
_model_architecture('cmlm_transformer', 'cmlm_transformer_wmt_en_de')
def iter_nat_wmt_en_de(args):
    # WMT En-De preset: just applies the base architecture defaults.
    base_architecture(args)
class TemporalMaxPooling(Module):
    """Legacy (lua-torch style) 1D max pooling over the time dimension.

    ``kW`` is the pooling window size, ``dW`` the step (defaults to ``kW``).
    The actual computation is delegated to the C backend via ``self._backend``.
    """
    def __init__(self, kW, dW=None):
        super(TemporalMaxPooling, self).__init__()
        self.kW = kW
        self.dW = (dW or kW)
        # Buffer for argmax indices; lazily allocated on first forward.
        self.indices = None
    def updateOutput(self, input):
        """Forward pass: fills self.output and self.indices via the backend."""
        if (self.indices is None):
            self.indices = input.new()
        self._backend.TemporalMaxPooling_updateOutput(self._backend.library_state, input, self.output, self.indices, self.kW, self.dW)
        return self.output
    def updateGradInput(self, input, gradOutput):
        """Backward pass: routes gradients through the stored max indices."""
        if (self.gradInput is None):
            return
        self._backend.TemporalMaxPooling_updateGradInput(self._backend.library_state, input, gradOutput, self.gradInput, self.indices, self.kW, self.dW)
        return self.gradInput
    def clearState(self):
        """Drop the cached index buffer (e.g. before serialization)."""
        clear(self, 'indices')
        return super(TemporalMaxPooling, self).clearState()
# NOTE(review): '_if_pypy' looks like a skip decorator (e.g. '@fails_if_pypy')
# whose prefix was lost during extraction; confirm against the original file.
_if_pypy
def test_vectorizer_stop_words_inconsistent():
    """Vectorizers must warn when preprocessing makes stop_words inconsistent."""
    lstr = "\\['and', 'll', 've'\\]"
    message = ('Your stop_words may be inconsistent with your preprocessing. Tokenizing the stop words generated tokens %s not in stop_words.' % lstr)
    for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
        vec.set_params(stop_words=["you've", 'you', "you'll", 'AND'])
        with pytest.warns(UserWarning, match=message):
            vec.fit_transform(['hello world'])
        # Invalidate the cached consistency check so it runs again.
        del vec._stop_words_id
        assert (_check_stop_words_consistency(vec) is False)
        # The check is cached: a second fit must not warn again.
        with warnings.catch_warnings():
            warnings.simplefilter('error', UserWarning)
            vec.fit_transform(['hello world'])
        assert (_check_stop_words_consistency(vec) is None)
        # Changing stop_words invalidates the cache and warns once more.
        vec.set_params(stop_words=["you've", 'you', "you'll", 'blah', 'AND'])
        with pytest.warns(UserWarning, match=message):
            vec.fit_transform(['hello world'])
class gen_dataset(Dataset):
    """Image-captioning dataset backed by a JSON annotation file.

    In 'train' mode items are (image, caption); otherwise (image, caption,
    fname), where fname is the annotation id for nocaps images and the image
    filename elsewhere.
    """

    def __init__(self, ann_file, transform, image_root, split='train', max_words=30, prompt=''):
        # Fix: close the annotation file instead of leaking the handle.
        with open(ann_file, 'r') as f:
            self.ann = json.load(f)
        self.transform = transform
        self.image_root = image_root
        self.max_words = max_words
        self.split = split
        self.prompt = prompt

    def __len__(self):
        return len(self.ann)

    def __getitem__(self, index):
        ann = self.ann[index]
        image_path = os.path.join(self.image_root, ann['image'])
        image = self.transform(Image.open(image_path).convert('RGB'))
        if self.split == 'train':
            # Training annotations hold a single caption string.
            caption = self.prompt + pre_caption(ann['caption'], self.max_words)
            return (image, caption)
        # Eval annotations hold a list of captions; use the first.
        fname = ann['id'] if ('nocaps' in image_path) else ann['image']
        caption = self.prompt + pre_caption(ann['caption'][0], self.max_words)
        return (image, caption, fname)
def sentence_tokenize(text_document: str) -> List[str]:
    """Split ``text_document`` into sentence strings via the module-level segmenter."""
    token_stream = iter(Tokenizer().split(text_document))
    sentences = []
    for sentence in segmenter.split(token_stream):
        text = ''.join(token.spacing + token.value for token in sentence)
        sentences.append(text.strip())
    return sentences
def create_uncertainty(args, questions):
    """Build per-question uncertainty records and sort them by uncertainty.

    Processes at most ``args.qes_limit`` questions, then sorts descending by
    disagreement, variance, or entropy depending on ``args.sort_by``.

    Returns:
        list of records produced by ``generate_uncertainty_qes``.
    """
    result = []
    count = 0
    for qes in questions:
        if count == args.qes_limit:
            break
        result.append(generate_uncertainty_qes(args, qes))
        count += 1
    if args.sort_by == 'disagreement':
        if args.dataset == 'strategyqa':
            try:
                # Most disagreement first: smallest |yes - no| margin.
                result.sort(key=lambda x: abs(x['occurrence']['yes'] - x['occurrence']['no']))
            except (KeyError, TypeError):
                # Fix: was a bare `except:` (caught even KeyboardInterrupt).
                # Fall back to answer-spread when yes/no counts are missing.
                result.sort(key=lambda x: -len(x['occurrence']))
        else:
            result.sort(key=lambda x: -len(x['occurrence']))
    elif (args.sort_by == 'variance') and (args.dataset in ('gsm8k', 'asdiv', 'svamp', 'singleeq', 'addsub', 'multiarith')):
        result.sort(key=lambda x: -x['variance'])
    elif args.sort_by == 'entropy':
        result.sort(key=lambda x: -x['entropy'])
    return result
def load_dataset(name: str) -> pd.DataFrame:
    """Load the named dataset's CSV file into a DataFrame."""
    csv_path = _get_dataset_path(name)
    return pd.read_csv(csv_path)
def unobserved_intrinsic_latencies_anomalous(num_samples):
    """Sample per-service intrinsic latencies for the anomalous scenario.

    The Caching Service gets a constant 2-second offset (the anomaly); the
    remaining services keep half-normal / truncated-exponential latencies.
    Returns a dict mapping service name to an array of ``num_samples`` draws.
    """
    latencies = {}
    latencies['Product Service'] = halfnorm.rvs(size=num_samples, loc=0.1, scale=0.2)
    latencies['Shipping Cost Service'] = halfnorm.rvs(size=num_samples, loc=0.1, scale=0.2)
    latencies['Caching Service'] = 2 + halfnorm.rvs(size=num_samples, loc=0.1, scale=0.1)
    latencies['Order DB'] = truncexpon.rvs(size=num_samples, b=5, scale=0.2)
    latencies['Customer DB'] = truncexpon.rvs(size=num_samples, b=6, scale=0.2)
    latencies['Product DB'] = truncexpon.rvs(size=num_samples, b=10, scale=0.2)
    return latencies
def ref_det(x):
    """Reference determinant: one float32 value per stacked matrix in ``x``."""
    return np.array([np.linalg.det(mat) for mat in x], dtype=np.float32)
def get_inference_args():
    """Parse command-line arguments for X-UMX/UMX inference and evaluation.

    Uses parse_known_args, so flags added by wrapping tools are ignored.
    """
    parser = argparse.ArgumentParser(description='OpenUnmix_CrossNet(X-UMX)/OpenUnmix(UMX) Inference/Evaluation')
    option_specs = [
        ('--inputs', dict(type=str, nargs='+', help='List of paths to any audio files supported by FFMPEG.')),
        ('--targets', dict(nargs='+', default=['bass', 'drums', 'vocals', 'other'], type=str, help='provide targets to be processed. If none, all available targets will be computed')),
        ('--out-dir', dict(type=str, help='Path to save separated sources')),
        ('--start', dict(type=float, default=0.0, help='Audio chunk start in seconds')),
        ('--duration', dict(type=float, default=(- 1.0), help='Audio chunk duration in seconds, negative values load full track')),
        ('--model', dict(type=str, required=True, help='path to pretrained weights (weight filename in case of X-UMX Inference or directory of weight files for UMX Inference')),
        ('--context', dict(default='cudnn', type=str, help='Execution on CUDA')),
        ('--softmask', dict(dest='softmask', action='store_true', help='if enabled, will initialize separation with softmask.otherwise, will use mixture phase with spectrogram')),
        ('--niter', dict(type=int, default=1, help='number of iterations for refining results.')),
        ('--alpha', dict(type=float, default=1.0, help='exponent in case of softmask separation')),
        ('--sample-rate', dict(type=int, default=44100, help='model sample rate')),
        ('--residual-model', dict(action='store_true', help='create a model for the residual')),
        ('--chunk-dur', dict(type=int, default=None, help='window length in seconds - reduce this if Inference fails with SegFault')),
        ('--umx-infer', dict(action='store_true', default=False, help='If True, OpenUnmix(UMX) network is used for Inference/Evaluation')),
        ('--root', dict(type=str, help='root path of MUSDB18 dataset')),
        ('--cores', dict(type=int, default=1)),
        ('--is-wav', dict(action='store_true', default=False, help='flags wav version of the dataset')),
    ]
    for flag, options in option_specs:
        parser.add_argument(flag, **options)
    args, _ = parser.parse_known_args()
    return args
class VGG(nn.Module):
    """VGG-style classifier head on top of a convolutional feature extractor.

    With ``k_lipschitz`` set, the fully connected layers are SpectralLinear
    (Lipschitz-constrained); otherwise plain Linear layers are used. Conv
    layers inside ``features`` are re-initialized He-style.
    """

    def __init__(self, features, output_dim, k_lipschitz=None, p_drop=None):
        super(VGG, self).__init__()
        self.features = features
        if k_lipschitz is not None:
            fc_a = SpectralLinear(512, 512, k_lipschitz)
            fc_b = SpectralLinear(512, 512, k_lipschitz)
            fc_out = SpectralLinear(512, output_dim, k_lipschitz)
        else:
            fc_a = nn.Linear(512, 512)
            fc_b = nn.Linear(512, 512)
            fc_out = nn.Linear(512, output_dim)
        self.classifier = nn.Sequential(nn.Dropout(p=p_drop), fc_a, nn.ReLU(True), nn.Dropout(p=p_drop), fc_b, nn.ReLU(True), fc_out)
        # He initialization for any conv layers in the feature extractor.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                module.bias.data.zero_()

    def forward(self, x):
        """Extract features, flatten, classify."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.classifier(x)
class UserSim():
    """Simulated user that answers system questions from the ground-truth parse.

    `error_evaluator.compare` precomputes per-decoding-step correctness
    (`eval_outputs`) and true option selections (`true_selections`); answers
    are then read off those tables. The user gives up ('exit') after
    `patience` consecutive wrong steps.
    """
    def __init__(self, error_evaluator):
        self.user_type = 'sim'
        # Consecutive-failure budget before the simulated user exits.
        self.patience = 3
        self.error_evaluator = error_evaluator
        self.ground_truth = None
        self.tag_seq = None
        self.dec_seq = None
        self.eval_outputs = None
        self.true_selections = None
        self.q_counter = 0
        self.questioned_pointers = []
        self.questioned_tags = []
        self.option_selections = []
        self.feedback_records = []
    def update_truth(self, ground_truth):
        """Set the gold parse used to judge predictions."""
        self.ground_truth = ground_truth
    def update_pred(self, tag_seq, dec_seq):
        """Store a new prediction and precompute per-step correctness."""
        self.tag_seq = tag_seq
        self.dec_seq = dec_seq
        (_, self.eval_outputs, self.true_selections) = self.error_evaluator.compare(self.ground_truth, 0, self.tag_seq, bool_return_true_selections=True)
    def record_user_feedback(self, context, user_answer, bool_qa=False):
        """Log one interaction; `bool_qa` marks it as a counted question."""
        self.feedback_records.append((context, user_answer))
        if bool_qa:
            self.questioned_tags.append((context, user_answer))
            self.q_counter += 1
    def clear_counter(self):
        """Reset all per-episode interaction state."""
        self.q_counter = 0
        self.questioned_pointers = []
        self.questioned_tags = []
        self.option_selections = []
        self.feedback_records = []
    def get_answer(self, pointer, answer_sheet):
        """Answer a yes/no question about decoding step `pointer`.

        `answer_sheet` maps answer string -> (bool_correctness, ...); the
        simulated answer is the one matching the precomputed correctness.
        Returns 'exit' once patience is exhausted.
        """
        self.questioned_pointers.append(pointer)
        pointer_eval_output = self.eval_outputs[pointer]
        reverse_answer_sheet = {bool_right: ans for (ans, (bool_right, _)) in answer_sheet.items()}
        answer = reverse_answer_sheet[pointer_eval_output]
        valid_eval_outputs = [item for (item_idx, item) in enumerate(self.eval_outputs[:(pointer + 1)]) if ((item is not None) and (item_idx in self.questioned_pointers))]
        # NOTE(review): the -3 window is tied to patience == 3; the two would
        # drift apart if patience were ever changed.
        if (valid_eval_outputs[(- 3):].count(False) == self.patience):
            answer = 'exit'
        print(('User answer: %s.\n' % answer))
        return answer
    def get_selection(self, pointer, answer_sheet, sel_none_of_above):
        """Answer a multiple-choice question about step `pointer`.

        Picks every option whose value appears in the true selection set; an
        empty truth matches the None option. Falls back to
        `sel_none_of_above` when nothing matched.
        """
        pointer_truth = self.true_selections[pointer]
        selections = []
        if (pointer_truth is not None):
            for (select_id, select_val) in answer_sheet.items():
                if (len(pointer_truth) and (select_val in pointer_truth)):
                    selections.append(select_id)
                elif ((len(pointer_truth) == 0) and (select_val is None)):
                    selections.append(select_id)
        if (len(selections) == 0):
            selections.append(sel_none_of_above)
        print(('User answer: %s.\n' % str(selections)))
        return selections
def make_sample_her_transitions_prioritized_replay(replay_strategy, replay_k, reward_fun):
    """Build a prioritized-replay HER transition sampler.

    `replay_strategy` of 'future'/'final' enables goal relabelling with
    probability future_p = 1 - 1/(1 + replay_k); anything else disables it.
    `reward_fun` recomputes rewards for relabelled goals. Returns the sampler
    closure `_sample_her_transitions(self, episode_batch, batch_size, beta)`.
    """
    if ((replay_strategy == 'future') or (replay_strategy == 'final')):
        future_p = (1 - (1.0 / (1 + replay_k)))
    else:
        future_p = 0
    def _sample_proportional(self, rollout_batch_size, batch_size, T):
        # Draw flat transition indices proportionally to priority using the
        # sum segment tree (self._it_sum), then split into (episode, step).
        episode_idxs = []
        t_samples = []
        for _ in range(batch_size):
            self.n_transitions_stored = min(self.n_transitions_stored, self.size_in_transitions)
            mass = (random.random() * self._it_sum.sum(0, (self.n_transitions_stored - 1)))
            idx = self._it_sum.find_prefixsum_idx(mass)
            assert (idx < self.n_transitions_stored)
            # Flat index -> (episode, timestep), assuming T steps per episode.
            episode_idx = (idx // T)
            assert (episode_idx < rollout_batch_size)
            t_sample = (idx % T)
            episode_idxs.append(episode_idx)
            t_samples.append(t_sample)
        return (episode_idxs, t_samples)
    def _sample_her_transitions(self, episode_batch, batch_size_in_transitions, beta):
        """Sample a batch with HER relabelling.

        Returns (transitions, importance-sampling weights, flat indices).
        `beta` is the prioritized-replay IS exponent.
        """
        T = episode_batch['u'].shape[1]
        rollout_batch_size = episode_batch['u'].shape[0]
        batch_size = batch_size_in_transitions
        if (rollout_batch_size < self.current_size):
            # Partial view of the buffer: fall back to uniform sampling.
            episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
            t_samples = np.random.randint(T, size=batch_size)
        else:
            assert (beta >= 0)
            (episode_idxs, t_samples) = _sample_proportional(self, rollout_batch_size, batch_size, T)
            episode_idxs = np.array(episode_idxs)
            t_samples = np.array(t_samples)
        # Importance-sampling weights, normalized by the maximum weight.
        weights = []
        p_min = (self._it_min.min() / self._it_sum.sum())
        max_weight = ((p_min * self.n_transitions_stored) ** (- beta))
        for (episode_idx, t_sample) in zip(episode_idxs, t_samples):
            p_sample = (self._it_sum[((episode_idx * T) + t_sample)] / self._it_sum.sum())
            weight = ((p_sample * self.n_transitions_stored) ** (- beta))
            weights.append((weight / max_weight))
        weights = np.array(weights)
        transitions = {}
        for key in episode_batch.keys():
            if ((not (key == 'td')) and (not (key == 'e'))):
                episode_batch_key = episode_batch[key].copy()
                transitions[key] = episode_batch_key[(episode_idxs, t_samples)].copy()
        # HER: for a future_p fraction of samples, replace the goal with an
        # achieved goal from a later timestep of the same episode.
        her_indexes = np.where((np.random.uniform(size=batch_size) < future_p))
        future_offset = (np.random.uniform(size=batch_size) * (T - t_samples))
        future_offset = future_offset.astype(int)
        future_t = ((t_samples + 1) + future_offset)[her_indexes]
        if (replay_strategy == 'final'):
            future_t[:] = T
        future_ag = episode_batch['ag'][(episode_idxs[her_indexes], future_t)]
        info = {}
        for (key, value) in transitions.items():
            if key.startswith('info_'):
                info[key.replace('info_', '')] = value
        # NOTE(review): this reward_params construction is recomputed
        # identically right after the goal relabelling below — the first pair
        # of lines looks redundant.
        reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
        reward_params['info'] = info
        transitions['g'][her_indexes] = future_ag
        reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
        reward_params['info'] = info
        transitions['r'] = reward_fun(**reward_params)
        transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:]) for k in transitions.keys()}
        assert (transitions['u'].shape[0] == batch_size_in_transitions)
        idxs = ((episode_idxs * T) + t_samples)
        return (transitions, weights, idxs)
    return _sample_her_transitions
def module_init():
    """Create the root binding Module for 'ns.config_store' in the ::ns3 C++ namespace."""
    return Module('ns.config_store', cpp_namespace='::ns3')
def _segm_resnet(name, backbone_name, num_classes, output_stride, pretrained_backbone):
    """Assemble a DeepLab segmentation model on a (dilated) ResNet backbone.

    ``output_stride`` 8 dilates two stages and widens the ASPP rates; any
    other value dilates only the last stage. ``name`` selects the head:
    'deeplabv3plus' (uses low-level features) or 'deeplabv3'.
    """
    if output_stride == 8:
        dilation_flags = [False, True, True]
        aspp_dilate = [12, 24, 36]
    else:
        dilation_flags = [False, False, True]
        aspp_dilate = [6, 12, 18]
    backbone = resnet.__dict__[backbone_name](pretrained=pretrained_backbone, replace_stride_with_dilation=dilation_flags)
    inplanes = 2048
    low_level_planes = 256
    if name == 'deeplabv3plus':
        return_layers = {'layer4': 'out', 'layer1': 'low_level'}
        classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
    elif name == 'deeplabv3':
        return_layers = {'layer4': 'out'}
        classifier = DeepLabHead(inplanes, num_classes, aspp_dilate)
    backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
    return DeepLabV3(backbone, classifier)
class VideoRecorder(object):
    """Records env rollouts to a video (.mp4) or, for text-only envs, a
    structured JSON (.json) file.

    At most one of `path` / `base_path` may be given; otherwise a temp file
    is used. The recorder silently disables itself when the env supports
    neither 'rgb_array' nor 'ansi' render modes.
    """
    def __init__(self, env, path=None, metadata=None, enabled=True, base_path=None):
        modes = env.metadata.get('render.modes', [])
        self._async = env.metadata.get('semantics.async')
        self.enabled = enabled
        if (not self.enabled):
            return
        self.ansi_mode = False
        if ('rgb_array' not in modes):
            if ('ansi' in modes):
                self.ansi_mode = True
            else:
                logger.info('Disabling video recorder because {} neither supports video mode "rgb_array" nor "ansi".'.format(env))
                self.enabled = False
                return
        if ((path is not None) and (base_path is not None)):
            raise error.Error('You can pass at most one of `path` or `base_path`.')
        self.last_frame = None
        self.env = env
        required_ext = ('.json' if self.ansi_mode else '.mp4')
        if (path is None):
            if (base_path is not None):
                path = (base_path + required_ext)
            else:
                # delete=False: only a unique name is needed; the encoder reopens it.
                with tempfile.NamedTemporaryFile(suffix=required_ext, delete=False) as f:
                    path = f.name
        self.path = path
        (path_base, actual_ext) = os.path.splitext(self.path)
        if (actual_ext != required_ext):
            hint = (" HINT: The environment is text-only, therefore we're recording its text output in a structured JSON format." if self.ansi_mode else '')
            raise error.Error('Invalid path given: {} -- must have file extension {}.{}'.format(self.path, required_ext, hint))
        # Ensure the output file exists even if no frame is ever captured.
        touch(path)
        self.frames_per_sec = env.metadata.get('video.frames_per_second', 30)
        self.encoder = None
        self.broken = False
        self.metadata = (metadata or {})
        self.metadata['content_type'] = ('video/vnd.openai.ansivid' if self.ansi_mode else 'video/mp4')
        self.metadata_path = '{}.meta.json'.format(path_base)
        logger.info('Starting new video recorder writing to %s', self.path)
        self.empty = True
    def functional(self):
        """True while the recorder is enabled and has not broken."""
        return (self.enabled and (not self.broken))
    def capture_frame(self):
        """Render the env and append the resulting frame to the output."""
        # NOTE(review): `self.functional` is a bound method here (always
        # truthy), so this guard can never fire; it reads as if `functional`
        # was meant to be a @property — confirm against the original.
        if (not self.functional):
            return
        logger.debug('Capturing video frame: path=%s', self.path)
        render_mode = ('ansi' if self.ansi_mode else 'rgb_array')
        frame = self.env.render(mode=render_mode)
        if (frame is None):
            if self._async:
                # Async envs may legitimately have no frame ready yet.
                return
            else:
                logger.warn('Env returned None on render(). Disabling further rendering for video recorder by marking as disabled: path=%s metadata_path=%s', self.path, self.metadata_path)
                self.broken = True
        else:
            self.last_frame = frame
            if self.ansi_mode:
                self._encode_ansi_frame(frame)
            else:
                self._encode_image_frame(frame)
    def close(self):
        """Flush and close the encoder; clean up output files if nothing was recorded."""
        if (not self.enabled):
            return
        if self.encoder:
            logger.debug('Closing video encoder: path=%s', self.path)
            self.encoder.close()
            self.encoder = None
        else:
            # No frames were ever captured: remove the touched placeholder file.
            os.remove(self.path)
            if (self.metadata is None):
                self.metadata = {}
            self.metadata['empty'] = True
        if self.broken:
            logger.info('Cleaning up paths for broken video recorder: path=%s metadata_path=%s', self.path, self.metadata_path)
            if os.path.exists(self.path):
                os.remove(self.path)
            if (self.metadata is None):
                self.metadata = {}
            self.metadata['broken'] = True
    def write_metadata(self):
        """Intentionally a no-op in this implementation."""
        pass
    def _encode_ansi_frame(self, frame):
        """Append a text frame, lazily creating the TextEncoder."""
        if (not self.encoder):
            self.encoder = TextEncoder(self.path, self.frames_per_sec)
            self.metadata['encoder_version'] = self.encoder.version_info
        self.encoder.capture_frame(frame)
        self.empty = False
    def _encode_image_frame(self, frame):
        """Append an RGB frame, lazily creating the ImageEncoder."""
        if (not self.encoder):
            self.encoder = ImageEncoder(self.path, frame.shape, self.frames_per_sec)
            self.metadata['encoder_version'] = self.encoder.version_info
        try:
            self.encoder.capture_frame(frame)
        except error.InvalidFrame as e:
            logger.warn('Tried to pass invalid video frame, marking as broken: %s', e)
            self.broken = True
        else:
            self.empty = False
class TestDataset(Dataset):
    """Evaluation dataset for ranking-based link prediction.

    Each item is a tuple ``(positive_sample, negative_sample, mode)`` where
    ``negative_sample`` holds the true entity at index 0 followed by the
    negative candidates (precomputed arrays, or random entity ids when
    ``random_sampling`` is on).
    """

    def __init__(self, triples, args, mode, random_sampling):
        # triples: dict with 'head'/'relation'/'tail' index sequences and,
        # when not random sampling, 'head_neg'/'tail_neg' candidate arrays.
        self.len = len(triples['head'])
        self.triples = triples
        self.nentity = args.nentity
        self.nrelation = args.nrelation
        self.mode = mode  # 'head-batch' or 'tail-batch'
        self.random_sampling = random_sampling
        if random_sampling:
            self.neg_size = args.neg_size_eval_train

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        head, relation, tail = (self.triples['head'][idx],
                                self.triples['relation'][idx],
                                self.triples['tail'][idx])
        positive_sample = torch.LongTensor((head, relation, tail))
        if self.mode == 'head-batch':
            if not self.random_sampling:
                negative_sample = torch.cat([torch.LongTensor([head]),
                                             torch.from_numpy(self.triples['head_neg'][idx])])
            else:
                negative_sample = torch.cat([torch.LongTensor([head]),
                                             torch.randint(0, self.nentity, size=(self.neg_size,))])
        elif self.mode == 'tail-batch':
            if not self.random_sampling:
                negative_sample = torch.cat([torch.LongTensor([tail]),
                                             torch.from_numpy(self.triples['tail_neg'][idx])])
            else:
                negative_sample = torch.cat([torch.LongTensor([tail]),
                                             torch.randint(0, self.nentity, size=(self.neg_size,))])
        else:
            # BUG FIX: originally an unknown mode fell through and raised an
            # opaque NameError on `negative_sample`; fail loudly instead.
            raise ValueError('negative batch mode {} not supported'.format(self.mode))
        return positive_sample, negative_sample, self.mode

    @staticmethod
    def collate_fn(data):
        """Collate a list of samples into batched tensors for a DataLoader.

        BUG FIX: declared as @staticmethod — the original bare method would
        receive the instance as `data` when accessed through an instance.
        """
        positive_sample = torch.stack([item[0] for item in data], dim=0)
        negative_sample = torch.stack([item[1] for item in data], dim=0)
        mode = data[0][2]
        return positive_sample, negative_sample, mode
def _get_type_string(attr_type):
if isinstance(attr_type, (list, tuple)):
if (len(attr_type) > 1):
return ((', '.join([x.__name__ for x in attr_type[:(- 1)]]) + ' or ') + attr_type[(- 1)].__name__)
return attr_type[0].__name__
return attr_type.__name__ |
def element_segmentation(measure, soup, staff=None):
    """Annotate each child of a MusicXML <measure> with <start>/<end> time tags
    and partition the measure's elements around the multi-voice region.

    Parameters (assumed BeautifulSoup objects — TODO confirm):
        measure: a <measure> tag whose children are inspected and mutated in place.
        soup: document object used to create the new <start>/<end> tags.
        staff: when given, only notes on this staff contribute to the voice ranges.

    Returns:
        (pre_voice_elements, voice_elements, post_voice_elements)
    """
    (voice_starts, voice_ends) = ({}, {})
    # Running time position within the measure, in <duration> units.
    position = 0
    for element in measure.contents:
        if (element.name == 'note'):
            # Notes without a <duration> child carry no timing; skip them.
            if (element.duration is None):
                continue
            voice = element.voice.text
            duration = int(element.duration.text)
            if element.chord:
                # Chord notes share the previous note's onset: rewind its duration.
                # NOTE(review): `last_duration` is unbound if the first timed note
                # of the measure is a chord note — confirm inputs preclude this.
                position -= last_duration
            if (element.staff and (int(element.staff.text) == staff)):
                # Track the earliest onset seen for this voice on the target staff.
                voice_starts[voice] = (min(voice_starts[voice], position) if (voice in voice_starts) else position)
                start_tag = soup.new_tag('start')
                start_tag.string = str(position)
                element.append(start_tag)
            position += duration
            if (element.staff and (int(element.staff.text) == staff)):
                # Track the latest offset seen for this voice on the target staff.
                voice_ends[voice] = (max(voice_ends[voice], position) if (voice in voice_ends) else position)
                end_tag = soup.new_tag('end')
                end_tag.string = str(position)
                element.append(end_tag)
            last_duration = duration
        elif (element.name == 'backup'):
            # <backup> rewinds the time cursor (used to start another voice).
            position -= int(element.duration.text)
        elif (element.name == 'forward'):
            # <forward> advances the cursor without sounding a note.
            position += int(element.duration.text)
        else:
            # Non-timing elements get a zero-length span at the current position.
            start_tag = soup.new_tag('start')
            end_tag = soup.new_tag('end')
            start_tag.string = str(position)
            end_tag.string = str(position)
            element.append(start_tag)
            element.append(end_tag)
    # NOTE(review): index [1] selects the second-smallest start / second-largest
    # end and raises IndexError when only one voice was recorded — confirm this
    # is intentional (it looks like it might have been meant to be [0]).
    voice_start = (sorted(voice_starts.values())[1] if voice_starts else 0)
    voice_end = (sorted(voice_ends.values(), reverse=True)[1] if voice_ends else 0)
    (pre_voice_elements, post_voice_elements, voice_elements) = ([], [], [])
    for element in measure.contents:
        # Pure timing bookkeeping never belongs to a partition.
        if (element.name in ('backup', 'forward')):
            continue
        if ((element.name == 'note') and (element.duration is None)):
            continue
        if (staff is not None):
            # Ignore notes belonging to a different staff.
            if (element.staff and (int(element.staff.text) != staff)):
                continue
        if (voice_starts or voice_ends):
            # Classify by comparing the element's own span to the voice region.
            if (int(element.end.text) <= voice_start):
                pre_voice_elements.append(element)
            elif (voice_end <= int(element.start.text)):
                post_voice_elements.append(element)
            else:
                voice_elements.append(element)
        else:
            # No voice region detected: everything counts as "pre".
            pre_voice_elements.append(element)
    return (pre_voice_elements, voice_elements, post_voice_elements)
# BUG FIX: the decorator was mangled to a bare call `_criterion('cross_entropy')`;
# restored to the standard fairseq registration decorator. Assumes
# `register_criterion` is imported at module top (fairseq convention) — confirm.
@register_criterion('cross_entropy')
class CrossEntropyCriterion(FairseqCriterion):
    """Token-level cross-entropy (negative log-likelihood) criterion."""

    def __init__(self, task, sentence_avg):
        super().__init__(task)
        # When True, normalize the loss per sentence instead of per token.
        self.sentence_avg = sentence_avg

    def forward(self, model, sample, reduce=True):
        """Compute the loss for `sample`.

        Returns (loss, sample_size, logging_output); `sample_size` is the
        denominator used for gradient normalization.
        """
        net_output = model(**sample['net_input'])
        loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce)
        sample_size = sample['target'].size(0) if self.sentence_avg else sample['ntokens']
        logging_output = {
            'loss': loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['target'].size(0),
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    def compute_loss(self, model, net_output, sample, reduce=True):
        """Return (loss, nll_loss); identical here since no label smoothing."""
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1)
        loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx,
                          reduction='sum' if reduce else 'none')
        return loss, loss

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data-parallel training.

        BUG FIX: declared as @staticmethod — the bare method would have
        received the instance as `logging_outputs` on instance access.
        """
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        # Losses are reported in base-2 (bits) per token/sentence.
        metrics.log_scalar('loss', (loss_sum / sample_size) / math.log(2), sample_size, round=3)
        if sample_size != ntokens:
            metrics.log_scalar('nll_loss', (loss_sum / ntokens) / math.log(2), ntokens, round=3)
            metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg))
        else:
            metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg))

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """Outputs are plain sums, so they may be summed across workers
        before reduction, saving communication."""
        return True
class LSMDCChoiceDataModule(BaseDataModule):
    """Data module that plugs the LSMDC multiple-choice dataset into
    the shared BaseDataModule machinery."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def dataset_cls(self):
        """Dataset class this module instantiates."""
        return LSMDCChoiceDataset

    def dataset_cls_no_false(self):
        """Same dataset class (no separate variant is used here)."""
        return LSMDCChoiceDataset

    def dataset_name(self):
        """String identifier for this dataset."""
        return 'lsmdc_choice'
# BUG FIX: line read `(frozen=True)` — a syntax error; the `@dataclass` token
# was stripped. Restored decorator (local import kept in case the module-top
# import was lost with it; re-importing is harmless).
from dataclasses import dataclass


@dataclass(frozen=True)
class LightScenarioKey():
    """Immutable, hashable key identifying one scenario/split pair."""

    # Specification of the scenario (project-declared type).
    scenario_spec: ScenarioSpec
    # Which data split this key refers to (e.g. 'train'/'test').
    split: str

    def __hash__(self):
        return hash((self.scenario_spec, self.split))
def predict_shap(model, data, boosting=None):
    """Return per-feature SHAP contribution values for `data`.

    Dispatches to the library-specific API of the fitted `model`.

    :param model: fitted xgboost/lightgbm booster or catboost model.
    :param data: dataset in the format the respective library expects.
    :param boosting: one of 'xgboost', 'lightgbm', 'catboost'.
    :raises ValueError: for an unrecognized `boosting` name.
    """
    assert boosting is not None  # kept for backward compatibility with callers
    if boosting == 'xgboost':
        return model.predict(data, pred_contribs=True)
    elif boosting == 'lightgbm':
        return model.predict(data, pred_contrib=True)
    elif boosting == 'catboost':
        return model.get_feature_importance(data, fstr_type='ShapValues')
    # BUG FIX: the original silently returned None for unknown library names.
    raise ValueError('Unsupported boosting library: {!r}'.format(boosting))
def show_boxes_from_standard_json(json_file_path, classes, img_folder_path=None, output_folder_path=None, track_id=(- 1)):
    """Draw detection bounding boxes onto the images referenced by a
    standard-format detection JSON and optionally write them out.

    :param json_file_path: path of the JSON produced by the detector.
    :param classes: class-name list passed through to draw_bbox.
    :param img_folder_path: overrides the folder stored in each record.
    :param output_folder_path: when given, annotated images are written there.
    :param track_id: forwarded to draw_bbox (-1 = no track highlighting).
    :returns: True.
    """
    dets = read_json_from_file(json_file_path)
    for det in dets:
        if img_folder_path is None:
            img_path = os.path.join(det['image']['folder'], det['image']['name'])
        else:
            img_path = os.path.join(img_folder_path, det['image']['name'])
        # BUG FIX: originally a non-image path left `img` bound to the previous
        # iteration's frame (or unbound on the first iteration), so the wrong
        # image was annotated and saved under the new name. Skip such entries.
        if not is_image(img_path):
            continue
        img = cv2.imread(img_path)
        for candidate in det['candidates']:
            bbox = np.array(candidate['det_bbox']).astype(int)
            score = candidate['det_score']
            # NOTE(review): `bbox_thresh` is a module-level global — confirm
            # it is initialized before this function runs.
            if score >= bbox_thresh:
                img = draw_bbox(img, bbox, score, classes, track_id=track_id)
        if output_folder_path is not None:
            create_folder(output_folder_path)
            img_output_path = os.path.join(output_folder_path, det['image']['name'])
            cv2.imwrite(img_output_path, img)
    return True
def add_node(G, func_prefix, node, id, ids_in_basic_block):
    """Add `func_prefix + node` to graph G with the given `id` attribute.

    When `ids_in_basic_block` is provided, SSA-style value nodes (names
    starting with '%' whose id is not 'label') are also recorded there,
    without duplicates. Removed the original's dead debug block
    (`node_check = ''` made its guard unreachable).
    """
    assert node is not None, 'Node none'
    prefixed = func_prefix + node
    G.add_node(prefixed, id=id)
    if ids_in_basic_block is not None:
        # BUG FIX: the original membership test checked the bare `node` while
        # the list stores prefixed names, so duplicates were always appended.
        if node[0] == '%' and id != 'label' and prefixed not in ids_in_basic_block:
            ids_in_basic_block.append(prefixed)
def determine_source_details(configurator):
    """Collect git source metadata (repo URL, branch, commit, author, …).

    The result is cached in the module-global `_source`; outside a git
    repository every field is None. Cleanup: collapsed the eight repeated
    None-assignments into `dict.fromkeys`, and removed the dead
    `if is_git_repo else None` (is_git_repo is always True past the early
    return).
    """
    global _source
    if _source:
        return _source
    keys = ('repoURL', 'branchOrTag', 'commitId', 'commitMsg',
            'authorName', 'committerName', 'authorEmail', 'committerEmail')
    git_cmd = ['git']
    if configurator and configurator.options and configurator.options.git_repo:
        # Run git against an explicitly configured repository path.
        git_cmd += ['-C', configurator.options.git_repo]
    # `git rev-parse` succeeds only inside a repository.
    if _exec(git_cmd + ['rev-parse']) is None:
        result = dict.fromkeys(keys)
        _source = result
        return result
    repo_url = _exec(git_cmd + ['ls-remote', '--get-url']) or ''
    parsed = urlparse(repo_url)
    if parsed.password:
        # Strip credentials from the URL before reporting it.
        # NOTE(review): this also drops an explicit port — confirm acceptable.
        parsed = parsed._replace(netloc='{}{}'.format(parsed.username, parsed.hostname))

    def _show(fmt):
        # Helper for `git show -s --format=<fmt> HEAD`.
        return _exec(git_cmd + ['show', '-s', fmt, 'HEAD'])

    result = {
        'repoURL': _encode_str(parsed.geturl()),
        'branchOrTag': extract_base(_show('--format=%D')),
        'commitId': _exec(git_cmd + ['rev-parse', 'HEAD']),
        'commitMsg': _show('--format=%B'),
        'authorName': _show('--format=%aN'),
        'committerName': _show('--format=%cN'),
        'authorEmail': _show('--format=%aE'),
        'committerEmail': _show('--format=%cE'),
    }
    _source = result
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.