code stringlengths 101 5.91M |
|---|
class igEnv():
    """Two-robot handover environment wrapping iGibson + PyBullet.

    robots[0] (the giver) must pick up a YCB cracker box that is initially
    pinned in mid-air by a fixed world constraint; robots[3] (the receiver)
    must take it.  robots[1] and robots[2] are parked out of the way.
    Observations are a sliding window of `seq_length` frames of
    `feature_size` relative-pose features flattened to shape (1, 150).

    NOTE(review): the poses, link ids and scaling constants below are
    scene-specific magic numbers that must match the iGibson config and the
    recorded expert demonstrations -- confirm before changing.
    """

    def __init__(self, args):
        self.args = args
        self.config_filename = self.args.ig_config
        self.env = iGibsonEnv(config_file=self.config_filename, mode=self.args.ig_render_mode)
        # Teleport both participating robots to their start poses, then let
        # the robot-level reset helpers sync their internal state.
        p.resetBasePositionAndOrientation(self.env.robots[0].robot_ids[0], [(- 0.75), (- 0.4), 1.1], quaternion_from_euler(0.0, 0.0, np.pi))
        p.resetBasePositionAndOrientation(self.env.robots[3].robot_ids[0], [0.75, (- 0.4), 0.1], quaternion_from_euler(0.0, np.pi, 0.0))
        self.env.robots[0].base_reset([(- 0.75), (- 0.4), 1.1], quaternion_from_euler(0.0, 0.0, np.pi))
        self.env.robots[3].base_reset([0.75, (- 0.4), 0.1], quaternion_from_euler(0.0, np.pi, 0.0))
        self.env.simulator_step()
        self.env.robots[0].pose_reset([0.2, (- 0.2), 0.3], [0.0, 0.0, 0.0], 5000.0)
        self.env.robots[3].pose_reset([0.4, 0.0, 1.2], [0.0, 0.0, 0.0], 5000.0)
        self.env.simulator_step()
        # Park the two unused robots above the workspace.
        self.env.robots[1].base_reset([(- 0.75), (- 0.4), 2.2], quaternion_from_euler(0.0, (- 1.0), np.pi))
        self.env.robots[2].base_reset([0.75, (- 0.4), 2.2], quaternion_from_euler(np.pi, (- 1.0), np.pi))
        self.env.simulator_step()
        self.env.robots[0].base_change([(- 0.75), (- 0.4), 1.1], quaternion_from_euler(0.0, 0.0, np.pi))
        self.env.robots[3].base_change([0.75, (- 0.4), 0.1], quaternion_from_euler(0.0, np.pi, 0.0))
        self.env.simulator_step()
        self.env.robots[0].robot_specific_reset()
        self.env.robots[3].robot_specific_reset()
        # Import the handover object and pin it in the air with a fixed
        # world constraint at a random reachable target position.
        obj = YCBObject('003_cracker_box')
        self.obj_id = self.env.simulator.import_object(obj)
        p.changeDynamics(self.obj_id, (- 1), mass=0.1)
        self.obj_cons = p.createConstraint(parentBodyUniqueId=self.obj_id, parentLinkIndex=(- 1), childBodyUniqueId=(- 1), childLinkIndex=(- 1), jointType=p.JOINT_FIXED, jointAxis=(0, 0, 0), parentFramePosition=[0, 0, 0], childFramePosition=[0, 0, 0], parentFrameOrientation=(0, 0, 0.707, 0.707), childFrameOrientation=(0, 0, 0, 1))
        obj_pos = self.env.robots[0].get_a_random_target_pos()
        p.changeConstraint(self.obj_cons, obj_pos, maxForce=30000.0)
        self.env.robots[0].give_target(self.obj_id)
        self.env.robots[3].give_target(self.obj_id)
        self.env.simulator_step()
        self.step_count = 0
        # 5x5 grid of latent "pivot" points on [-0.8, 0.8]^2; each episode
        # advances to the next pivot and adds uniform noise to form the
        # per-episode random variable handed to the policy.
        self.linspace = np.linspace((- 0.8), 0.8, 5)
        self.pivot = torch.FloatTensor(np.array([[i, j] for i in self.linspace for j in self.linspace])).view((- 1), 2)
        self.pivot_num = len(self.pivot)
        self.pivot_id = 0
        self.pivot_id = ((self.pivot_id + 1) % self.pivot_num)
        self.random_variable_noise = torch.FloatTensor(np.array([random.uniform((- 0.2), 0.2), random.uniform((- 0.2), 0.2)])).view(1, 2)
        self.random_variable = (self.pivot[self.pivot_id].view(1, 2) + self.random_variable_noise)
        # Sliding observation window (seq_length frames x feature_size dims),
        # zero-initialized; flattened size 10 * 15 = 150 matches the
        # observation space declared below.
        self.feature_size = 15
        self.seq_length = 10
        self.padding = torch.FloatTensor(np.array([[0.0 for i in range(self.feature_size)] for _ in range(self.seq_length)]))
        self.observation_space = spaces.Box((- 1.0), 1.0, (150,))
        self.random_seed_space = spaces.Box((- 1.0), 1.0, (2,))
        self.action_space = spaces.Box((- 1.0), 1.0, (14,))
        # Expert demonstration files for GAIL-style resets; ids 117 and 149
        # are skipped -- presumably bad recordings, TODO confirm.
        self.dataset = self.args.gail_experts_dir
        self.dataset_id = (([i for i in range(1, 117)] + [i for i in range(118, 149)]) + [i for i in range(150, 156)])
        # Curriculum horizon: episodes start at 10 steps and can grow to 300
        # (start_eval() jumps straight to the maximum).
        self.current_step_size = 10
        self.max_step_size = 300
        self.reset_ratio = 0.01
        self.eval_mode = False

    def check_succ(self):
        """Return True when the receiver holds the object and the two hands
        are more than 0.3 m apart (i.e. the handover completed)."""
        if self.env.robots[3].hold_cons:
            pos_1 = np.array(p.getLinkState(self.env.robots[0].robot_ids[0], self.env.robots[0].hand_id)[0])
            pos_2 = np.array(p.getLinkState(self.env.robots[3].robot_ids[0], self.env.robots[3].hand_id)[0])
            if (np.linalg.norm((pos_1 - pos_2)) > 0.3):
                return True
        return False

    def check_done(self):
        """Return True when the object has fallen below z = 0.2 (dropped)."""
        pos = p.getBasePositionAndOrientation(self.obj_id)[0]
        return (pos[2] < 0.2)

    def get_states(self):
        """Update grasp bookkeeping and return ``(states, succ, done)``.

        Side effects: when a robot's gripper tip comes within 6 cm of the
        object, the world constraint is released and the hold is created /
        transferred (the giver releases once the receiver has grasped).
        The newest feature frame is pushed into the sliding window.
        """
        base_1 = np.array(list(p.getLinkState(self.env.robots[0].robot_ids[0], self.env.robots[0].pelvis_id)[0]))
        base_2 = np.array(list(p.getLinkState(self.env.robots[3].robot_ids[0], self.env.robots[3].pelvis_id)[0]))
        pos_1 = np.array(list(p.getLinkState(self.env.robots[0].robot_ids[0], self.env.robots[0].hand_id)[0]))
        pos_2 = np.array(list(p.getLinkState(self.env.robots[3].robot_ids[0], self.env.robots[3].hand_id)[0]))
        tip_1 = list(p.getBasePositionAndOrientation(self.env.robots[0].tip)[0])
        tip_2 = list(p.getBasePositionAndOrientation(self.env.robots[3].tip)[0])
        obj_pos = np.array(list(p.getBasePositionAndOrientation(self.obj_id)[0]))
        obj_ori = np.array(list(p.getEulerFromQuaternion(p.getBasePositionAndOrientation(self.obj_id)[1])))
        # Giver grabs the object: drop the world constraint, attach to hand.
        dis_1 = np.linalg.norm((np.array(tip_1) - np.array(obj_pos)))
        if ((dis_1 < 0.06) and (self.env.robots[0].hold_cons == None)):
            if self.obj_cons:
                p.removeConstraint(self.obj_cons)
                self.obj_cons = None
            self.env.robots[0].hold_target()
        # Receiver grabs (gripper must be closing) only while the giver
        # still holds the object; the giver then releases.
        dis_2 = np.linalg.norm((np.array(tip_2) - np.array(obj_pos)))
        if ((dis_2 < 0.06) and (self.env.robots[3].hold_cons == None) and (self.env.robots[3].gripper == self.env.robots[3].gripper_close)):
            if self.env.robots[0].hold_cons:
                self.env.robots[3].hold_target()
                self.env.robots[0].clear()
        # 15-dim frame: hand offsets from pelvises, object offsets from
        # hands, and object orientation (Euler angles).
        new_state = np.concatenate(((pos_1 - base_1), (pos_2 - base_2), (obj_pos - pos_1), (obj_pos - pos_2), obj_ori), axis=0)
        new_state = torch.FloatTensor(np.array([new_state]))
        self.padding = torch.cat((self.padding, new_state), dim=0)[1:]
        states = self.padding.view(1, (- 1))
        succ = self.check_succ()
        done = self.check_done()
        if succ:
            done = True
        return (states, succ, done)

    def base_loc(self):
        """Return (giver pelvis position, receiver base position)."""
        base_1 = np.array(list(p.getLinkState(self.env.robots[0].robot_ids[0], self.env.robots[0].pelvis_id)[0]))
        base_2 = np.array(list(p.getBasePositionAndOrientation(self.env.robots[3].robot_ids[0])[0]))
        return (base_1, base_2)

    def reset(self, start_begin=False, provide=False, provide_states=None, provide_joints=None):
        """Reset the scene to a frame sampled from an expert demonstration.

        Args:
            start_begin: force demo frame 0 (also forced in eval mode).
            provide: use the given arrays instead of sampling a demo file.
            provide_states / provide_joints: demonstration arrays used when
                ``provide`` is True.

        Returns:
            (initial observation, per-episode random variable).
        """
        # Drop any existing holds/constraints and re-pose both robots.
        self.env.robots[0].clear()
        self.env.robots[3].clear()
        if self.obj_cons:
            p.removeConstraint(self.obj_cons)
            self.obj_cons = None
        self.env.simulator_step()
        self.env.robots[0].pose_reset([0.2, (- 0.2), 0.3], [0.0, 0.0, 0.0], 5000.0)
        self.env.robots[3].pose_reset([0.4, 0.0, 1.2], [0.0, 0.0, 0.0], 5000.0)
        p.resetBasePositionAndOrientation(self.obj_id, [0.0, 0.0, 0.2], [0, 0, 0.707, 0.707])
        self.env.simulator_step()
        self.env.robots[0].give_target(self.obj_id)
        self.env.robots[3].give_target(self.obj_id)
        if provide:
            selected_states = provide_states
            selected_joints = provide_joints
        else:
            selected_id = random.choice(self.dataset_id)
            selected_states = np.load('{0}/{1}_states.npy'.format(self.dataset, selected_id))
            selected_joints = np.load('{0}/{1}_joints.npy'.format(self.dataset, selected_id))
        if (start_begin or self.eval_mode):
            selected_frame = 0
        else:
            # Sample a start frame from the first reset_ratio of the demo.
            selected_frame = random.randint(0, max(1, int((self.reset_ratio * float((len(selected_states) - 1))))))
        # Demo state layout (per frame): [12:18] giver ee pose,
        # [18:24] receiver ee pose, [-7:-4] object position,
        # [-4:-1] object Euler orientation, [-1] stage flag (0/1/2).
        self.env.robots[0].give_initial_ee_pose(selected_states[selected_frame][12:15], selected_states[selected_frame][15:18])
        self.env.robots[3].give_initial_ee_pose(selected_states[selected_frame][18:21], selected_states[selected_frame][21:24])
        now = 0
        for i in range(p.getNumJoints(self.env.robots[0].robot_ids[0])):
            p.resetJointState(self.env.robots[0].robot_ids[0], i, selected_joints[selected_frame][now], 0.0)
            now += 1
        for i in range(p.getNumJoints(self.env.robots[3].robot_ids[0])):
            p.resetJointState(self.env.robots[3].robot_ids[0], i, selected_joints[selected_frame][now], 0.0)
            now += 1
        # Hold the restored joint configuration with position control.
        p.setJointMotorControlArray(self.env.robots[0].robot_ids[0], [i for i in range(p.getNumJoints(self.env.robots[0].robot_ids[0]))], p.POSITION_CONTROL, selected_joints[selected_frame][:42], forces=[500.0 for _ in range(p.getNumJoints(self.env.robots[0].robot_ids[0]))])
        p.setJointMotorControlArray(self.env.robots[3].robot_ids[0], [i for i in range(p.getNumJoints(self.env.robots[3].robot_ids[0]))], p.POSITION_CONTROL, selected_joints[selected_frame][42:], forces=[500.0 for _ in range(p.getNumJoints(self.env.robots[3].robot_ids[0]))])
        p.resetBasePositionAndOrientation(self.obj_id, selected_states[selected_frame][(- 7):(- 4)], p.getQuaternionFromEuler(selected_states[selected_frame][(- 4):(- 1)]))
        self.env.simulator_step()
        # Recreate the hold state recorded in the demo's stage flag:
        # 0 = object pinned in the world, 1 = giver holds, 2 = receiver holds.
        if (abs((selected_states[selected_frame][(- 1)] - 0)) < 0.1):
            self.obj_cons = p.createConstraint(parentBodyUniqueId=self.obj_id, parentLinkIndex=(- 1), childBodyUniqueId=(- 1), childLinkIndex=(- 1), jointType=p.JOINT_FIXED, jointAxis=(0, 0, 0), parentFramePosition=[0, 0, 0], childFramePosition=[0, 0, 0], parentFrameOrientation=(0, 0, 0.707, 0.707), childFrameOrientation=(0, 0, 0, 1))
            obj_pos = self.env.robots[0].get_a_random_target_pos()
            p.changeConstraint(self.obj_cons, obj_pos, maxForce=30000.0)
            self.stage = 0
        elif (abs((selected_states[selected_frame][(- 1)] - 1)) < 0.1):
            self.env.robots[0].hold_target()
            self.stage = 1
        else:
            self.env.robots[3].hold_target()
            self.stage = 2
        self.env.simulator_step()
        self.step_count = 0
        # Advance the latent pivot and clear the observation window.
        self.pivot_id = ((self.pivot_id + 1) % self.pivot_num)
        self.random_variable_noise = torch.FloatTensor(np.array([random.uniform((- 0.2), 0.2), random.uniform((- 0.2), 0.2)])).view(1, 2)
        self.random_variable = (self.pivot[self.pivot_id].view(1, 2) + self.random_variable_noise)
        self.padding = torch.FloatTensor(np.array([[0.0 for i in range(self.feature_size)] for _ in range(self.seq_length)]))
        return (self.get_states()[0], self.random_variable)

    def start_eval(self):
        """Enter evaluation mode: full-length episodes, resets at frame 0."""
        self.eval_mode = True
        self.current_step_size = self.max_step_size

    def stop_eval(self):
        """Leave evaluation mode (episode length is NOT restored here)."""
        self.eval_mode = False

    def step(self, actions):
        """Apply one 14-dim action (7 per robot) and return a transition.

        Returns ``(states, success, [done], [info], random_variable)``;
        ``info`` carries 'bad_transition' when the episode timed out or the
        object was dropped, and the environment auto-resets on termination.
        """
        success = False
        if (self.step_count > self.current_step_size):
            # Horizon exceeded: count as a failed (bad) transition.
            done = True
            (states, _) = self.reset()
            return (states, torch.FloatTensor([[float(success)]]), [done], [{'bad_transition': True}], self.random_variable)
        else:
            # Per robot: [0:3] translation deltas (scaled down), [3:6]
            # rotation deltas (clipped to +/-0.9), [6] gripper command.
            action = np.clip(actions[0].detach().cpu().numpy(), (- 1.0), 1.0)
            action_human = action[:7]
            action_human[:3] /= 1000.0
            rotation_human = np.clip(action_human[3:6], (- 0.9), 0.9)
            gripper_human = action_human[(- 1):]
            action_robot = action[7:14]
            action_robot[:3] /= 1000.0
            rotation_robot = np.clip(action_robot[3:6], (- 0.9), 0.9)
            gripper_robot = action_robot[(- 1):]
            # Repack into the 10-dim per-robot command expected by env.step:
            # translation in slots 3-5 (scaled x6), rotation 6-8, gripper 9.
            del_action = (np.array([0.0, 0.0, 0.0, float(action_human[0]), float(action_human[1]), float(action_human[2]), 0.0, 0.0, 0.0, 0.0]) * 6.0)
            del_action[6:(- 1)] = rotation_human
            del_action[(- 1)] = gripper_human
            del_action_2 = (np.array([0.0, 0.0, 0.0, float(action_robot[0]), float(action_robot[1]), float(action_robot[2]), 0.0, 0.0, 0.0, 0.0]) * 6.0)
            del_action_2[6:(- 1)] = rotation_robot
            del_action_2[(- 1)] = gripper_robot
            self.env.step(del_action, del_action_2)
            self.step_count += 1
            (states, succ, done) = self.get_states()
            if succ:
                success = True
                done = True
                (states, _) = self.reset(True)
                return (states, torch.FloatTensor([[float(success)]]), [done], [{}], self.random_variable)
            elif done:
                (states, _) = self.reset()
                return (states, torch.FloatTensor([[float(success)]]), [done], [{'bad_transition': True}], self.random_variable)
            else:
                return (states, torch.FloatTensor([[float(success)]]), [done], [{}], self.random_variable)
def imtext(image, text, space=(3, 3), color=(0, 0, 0), thickness=1, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.0):
    """Draw ``text`` in the top-left corner of ``image`` and return it.

    Args:
        image: BGR image array (modified in place by cv2.putText).
        text: string to draw.
        space: (x, y) pixel margin from the top-left corner.
        color / thickness / fontFace / fontScale: cv2.putText parameters.

    Returns:
        The image with the text drawn on it.
    """
    assert isinstance(text, str), type(text)
    # cv2.getTextSize returns ((width, height), baseline).  The original
    # code used size[1] -- the baseline -- as the y offset, which places the
    # text mostly above the top edge of the image; the text height is the
    # correct origin offset for putText's bottom-left-origin convention.
    (text_size, _baseline) = cv2.getTextSize(text, fontFace, fontScale, thickness)
    org = (space[0], (text_size[1] + space[1]))
    image = cv2.putText(image, text, org, fontFace, fontScale, color, thickness)
    return image
def get_policy_class(config: TrainerConfigDict) -> Optional[Type[Policy]]:
    """Resolve the NFSP average-policy class for the configured framework.

    Only the ``torch`` framework is supported; any other value raises
    ``NotImplementedError``.
    """
    framework = config['framework']
    if framework != 'torch':
        raise NotImplementedError(f"NFSP average policy for framework: {config['framework']} not implemented.")
    return NFSPTorchAveragePolicy
def load_scene_flow_disp(img_path):
    """Load a SceneFlow disparity map from a ``.pfm`` file.

    Args:
        img_path: path to the disparity image; must end with ``.pfm``.

    Returns:
        The disparity array produced by ``load_pfm`` (scale is discarded).

    Raises:
        AssertionError: if the path does not end with ``.pfm``.
    """
    # Fixed: the original message read ".pfmbut got ..." (missing space).
    assert img_path.endswith('.pfm'), 'scene flow disparity image must end with .pfm but got {}'.format(img_path)
    (disp_img, _scale) = load_pfm(img_path)
    return disp_img
class ConfigManager(ConfigBase):
    """Thin ``ConfigBase`` wrapper that injects a shared ``config`` object.

    NOTE(review): ``config`` is not defined in this block -- presumably a
    module-level global; confirm it is imported/initialized before any
    ConfigManager is constructed.
    """

    def __init__(self, *args):
        # Prepend the module-level config to whatever the caller passes.
        super().__init__(config, *args)
def act(flags, gym_env, actor_index: int, free_queue: mp.SimpleQueue, full_queue: mp.SimpleQueue, buffers: Buffers, actor_buffers: Buffers, actor_model_queues: List[mp.SimpleQueue], actor_env_queues: List[mp.SimpleQueue]):
    """IMPALA-style actor loop: roll out the environment into shared buffers.

    Protocol per unroll: take a free buffer index from ``free_queue``; for
    each of ``flags.unroll_length`` steps, signal the model server via
    ``actor_model_queues`` (which fills ``actor_buffers['action']``), wait
    on ``actor_env_queues`` for the go-ahead ('exit' terminates), step the
    env, and copy step outputs into ``buffers[...][index]``; finally hand
    the filled index to ``full_queue``.  A ``None`` from ``free_queue`` is
    the shutdown sentinel.

    NOTE(review): if ``flags.agent`` / ``flags.state`` match none of the
    branches below, ``env`` is never bound and ``env.initial()`` raises
    NameError -- confirm these flags are validated upstream.
    """
    try:
        logging.info('Actor %i started.', actor_index)
        timings = prof.Timings()
        gym_env = gym_env
        # Pick the observation wrapper based on agent architecture / state encoding.
        if (flags.agent in ['CNN']):
            env = environment.Environment(gym_env, 'image')
        elif (flags.agent in ['NLM', 'KBMLP', 'GCN']):
            if (flags.state in ['relative', 'integer', 'block']):
                env = environment.Environment(gym_env, 'VKB')
            elif (flags.state == 'absolute'):
                env = environment.Environment(gym_env, 'absVKB')
        env_output = env.initial()
        for key in env_output:
            actor_buffers[key][actor_index][0] = env_output[key]
        while True:
            index = free_queue.get()
            if (index is None):
                # Shutdown sentinel from the learner.
                break
            # Seed slot 0 of the unroll with the carried-over last state.
            for key in actor_buffers:
                buffers[key][index][0] = actor_buffers[key][actor_index][0]
            for t in range(flags.unroll_length):
                timings.reset()
                # Request an action from the model server and wait for it.
                actor_model_queues[actor_index].put(actor_index)
                env_info = actor_env_queues[actor_index].get()
                if (env_info == 'exit'):
                    return
                timings.time('model')
                env_output = env.step(actor_buffers['action'][actor_index][0])
                timings.time('step')
                for key in actor_buffers:
                    buffers[key][index][(t + 1)] = actor_buffers[key][actor_index][0]
                for key in env_output:
                    buffers[key][index][((t + 1), ...)] = env_output[key]
                # Carry the newest env output into the actor's scratch buffer.
                for key in env_output:
                    actor_buffers[key][actor_index][0] = env_output[key]
                timings.time('write')
            full_queue.put(index)
            if (actor_index == 0):
                # Only actor 0 reports timing stats to keep logs readable.
                logging.info('Actor %i: %s', actor_index, timings.summary())
    except KeyboardInterrupt:
        pass
    except Exception as e:
        logging.error('Exception in worker process %i', actor_index)
        traceback.print_exc()
        print()
        raise e
def load_vocab(vocab_file):
    """Read a whitespace-separated ``<unit> <index>`` vocabulary file.

    Args:
        vocab_file: path to a UTF-8 text file with one ``unit idx`` pair
            per line.

    Returns:
        dict mapping unit (str) -> index (int).
    """
    # The original wrapped the path in a single-argument os.path.join,
    # which is a no-op; open the path directly.
    unit2idx = {}
    with open(vocab_file, 'r', encoding='utf-8') as v:
        for line in v:
            (unit, idx) = line.strip().split()
            unit2idx[unit] = int(idx)
    return unit2idx
class Likelihood(FunctionWrapper):
    """Base class for GPML likelihood expressions.

    Supports ``+`` and ``*`` composition, flattening nested sums/products
    into SumLikelihood / ProductLikelihood, and translation to GPML-style
    string expressions for a given input dimensionality.
    """

    def __add__(self, other):
        """Sum composition; flattens when either operand is already a sum."""
        assert isinstance(other, Likelihood)
        if isinstance(other, SumLikelihood):
            if isinstance(self, SumLikelihood):
                # sum + sum: merge operand lists.
                new_f = self.copy()
                new_f.operands = (self.operands + other.operands)
                return new_f.canonical()
            else:
                # leaf + sum: prepend self to the sum's operands.
                # NOTE(review): new_f is a copy of *self* (a leaf) yet is
                # given a sum's operand list -- confirm copy()/canonical()
                # semantics actually yield a SumLikelihood here.
                new_f = self.copy()
                new_f.operands = ([self] + other.operands)
                return new_f.canonical()
        else:
            return SumLikelihood([self, other]).canonical()

    def __mul__(self, other):
        """Product composition; mirrors ``__add__`` for ProductLikelihood."""
        assert isinstance(other, Likelihood)
        if isinstance(other, ProductLikelihood):
            if isinstance(self, ProductLikelihood):
                # product * product: merge operand lists.
                new_f = self.copy()
                new_f.operands = (self.operands + other.operands)
                return new_f.canonical()
            else:
                # leaf * product: prepend self to the product's operands.
                # NOTE(review): same copy()-of-leaf pattern as __add__.
                new_f = self.copy()
                new_f.operands = ([self] + other.operands)
                return new_f.canonical()
        else:
            return ProductLikelihood([self, other]).canonical()

    def gpml_inference_method(self):
        # Base likelihood has no associated inference method string.
        return ''

    def get_gpml_expression(self, dimensions):
        """Return the GPML string for this likelihood over ``dimensions`` inputs.

        Non-operator leaves are masked onto their single active dimension
        when the input is multi-dimensional; operators must override.
        """
        if (not self.is_operator):
            if (self.is_thunk or (dimensions == 1)):
                return self.gpml_function
            else:
                assert (self.dimension < dimensions)
                # One-hot dimension mask, e.g. {@covMask, {[0 1 0], covSE}}.
                dim_vec = np.zeros(dimensions, dtype=int)
                dim_vec[self.dimension] = 1
                dim_vec_str = (('[' + ' '.join(map(str, dim_vec))) + ']')
                return ('{, {%s, %s}}' % (dim_vec_str, self.gpml_function))
        else:
            raise RuntimeError('Operators must override this method')

    def __repr__(self):
        return 'Likelihood()'
def get_iterations_required(xs, c=4.3):
    """Return per-element iteration counts: trunc(xs + c * cbrt(xs)) + 2.

    Args:
        xs: numpy array of (non-negative) sizes.
        c: cube-root coefficient controlling the extra-iteration margin.

    Returns:
        Integer numpy array of iteration counts.
    """
    cube_root_term = c * (xs ** (1.0 / 3))
    return (xs + cube_root_term).astype(int) + 2
def build_dbsampler(cfg, logger=None):
    """Build a GT-AUG database sampler from a config node.

    Args:
        cfg: config with ``db_prep_steps``, ``rate``,
            ``global_random_rotation_range_per_object``, ``sample_groups``,
            ``db_info_path`` and ``type``.
        logger: optional logger.  The original unconditionally overwrote
            this parameter; it is now only defaulted when omitted.

    Returns:
        A DataBaseSamplerV2 instance.

    Raises:
        NotImplementedError: for any ``cfg.type`` other than 'GT-AUG'.
    """
    if logger is None:
        logger = logging.getLogger('build_dbsampler')
    prepors = [build_db_preprocess(c, logger=logger) for c in cfg.db_prep_steps]
    db_prepor = DataBasePreprocessor(prepors)
    rate = cfg.rate
    grot_range = list(cfg.global_random_rotation_range_per_object)
    groups = cfg.sample_groups
    with open(cfg.db_info_path, 'rb') as f:
        db_infos = pickle.load(f)
    # An empty rotation range means "no global per-object rotation".
    if len(grot_range) == 0:
        grot_range = None
    if cfg.type != 'GT-AUG':
        raise NotImplementedError
    return DataBaseSamplerV2(db_infos, groups, db_prepor, rate, grot_range, logger=logger)
_model
def resnet101d(pretrained=False, **kwargs):
    """ResNet-101-D: deep 32-wide stem with average-pool downsampling."""
    defaults = dict(
        block=Bottleneck, layers=[3, 4, 23, 3],
        stem_width=32, stem_type='deep', avg_down=True,
        **kwargs)
    return _create_resnet('resnet101d', pretrained, **defaults)
class ThreadServerTrainer(AbstractTrainer):
    """Trainer that fans training out to a fixed pool of worker threads.

    Workers push ``(delta_t, episode_end, stats)`` tuples onto a bounded
    report queue; the main ``run`` loop pops them via ``process`` and
    accumulates global time into a shared counter until the shared stop
    flag is raised.
    """

    def __init__(self, name, env_kwargs, model_kwargs, **kwargs):
        super().__init__(env_kwargs=env_kwargs, model_kwargs=model_kwargs, **kwargs)
        self.name = name
        # Bounded queue provides backpressure on fast workers.
        self._report_queue = Queue(maxsize=16)
        self._shared_global_t = Value('i', 0)
        self._shared_is_stopped = Value('i', False)
        self._num_workers = 16

    def _global_t(self):
        # NOTE(review): plain method, not a property -- callers must call it.
        return self._shared_global_t.value

    def _child_run(self, id):
        """Thread body: build a worker and run it, reporting every result."""
        worker = self.create_worker(id)

        def _process(process, *args, **kwargs):
            # Wrap the worker's process() so each result is also reported.
            result = process(*args, **kwargs)
            self._report_queue.put(result)
            # NOTE(review): these assign *snapshots* (.value is a plain
            # int/bool), not the shared Value objects, so the worker never
            # observes later updates -- confirm this is intentional.
            worker._shared_global_t = self._shared_global_t.value
            worker._shared_is_stopped = self._shared_is_stopped.value
            return result
        worker.run(process=partial(_process, process=worker.process))

    def _initialize(self, **model_kwargs):
        # One thread per worker; started later in run().
        self.workers = [Thread(target=self._child_run, args=(i,)) for i in range(self._num_workers)]

    def stop(self):
        """Raise the stop flag and wait for every worker thread to finish."""
        self._shared_is_stopped.value = True
        for t in self.workers:
            t.join()

    def create_worker(self, id):
        # Subclass hook: build the id-th worker.
        pass

    def process(self, mode='train', **kwargs):
        """Block until one worker reports and return its (delta_t, epend, stats)."""
        assert (mode == 'train')
        (delta_t, epend, stats) = self._report_queue.get()
        return (delta_t, epend, stats)

    def run(self, process, **kwargs):
        """Main loop: start workers and aggregate their reported timesteps.

        Temporarily replaces ``create_env`` with a no-op so the parent
        ``run`` does not build an environment in this (server) thread.
        """
        self._sub_create_env = self.create_env
        self.create_env = (lambda **env_kwargs: None)
        super().run(process, **kwargs)
        self.create_env = self._sub_create_env
        del self._sub_create_env
        self._shared_global_t.value = 0
        self._shared_is_stopped.value = False
        for t in self.workers:
            t.start()
        while (not self._shared_is_stopped.value):
            (tdiff, _, _) = process(mode='train', context=dict())
            self._shared_global_t.value += tdiff
        self._finalize()
        return None
def progress_bar(iterator, log_format: Optional[str]=None, log_interval: int=100, log_file: Optional[str]=None, epoch: Optional[int]=None, prefix: Optional[str]=None, aim_repo: Optional[str]=None, aim_run_hash: Optional[str]=None, aim_param_checkpoint_dir: Optional[str]=None, tensorboard_logdir: Optional[str]=None, default_log_format: str='tqdm', wandb_project: Optional[str]=None, wandb_run_name: Optional[str]=None, azureml_logging: Optional[bool]=False):
    """Build a progress-bar wrapper chain around ``iterator``.

    Chooses a base bar ('json' | 'none' | 'simple' | 'tqdm'), then wraps it
    with optional Aim / TensorBoard / Weights&Biases / AzureML reporters.
    'tqdm' silently degrades to 'simple' when stderr is not a TTY.

    Raises:
        ValueError: for an unknown ``log_format``.
    """
    if (log_format is None):
        log_format = default_log_format
    if (log_file is not None):
        handler = logging.FileHandler(filename=log_file)
        logger.addHandler(handler)
    # tqdm output is unreadable when redirected; fall back to simple lines.
    if ((log_format == 'tqdm') and (not sys.stderr.isatty())):
        log_format = 'simple'
    if (log_format == 'json'):
        bar = JsonProgressBar(iterator, epoch, prefix, log_interval)
    elif (log_format == 'none'):
        bar = NoopProgressBar(iterator, epoch, prefix)
    elif (log_format == 'simple'):
        bar = SimpleProgressBar(iterator, epoch, prefix, log_interval)
    elif (log_format == 'tqdm'):
        bar = TqdmProgressBar(iterator, epoch, prefix)
    else:
        raise ValueError('Unknown log format: {}'.format(log_format))
    if aim_repo:
        bar = AimProgressBarWrapper(bar, aim_repo=aim_repo, aim_run_hash=aim_run_hash, aim_param_checkpoint_dir=aim_param_checkpoint_dir)
    if tensorboard_logdir:
        try:
            # Deliberate probe: the internal (fb) wrapper is used only when
            # the 'palaas' package is importable; otherwise plain TB.
            import palaas
            from .fb_tbmf_wrapper import FbTbmfWrapper
            bar = FbTbmfWrapper(bar, log_interval)
        except ImportError:
            bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir)
    if wandb_project:
        bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name)
    if azureml_logging:
        bar = AzureMLProgressBarWrapper(bar)
    return bar
def _get_interpolate_attributes(g, mode, args):
    """Split ONNX interpolate args into ``(scales, align_corners)``.

    'nearest' mode carries no align_corners flag, so all args are scales;
    otherwise the first arg is align_corners and the rest are scales.
    """
    if mode == 'nearest':
        align_corners, scale_args = None, args[0:]
    else:
        align_corners, scale_args = args[0], args[1:]
    scales = _interpolate_get_scales_if_available(g, scale_args)
    return (scales, align_corners)
class TestAlgo(unittest.TestCase):
    """Tests for nuScenes detection-metric accumulation / AP / TP code."""

    cfg = config_factory('detection_cvpr_2019')

    def _mock_results(self, nsamples, ngt, npred, detection_name):
        """Create random (gt, pred) EvalBoxes for one detection class.

        Bug fix: the original definition omitted ``self``, so every
        ``self._mock_results(30, 3, 25, name)`` call raised TypeError
        (five arguments passed to a four-parameter function).
        """

        def random_attr():
            # Pick a relevant attribute for this class ('' when none exist).
            rel_attributes = detection_name_to_rel_attributes(detection_name)
            if len(rel_attributes) == 0:
                return ''
            return rel_attributes[np.random.randint(0, len(rel_attributes))]

        def random_box(sample_token, translation_scale):
            # One random DetectionBox; keyword order matches the original so
            # the RNG consumption sequence (and seeded results) is unchanged.
            translation_xy = tuple(np.random.rand(2) * translation_scale)
            return DetectionBox(
                sample_token=sample_token,
                translation=(translation_xy[0], translation_xy[1], 0.0),
                size=tuple(np.random.rand(3) * 4),
                rotation=tuple(np.random.rand(4)),
                velocity=tuple(np.random.rand(3)[:2] * 4),
                detection_name=detection_name,
                detection_score=random.random(),
                attribute_name=random_attr(),
                ego_translation=((random.random() * 10), 0, 0))

        pred = EvalBoxes()
        gt = EvalBoxes()
        # All GT boxes first (translations in [0, 15)), then predictions
        # (translations in [0, 10)) -- same order as the original.
        for sample_itt in range(nsamples):
            gt.add_boxes(str(sample_itt), [random_box(str(sample_itt), 15) for _ in range(ngt)])
        for sample_itt in range(nsamples):
            pred.add_boxes(str(sample_itt), [random_box(str(sample_itt), 10) for _ in range(npred)])
        return (gt, pred)

    def test_nd_score(self):
        """nd_score is 0 when AP/TP come out at their worst on random boxes."""
        random.seed(42)
        np.random.seed(42)
        mdl = DetectionMetricDataList()
        for class_name in self.cfg.class_names:
            (gt, pred) = self._mock_results(30, 3, 25, class_name)
            for dist_th in self.cfg.dist_ths:
                mdl.set(class_name, dist_th, accumulate(gt, pred, class_name, center_distance, 2))
        metrics = DetectionMetrics(self.cfg)
        for class_name in self.cfg.class_names:
            for dist_th in self.cfg.dist_ths:
                ap = calc_ap(mdl[(class_name, dist_th)], self.cfg.min_recall, self.cfg.min_precision)
                metrics.add_label_ap(class_name, dist_th, ap)
            for metric_name in TP_METRICS:
                metric_data = mdl[(class_name, self.cfg.dist_th_tp)]
                # Some TP metrics are undefined for static / orientation-free
                # classes and are recorded as NaN.
                if ((class_name in ['traffic_cone']) and (metric_name in ['attr_err', 'vel_err', 'orient_err'])):
                    tp = np.nan
                elif ((class_name in ['barrier']) and (metric_name in ['attr_err', 'vel_err'])):
                    tp = np.nan
                else:
                    tp = calc_tp(metric_data, self.cfg.min_recall, metric_name)
                metrics.add_label_tp(class_name, metric_name, tp)
        self.assertEqual(0., metrics.nd_score)

    def test_calc_tp(self):
        """With min_recall == 1 the TP error must default to 1.0."""
        random.seed(42)
        np.random.seed(42)
        md = DetectionMetricData.random_md()
        self.assertEqual(1.0, calc_tp(md, min_recall=1, metric_name='trans_err'))

    def test_calc_ap(self):
        """calc_ap must reject out-of-range min_recall / min_precision."""
        random.seed(42)
        np.random.seed(42)
        md = DetectionMetricData.random_md()
        self.assertRaises(AssertionError, calc_ap, md, (- 0.5), 0.4)
        self.assertRaises(AssertionError, calc_ap, md, 0.5, (- 0.8))
        self.assertRaises(AssertionError, calc_ap, md, 0.7, 1)
        self.assertRaises(AssertionError, calc_ap, md, 1.2, 0)
def mkdir(path):
    """Create ``path`` (and missing parents), returning whether it was created.

    Args:
        path: directory path; surrounding whitespace and trailing
            backslashes are stripped.

    Returns:
        True if the directory was created, False if it already existed.
    """
    path = path.strip().rstrip('\\')
    # EAFP instead of the original exists()-then-makedirs: the check-then-act
    # pattern raced with concurrent creators between the two calls.
    try:
        os.makedirs(path)
    except FileExistsError:
        return False
    return True
def main():
    """Entry point for (optionally distributed) detector training.

    Parses CLI args, merges them into the mmdet-style config, sets up the
    distributed launcher/logger/seed, builds model and dataset(s), and
    hands off to ``train_detector``.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # cudnn benchmark autotunes conv kernels; beneficial for fixed shapes.
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # CLI overrides take precedence over config values.
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    if (args.resume_from is not None):
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if args.autoscale_lr:
        # Linear LR scaling rule with an 8-GPU baseline.
        cfg.optimizer['lr'] = ((cfg.optimizer['lr'] * cfg.gpus) / 8)
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    if (args.seed is not None):
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    datasets = [build_dataset(cfg.data.train)]
    if (len(cfg.workflow) == 2):
        # A two-entry workflow means train + val; build the val dataset too.
        datasets.append(build_dataset(cfg.data.val))
    if (cfg.checkpoint_config is not None):
        # Stamp every checkpoint with version, config text and class names.
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__, config=cfg.text, CLASSES=datasets[0].CLASSES)
    model.CLASSES = datasets[0].CLASSES
    train_detector(model, datasets, cfg, distributed=distributed, validate=args.validate, logger=logger)
class GeneralHead3D(nn.Module, ABC):
    """Classification head for 3D backbones.

    Pipeline: global average pool over (T, H, W) -> flatten -> dropout ->
    linear classifier.
    """

    def __init__(self, feature_dims=2048, dropout_rate=0.0, num_classes=1000):
        super(GeneralHead3D, self).__init__()
        self.pool = nn.AdaptiveAvgPool3d((1, 1, 1))
        self.dropout = nn.Dropout(p=dropout_rate)
        self.fc = nn.Linear(feature_dims, num_classes)
        self.init_weights()

    def init_weights(self):
        """Gaussian (std 0.01) init for the classifier weight, zero bias."""
        nn.init.normal_(self.fc.weight, 0, 0.01)
        nn.init.zeros_(self.fc.bias)

    def forward(self, x):
        """Map (N, C, T, H, W) features to (N, num_classes) logits."""
        pooled = self.pool(x)
        flat = torch.flatten(pooled, 1)
        return self.fc(self.dropout(flat))
def build_deptree_features(df):
    """Extract dependency-tree features for each row and return a DataFrame.

    Columns cover offsets, sentence indices and ranks for the A, B and
    pronoun (P) mentions.  Extraction time is reported via ``timer``.
    """
    feature_names = ['A_off', 'B_off', 'P_off', 'A_sent', 'B_sent', 'P_sent', 'A_rank', 'B_rank', 'P_rank']
    with timer('Extracting deptree features'):
        raw_features = get_deptree_features(df)
        frame = pd.DataFrame(raw_features, columns=feature_names)
    return frame
def reporthook(count, block_size, total_size):
    """urllib ``urlretrieve``-style progress hook printing percent/size/speed.

    Args:
        count: number of blocks transferred so far; 0 (re)starts the timer.
        block_size: size of each block in bytes.
        total_size: total download size in bytes (may be 0/unknown).

    Uses the module-global ``start_time`` set on the first call.
    """
    global start_time
    if count == 0:
        start_time = time.time()
        return
    # Guard against a zero duration: the first progress tick can land within
    # the clock's resolution of the start, which divided by zero before.
    duration = max(time.time() - start_time, 1e-09)
    progress_size = int(count * block_size)
    speed = int(progress_size / (1024 * duration))
    # An unknown (0 or negative) total size would also divide by zero.
    percent = int(count * block_size * 100 / total_size) if total_size > 0 else 0
    sys.stdout.write('\r...%d%%, %d MB, %d KB/s, %d seconds passed' % (percent, (progress_size / (1024 * 1024)), speed, duration))
    sys.stdout.flush()
def resnet_arg_scope(weight_decay=0.0001, batch_norm_decay=0.997, batch_norm_epsilon=1e-05, batch_norm_scale=True, activation_fn=tf.nn.relu, use_batch_norm=True):
    """TF-slim arg_scope for ResNet: L2-regularized convs plus batch norm.

    Convolutions get variance-scaling init, L2 weight decay and (optionally)
    batch-norm as normalizer; max-pooling uses SAME padding.  Returns the
    innermost scope so callers can re-enter it with
    ``with slim.arg_scope(resnet_arg_scope()): ...``.
    """
    batch_norm_params = {'decay': batch_norm_decay, 'epsilon': batch_norm_epsilon, 'scale': batch_norm_scale, 'updates_collections': tf.GraphKeys.UPDATE_OPS}
    with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(weight_decay), weights_initializer=slim.variance_scaling_initializer(), activation_fn=activation_fn, normalizer_fn=(slim.batch_norm if use_batch_norm else None), normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
class ModelArguments():
    """HfArgumentParser argument group for the dense-encoder / MCQ model.

    NOTE(review): the ``field(...)`` defaults only take effect under a
    ``@dataclass`` decorator, which appears to have been stripped from this
    snippet -- confirm against the original declaration.
    """

    # Path to the pretrained dense encoder checkpoint.
    model_name_or_path: str = field(metadata={'help': 'Path to dense encoder'})
    # Number of MCQ sub-vectors per encoded text.
    MCQ_M: int = field(metadata={'help': 'Number of sub-vectors per text.'})
    # Scoring metric; None keeps the checkpoint's original value.
    similarity_metric: str = field(default=None, metadata={'help': 'If None, use the original value.', 'choices': ['METRIC_CENTROID_COS', 'METRIC_IP', 'METRIC_COS']})
    # Sentence pooling strategy; None keeps the original value.
    pooling: str = field(default=None, metadata={'help': 'if None, keep the original values', 'choices': ['cls', 'mean']})
    # Codebook size (clusters) per sub-vector.
    MCQ_K: int = field(default=256, metadata={'help': 'Number of clusters per sub-vector.'})
class _PointnetSAModuleBase(nn.Module):
    """Base PointNet++ set-abstraction module.

    Subclasses populate ``npoint``, ``groupers`` and ``mlps``; ``forward``
    samples centroids (FPS), groups neighbors, runs the per-group MLPs and
    pools over each neighborhood.
    """

    def __init__(self):
        super().__init__()
        self.npoint = None      # number of sampled centroids (None = keep all / no sampling)
        self.groupers = None    # grouping ops, one per scale, set by subclass
        self.mlps = None        # shared MLPs, one per grouper, set by subclass
        self.pool_method = 'max_pool'  # 'max_pool' or 'avg_pool'

    def forward(self, xyz: torch.Tensor, features: torch.Tensor=None, new_xyz=None) -> (torch.Tensor, torch.Tensor):
        """Abstract one set of points into centroid features.

        Args:
            xyz: (B, N, 3) point coordinates.
            features: per-point features -- assumed (B, C, N); TODO confirm.
            new_xyz: optional precomputed centroids; when None they are
                furthest-point-sampled from ``xyz``.

        Returns:
            (new_xyz, features): sampled centroids and the pooled features
            of all groupers concatenated along the channel dimension.
        """
        new_features_list = []
        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if (new_xyz is None):
            # FPS npoint centroids, gather their coordinates back to (B, npoint, 3).
            new_xyz = (pointnet2_utils.gather_operation(xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)).transpose(1, 2).contiguous() if (self.npoint is not None) else None)
        for i in range(len(self.groupers)):
            new_features = self.groupers[i](xyz, new_xyz, features)
            new_features = self.mlps[i](new_features)
            # Pool across the neighborhood (last) axis of each group.
            if (self.pool_method == 'max_pool'):
                new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)])
            elif (self.pool_method == 'avg_pool'):
                new_features = F.avg_pool2d(new_features, kernel_size=[1, new_features.size(3)])
            else:
                raise NotImplementedError
            new_features = new_features.squeeze((- 1))
            new_features_list.append(new_features)
        return (new_xyz, torch.cat(new_features_list, dim=1))
def example_generator(data_path, single_pass):
    """Yield serialized tf.Example protos from files matching ``data_path``.

    Each file stores records as an 8-byte little-endian length ('q') followed
    by that many bytes of serialized Example.  In single-pass mode the files
    are read once in sorted order; otherwise they are shuffled and the
    generator loops forever.

    Args:
        data_path: glob pattern for the data files.
        single_pass: stop after one pass over all files when True.

    Yields:
        example_pb2.Example messages.
    """
    while True:
        filelist = glob.glob(data_path)
        assert filelist, ('Error: Empty filelist at %s' % data_path)
        if single_pass:
            filelist = sorted(filelist)
        else:
            random.shuffle(filelist)
        for f in filelist:
            # Context manager fixes the original's leaked file handle (it
            # was opened but never closed, including when the consumer
            # abandons the generator mid-file).
            with open(f, 'rb') as reader:
                while True:
                    len_bytes = reader.read(8)
                    if not len_bytes:
                        break
                    str_len = struct.unpack('q', len_bytes)[0]
                    example_str = struct.unpack(('%ds' % str_len), reader.read(str_len))[0]
                    yield example_pb2.Example.FromString(example_str)
        if single_pass:
            print('example_generator completed reading all datafiles. No more data.')
            break
(signature, parallel=False, cache=True, nogil=False)
def weighted_average_C(config, weights, q):
    """Weighted one-hot frequency of categorical configurations per site.

    Args:
        config: (B, N) integer array of category indices in [0, q).
        weights: (B,) array of per-sample weights.
        q: number of categories.

    Returns:
        (N, q) array where out[n, k] is the weight-normalized frequency of
        category k at site n.

    NOTE(review): ``prange`` and ``curr_float`` come from numba / module
    globals, and a numba @njit decorator appears to have been stripped
    above this function.  With parallel=True the in-place accumulation into
    ``out`` relies on numba's handling of the outer prange -- confirm the
    intended parallel reduction semantics before re-enabling parallelism.
    """
    B = config.shape[0]
    N = config.shape[1]
    out = np.zeros((N, q), dtype=curr_float)
    for b in prange(B):
        for n in prange(N):
            out[(n, config[(b, n)])] += weights[b]
    # Normalize by total weight so each row sums to 1.
    out /= weights.sum()
    return out
def _create_wr_eet(filename, port, fps, user):
    """Open an hl2ss writer for eye-tracking (EET) data.

    Writes the stream header and the EET configuration record before
    returning the ready-to-use writer.
    """
    writer = _writer()
    writer.open(filename)
    writer.put(_create_header(port, user))
    writer.put(hl2ss._create_configuration_for_eet(fps))
    return writer
def initialize_logging(experiment, scaffolding):
    """Attach hierarchical child loggers to every scaffold.

    The root logger is taken from the experiment (or a basic stream logger
    is created when it has none).  Scaffolds keyed by a non-empty path get
    a child logger named after that path; the empty-path scaffold gets the
    root itself.

    Returns:
        (root_logger, experiment_logger) where the experiment logger is the
        root's child named after ``experiment.path``.
    """
    root = experiment.logger if experiment.logger is not None else create_basic_stream_logger()
    for path, scaffold in scaffolding.items():
        scaffold.logger = root.getChild(path) if path else root
    return (root, root.getChild(experiment.path))
def parse_args():
    """Parse CLI options for sequence-classification fine-tuning/training.

    Positional arguments are the training data file and the output folder;
    everything else has a sensible default.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('--report-to', type=str, default=None,
                    help='Where to report the results, you can choose e.g., WANDB')
    ap.add_argument('-e', '--epochs', type=int, default=4,
                    help='Number of epochs of fine-tuning/training')
    ap.add_argument('-s', '--seed', type=int, default=7778,
                    help='Seed for training')
    ap.add_argument('-tb', '--train-batch-size', type=int, default=16,
                    help='Training batch size')
    ap.add_argument('-eb', '--eval-batch-size', type=int, default=10,
                    help='Evaluation batch size')
    ap.add_argument('--gradient-accumulation-steps', type=int, default=1,
                    help='Number of gradient accumulation steps')
    ap.add_argument('--warmup-steps', type=int, default=500,
                    help='Number of warm up steps')
    ap.add_argument('--logging-steps', type=int, default=1000,
                    help='Logging steps size')
    ap.add_argument('--save-steps', type=int, default=1000,
                    help='Number of steps to save the model')
    ap.add_argument('--eval-steps', type=int, default=500,
                    help='Perform evaluation each N steps')
    ap.add_argument('--max-steps', type=int, default=5000,
                    help='Maximum number of steps to train the model')
    ap.add_argument('--max-train-samples', type=int, default=(- 1),
                    help='Maximum number of training samples to use during training. pass -1 to use the whole training set')
    ap.add_argument('--input-model', default=None,
                    help='Path to a previously trained model, to fine tune it')
    ap.add_argument('--val-data', default=None,
                    help='Validation text data for Seq Classification model (utt2spk_id)')
    ap.add_argument('--test-data', default=None,
                    help='TEST data for Seq Classification model (utt2spk_id)')
    ap.add_argument('train_data',
                    help='Train text data for sequence classification model (utt2spk_id)')
    ap.add_argument('output_folder',
                    help='name of the output folder to store the sequence classification model and tokenizer')
    return ap.parse_args()
def sample_discretized_normal(mean, logvar, inverse_bin_width):
    """Draw from N(mean, exp(logvar)) and snap to the discretization grid.

    The continuous sample is rounded to the nearest multiple of
    ``1 / inverse_bin_width``.
    """
    noise = torch.randn_like(mean)
    sample = (torch.exp(0.5 * logvar) * noise) + mean
    return torch.round(sample * inverse_bin_width) / inverse_bin_width
_registry(pattern_type='Transformer2Dmodel_EncoderHiddenStatesReshape')
class Transformer2Dmodel_EncoderHiddenStatesReshape(Pattern):
    """Flatten 3D encoder_hidden_states to 2D before its MatMul consumers.

    Matches either the plain-MatMul or the MatMulWithBias variant of the
    Input -> MatMul subgraph and inserts one shared Reshape node in front of
    every matched MatMul. For the MatMulWithBias variant, quantization info
    of the source tensor is propagated to the reshaped tensor.

    The two original branches were verbatim copies; the shared loop now lives
    in _insert_reshape_2d.
    """

    @staticmethod
    def _insert_reshape_2d(model, patterns_nodes_name):
        """Insert one 'encoder_hidden_states/reshape_2d' node feeding all matched MatMuls.

        Returns (new_node, input_tensors) so callers can propagate quant info.
        """
        first_matmul_node_idx = -1
        all_dest_op = []
        for i in range(len(patterns_nodes_name)):
            input_node = model.get_node_by_name(patterns_nodes_name[i][0])
            # encoder_hidden_states is the third output tensor of the Input node
            encoder_hidden_states_tensor = input_node.output_tensors[2]
            matmul_node = model.get_node_by_name(patterns_nodes_name[i][1])
            matmul_node_idx = model.get_node_id(matmul_node.name)
            if i == 0:
                # remember where to physically insert the reshape node
                first_matmul_node_idx = matmul_node_idx
            new_node_name = 'encoder_hidden_states/reshape_2d'
            new_node_output_tensor_name = encoder_hidden_states_tensor.name + '_2d'
            input_tensors = [encoder_hidden_states_tensor]
            output_tensor = [Tensor(name=new_node_output_tensor_name, source_op=[new_node_name],
                                    dest_op=encoder_hidden_states_tensor.dest_op,
                                    dtype=matmul_node.output_tensors[0].dtype)]
            new_node = util.construct_node(node_name=new_node_name, op_type='Reshape',
                                           input_tensors=input_tensors, output_tensors=output_tensor)
            attr = OrderedDict()
            # collapse leading dims: (batch, seq, hidden) -> (-1, hidden)
            seq_length = encoder_hidden_states_tensor.shape[2]
            attr['dst_shape'] = '-1,' + str(seq_length)
            new_node.attr = attr
            # reroute the MatMul to consume the flattened tensor
            matmul_node.input_tensors[0] = new_node.output_tensors[0]
            all_dest_op.append(matmul_node.name)
            # every iteration shares the same list object, so all reshaped
            # tensors end up with the complete consumer set
            new_node.output_tensors[0].dest_op = all_dest_op
        assert first_matmul_node_idx != -1
        model.insert_nodes(first_matmul_node_idx, [new_node])
        return new_node, input_tensors

    def __call__(self, model):
        pattern_mapping_config = {'Transformer2Dmodel_EncoderHiddenStatesReshape': [{'patterns': {'in': [[(0, 'Input'), (1, 'MatMulWithBias')]]}}, {'patterns': {'in': [[(0, 'Input'), (1, 'MatMul')]]}}]}
        # try the plain-MatMul variant first
        pattern = pattern_mapping_config['Transformer2Dmodel_EncoderHiddenStatesReshape'][1]['patterns']['in']
        patterns_nodes_name = util.search_pattern(pattern, model)
        if len(patterns_nodes_name) != 0:
            logger.info('Transformer2Dmodel_EncoderHiddenStatesReshape mathched...')
            logger.debug('Transformer2Dmodel_EncoderHiddenStatesReshape = {}'.format(patterns_nodes_name))
            self._insert_reshape_2d(model, patterns_nodes_name)
            return model
        # fall back to the MatMulWithBias variant
        pattern = pattern_mapping_config['Transformer2Dmodel_EncoderHiddenStatesReshape'][0]['patterns']['in']
        patterns_nodes_name = util.search_pattern(pattern, model)
        if len(patterns_nodes_name) != 0:
            logger.info('Transformer2Dmodel_EncoderHiddenStatesReshape mathched...')
            logger.debug('Transformer2Dmodel_EncoderHiddenStatesReshape = {}'.format(patterns_nodes_name))
            (new_node, input_tensors) = self._insert_reshape_2d(model, patterns_nodes_name)
            # propagate quantization info from the source tensor onto the reshaped one
            quant_info = util.get_quant_info()
            util.insert_quant_info(new_node.output_tensors[0].name, quant_info[input_tensors[0].name])
        return model
def write_file(filename: str, data: torch.Tensor) -> None:
    """Write the contents of ``data`` to ``filename`` via the native image op.

    NOTE(review): presumably ``data`` is a 1-D uint8 tensor holding already
    encoded bytes (the torchvision convention) -- confirm with callers.
    """
    torch.ops.image.write_file(filename, data)
def load_tf_weights_in_mobilenet_v2(*args, **kwargs):
    """Placeholder stub: defers to requires_backends to report a missing 'torch' backend.

    All arguments are ignored; requires_backends is expected to raise when
    torch is not installed (the dummy-object pattern).
    """
    requires_backends(load_tf_weights_in_mobilenet_v2, ['torch'])
def ProcessFile(filename, vlevel, extra_check_functions=None):
    """Lint a single file at the given verbosity level.

    Args:
        filename: path of the file to check, or '-' to read from stdin.
        vlevel: verbosity level passed to _SetVerboseLevel.
        extra_check_functions: optional list of extra check callables forwarded
            to ProcessFileData (default None -> empty list; the original used a
            shared mutable default).

    Reads the file with utf-8/replace decoding, strips trailing '\r' so Windows
    files lint cleanly, and reports stray carriage returns on non-CRLF platforms.
    """
    _SetVerboseLevel(vlevel)
    if extra_check_functions is None:
        extra_check_functions = []
    try:
        if filename == '-':
            lines = codecs.StreamReaderWriter(sys.stdin, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace').read().split('\n')
        else:
            # Close the handle deterministically (the original leaked it).
            with codecs.open(filename, 'r', 'utf8', 'replace') as target_file:
                lines = target_file.read().split('\n')
        carriage_return_found = False
        for linenum in range(len(lines)):
            if lines[linenum].endswith('\r'):
                lines[linenum] = lines[linenum].rstrip('\r')
                carriage_return_found = True
    except IOError:
        sys.stderr.write("Skipping input '%s': Can't open for reading\n" % filename)
        return
    file_extension = filename[filename.rfind('.') + 1:]
    if filename != '-' and file_extension not in _valid_extensions:
        sys.stderr.write('Ignoring %s; not a valid file name (%s)\n' % (filename, ', '.join(_valid_extensions)))
    else:
        ProcessFileData(filename, file_extension, lines, Error, extra_check_functions)
        if carriage_return_found and os.linesep != '\r\n':
            Error(filename, 0, 'whitespace/newline', 1, 'One or more unexpected \\r (^M) found;better to use only a \\n')
    sys.stderr.write('Done processing %s\n' % filename)
_task(name='EQA-v0')
class EQATask(NavigationTask):
    """Embodied Question Answering task.

    An episode stays active until the task becomes invalid or an answer has
    been produced.
    """

    def _check_episode_is_active(self, *args, action, episode, action_args=None, **kwargs) -> bool:
        # Once invalid, the episode is over regardless of the answer state.
        if not self.is_valid:
            return False
        # Otherwise the episode runs until an answer is recorded.
        return self.answer is None
def update_perf_log(epoch_perf, perf_log_path):
    """Append one timestamped line of 'key: value' metrics to the performance log.

    Args:
        epoch_perf: mapping of metric name -> value for the finished epoch.
        perf_log_path: path of the log file (opened in append mode).
    """
    timestamp = time.strftime('%c')
    parts = ['t: {}, '.format(timestamp)]
    parts.extend('{}: {}, '.format(name, value) for name, value in epoch_perf.items())
    parts.append('\n')
    with open(perf_log_path, 'a') as log_file:
        log_file.write(''.join(parts))
class BenchmarkConfig():
    """Configuration for benchmark runs.

    Every attribute is a validated property; assignments in __init__ run
    through the corresponding setter, which only stores the value when
    _check_value accepts it.

    Fix: the '@property' / '@<name>.setter' decorators had been stripped,
    leaving stray expressions like '_per_instance.setter' (NameError at class
    creation) and duplicate method names that shadowed each other. They are
    restored here; the external attribute interface is unchanged.
    """

    def __init__(self, inputs=[], outputs=[], backend='default', device='cpu', warmup=5,
                 iteration=-1, model_name='', cores_per_instance=None, num_of_instance=1,
                 inter_num_of_threads=None, intra_num_of_threads=None, diagnosis=False,
                 ni_workload_name='profiling'):
        """Initialize the config; each assignment goes through its validating setter."""
        self.inputs = inputs
        self.outputs = outputs
        self.backend = backend
        self.device = device
        self.warmup = warmup
        self.iteration = iteration
        self.model_name = model_name
        self.cores_per_instance = cores_per_instance
        self.num_of_instance = num_of_instance
        self.inter_num_of_threads = inter_num_of_threads
        self.intra_num_of_threads = intra_num_of_threads
        self.diagnosis = diagnosis
        self.ni_workload_name = ni_workload_name
        # framework is filled in internally, not via the constructor
        self._framework = None

    def keys(self):
        """Keys usable with dict-style access (enables dict(config))."""
        return ('inputs', 'outputs', 'backend', 'device', 'warmup', 'iteration', 'model_name', 'cores_per_instance', 'num_of_instance', 'framework', 'inter_num_of_threads', 'intra_num_of_threads')

    def __getitem__(self, item):
        """Dict-style attribute access."""
        return getattr(self, item)

    @property
    def backend(self):
        """Inference backend name."""
        return self._backend

    @backend.setter
    def backend(self, backend):
        if _check_value('backend', backend, str, ['default', 'itex', 'ipex', 'onnxrt_trt_ep', 'onnxrt_cuda_ep', 'onnxrt_dnnl_ep', 'onnxrt_dml_ep']):
            self._backend = backend

    @property
    def device(self):
        """Target device."""
        return self._device

    @device.setter
    def device(self, device):
        if _check_value('device', device, str, ['cpu', 'gpu', 'npu', 'xpu']):
            self._device = device

    @property
    def outputs(self):
        """Output node names."""
        return self._outputs

    @outputs.setter
    def outputs(self, outputs):
        if _check_value('outputs', outputs, str):
            self._outputs = outputs

    @property
    def inputs(self):
        """Input node names."""
        return self._inputs

    @inputs.setter
    def inputs(self, inputs):
        if _check_value('inputs', inputs, str):
            self._inputs = inputs

    @property
    def warmup(self):
        """Number of warmup iterations before measuring."""
        return self._warmup

    @warmup.setter
    def warmup(self, warmup):
        if _check_value('warmup', warmup, int):
            self._warmup = warmup

    @property
    def iteration(self):
        """Number of measured iterations (-1 means the whole dataset)."""
        return self._iteration

    @iteration.setter
    def iteration(self, iteration):
        if _check_value('iteration', iteration, int):
            self._iteration = iteration

    @property
    def cores_per_instance(self):
        """CPU cores per benchmark instance (None = auto)."""
        return self._cores_per_instance

    @cores_per_instance.setter
    def cores_per_instance(self, cores_per_instance):
        if cores_per_instance is None or _check_value('cores_per_instance', cores_per_instance, int):
            self._cores_per_instance = cores_per_instance

    @property
    def num_of_instance(self):
        """Number of parallel benchmark instances."""
        return self._num_of_instance

    @num_of_instance.setter
    def num_of_instance(self, num_of_instance):
        if _check_value('num_of_instance', num_of_instance, int):
            self._num_of_instance = num_of_instance

    @property
    def inter_num_of_threads(self):
        """Inter-op thread count (None = framework default)."""
        return self._inter_num_of_threads

    @inter_num_of_threads.setter
    def inter_num_of_threads(self, inter_num_of_threads):
        if inter_num_of_threads is None or _check_value('inter_num_of_threads', inter_num_of_threads, int):
            self._inter_num_of_threads = inter_num_of_threads

    @property
    def intra_num_of_threads(self):
        """Intra-op thread count (None = framework default)."""
        return self._intra_num_of_threads

    @intra_num_of_threads.setter
    def intra_num_of_threads(self, intra_num_of_threads):
        if intra_num_of_threads is None or _check_value('intra_num_of_threads', intra_num_of_threads, int):
            self._intra_num_of_threads = intra_num_of_threads

    @property
    def diagnosis(self):
        """Whether to collect diagnosis information."""
        return self._diagnosis

    @diagnosis.setter
    def diagnosis(self, diagnosis):
        if _check_value('diagnosis', diagnosis, bool):
            self._diagnosis = diagnosis

    @property
    def ni_workload_name(self):
        """Workload name used for profiling output."""
        return self._ni_workload_name

    @ni_workload_name.setter
    def ni_workload_name(self, ni_workload_name):
        if _check_value('ni_workload_name', ni_workload_name, str):
            self._ni_workload_name = ni_workload_name

    @property
    def model_name(self):
        """Name of the benchmarked model."""
        return self._model_name

    @model_name.setter
    def model_name(self, model_name):
        if _check_value('model_name', model_name, str):
            self._model_name = model_name

    @property
    def framework(self):
        """Framework name (set internally after detection)."""
        return self._framework

    @framework.setter
    def framework(self, framework):
        self._framework = framework
class FlaxRobertaModel():
    """Placeholder emitted when Flax is unavailable; any use reports the missing backend."""

    def __init__(self, *args, **kwargs):
        # requires_flax is expected to raise when flax is not installed.
        requires_flax(self)

    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): upstream dummy objects usually declare this as a
        # classmethod -- confirm before relying on instance-less calls.
        requires_flax(self)
class JTensor(object):
    """Python-side representation of a BigDL tensor (dense or sparse).

    Stores the flat value buffer, the shape, and, for sparse tensors, the
    flattened indices. Byte inputs are interpreted as raw buffers coming back
    from the JVM side.

    Fixes: from_ndarray/sparse take ``cls`` but were missing @classmethod
    (restored); deprecated ndarray.tostring() replaced by the byte-identical
    ndarray.tobytes().
    """

    def __init__(self, storage, shape, bigdl_type='float', indices=None):
        """Build a JTensor from raw bytes or array-likes.

        Args:
            storage: flat values, as bytes (JVM buffer) or array-like.
            shape: tensor shape, as bytes (JVM buffer) or array-like of int.
            bigdl_type: element type name understood by get_dtype.
            indices: None for dense tensors, else bytes or np.ndarray of int32.
        """
        if isinstance(storage, bytes) and isinstance(shape, bytes):
            # Raw byte buffers from the JVM: reinterpret without copying.
            self.storage = np.frombuffer(storage, dtype=get_dtype(bigdl_type))
            self.shape = np.frombuffer(shape, dtype=np.int32)
        else:
            self.storage = np.array(storage, dtype=get_dtype(bigdl_type))
            self.shape = np.array(shape, dtype=np.int32)
        if indices is None:
            self.indices = None
        elif isinstance(indices, bytes):
            self.indices = np.frombuffer(indices, dtype=np.int32)
        else:
            invalidInputError(isinstance(indices, np.ndarray), f'indices should be a np.ndarray, not ${type(indices)}, ${str(indices)}')
            self.indices = np.array(indices, dtype=np.int32)
        self.bigdl_type = bigdl_type

    @classmethod
    def from_ndarray(cls, a_ndarray, bigdl_type='float'):
        """Create a dense JTensor from a numpy array (None passes through)."""
        if a_ndarray is None:
            return None
        invalidInputError(isinstance(a_ndarray, np.ndarray), f'input should be a np.ndarray, not ${type(a_ndarray)}')
        # 0-d arrays have an empty shape tuple; fall back to their size.
        return cls(a_ndarray, (a_ndarray.shape if a_ndarray.shape else a_ndarray.size), bigdl_type)

    @classmethod
    def sparse(cls, a_ndarray, i_ndarray, shape, bigdl_type='float'):
        """Create a sparse JTensor from values, flattened indices and shape."""
        if a_ndarray is None:
            return None
        invalidInputError(isinstance(a_ndarray, np.ndarray), f'input should be a np.ndarray, not ${type(a_ndarray)}')
        invalidInputError(isinstance(i_ndarray, np.ndarray), f'indices should be a np.ndarray, not ${type(i_ndarray)}')
        # Indices are flattened: one coordinate per dimension per value.
        invalidInputError((i_ndarray.size == (a_ndarray.size * shape.size)), f'size of values ${(a_ndarray.size * shape.size)} and indices ${i_ndarray.size} should match')
        return cls(a_ndarray, shape, bigdl_type, i_ndarray)

    def to_ndarray(self):
        """Return the dense numpy array; sparse tensors are not supported."""
        invalidInputError((self.indices is None), 'sparseTensor to ndarray is not supported')
        return np.array(self.storage, dtype=get_dtype(self.bigdl_type)).reshape(self.shape)

    def __reduce__(self):
        # Pickle as raw byte buffers; tobytes() replaces deprecated tostring().
        if self.indices is None:
            return (JTensor, (self.storage.tobytes(), self.shape.tobytes(), self.bigdl_type))
        else:
            return (JTensor, (self.storage.tobytes(), self.shape.tobytes(), self.bigdl_type, self.indices.tobytes()))

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        indices = ('' if (self.indices is None) else (' ,indices %s' % str(self.indices)))
        return ('JTensor: storage: %s, shape: %s%s, %s' % (str(self.storage), str(self.shape), indices, self.bigdl_type))
def weight_constrain(loss1, mal_loss1, agent_model, constrain_weights, t):
    """Add an L2 penalty tying the agent's weights to reference weights.

    Returns (total_loss, penalty, mal_loss) where total_loss = loss1 + rho * penalty.
    """
    args = gv.args
    penalty = tf.constant(0.0)
    # Under the 'dist_oth' strategy the constraint is disabled on the first step.
    if 'dist_oth' in args.mal_strat and t < 1:
        rho = 0.0
    else:
        rho = 0.0001
    weight_idx = 0
    for layer in agent_model.layers:
        for weight in layer.weights:
            reference = tf.convert_to_tensor(constrain_weights[weight_idx], dtype=tf.float32)
            deviation = weight - reference
            if 'wt_o' in args.mal_strat:
                # NOTE: the original branched on kernel vs. bias parity here,
                # but both branches applied the identical penalty.
                penalty += tf.nn.l2_loss(deviation)
            weight_idx += 1
    total_loss = loss1 + rho * penalty
    return (total_loss, penalty, mal_loss1)
def compute_mAPs(truth: dict, pred: dict, tolerances: 'list[int]' = None):
    """Compute mean average precision at several matching tolerances.

    Args:
        truth: ground-truth events; iterated as records carrying a 'video' key.
        pred: predicted events with the same video set.
        tolerances: matching tolerances to evaluate (default [0, 1, 2, 4]).
            The default is now created per call -- the original used a shared
            mutable default list.

    Returns:
        (mAPs, tolerances, header, rows): per-tolerance mAPs, the tolerance
        list, a table header, and per-class AP rows (in percent).
    """
    if tolerances is None:
        tolerances = [0, 1, 2, 4]
    assert {v['video'] for v in truth} == {v['video'] for v in pred}, 'Video set mismatch!'
    truth_by_label = parse_ground_truth(truth)
    # Plotting is disabled; plot_ax stays None throughout.
    (fig, axes) = (None, None)
    class_aps_for_tol = []
    mAPs = []
    for (i, tol) in enumerate(tolerances):
        class_aps = []
        for (j, (label, truth_for_label)) in enumerate(sorted(truth_by_label.items())):
            ap = compute_average_precision(get_predictions(pred, label=label), truth_for_label, tolerance=tol, plot_ax=(axes[(j, i)] if (axes is not None) else None))
            class_aps.append((label, ap))
        mAP = np.mean([x[1] for x in class_aps])
        mAPs.append(mAP)
        class_aps.append(('mAP', mAP))
        class_aps_for_tol.append(class_aps)
    header = ['AP tol'] + tolerances
    rows = []
    for (c, _) in class_aps_for_tol[0]:
        row = [c]
        for class_aps in class_aps_for_tol:
            for (c2, val) in class_aps:
                if c2 == c:
                    row.append(val * 100)
        rows.append(row)
    return (mAPs, tolerances, header, rows)
def singular_locus_set():
    """Interactive demo: compute a witness set for a singular locus.

    Prints the polynomial system produced by jacobian(3, 2), embeds it with
    witset, shows the witness solutions, and pauses for the user between
    stages. Returns the pair (embsyst, embsols).

    NOTE(review): blocks on input(); intended for manual runs only.
    """
    # System whose solution set is the singular locus; semantics of the
    # (3, 2) arguments are defined by the jacobian helper -- confirm there.
    syst = jacobian(3, 2)
    for pol in syst:
        print(pol)
    # Embed the system and compute witness points; the False flag presumably
    # toggles witset verbosity/options -- confirm against its signature.
    (embsyst, embsols) = witset(syst, False)
    print('the polynomials in the witness set :')
    for pol in embsyst:
        print(pol)
    input('hit enter to continue')
    print('the solutions :')
    for sol in embsols:
        print(sol)
    # The number of witness points equals the degree of the set.
    print('degree of the singular locus set :', len(embsols))
    input('hit enter to continue')
    return (embsyst, embsols)
def text_pruning(text, ref):
    """Keep only sentences whose score against ``ref`` exceeds the module-level threshold.

    Args:
        text: iterable of candidate sentences.
        ref: reference text passed to the module-level ``rouge`` scorer.

    Returns:
        List of sentences with score > ``test_pruning_thresh``; empty strings
        and bare '.' entries are dropped outright.

    Bug fix: when the scorer raised, the original only printed the sentence and
    then fell through to compare an unbound (first iteration) or stale
    (later iterations) ``cur_score``. Failing sentences are now skipped.
    """
    new_text = []
    for sentence in text:
        if (not sentence) or sentence == '.':
            continue
        try:
            cur_score = rouge(sentence, ref)
        except Exception:
            # Log the offending sentence and skip it instead of reusing a
            # stale/unbound score.
            print(sentence)
            continue
        if cur_score > test_pruning_thresh:
            new_text.append(sentence)
    return new_text
class Root(nn.Module):
    """DLA root node: concatenates its inputs, applies conv-norm-ReLU, with an optional residual.

    The residual, when enabled, adds the first child back onto the normalized
    convolution output before the activation.
    """

    def __init__(self, cfg, in_channels, out_channels, kernel_size, residual):
        super(Root, self).__init__()
        # 'same' padding for the odd kernel sizes used by DLA roots.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, bias=False, padding=(kernel_size - 1) // 2)
        self.bn = get_norm(cfg.MODEL.DLA.NORM, out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual

    def forward(self, *x):
        children = x
        # Fuse all children along the channel dimension.
        out = self.bn(self.conv(torch.cat(children, 1)))
        if self.residual:
            # Skip connection from the first child.
            out = out + children[0]
        return self.relu(out)
class ConvNet(Backbone):
    """Four-layer convolutional backbone for 32x32 inputs.

    Each stage is a Convolution followed by 2x2 max pooling, so a 32x32 input
    is reduced to 2x2 spatially and flattened to (N, 4 * c_hidden).
    """

    def __init__(self, c_hidden=64):
        super().__init__()
        self.conv1 = Convolution(3, c_hidden)
        self.conv2 = Convolution(c_hidden, c_hidden)
        self.conv3 = Convolution(c_hidden, c_hidden)
        self.conv4 = Convolution(c_hidden, c_hidden)
        # 2x2 spatial map remains after four poolings of a 32x32 input.
        self._out_features = (2 ** 2) * c_hidden

    def _check_input(self, x):
        # Guard against silently wrong feature sizes downstream.
        height, width = x.shape[2:]
        assert height == 32 and width == 32, 'Input to network must be 32x32, but got {}x{}'.format(height, width)

    def forward(self, x):
        self._check_input(x)
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = F.max_pool2d(conv(x), 2)
        return x.view(x.size(0), -1)
def standard_newton_power_series(pols, lser, idx=1, maxdeg=4, nbr=4, checkin=True, verbose=True):
    """Run Newton's method on power series in standard double precision.

    Args:
        pols: list of string polynomials defining the system.
        lser: leading terms of the series, as strings in a single variable.
        idx: index of the series parameter among the symbols.
        maxdeg: maximal degree passed to the Newton routine.
        nbr: number of Newton steps.
        checkin: if True, validate inputs via checkin_newton_power_series first.
        verbose: if True, print the system and progress information.

    Returns:
        The computed series system, or ``lser`` unchanged when the input
        check fails.
    """
    from phcpy.solver import number_of_symbols
    from phcpy.interface import store_standard_system, load_standard_system
    from phcpy.phcpy2c3 import py2c_standard_Newton_power_series as newton
    from phcpy.phcpy2c3 import py2c_syspool_standard_init
    from phcpy.phcpy2c3 import py2c_syspool_standard_create
    from phcpy.phcpy2c3 import py2c_syspool_standard_size as poolsize
    from phcpy.phcpy2c3 import py2c_syspool_copy_to_standard_container
    from phcpy.phcpy2c3 import py2c_syspool_standard_clear
    nbsym = number_of_symbols(pols)
    if verbose:
        print('the polynomials :')
        for pol in pols:
            print(pol)
        print('Number of variables :', nbsym)
    if checkin:
        # Abort early on malformed input, returning the series unchanged.
        if (not checkin_newton_power_series(nbsym, lser, idx)):
            return lser
    # The leading series terms live in a single variable.
    store_standard_system(lser, nbvar=1)
    py2c_syspool_standard_init(1)
    py2c_syspool_standard_create(1)
    store_standard_system(pols, nbvar=nbsym)
    fail = newton(idx, maxdeg, nbr, int(verbose))
    # A pool size of -1 signals failure inside the C layer.
    size = ((- 1) if fail else poolsize())
    if verbose:
        if (size == (- 1)):
            print("An error occurred in the execution of Newton's method.")
        else:
            print('Computed one series solution.')
    py2c_syspool_copy_to_standard_container(1)
    result = load_standard_system()
    # Rename the series parameter back to the idx-th symbol.
    result = substitute_symbol(result, idx)
    py2c_syspool_standard_clear()
    return result
def compute_metrics(p: EvalPrediction):
    """Compute evaluation metrics for a HF Trainer EvalPrediction.

    NOTE(review): relies on a module-level ``metric`` object defined elsewhere
    in the file; predictions are passed through unmodified (no argmax here),
    so ``metric.compute`` must accept raw predictions.
    """
    return metric.compute(predictions=p.predictions, references=p.label_ids)
def _add_categories_metadata(dataset_name: str, categories: List[Dict[(str, Any)]]):
    """Record the id -> name category mapping on the dataset's metadata entry.

    Args:
        dataset_name: key of the dataset in MetadataCatalog.
        categories: records each carrying at least 'id' and 'name'.
    """
    id_to_name = {category['id']: category['name'] for category in categories}
    meta = MetadataCatalog.get(dataset_name)
    meta.categories = id_to_name
    logger = logging.getLogger(__name__)
    logger.info('Dataset {} categories: {}'.format(dataset_name, meta.categories))
def load_df_wbm_with_preds(models: Sequence[str]=(*PRED_FILES,), pbar: bool=True, id_col: str=default_id_col, **kwargs: Any) -> pd.DataFrame:
    """Load the WBM summary dataframe and attach per-model energy predictions.

    Args:
        models: model names to load; must be a subset of PRED_FILES keys.
        pbar: show a tqdm progress bar while loading prediction files.
        id_col: column set as the index of each per-model dataframe.
        **kwargs: forwarded to glob_to_df.

    Returns:
        A copy of df_wbm with one prediction column per model (plus optional
        '<model>_std' columns for ensembles).

    Raises:
        ValueError: on unknown model names or unexpected prediction columns.
        RuntimeError: if loading any model's prediction file fails.
    """
    if (mismatch := ', '.join((set(models) - set(PRED_FILES)))):
        raise ValueError(f'Unknown models: {mismatch}, expected subset of {set(PRED_FILES)}')
    dfs: dict[(str, pd.DataFrame)] = {}
    try:
        for model_name in (bar := tqdm(models, disable=(not pbar), desc='Loading preds')):
            bar.set_postfix_str(model_name)
            df = glob_to_df(PRED_FILES[model_name], pbar=False, **kwargs)
            df = df.set_index(id_col)
            dfs[model_name] = df
    except Exception as exc:
        # model_name from the loop identifies the offending file.
        raise RuntimeError(f'Failed to load model_name={model_name!r}') from exc
    # Imported here to avoid a circular import at module load time -- presumably; confirm.
    from matbench_discovery.data import df_wbm
    df_out = df_wbm.copy()
    for (model_name, df) in dfs.items():
        # NOTE(review): .replace('', '_') inserts '_' between every character
        # ('abc' -> '_a_b_c_'), so model_key can never match the
        # e_form_per_atom_* prefix below. The first replace's pattern string
        # was probably lost (e.g. '+' or '-') -- confirm against upstream.
        model_key = model_name.lower().replace('', '_').replace(' ', '_')
        # Preferred: explicit formation-energy column named after the model key.
        cols = [col for col in df if col.startswith(f'e_form_per_atom_{model_key}')]
        if cols:
            if (len(cols) > 1):
                print(f'Warning: multiple pred cols for model_name={model_name!r}, using {cols[0]!r} out of cols={cols!r}')
            df_out[model_name] = df[cols[0]]
        elif (pred_cols := list(df.filter(like='_pred_ens'))):
            # Ensemble mean column plus optional ensemble std column.
            if (len(pred_cols) != 1):
                raise ValueError(f'len(pred_cols)={len(pred_cols)!r}, expected 1')
            df_out[model_name] = df[pred_cols[0]]
            if (std_cols := list(df.filter(like='_std_ens'))):
                df_out[f'{model_name}_std'] = df[std_cols[0]]
        elif (pred_cols := list(df.filter(like='_pred_'))):
            # Raw per-member predictions: average the expected 10 columns.
            if (len(pred_cols) != 10):
                raise ValueError(f'len(pred_cols)={len(pred_cols)!r}, expected 10')
            df_out[model_name] = df[pred_cols].mean(axis=1)
        else:
            cols = list(df)
            msg = f'No pred col for model_name={model_name!r}, available cols={cols!r}'
            if (model_name != model_key):
                msg = msg.replace(', ', f' (model_key={model_key!r}), ')
            raise ValueError(msg)
    return df_out
def get_bilateral_grid(input, r_sigma, s_sigma):
    """Build a Halide pipeline implementing the bilateral grid filter.

    Args:
        input: Halide input image; values are clamped to [0, 1], so a
            single-channel float image in that range is assumed -- confirm
            with callers.
        r_sigma: range (intensity) bin width of the grid.
        s_sigma: spatial downsampling factor of the grid.

    Returns:
        The scheduled 'bilateral_grid' output Func.
    """
    x = Var('x')
    y = Var('y')
    z = Var('z')
    c = Var('c')
    xi = Var('xi')
    yi = Var('yi')
    zi = Var('zi')
    # Clamp coordinates so out-of-range taps repeat the border pixel.
    clamped = Func('clamped')
    clamped[(x, y)] = input[(clamp(x, 0, (input.width() - 1)), clamp(y, 0, (input.height() - 1)))]
    # Reduction over each s_sigma x s_sigma spatial cell centered on the cell.
    r = RDom(0, s_sigma, 0, s_sigma, 'r')
    val = clamped[((((x * s_sigma) + r.x) - (s_sigma // 2)), (((y * s_sigma) + r.y) - (s_sigma // 2)))]
    val = clamp(val, 0.0, 1.0)
    # Intensity bin index (+0.5 rounds to nearest bin).
    zi = cast(int_t, ((val / r_sigma) + 0.5))
    # Grid build: channel 0 accumulates intensity, channel 1 counts samples
    # (homogeneous coordinates, normalized at the end).
    histogram = Func('histogram')
    histogram[(x, y, z, c)] = 0.0
    histogram[(x, y, zi, c)] += select((c == 0), val, 1.0)
    # Separable 1-4-6-4-1 blur of the grid along z, then x, then y.
    (blurx, blury, blurz) = (Func('blurx'), Func('blury'), Func('blurz'))
    blurz[(x, y, z, c)] = ((((histogram[(x, y, (z - 2), c)] + (histogram[(x, y, (z - 1), c)] * 4)) + (histogram[(x, y, z, c)] * 6)) + (histogram[(x, y, (z + 1), c)] * 4)) + histogram[(x, y, (z + 2), c)])
    blurx[(x, y, z, c)] = ((((blurz[((x - 2), y, z, c)] + (blurz[((x - 1), y, z, c)] * 4)) + (blurz[(x, y, z, c)] * 6)) + (blurz[((x + 1), y, z, c)] * 4)) + blurz[((x + 2), y, z, c)])
    blury[(x, y, z, c)] = ((((blurx[(x, (y - 2), z, c)] + (blurx[(x, (y - 1), z, c)] * 4)) + (blurx[(x, y, z, c)] * 6)) + (blurx[(x, (y + 1), z, c)] * 4)) + blurx[(x, (y + 2), z, c)])
    # Trilinear interpolation (slicing) back to full resolution.
    val = clamp(clamped[(x, y)], 0.0, 1.0)
    zv = (val / r_sigma)
    zi = cast(int_t, zv)
    zf = (zv - zi)
    xf = (cast(float_t, (x % s_sigma)) / s_sigma)
    yf = (cast(float_t, (y % s_sigma)) / s_sigma)
    xi = (x / s_sigma)
    yi = (y / s_sigma)
    interpolated = Func('interpolated')
    interpolated[(x, y, c)] = lerp(lerp(lerp(blury[(xi, yi, zi, c)], blury[((xi + 1), yi, zi, c)], xf), lerp(blury[(xi, (yi + 1), zi, c)], blury[((xi + 1), (yi + 1), zi, c)], xf), yf), lerp(lerp(blury[(xi, yi, (zi + 1), c)], blury[((xi + 1), yi, (zi + 1), c)], xf), lerp(blury[(xi, (yi + 1), (zi + 1), c)], blury[((xi + 1), (yi + 1), (zi + 1), c)], xf), yf), zf)
    # Normalize the interpolated value by the homogeneous weight (channel 1).
    bilateral_grid = Func('bilateral_grid')
    bilateral_grid[(x, y)] = (interpolated[(x, y, 0)] / interpolated[(x, y, 1)])
    # Schedule: GPU tiling when available, otherwise a parallel/vectorized CPU schedule.
    target = get_target_from_environment()
    if target.has_gpu_feature():
        print('Compiling for GPU.')
        histogram.compute_root().reorder(c, z, x, y).gpu_tile(x, y, 8, 8)
        histogram = histogram.update()
        histogram.reorder(c, r.x, r.y, x, y).gpu_tile(x, y, xi, yi, 8, 8).unroll(c)
        blurx.compute_root().gpu_tile(x, y, z, xi, yi, zi, 16, 16, 1)
        blury.compute_root().gpu_tile(x, y, z, xi, yi, zi, 16, 16, 1)
        blurz.compute_root().gpu_tile(x, y, z, xi, yi, zi, 8, 8, 4)
        bilateral_grid.compute_root().gpu_tile(x, y, xi, yi, s_sigma, s_sigma)
    else:
        print('Compiling for CPU.')
        histogram.compute_root().parallel(z)
        histogram = histogram.update()
        histogram.reorder(c, r.x, r.y, x, y).unroll(c)
        blurz.compute_root().reorder(c, z, x, y).parallel(y).vectorize(x, 4).unroll(c)
        blurx.compute_root().reorder(c, x, y, z).parallel(z).vectorize(x, 4).unroll(c)
        blury.compute_root().reorder(c, x, y, z).parallel(z).vectorize(x, 4).unroll(c)
        bilateral_grid.compute_root().parallel(y).vectorize(x, 4)
    return bilateral_grid
class CamembertForQuestionAnswering(metaclass=DummyObject):
    """Placeholder used when torch is absent; instantiation reports the missing backend."""
    # Backends that must be installed for the real class to be importable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class LoopPad(object):
    """Pad a tensor along dim 0 up to ``max_len`` by repeating it cyclically."""

    def __init__(self, max_len):
        self.max_len = max_len

    def __call__(self, tensor):
        cur_len = tensor.size(0)
        if cur_len == self.max_len:
            # Already the right length; return unchanged.
            return tensor
        deficit = self.max_len - cur_len
        # Whole repetitions of the tensor, then a partial slice for the remainder.
        pieces = [tensor] + [tensor] * (deficit // cur_len)
        remainder = deficit % cur_len
        if remainder > 0:
            pieces.append(tensor[0:remainder])
        return torch.cat(pieces, 0)
def test_revert_sync_batchnorm():
    """revert_sync_batchnorm must turn SyncBN into CPU-runnable BN and keep the mode flag."""
    module_with_syncbn = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN')).to('cpu')
    module_with_syncbn.train()
    inputs = torch.randn(1, 3, 10, 10)
    # SyncBN cannot run on CPU without a process group.
    with pytest.raises(ValueError):
        module_with_syncbn(inputs)
    reverted = revert_sync_batchnorm(module_with_syncbn)
    out = reverted(inputs)
    assert out.shape == (1, 8, 9, 9)
    assert reverted.training == module_with_syncbn.training
    # The training/eval flag must also survive reverting an eval-mode module.
    module_with_syncbn.eval()
    reverted = revert_sync_batchnorm(module_with_syncbn)
    assert reverted.training == module_with_syncbn.training
class SynPASS13Segmentation(SegmentationDataset):
    """SynPASS panoramic segmentation dataset remapped from 23 raw to 13 training classes."""
    # Number of classes after the 23 -> 13 remapping.
    NUM_CLASS = 13

    def __init__(self, root='datasets/SynPASS', split='val', mode=None, transform=None, weather='all', **kwargs):
        """Build the image/mask file lists for the given split.

        NOTE(review): ``weather`` is accepted but never used in this class --
        confirm whether filtering was intended.
        """
        super(SynPASS13Segmentation, self).__init__(root, split, mode, transform, **kwargs)
        assert os.path.exists(self.root), 'Please put dataset in {SEG_ROOT}/datasets/SynPASS'
        self.root = root
        (self.images, self.mask_paths) = _get_city_pairs(self.root, self.split)
        assert (len(self.images) == len(self.mask_paths))
        if (len(self.images) == 0):
            raise RuntimeError((('Found 0 images in subfolders of:' + root) + '\n'))
        # Lookup table mapping the 23 raw label ids to 13 training ids (-1 = ignore).
        self._key = np.array([(- 1), 2, 4, (- 1), 11, 5, 0, 0, 1, 8, 12, 3, 7, 10, (- 1), (- 1), (- 1), (- 1), 6, (- 1), (- 1), (- 1), 9])

    def _map23to13(self, mask):
        """Remap raw 23-class ids to 13 training ids; 255 becomes the ignore id -1."""
        values = np.unique(mask)
        new_mask = np.zeros_like(mask)
        # Start from all-ignore so unseen ids stay at -1.
        new_mask -= 1
        for value in values:
            if (value == 255):
                new_mask[(mask == value)] = (- 1)
            else:
                new_mask[(mask == value)] = self._key[value]
        mask = new_mask
        return mask

    def _val_sync_transform_resize(self, img, mask):
        # Validation path: only tensor conversion, no augmentation.
        (img, mask) = (self._img_transform(img), self._mask_transform(mask))
        return (img, mask)

    def __getitem__(self, index):
        """Return (img, filename) in test mode, else (img, mask, filename)."""
        img = Image.open(self.images[index]).convert('RGB')
        if (self.mode == 'test'):
            if (self.transform is not None):
                img = self.transform(img)
            return (img, os.path.basename(self.images[index]))
        mask = Image.open(self.mask_paths[index])
        if (self.mode == 'train'):
            # Training gets random sync augmentation with resizing.
            (img, mask) = self._sync_transform(img, mask, resize=True)
        elif (self.mode == 'val'):
            (img, mask) = self._val_sync_transform_resize(img, mask)
        else:
            assert (self.mode == 'testval')
            (img, mask) = self._val_sync_transform_resize(img, mask)
        if (self.transform is not None):
            img = self.transform(img)
        return (img, mask, os.path.basename(self.images[index]))

    def _mask_transform(self, mask):
        # Remap to 13 classes and convert to a LongTensor of training ids.
        target = self._map23to13(np.array(mask).astype('int32'))
        return torch.LongTensor(np.array(target).astype('int32'))

    def __len__(self):
        return len(self.images)

    def pred_offset(self):
        # NOTE(review): likely meant to be a @property (decorator appears stripped) -- confirm.
        return 0

    def classes(self):
        """Category names of the 13 training classes."""
        # NOTE(review): likely meant to be a @property (decorator appears stripped) -- confirm.
        return ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'car')
def load_training_config(config_name: str) -> TrainingConfig:
    """Compose the named Hydra config from tbv.training_configs and instantiate it.

    Args:
        config_name: name of the config file to compose.

    Returns:
        The instantiated TrainingConfig object.
    """
    with hydra.initialize_config_module(config_module='tbv.training_configs'):
        composed = hydra.compose(config_name=config_name)
        training_config: TrainingConfig = instantiate(composed.TrainingConfig)
    return training_config
def test_cast_as_tensor_check_wrong():
    """Every (value, dtype, flag) combination here must trip the cast check."""
    bad_cases = (
        (True, torch.int64, 0),
        (True, torch.bool, 1),
        (1, torch.int32, 0),
        (1, torch.int64, 1),
        (1.2, torch.int64, 0),
        (1.2, torch.float64, 1),
    )
    for value, dtype, flag in bad_cases:
        assert_raises(AssertionError, _test_cast, value, dtype, flag)
class GlobalNode(Module):
    """Maintains a per-graph global embedding.

    Attention-pools the node features, concatenates the pooled vector with the
    previous global state, and applies a residual transform.
    """

    def __init__(self):
        super().__init__()
        gate = Linear(config.emb_size, 1)
        feature_net = Sequential(Linear(config.emb_size, config.emb_size), LeakyReLU())
        self.glob = GlobalAttention(gate, feature_net)
        # Fuses [pooled, previous-global] back down to emb_size.
        self.tranform = Sequential(Linear(config.emb_size + config.emb_size, config.emb_size), LeakyReLU())

    def forward(self, xg_old, x, batch_ind):
        # Pool node features into one vector per graph.
        pooled = self.glob(x, batch_ind)
        fused = torch.cat([pooled, xg_old], dim=1)
        # Residual update of the global state.
        return self.tranform(fused) + xg_old
def data_parallel(batch_group: List[TensorDict], model: Model, cuda_devices: List) -> Dict[(str, torch.Tensor)]:
    """Run one forward pass of ``model`` replicated across ``cuda_devices``.

    Each sub-batch is moved to its own device, the model is replicated, the
    replicas are applied in parallel (batches passed as kwargs), and the
    per-replica losses are gathered and averaged on the first used device.
    """
    assert len(batch_group) <= len(cuda_devices)
    moved = []
    for batch, device in zip(batch_group, cuda_devices):
        moved.append(nn_util.move_to_device(batch, device))
    used_device_ids = cuda_devices[:len(moved)]
    replicas = replicate(model, used_device_ids)
    # No positional inputs: each batch dict is passed as keyword arguments.
    positional_inputs = [()] * len(batch_group)
    outputs = parallel_apply(replicas, positional_inputs, moved, used_device_ids)
    losses = gather([output['loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
    return {'loss': losses.mean()}
def train(cfg, observer):
    """Main training loop for flow / depth / geom models on KITTI or NYUv2.

    Args:
        cfg: experiment configuration (mode, dataset, paths, intervals, lr, ...).
        observer: metrics sink exposing add_scalar / add_image.

    Flow: build model -> optional warm start -> freeze requested subnetworks ->
    prepare dataset on first use -> iterate the dataloader with periodic
    evaluation, logging and checkpointing -> final depth evaluation.
    """
    model = get_model(cfg.mode)(cfg)
    # Optionally warm-start the geometry model from separate flow / depth checkpoints.
    if (cfg.mode == 'geom'):
        if (cfg.flow_pretrained_model and (not cfg.resume)):
            data = torch.load(cfg.flow_pretrained_model)['model_state_dict']
            (missing_keys, unexp_keys) = model.load_state_dict(data, strict=False)
            print('missing_keys')
            print(missing_keys)
            print('')
            print('unexp_keys')
            print(unexp_keys)
            print(('Load Flow Pretrained Model from ' + cfg.flow_pretrained_model))
        if (cfg.depth_pretrained_model and (not cfg.resume)):
            data = torch.load(cfg.depth_pretrained_model)['model_state_dict']
            (missing_keys, unexp_keys) = model.load_state_dict(data, strict=False)
            print('missing_keys')
            print(missing_keys)
            print('')
            print('unexp_keys')
            print(unexp_keys)
            print(('Load Depth Pretrained Model from ' + cfg.depth_pretrained_model))
    if cfg.multi_gpu:
        model = torch.nn.DataParallel(model)
    model = model.cuda()
    # Freeze sub-networks on request (flow = pwc/fpyramid, depth, pose),
    # matching by parameter-name substring.
    if cfg.fix_flow:
        for (k, v) in model.named_parameters():
            if ((k.find('pwc') != (- 1)) or (k.find('fpyramid') != (- 1))):
                print(k)
                v.requires_grad = False
    if cfg.fix_depth:
        for (k, v) in model.named_parameters():
            if (k.find('depth') != (- 1)):
                print(k)
                v.requires_grad = False
    if cfg.fix_pose:
        for (k, v) in model.named_parameters():
            if (k.find('pose') != (- 1)):
                print(k)
                v.requires_grad = False
    print('these parameters need to be update')
    for (k, v) in model.named_parameters():
        if v.requires_grad:
            print(k)
    # Only optimize the parameters left trainable above.
    optimizer = torch.optim.Adam([{'params': filter((lambda p: p.requires_grad), model.parameters()), 'lr': cfg.lr}])
    if cfg.resume:
        if (cfg.iter_start > 0):
            (cfg.iter_start, model, optimizer) = load_model(cfg.model_dir, 'iter_{}.pth'.format(cfg.iter_start), model, optimizer)
        else:
            (cfg.iter_start, model, optimizer) = load_model(cfg.model_dir, 'last.pth', model, optimizer)
    loss_weights_dict = generate_loss_weights_dict(cfg)
    visualizer = Visualizer(loss_weights_dict, cfg.log_dump_dir)
    # Prepare the dataset on disk on first use; train.txt marks a finished preparation.
    data_dir = os.path.join(cfg.prepared_base_dir)
    if (not os.path.exists(os.path.join(data_dir, 'train.txt'))):
        if (cfg.dataset == 'kitti_depth'):
            kitti_raw_dataset = KITTI_RAW(cfg.raw_base_dir, cfg.static_frames_txt, cfg.test_scenes_txt)
            kitti_raw_dataset.prepare_data_mp(data_dir, stride=1)
        elif (cfg.dataset == 'kitti_odo'):
            kitti_raw_dataset = KITTI_Odo(cfg.raw_base_dir)
            kitti_raw_dataset.prepare_data_mp(data_dir, stride=1)
        elif (cfg.dataset == 'nyuv2'):
            nyu_raw_dataset = NYU_Prepare(cfg.raw_base_dir, cfg.nyu_test_dir)
            nyu_raw_dataset.prepare_data_mp(data_dir, stride=10)
        else:
            raise NotImplementedError
    # num_iterations counts optimizer steps; the dataset length is scaled so
    # one pass over the dataloader covers the remaining steps.
    if (cfg.dataset == 'kitti_depth'):
        dataset = KITTI_Prepared(data_dir, num_scales=cfg.num_scales, img_hw=cfg.img_hw, num_iterations=((cfg.num_iterations - cfg.iter_start) * cfg.batch_size))
    elif (cfg.dataset == 'kitti_odo'):
        dataset = KITTI_Prepared(data_dir, num_scales=cfg.num_scales, img_hw=cfg.img_hw, num_iterations=((cfg.num_iterations - cfg.iter_start) * cfg.batch_size))
    elif (cfg.dataset == 'nyuv2'):
        dataset = NYU_v2(data_dir, num_scales=cfg.num_scales, img_hw=cfg.img_hw, num_iterations=((cfg.num_iterations - cfg.iter_start) * cfg.batch_size))
    else:
        raise NotImplementedError
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=cfg.batch_size, shuffle=True, num_workers=cfg.num_workers, drop_last=False)
    # Load ground truth used by the periodic evaluations.
    if ((cfg.dataset == 'kitti_depth') or (cfg.dataset == 'kitti_odo')):
        (gt_flows_2012, noc_masks_2012) = load_gt_flow_kitti(cfg.gt_2012_dir, 'kitti_2012')
        (gt_flows_2015, noc_masks_2015) = load_gt_flow_kitti(cfg.gt_2015_dir, 'kitti_2015')
        gt_masks_2015 = load_gt_mask(cfg.gt_2015_dir)
    elif (cfg.dataset == 'nyuv2'):
        (test_images, test_gt_depths) = load_nyu_test_data(cfg.nyu_test_dir)
    print('starting iteration: {}.'.format(cfg.iter_start))
    for (iter_, inputs) in enumerate(tqdm(dataloader)):
        # Periodic evaluation before the training step.
        if (((iter_ % cfg.test_interval) == 0) and (not cfg.no_test)):
            model.eval()
            # NOTE(review): `args` looks like a leftover global -- everywhere
            # else this function uses cfg.multi_gpu; confirm and unify.
            if args.multi_gpu:
                model_eval = model.module
            else:
                model_eval = model
            if ((cfg.dataset == 'kitti_depth') or (cfg.dataset == 'kitti_odo')):
                if (cfg.mode == 'flow'):
                    eval_2012_res = test_kitti_2012(cfg, model_eval, gt_flows_2012, noc_masks_2012)
                    eval_2015_res = test_kitti_2015(cfg, model_eval, gt_flows_2015, noc_masks_2015, gt_masks_2015, depth_save_dir=os.path.join(cfg.model_dir, 'results'))
                    visualizer.add_log_pack({'eval_2012_res': eval_2012_res, 'eval_2015_res': eval_2015_res})
                if (cfg.mode == 'depth'):
                    eval_depth_res = test_eigen_depth(cfg, model_eval)
                    visualizer.add_log_pack({'eval_eigen_res': eval_depth_res})
                if (cfg.mode == 'geom'):
                    # Geometry mode evaluates both flow and depth.
                    eval_2012_res = test_kitti_2012(cfg, model_eval, gt_flows_2012, noc_masks_2012)
                    eval_2015_res = test_kitti_2015(cfg, model_eval, gt_flows_2015, noc_masks_2015, gt_masks_2015, depth_save_dir=os.path.join(cfg.model_dir, 'results'))
                    visualizer.add_log_pack({'eval_2012_res': eval_2012_res, 'eval_2015_res': eval_2015_res})
                    eval_depth_res = test_eigen_depth(cfg, model_eval)
                    visualizer.add_log_pack({'eval_eigen_res': eval_depth_res})
                    (abs_rel, sq_rel, rms, log_rms, a1, a2, a3) = eval_depth_res
                    observer.add_scalar('test_depth', abs_rel, iter_)
            elif (cfg.dataset == 'nyuv2'):
                if (not (cfg.mode == 'flow')):
                    eval_nyu_res = test_nyu(cfg, model_eval, test_images, test_gt_depths)
                    visualizer.add_log_pack({'eval_nyu_res': eval_nyu_res})
            visualizer.dump_log(os.path.join(cfg.model_dir, 'log.pkl'))
            model.train()
        # Shift the dataloader index by the resume offset.
        iter_ = (iter_ + cfg.iter_start)
        optimizer.zero_grad()
        inputs = [k.cuda() for k in inputs]
        (loss_pack, mask_pack) = model(inputs)
        if ((iter_ % cfg.log_interval) == 0):
            visualizer.print_loss(loss_pack, iter_=iter_)
        if (cfg.mode == 'geom'):
            # Scalar loss curves every vis_interval steps.
            if (iter_ and ((iter_ % cfg.vis_interval) == 0)):
                observer.add_scalar('depth_photometric_loss', loss_pack['loss_depth_pixel'].mean().detach().cpu().numpy(), iter_)
                observer.add_scalar('depth_ssim_loss', loss_pack['loss_depth_ssim'].mean().detach().cpu().numpy(), iter_)
                observer.add_scalar('depth_smooth_loss', loss_pack['loss_depth_smooth'].mean().detach().cpu().numpy(), iter_)
                observer.add_scalar('depth_consis_loss', loss_pack['loss_depth_consis'].mean().detach().cpu().numpy(), iter_)
                observer.add_scalar('flow_photometric_loss', loss_pack['loss_flow_pixel'].mean().detach().cpu().numpy(), iter_)
                observer.add_scalar('flow_ssim_loss', loss_pack['loss_flow_ssim'].mean().detach().cpu().numpy(), iter_)
                observer.add_scalar('flow_smooth_loss', loss_pack['loss_flow_smooth'].mean().detach().cpu().numpy(), iter_)
                observer.add_scalar('flow_consis_loss', loss_pack['loss_flow_consis'].mean().detach().cpu().numpy(), iter_)
                observer.add_scalar('depth_flow_consis', loss_pack['loss_depth_flow_consis'].mean().detach().cpu().numpy(), iter_)
                observer.add_scalar('epipolar', loss_pack['loss_epipolar'].mean().detach().cpu().numpy(), iter_)
                observer.add_scalar('pnp', loss_pack['loss_pnp'].mean().detach().cpu().numpy(), iter_)
                observer.add_scalar('triangulate', loss_pack['loss_triangle'].mean().detach().cpu().numpy(), iter_)
                observer.add_scalar('8_point', loss_pack['loss_eight_point'].mean().detach().cpu().numpy(), iter_)
            # Image/mask visualizations ten times less often.
            if (iter_ and ((iter_ % (cfg.vis_interval * 10)) == 0)):
                observer.add_image('origin_middle_image', mask_pack['origin_middle_image'], iter_)
                observer.add_image('occ_fwd_mask', mask_pack['occ_fwd_mask'], iter_)
                observer.add_image('dyna_fwd_mask', mask_pack['dyna_fwd_mask'], iter_)
                observer.add_image('inlier_fwd_mask', mask_pack['inlier_fwd_mask'], iter_)
                observer.add_image('rigid_fwd_mask', mask_pack['rigid_fwd_mask'], iter_)
                observer.add_image('valid_fwd_mask', mask_pack['valid_fwd_mask'], iter_)
                observer.add_image('fwd_mask', mask_pack['fwd_mask'], iter_)
                observer.add_image('texture_mask_fwd', mask_pack['texture_mask_fwd'], iter_)
                observer.add_image('pred_depth', visualizer.tensor2array((1 / mask_pack['pred_depth_img']), max_value=None, colormap='magma'), iter_)
                observer.add_image('pred_disp', visualizer.tensor2array((1 / mask_pack['pred_depth_img']), max_value=None, colormap='bone'), iter_)
                observer.add_image('pred_flow', flow_to_image(mask_pack['pred_flow_img']), iter_)
        # Weighted sum of all loss terms.
        loss_list = []
        for key in list(loss_pack.keys()):
            loss_list.append((loss_weights_dict[key] * loss_pack[key].mean()).unsqueeze(0))
        loss = torch.cat(loss_list, 0).sum()
        loss.backward()
        optimizer.step()
        if (((iter_ + 1) % cfg.save_interval) == 0):
            save_model(iter_, cfg.model_dir, 'iter_{}.pth'.format(iter_), model, optimizer)
            # NOTE(review): .format(iter_) on 'last.pth' is a no-op (no placeholder).
            save_model(iter_, cfg.model_dir, 'last.pth'.format(iter_), model, optimizer)
    # Final depth evaluation after training.
    if (cfg.dataset == 'kitti_depth'):
        if ((cfg.mode == 'depth') or (cfg.mode == 'depth_pose')):
            # NOTE(review): model_eval may be unbound here when cfg.no_test
            # suppressed all periodic evaluations -- confirm.
            eval_depth_res = test_eigen_depth(cfg, model_eval)
class GolemTrainer():
    """Runs GOLEM structure-learning optimization for a fixed number of iterations."""
    _logger = logging.getLogger(__name__)

    def __init__(self, learning_rate=0.001):
        # Learning rate fed into the TF graph on every session run.
        self.learning_rate = learning_rate

    def train(self, model, X, num_iter, checkpoint_iter=None, output_dir=None):
        """Initialize TF variables, optimize, and return the final estimate B_est."""
        model.sess.run(tf.compat.v1.global_variables_initializer())
        self._logger.info('Started training for {} iterations.'.format(num_iter))
        for step in range(int(num_iter) + 1):
            # Iteration 0 only evaluates the initial graph; later steps update it.
            runner = self.eval_iter if step == 0 else self.train_iter
            (score, likelihood, h, B_est) = runner(model, X)
            if checkpoint_iter is not None and step % checkpoint_iter == 0:
                self.train_checkpoint(step, score, likelihood, h, B_est, output_dir)
        return B_est

    def eval_iter(self, model, X):
        """Fetch current score/likelihood/h/B without updating parameters."""
        fetches = [model.score, model.likelihood, model.h, model.B]
        feed = {model.X: X, model.lr: self.learning_rate}
        (score, likelihood, h, B_est) = model.sess.run(fetches, feed_dict=feed)
        return (score, likelihood, h, B_est)

    def train_iter(self, model, X):
        """Run one optimization step and return the post-step statistics."""
        fetches = [model.train_op, model.score, model.likelihood, model.h, model.B]
        feed = {model.X: X, model.lr: self.learning_rate}
        (_, score, likelihood, h, B_est) = model.sess.run(fetches, feed_dict=feed)
        return (score, likelihood, h, B_est)

    def train_checkpoint(self, i, score, likelihood, h, B_est, output_dir):
        """Log progress and optionally persist the current B estimate to disk."""
        self._logger.info('[Iter {}] score {:.3E}, likelihood {:.3E}, h {:.3E}'.format(i, score, likelihood, h))
        if output_dir is not None:
            create_dir('{}/checkpoints'.format(output_dir))
            np.save('{}/checkpoints/B_iteration_{}.npy'.format(output_dir, i), B_est)
def load_syn(dataset_dir, split='train'):
    """Load the SYN image list for `split`, capped at a split-specific maximum."""
    # Training keeps up to 25k entries; evaluation splits up to 9k.
    cap = 25000 if split == 'train' else 9000
    return read_image_list(osp.join(dataset_dir, SYN[split]), n_max=cap)
class XnliProcessor(DataProcessor):
    """Processor for the XNLI dataset (multilingual NLI)."""

    def __init__(self, language, train_language=None):
        # `train_language` overrides `language` for the training split only.
        self.language = language
        self.train_language = train_language

    @staticmethod
    def _check_str(value, what):
        # Inputs must already be decoded strings; fail loudly otherwise.
        if not isinstance(value, str):
            raise ValueError(f'Training {what} {value} is not a string')

    def get_train_examples(self, data_dir):
        """Read the machine-translated MultiNLI training file for the train language."""
        lg = self.train_language if self.train_language is not None else self.language
        path = os.path.join(data_dir, f'XNLI-MT-1.0/multinli/multinli.train.{lg}.tsv')
        examples = []
        for i, line in enumerate(self._read_tsv(path)):
            if i == 0:
                continue  # header row
            text_a, text_b = line[0], line[1]
            # The MT files spell the label 'contradictory'; normalize it.
            label = 'contradiction' if line[2] == 'contradictory' else line[2]
            self._check_str(text_a, 'input')
            self._check_str(text_b, 'input')
            self._check_str(label, 'label')
            examples.append(InputExample(guid=f'train-{i}', text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_test_examples(self, data_dir):
        """Read the official XNLI test file, keeping only this processor's language."""
        examples = []
        for i, line in enumerate(self._read_tsv(os.path.join(data_dir, 'XNLI-1.0/xnli.test.tsv'))):
            if i == 0:
                continue  # header row
            if line[0] != self.language:
                continue  # the test file mixes all languages
            text_a, text_b, label = line[6], line[7], line[1]
            self._check_str(text_a, 'input')
            self._check_str(text_b, 'input')
            self._check_str(label, 'label')
            examples.append(InputExample(guid=f'test-{i}', text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_labels(self):
        """The three NLI classes."""
        return ['contradiction', 'entailment', 'neutral']
def run():
    """CLI entry point: parse arguments, build the RFR inpainting model, then test or train."""
    parser = argparse.ArgumentParser()
    # (flag, kwargs) pairs for every CLI option.
    options = [
        ('--data_root', dict(type=str)),
        ('--mask_root', dict(type=str)),
        ('--model_save_path', dict(type=str, default='checkpoint')),
        ('--result_save_path', dict(type=str, default='results')),
        ('--target_size', dict(type=int, default=256)),
        ('--mask_mode', dict(type=int, default=1)),
        ('--num_iters', dict(type=int, default=450000)),
        ('--model_path', dict(type=str, default='checkpoint/100000.pth')),
        ('--batch_size', dict(type=int, default=6)),
        ('--n_threads', dict(type=int, default=6)),
        ('--finetune', dict(action='store_true')),
        ('--test', dict(action='store_true')),
        ('--gpu_id', dict(type=str, default='0')),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    args = parser.parse_args()
    # Restrict CUDA to the requested device before any model is built.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    model = RFRNetModel()
    if args.test:
        # Evaluation: frozen weights, one-sample-at-a-time loader.
        model.initialize_model(args.model_path, False)
        model.cuda()
        dataset = Dataset(args.data_root, args.mask_root, args.mask_mode, args.target_size, mask_reverse=True, training=False)
        model.test(DataLoader(dataset), args.result_save_path)
    else:
        # Training: shuffled, batched loader; optionally fine-tune from model_path.
        model.initialize_model(args.model_path, True)
        model.cuda()
        dataset = Dataset(args.data_root, args.mask_root, args.mask_mode, args.target_size, mask_reverse=True)
        loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_threads)
        model.train(loader, args.model_save_path, args.finetune, args.num_iters)
class FlaxViTForImageClassification(metaclass=DummyObject):
    # Placeholder emitted when the optional Flax backend is not installed; any
    # attempt to instantiate it raises an informative ImportError via
    # `requires_backends`.
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def write_e2e_src(prompt_lst, corr_path):
    """Write every prompt in `prompt_lst` on its own line to `corr_path` (overwriting)."""
    with open(corr_path, 'w') as out:
        out.writelines(f'{item}\n' for item in prompt_lst)
def with_progress(collection, length=None, title=None, pbar=None):
    """Yield items from `collection`, driving a progress bar as a side effect.

    Args:
        collection: Iterable to pass through unchanged.
        length: Total count for the bar; defaults to len(collection).
        title: Optional bar title.
        pbar: Progress-bar object exposing set_title/start/update; a fresh
            NoProgressBar is created per call when omitted.

    Yields:
        The elements of `collection`, in order; pbar.update() fires after each.
    """
    if pbar is None:
        # Create per call: the previous `pbar=NoProgressBar()` default was
        # evaluated once at import time and shared between every invocation.
        pbar = NoProgressBar()
    if length is None:
        length = len(collection)
    if title is not None:
        pbar.set_title(title)
    pbar.start(length)
    for elem in collection:
        yield elem
        pbar.update()
def _flatten_to_tuple(outputs):
    """Recursively collect every tensor reachable from `outputs` into a flat tuple."""
    collected = []
    if isinstance(outputs, torch.Tensor):
        collected.append(outputs)
    elif isinstance(outputs, (list, tuple)):
        for item in outputs:
            collected.extend(_flatten_to_tuple(item))
    elif isinstance(outputs, dict):
        for item in outputs.values():
            collected.extend(_flatten_to_tuple(item))
    elif isinstance(outputs, Instances):
        collected.extend(_flatten_to_tuple(outputs.get_fields()))
    elif isinstance(outputs, (Boxes, BitMasks, ImageList)):
        collected.append(outputs.tensor)
    else:
        # Unknown leaf: skip it, but warn (at most 10 times) so missing
        # contributions to the flop/activation count stay visible.
        log_first_n(logging.WARN, f'Output of type {type(outputs)} not included in flops/activations count.', n=10)
    return tuple(collected)
class ToyDataset(Dataset):
    """Fixed-size dataset returning the same constant (ones, zeros) pair for every index."""

    def __init__(self, size):
        # Number of (identical) samples the dataset reports.
        self.size = size

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        # Content is independent of `idx`; useful for smoke-testing data pipelines.
        return torch.ones((4, 8)), torch.zeros((4, 8))
class CorefDataset(Dataset):
    """Coreference dataset built from one jsonlines-style document.

    Tokenizes the document words (inserting speaker markers at speaker turns),
    remaps word-level cluster/mention spans to subtoken spans, and provides
    padding helpers used when collating batches.
    """
    def __init__(self, input_data, tokenizer, model_name_or_path, max_seq_length=(- 1)):
        # max_seq_length <= 0 disables length-based filtering (see _tokenize).
        self.tokenizer = tokenizer
        (examples, self.max_mention_num, self.max_cluster_size, self.max_num_clusters, dockey2eems_tokenspan, dockey2pems_tokenspan) = self._parse_jsonlines(input_data)
        self.max_seq_length = max_seq_length
        (self.examples, self.lengths, self.num_examples_filtered, self.dockey2eems_subtokenspan, self.dockey2pems_subtokenspan) = self._tokenize(examples, dockey2eems_tokenspan, dockey2pems_tokenspan, model_name_or_path)
        logger.info(f'Finished preprocessing Coref dataset. {len(self.examples)} examples were extracted, {self.num_examples_filtered} were filtered due to sequence length.')
    def _parse_jsonlines(self, d):
        """Extract words, clusters, speakers and mention spans from one document dict.

        NOTE(review): despite the name, `d` is a single parsed document (dict),
        not a jsonlines stream — `examples` always holds exactly one entry.
        """
        examples = []
        max_mention_num = (- 1)
        max_cluster_size = (- 1)
        max_num_clusters = (- 1)
        dockey2pems_tokenspan = {}
        dockey2eems_tokenspan = {}
        doc_key = d['doc_key']
        assert (type(d['sentences'][0]) == list), "'sentences' should be 2d list, not just a 1d list of the tokens."
        input_words = flatten_list_of_lists(d['sentences'])
        clusters = d['clusters']
        max_mention_num = max(max_mention_num, len(flatten_list_of_lists(clusters)))
        max_cluster_size = max(max_cluster_size, (max((len(cluster) for cluster in clusters)) if clusters else 0))
        max_num_clusters = max(max_num_clusters, (len(clusters) if clusters else 0))
        speakers = flatten_list_of_lists(d['speakers'])
        examples.append((doc_key, input_words, clusters, speakers))
        # 'mentions' holds entity-mention spans (eems); 'pems' presumably a second
        # mention category from the data producer — TODO confirm its meaning.
        dockey2eems_tokenspan[doc_key] = d['mentions']
        dockey2pems_tokenspan[doc_key] = d['pems']
        return (examples, max_mention_num, max_cluster_size, max_num_clusters, dockey2eems_tokenspan, dockey2pems_tokenspan)
    def _tokenize(self, examples, dockey2eems_tokenspan, dockey2pems_tokenspan, model_name_or_path):
        """Subtokenize each example and remap word-index spans to subtoken indices."""
        coref_examples = []
        lengths = []
        num_examples_filtered = 0
        dockey2eems_subtokenspan = {}
        dockey2pems_subtokenspan = {}
        for (doc_key, words, clusters, speakers) in examples:
            word_idx_to_start_token_idx = dict()
            word_idx_to_end_token_idx = dict()
            end_token_idx_to_word_idx = [0]
            token_ids = []
            last_speaker = None
            for (idx, (word, speaker)) in enumerate(zip(words, speakers)):
                if (last_speaker != speaker):
                    # Insert [SPEAKER_START] <speaker subtokens> [SPEAKER_END] at turn changes.
                    speaker_prefix = (([SPEAKER_START] + self.tokenizer.encode((' ' + speaker), add_special_tokens=False)) + [SPEAKER_END])
                    last_speaker = speaker
                else:
                    speaker_prefix = []
                for _ in range(len(speaker_prefix)):
                    end_token_idx_to_word_idx.append(idx)
                token_ids.extend(speaker_prefix)
                # +1 presumably leaves room for the single special token prepended
                # later by encode_plus(add_special_tokens=True) — TODO confirm.
                word_idx_to_start_token_idx[idx] = (len(token_ids) + 1)
                tokenized = self.tokenizer.encode((' ' + word), add_special_tokens=False)
                for _ in range(len(tokenized)):
                    end_token_idx_to_word_idx.append(idx)
                token_ids.extend(tokenized)
                word_idx_to_end_token_idx[idx] = len(token_ids)
            if (0 < self.max_seq_length < len(token_ids)):
                # Drop documents longer than max_seq_length (when it is positive).
                num_examples_filtered += 1
                continue
            new_clusters = [[(word_idx_to_start_token_idx[start], word_idx_to_end_token_idx[end]) for (start, end) in cluster] for cluster in clusters]
            lengths.append(len(token_ids))
            coref_examples.append(((doc_key, end_token_idx_to_word_idx), CorefExample(token_ids=token_ids, clusters=new_clusters)))
            dockey2eems_subtokenspan[doc_key] = [(word_idx_to_start_token_idx[start], word_idx_to_end_token_idx[end]) for (start, end) in dockey2eems_tokenspan[doc_key]]
            dockey2pems_subtokenspan[doc_key] = [(word_idx_to_start_token_idx[start], word_idx_to_end_token_idx[end]) for (start, end) in dockey2pems_tokenspan[doc_key]]
        return (coref_examples, lengths, num_examples_filtered, dockey2eems_subtokenspan, dockey2pems_subtokenspan)
    def __len__(self):
        return len(self.examples)
    def __getitem__(self, item):
        return self.examples[item]
    def pad_clusters_inside(self, clusters):
        # Pad every cluster to max_cluster_size with null-span placeholders.
        return [(cluster + ([(NULL_ID_FOR_COREF, NULL_ID_FOR_COREF)] * (self.max_cluster_size - len(cluster)))) for cluster in clusters]
    def pad_clusters_outside(self, clusters):
        # Pad the cluster list itself to max_num_clusters with empty clusters.
        return (clusters + ([[]] * (self.max_num_clusters - len(clusters))))
    def pad_clusters(self, clusters):
        clusters = self.pad_clusters_outside(clusters)
        clusters = self.pad_clusters_inside(clusters)
        return clusters
    def _pe_create_tensored_batch(self, padded_batch, len_example):
        """Stack per-example (input_ids, attention_mask, clusters) into batch tensors."""
        assert (len_example == 3)
        tensored_batch = tuple()
        for i in range(len_example):
            to_stack = []
            for example in padded_batch:
                assert (len(example) == 3), f'example contains three components: input_ids, attention_mask, and clusters. Current len(examples): {len(example)}'
                if (i < 2):
                    # input_ids / attention_mask come from the tokenizer with a
                    # leading batch dim of 1; squeeze it before stacking.
                    to_stack.append(example[i].squeeze())
                elif (i == 2):
                    to_stack.append(example[i])
            tensored_batch += (torch.stack(to_stack, dim=0),)
        return tensored_batch
    def pad_batch(self, batch, max_length):
        """Encode, pad to max_length (+2 for special tokens), and tensorize a batch."""
        max_length += 2
        padded_batch = []
        for example in batch:
            # NOTE(review): `use_fast` is not a documented encode_plus kwarg and
            # `pad_to_max_length` is deprecated in recent transformers — verify
            # against the pinned transformers version.
            encoded_dict = self.tokenizer.encode_plus(example[0], use_fast=False, add_special_tokens=True, pad_to_max_length=True, max_length=max_length, return_attention_mask=True, return_tensors='pt')
            clusters = self.pad_clusters(example.clusters)
            example = ((encoded_dict['input_ids'], encoded_dict['attention_mask']) + (torch.tensor(clusters),))
            padded_batch.append(example)
        tensored_batch = self._pe_create_tensored_batch(padded_batch, len(example))
        return tensored_batch
class SemiSupervisedSampler(torch.utils.data.Sampler):
    """Batch sampler mixing labeled (sup) and unlabeled (unsup) dataset indices.

    Each yielded batch holds `batch_size` indices: a shuffled slice of the
    supervised pool plus unsupervised indices drawn with replacement to fill
    the `unsup_fraction` share.
    """
    def __init__(self, sup_inds, unsup_inds, batch_size, unsup_fraction=0.5, num_batches=None):
        if ((unsup_fraction is None) or (unsup_fraction < 0)):
            # Fully supervised mode: fold unsup indices into the sup pool.
            # self.unsup_inds is deliberately left unset — with unsup_fraction
            # forced to 0, sup_batch_size == batch_size, so the unsupervised
            # fill branch in __iter__ is never reached.
            self.sup_inds = (sup_inds + unsup_inds)
            unsup_fraction = 0.0
        else:
            self.sup_inds = sup_inds
            self.unsup_inds = unsup_inds
        self.batch_size = batch_size
        unsup_batch_size = int((batch_size * unsup_fraction))
        self.sup_batch_size = (batch_size - unsup_batch_size)
        if (num_batches is not None):
            self.num_batches = num_batches
        else:
            # One epoch = enough batches to cover all supervised indices once.
            self.num_batches = int(np.ceil((len(self.sup_inds) / self.sup_batch_size)))
        super().__init__(None)
    def __iter__(self):
        batch_counter = 0
        while (batch_counter < self.num_batches):
            # Reshuffle the supervised pool at the start of each pass.
            sup_inds_shuffled = [self.sup_inds[i] for i in torch.randperm(len(self.sup_inds))]
            for sup_k in range(0, len(self.sup_inds), self.sup_batch_size):
                if (batch_counter == self.num_batches):
                    break
                batch = sup_inds_shuffled[sup_k:(sup_k + self.sup_batch_size)]
                if (self.sup_batch_size < self.batch_size):
                    # Top up with unsupervised indices sampled with replacement.
                    batch.extend([self.unsup_inds[i] for i in torch.randint(high=len(self.unsup_inds), size=((self.batch_size - len(batch)),), dtype=torch.int64)])
                # Mix sup and unsup positions within the batch.
                np.random.shuffle(batch)
                (yield batch)
                batch_counter += 1
    def __len__(self):
        return self.num_batches
def func_mod(in_file, list_param):
    """Scan a whitespace-delimited text file and extract selected parameters.

    Args:
        in_file: Path to a text file where a relevant line starts with a
            parameter name followed by its value.
        list_param: Iterable of parameter names to look for.

    Returns:
        Dict mapping each found parameter name to its (string) value; when a
        name appears on several lines, the last occurrence wins.
    """
    # Fixes in this revision: the inner loop reused the outer loop variable `i`
    # (shadowing); membership is now an O(1) set lookup instead of an inner
    # scan; lines with a matching name but no value are skipped instead of
    # raising IndexError.
    wanted = set(list_param)
    dict_param = {}
    with open(in_file) as f:
        for raw_line in f:
            tokens = raw_line.split()
            # First token is the parameter name, second its value.
            if len(tokens) > 1 and tokens[0] in wanted:
                dict_param[tokens[0]] = tokens[1]
    return dict_param
def _Graph_fromMOLStringMulti(s: str, options: MDLOptions=None, add: bool=True) -> List[Graph]:
    """Parse a multi-molecule MOL/SDF string into graphs.

    Args:
        s: MOL-format string, possibly containing several molecules.
        options: MDL parsing options; a fresh default MDLOptions() is created
            when omitted. (Previously the default was a single module-level
            MDLOptions() instance evaluated once at definition time and shared
            by every call, so mutating it in one call affected all later calls.)
        add: Whether to register the loaded graphs (forwarded to _graphsLoad).

    Returns:
        The list of loaded Graph objects.
    """
    if options is None:
        options = MDLOptions()
    return _graphsLoad(_Graph_fromMOLStringMulti_orig(s, options), add)
def resample_bounding_box(metadata, transform):
    """Rescale the stored bounding box to match the 'Resample' transform spacing.

    Finds the first Resample step in the image transform pipeline, computes
    per-axis zoom factors (original voxel spacing / target spacing), resizes
    the bounding box accordingly, and writes the new box back into every input
    and ground-truth metadata entry. Mutates `metadata` in place; returns None.
    """
    for (idx, transfo) in enumerate(transform.transform['im'].transforms):
        if ('Resample' == transfo.__class__.__name__):
            (hspace, wspace, dspace) = (transfo.hspace, transfo.wspace, transfo.dspace)
            # Zoom factor per axis: original spacing divided by resampled spacing.
            hfactor = (metadata[MetadataKW.INPUT_METADATA][0][MetadataKW.ZOOMS][0] / hspace)
            wfactor = (metadata[MetadataKW.INPUT_METADATA][0][MetadataKW.ZOOMS][1] / wspace)
            dfactor = (metadata[MetadataKW.INPUT_METADATA][0][MetadataKW.ZOOMS][2] / dspace)
            factor = (hfactor, wfactor, dfactor)
            coord = adjust_bb_size(metadata[MetadataKW.INPUT_METADATA][0][MetadataKW.BOUNDING_BOX], factor, resample=True)
            # Propagate the resampled box to all input and ground-truth entries.
            for i in range(len(metadata[MetadataKW.INPUT_METADATA])):
                metadata[MetadataKW.INPUT_METADATA][i][MetadataKW.BOUNDING_BOX] = coord
            for i in range(len(metadata[MetadataKW.GT_METADATA])):
                metadata[MetadataKW.GT_METADATA][i][MetadataKW.BOUNDING_BOX] = coord
            # Only the first Resample transform is considered.
            break
def get_peft_state_maybe_zero_3(state_dict, bias):
    """Extract LoRA-related entries from a state dict according to the `bias` policy.

    bias='none'      -> only 'lora_' tensors (as detached CPU clones);
    bias='all'       -> 'lora_' tensors plus every bias tensor;
    bias='lora_only' -> 'lora_' tensors plus the bias paired with each one.
    Every selected value is finally passed through maybe_zero_3.
    Raises NotImplementedError for any other `bias` value.
    """
    if bias == 'none':
        selected = {name: state_dict[name].cpu().clone().detach() for name in state_dict if 'lora_' in name}
    elif bias == 'all':
        selected = {name: state_dict[name] for name in state_dict if ('lora_' in name) or ('bias' in name)}
    elif bias == 'lora_only':
        selected = {}
        for name in state_dict:
            if 'lora_' not in name:
                continue
            selected[name] = state_dict[name]
            # Also keep the bias that lives alongside this LoRA weight, if any.
            sibling_bias = name.split('lora_')[0] + 'bias'
            if sibling_bias in state_dict:
                selected[sibling_bias] = state_dict[sibling_bias]
    else:
        raise NotImplementedError
    return {name: maybe_zero_3(value) for (name, value) in selected.items()}
def resnet50(num_classes=1000, pretrained='imagenet'):
    """Build a torchvision ResNet-50, optionally loading pretrained weights.

    `pretrained` names an entry of pretrained_settings['resnet50'] (e.g.
    'imagenet'); pass None for a randomly initialized network.
    """
    model = models.resnet50(pretrained=False)
    if pretrained is None:
        return model
    # load_pretrained also adapts the classifier head to `num_classes`.
    settings = pretrained_settings['resnet50'][pretrained]
    return load_pretrained(model, num_classes, settings)
class TrainerBase(object):
    """Shared scaffolding for VL-T5 / VL-BART trainers: config, model, tokenizer,
    optimizer/scheduler construction, and checkpoint load/save."""
    def __init__(self, args, train_loader=None, val_loader=None, test_loader=None, train=True):
        self.args = args
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.test_loader = test_loader
        # Only rank 0 prints/logs in distributed runs.
        self.verbose = True
        if self.args.distributed:
            if (self.args.gpu != 0):
                self.verbose = False
        if (self.args.tokenizer is None):
            # Tokenizer defaults to the backbone model name.
            self.args.tokenizer = self.args.backbone
        if (not self.verbose):
            # Silence transformers logging on non-zero ranks.
            set_global_logging_level(logging.ERROR, ['transformers'])
    def create_config(self):
        """Build a T5/BART config from the backbone and copy trainer args onto it.

        Returns None when the backbone name contains neither 't5' nor 'bart'.
        """
        from transformers import T5Config, BartConfig
        if ('t5' in self.args.backbone):
            config_class = T5Config
        elif ('bart' in self.args.backbone):
            config_class = BartConfig
        else:
            return None
        config = config_class.from_pretrained(self.args.backbone)
        args = self.args
        # Vision-and-language specific settings consumed by the VL models.
        config.feat_dim = args.feat_dim
        config.pos_dim = args.pos_dim
        config.n_images = 2
        config.use_vis_order_embedding = args.use_vis_order_embedding
        # One dropout value drives all dropout variants.
        config.dropout_rate = args.dropout
        config.dropout = args.dropout
        config.attention_dropout = args.dropout
        config.activation_dropout = args.dropout
        config.use_vis_layer_norm = args.use_vis_layer_norm
        config.individual_vis_layer_norm = args.individual_vis_layer_norm
        config.losses = args.losses
        config.share_vis_lang_layer_norm = args.share_vis_lang_layer_norm
        config.classifier = args.classifier
        return config
    def create_model(self, model_class, config=None, **kwargs):
        """Instantiate `model_class` from the pretrained backbone weights."""
        print(f'Building Model at GPU {self.args.gpu}')
        model_name = self.args.backbone
        model = model_class.from_pretrained(model_name, config=config, **kwargs)
        return model
    def create_tokenizer(self, **kwargs):
        """Create the tokenizer matching the backbone (VL variant when vision is on).

        NOTE(review): if args.tokenizer contains neither 't5' nor 'bart',
        `tokenizer_class` is never assigned and this raises UnboundLocalError.
        Also note bart gets the slow BartTokenizer while t5 gets a Fast class —
        confirm the asymmetry is intentional.
        """
        from transformers import T5Tokenizer, BartTokenizer, T5TokenizerFast, BartTokenizerFast
        from tokenization import VLT5Tokenizer, VLT5TokenizerFast
        if ('t5' in self.args.tokenizer):
            if self.args.use_vision:
                tokenizer_class = VLT5TokenizerFast
            else:
                tokenizer_class = T5TokenizerFast
        elif ('bart' in self.args.tokenizer):
            tokenizer_class = BartTokenizer
        tokenizer_name = self.args.backbone
        tokenizer = tokenizer_class.from_pretrained(tokenizer_name, max_length=self.args.max_text_length, do_lower_case=self.args.do_lower_case, **kwargs)
        return tokenizer
    def create_optimizer_and_scheduler(self):
        """Return (optimizer, lr_scheduler); the scheduler is None unless AdamW is used."""
        if self.verbose:
            print('Building Optimizer')
        lr_scheduler = None
        if ('adamw' in self.args.optim):
            from transformers.optimization import AdamW, get_linear_schedule_with_warmup
            batch_per_epoch = len(self.train_loader)
            t_total = ((batch_per_epoch // self.args.gradient_accumulation_steps) * self.args.epochs)
            warmup_ratio = self.args.warmup_ratio
            warmup_iters = int((t_total * warmup_ratio))
            if self.verbose:
                print(('Batch per epoch: %d' % batch_per_epoch))
                print(('Total Iters: %d' % t_total))
                print('Warmup ratio:', warmup_ratio)
                print(('Warm up Iters: %d' % warmup_iters))
            # No weight decay for biases and LayerNorm weights (standard practice).
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [{'params': [p for (n, p) in self.model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': self.args.weight_decay}, {'params': [p for (n, p) in self.model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
            optim = AdamW(optimizer_grouped_parameters, lr=self.args.lr, eps=self.args.adam_eps)
            lr_scheduler = get_linear_schedule_with_warmup(optim, warmup_iters, t_total)
        else:
            # Here args.optimizer is expected to hold an optimizer constructor.
            optim = self.args.optimizer(list(self.model.parameters()), self.args.lr)
        return (optim, lr_scheduler)
    def load_checkpoint(self, ckpt_path):
        """Load weights, renaming legacy 'vis_encoder.*' keys to 'encoder.*'."""
        state_dict = load_state_dict(ckpt_path, 'cpu')
        original_keys = list(state_dict.keys())
        for key in original_keys:
            if key.startswith('vis_encoder.'):
                new_key = ('encoder.' + key[len('vis_encoder.'):])
                state_dict[new_key] = state_dict.pop(key)
            if key.startswith('model.vis_encoder.'):
                new_key = ('model.encoder.' + key[len('model.vis_encoder.'):])
                state_dict[new_key] = state_dict.pop(key)
        results = self.model.load_state_dict(state_dict, strict=False)
        if self.verbose:
            print('Model loaded from ', ckpt_path)
            pprint(results)
    def init_weights(self):
        """Re-initialize the model BERT-style, then call the model's own init_weights().

        NOTE(review): std=1 is unusually large for normal init (BERT uses 0.02) —
        confirm this is intentional.
        """
        def init_bert_weights(module):
            if isinstance(module, (nn.Linear, nn.Embedding)):
                module.weight.data.normal_(mean=0.0, std=1)
            elif isinstance(module, nn.LayerNorm):
                module.bias.data.zero_()
                module.weight.data.fill_(1.0)
            if (isinstance(module, nn.Linear) and (module.bias is not None)):
                module.bias.data.zero_()
        self.model.apply(init_bert_weights)
        self.model.init_weights()
    def predict(self):
        # To be overridden by subclasses.
        pass
    def evaluate(self):
        # To be overridden by subclasses.
        pass
    def save(self, name):
        """Save model weights to <args.output>/<name>.pth."""
        if (not os.path.isdir(self.args.output)):
            os.makedirs(self.args.output, exist_ok=True)
        torch.save(self.model.state_dict(), os.path.join(self.args.output, ('%s.pth' % name)))
    def load(self, path, loc=None):
        """Load weights from <path>.pth, remapping DataParallel 'module.vis_encoder.*' keys."""
        if ((loc is None) and hasattr(self.args, 'gpu')):
            loc = f'cuda:{self.args.gpu}'
        state_dict = torch.load(('%s.pth' % path), map_location=loc)
        original_keys = list(state_dict.keys())
        for key in original_keys:
            if key.startswith('module.vis_encoder.'):
                new_key = ('module.encoder.' + key[len('module.vis_encoder.'):])
                state_dict[new_key] = state_dict.pop(key)
            if key.startswith('module.model.vis_encoder.'):
                new_key = ('module.model.encoder.' + key[len('module.model.vis_encoder.'):])
                state_dict[new_key] = state_dict.pop(key)
        results = self.model.load_state_dict(state_dict, strict=False)
        if self.verbose:
            print('Model loaded from ', path)
            pprint(results)
def tiny_oshi_zumo_nfsp_avg_policy_params(env: MultiAgentEnv) -> Dict[(str, Any)]:
    """RLlib config for the NFSP average-policy learner on tiny Oshi-Zumo."""
    # GPU share comes from the environment, defaulting to CPU-only.
    worker_gpus = float(os.getenv('WORKER_GPU_NUM', 0.0))
    model_config = merge_dicts(MODEL_DEFAULTS, {
        'fcnet_activation': 'relu',
        'fcnet_hiddens': [128, 128],
        # Custom model masks out invalid actions for this env's action space.
        'custom_model': get_valid_action_fcn_class_for_env(env=env),
    })
    return {
        'framework': 'torch',
        'num_gpus': worker_gpus,
        'num_workers': 0,
        'num_gpus_per_worker': worker_gpus,
        'num_envs_per_worker': 1,
        'learning_starts': 16000,
        'train_batch_size': 4096,
        'lr': 0.1,
        'model': model_config,
    }
def make_atom14_masks_np(batch: Dict[(str, torch.Tensor)]) -> Dict[(str, np.ndarray)]:
    """NumPy wrapper around make_atom14_masks.

    Converts every np.ndarray leaf in `batch` to a torch tensor on the same
    device as batch['aatype'], builds the atom14 masks, then converts the
    resulting tensors back to NumPy arrays.
    """
    batch = tree_map((lambda n: torch.tensor(n, device=batch['aatype'].device)), batch, np.ndarray)
    out = tensor_tree_map((lambda t: np.array(t)), make_atom14_masks(batch))
    return out
class BrainReporter(StatsReporter):
    """StatsReporter that forwards job statistics to the Brain service via its client."""
    def __init__(self, job_meta: JobMeta) -> None:
        self._job_meta = job_meta
        # Shared process-wide client; assumed to be initialized elsewhere.
        self._brain_client = GlobalBrainClient.BRAIN_CLIENT
    def report_dataset_metric(self, dataset: DatasetMetric):
        self._brain_client.report_training_set_metric(self._job_meta, dataset)
    def report_training_hyper_params(self, params: TrainingHyperParams):
        self._brain_client.report_training_hyper_params(self._job_meta, params)
    def report_model_info(self, metric: ModelInfo):
        # Summarize tensor/op statistics into a Model_Feature metrics message.
        job_metrics = init_job_metrics_message(self._job_meta)
        job_metrics.metrics_type = brain_pb2.MetricsType.Model_Feature
        metrics = job_metrics.model_feature
        metrics.total_variable_size = metric.tensor_stats.total_variable_size
        metrics.variable_count = metric.tensor_stats.variable_count
        metrics.op_count = metric.op_stats.op_count
        self._brain_client.report_metrics(job_metrics)
    def report_runtime_stats(self, stats: RuntimeMetric):
        self._brain_client.report_node_runtime_stats(self._job_meta, self._job_meta.namespace, stats)
    def report_job_type(self, job_type: str):
        job_metrics = init_job_metrics_message(self._job_meta)
        job_metrics.metrics_type = brain_pb2.MetricsType.Type
        job_metrics.type = job_type
        logger.info('Report job_type = %s', job_type)
        self._brain_client.report_metrics(job_metrics)
    def report_job_exit_reason(self, reason: str):
        self._brain_client.report_job_exit_reason(self._job_meta, reason)
    def report_customized_data(self, data):
        # Arbitrary payloads are JSON-serialized before reporting.
        job_metrics = init_job_metrics_message(self._job_meta)
        job_metrics.metrics_type = brain_pb2.MetricsType.Customized_Data
        job_metrics.customized_data = json.dumps(data)
        self._brain_client.report_metrics(job_metrics)
    def report_job_meta(self):
        # Report the workflow-level identifiers (job name and owning user).
        job_metrics = init_job_metrics_message(self._job_meta)
        job_metrics.metrics_type = brain_pb2.MetricsType.Workflow_Feature
        metrics = job_metrics.workflow_feature
        metrics.job_name = self._job_meta.name
        metrics.user_id = self._job_meta.user
        self._brain_client.report_metrics(job_metrics)
    def report_job_resource(self, job_resource):
        job_metrics = init_job_metrics_message(self._job_meta)
        job_metrics.metrics_type = brain_pb2.MetricsType.Resource
        job_metrics.resource = job_resource.to_json()
        self._brain_client.report_metrics(job_metrics)
def make_sup_data_loaders(path, batch_size, num_workers, transform_train, transform_test, use_validation=True, val_size=5000, shuffle_train=True, dataset='cifar10'):
    """Build supervised train/test DataLoaders for a named dataset.

    Returns (train_loader, test_loader, num_classes); train_loader is None for
    'notmnist', which is evaluation-only here. When use_validation is True the
    last `val_size` training samples are served as the "test" loader instead of
    the real test split. Relies on the old torchvision dataset interface
    exposing `train_data`/`train_labels` (restored via OldInterface otherwise).
    """
    if (dataset == 'notmnist'):
        # notmnist is an ImageFolder used purely for evaluation (10 classes).
        test_set = torchvision.datasets.ImageFolder(root=path, transform=transform_test)
        test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True)
        return (None, test_loader, 10)
    download = True
    if (dataset.lower() == 'svhn'):
        ds = SVHN_
    elif (dataset.lower() == 'ag_news'):
        ds = AG_News
        download = False
    else:
        # Fall back to the torchvision dataset class named like the dataset.
        ds = getattr(torchvision.datasets, dataset.upper())
    train_set = ds(root=path, train=True, download=download, transform=transform_train)
    if (not (hasattr(train_set, 'train_data') or hasattr(train_set, 'test_data'))):
        # Newer torchvision datasets dropped train_data/train_labels; wrap the
        # constructor so OldInterface restores the legacy attributes used below.
        ds_base = ds
        ds = (lambda *args, **kwargs: OldInterface(ds_base(*args, **kwargs)))
        train_set = ds(root=path, train=True, download=download, transform=transform_train)
    num_classes = (max(train_set.train_labels) + 1)
    if use_validation:
        print((((('Using train (' + str((len(train_set.train_data) - val_size))) + ') + validation (') + str(val_size)) + ')'))
        # Carve the validation split off the tail of the training data and
        # expose it through a second dataset object flagged as "test".
        train_set.train_data = train_set.train_data[:(- val_size)]
        train_set.train_labels = train_set.train_labels[:(- val_size)]
        test_set = ds(root=path, train=True, download=download, transform=transform_test)
        test_set.train = False
        test_set.test_data = test_set.train_data[(- val_size):]
        test_set.test_labels = test_set.train_labels[(- val_size):]
        delattr(test_set, 'train_data')
        delattr(test_set, 'train_labels')
    else:
        test_set = ds(root=path, train=False, download=download, transform=transform_test)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=(True and shuffle_train), num_workers=num_workers, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True)
    return (train_loader, test_loader, num_classes)
def logging(s, log_path, print_=True, log_=True):
    """Emit `s` to stdout and/or append it (with a newline) to the file at `log_path`.

    NOTE(review): this function shadows the stdlib `logging` module name within
    this module — confirm nothing defined after it relies on `logging.getLogger`.
    """
    if print_:
        print(s, flush=True)
    if log_:
        with open(log_path, 'a+') as log_file:
            log_file.write(s + '\n')
class TFConvBertForMaskedLM(metaclass=DummyObject):
    # Placeholder emitted when the optional TensorFlow backend is not installed;
    # any attempt to instantiate it raises an informative ImportError via
    # `requires_backends`.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def test_shapely_polygon_intersection1():
    """Two overlapping 3x3 axis-aligned squares share a 1x2 strip (area 2)."""
    square_a = np.array([[0, 0], [3, 0], [3, 3], [0, 3]])
    square_b = np.array([[2, 1], [5, 1], [5, 4], [2, 4]])
    # Overlap spans x in [2, 3] and y in [1, 3] -> 1 * 2 = 2 square units.
    assert shapely_polygon_intersection(square_a, square_b) == 2
    # Each square individually covers 3 * 3 = 9 square units.
    assert shapely_polygon_area(square_a) == 9
    assert shapely_polygon_area(square_b) == 9
def load_data(dest_dir='/tmp/.zoo/dataset', nb_words=None, oov_char=2, test_split=0.2):
    """Load the Reuters pickle, cap the vocabulary, and split into train/test.

    Word indices >= nb_words are replaced by `oov_char` (or dropped entirely
    when oov_char is None). nb_words defaults to the largest index present.
    Returns ((x_train, y_train), (x_test, y_test)).
    """
    path = download_reuters(dest_dir)
    with open(path, 'rb') as f:
        (x, y) = cPickle.load(f)
    # Seeded shuffle keeps x and y aligned while randomizing order.
    shuffle_by_seed([x, y])
    if not nb_words:
        nb_words = max(max(seq) for seq in x)
    if oov_char is not None:
        # Replace out-of-vocabulary indices with the sentinel token.
        x = [[(oov_char if word >= nb_words else word) for word in seq] for seq in x]
    else:
        # Drop out-of-vocabulary indices entirely.
        x = [[word for word in seq if word < nb_words] for seq in x]
    split_index = int(len(x) * (1 - test_split))
    return ((x[:split_index], y[:split_index]), (x[split_index:], y[split_index:]))
def old_resnet101(pretrained=False, **kwargs):
    # Build a ResNet-101 (Bottleneck blocks, layer config [3, 4, 23, 3]).
    # NOTE(review): the `pretrained` flag is accepted but ignored — the model is
    # always returned with random initialization; confirm whether weight
    # loading was dropped intentionally.
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    return model
class XnliProcessor(DataProcessor):
    """Processor for the XNLI dataset.

    Reads the machine-translated MultiNLI training files and the official XNLI
    test split. This revision replaces the `assert isinstance(...)` input
    checks with explicit ValueError raises: asserts are silently stripped under
    `python -O`, and the other XNLI processor in this file already raises.
    """

    def __init__(self, language, train_language=None):
        # `train_language` lets training use a different language than evaluation.
        self.language = language
        self.train_language = train_language

    @staticmethod
    def _require_str(value, what):
        """Validate with a real exception so the check survives `python -O`."""
        if not isinstance(value, str):
            raise ValueError(f'Training {what} {value} is not a string')

    def get_train_examples(self, data_dir):
        """Read the machine-translated MultiNLI training file for the train language."""
        lg = (self.language if (self.train_language is None) else self.train_language)
        lines = self._read_tsv(os.path.join(data_dir, 'XNLI-MT-1.0/multinli/multinli.train.{}.tsv'.format(lg)))
        examples = []
        for (i, line) in enumerate(lines):
            if (i == 0):
                continue  # skip the TSV header
            guid = ('%s-%s' % ('train', i))
            text_a = line[0]
            text_b = line[1]
            # The MT training files spell the label 'contradictory'; normalize it.
            label = ('contradiction' if (line[2] == 'contradictory') else line[2])
            self._require_str(text_a, 'input')
            self._require_str(text_b, 'input')
            self._require_str(label, 'label')
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_test_examples(self, data_dir):
        """Read the official XNLI test file, keeping only this processor's language."""
        lines = self._read_tsv(os.path.join(data_dir, 'XNLI-1.0/xnli.test.tsv'))
        examples = []
        for (i, line) in enumerate(lines):
            if (i == 0):
                continue  # skip the TSV header
            language = line[0]
            if (language != self.language):
                continue  # the test file mixes all languages
            guid = ('%s-%s' % ('test', i))
            text_a = line[6]
            text_b = line[7]
            label = line[1]
            self._require_str(text_a, 'input')
            self._require_str(text_b, 'input')
            self._require_str(label, 'label')
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_labels(self):
        """The three NLI classes."""
        return ['contradiction', 'entailment', 'neutral']
def init_weights(net, init_type='kaiming', scale=1, std=0.02):
    """Apply the chosen weight-initialization scheme to every module of `net`.

    init_type: 'normal' (gaussian with stddev `std`), 'kaiming' (scaled by
    `scale`), or 'orthogonal'; anything else raises NotImplementedError.
    """
    logger.info('Initialization method [{:s}]'.format(init_type))
    if init_type == 'normal':
        net.apply(functools.partial(weights_init_normal, std=std))
    elif init_type == 'kaiming':
        net.apply(functools.partial(weights_init_kaiming, scale=scale))
    elif init_type == 'orthogonal':
        net.apply(weights_init_orthogonal)
    else:
        raise NotImplementedError('initialization method [{:s}] not implemented'.format(init_type))
class EuroSATDataModule(pl.LightningDataModule):
    """Lightning data module for the EuroSAT benchmark using the Zhou et al.
    split file (split_zhou_EuroSAT.json), with SimCLR-style training
    augmentations and CLIP-style text prompts per class."""
    def __init__(self, data_root, train_batch_size, test_batch_size, num_workers, scale_lower_bound, jitter_prob, greyscale_prob, solarize_prob, **kwargs):
        super().__init__()
        self.data_root = data_root
        self.train_batch_size = train_batch_size
        self.test_batch_size = test_batch_size
        self.num_workers = num_workers
        # Augmentation hyper-parameters for the training transform.
        self.scale_lower_bound = scale_lower_bound
        self.jitter_prob = jitter_prob
        self.greyscale_prob = greyscale_prob
        self.solarize_prob = solarize_prob
        self.nr_of_classes = 10
        # Prefix used to build the zero-shot text prompt for each class.
        self.prompt_prefix = 'This is a photo of a'
    def setup(self, stage: Optional[str]=None) -> None:
        """Build transforms, class-name mappings, prompts, and per-stage datasets."""
        root_dir = (pathlib.Path(self.data_root) / 'eurosat')
        # NOTE(review): Normalize std=(0., 0., 0.) divides by zero and the means
        # look like placeholders — confirm these statistics are intentional.
        train_transform = torchvision.transforms.Compose([torchvision.transforms.RandomResizedCrop(224, scale=(self.scale_lower_bound, 1.0), interpolation=InterpolationMode.BICUBIC), torchvision.transforms.RandomApply([torchvision.transforms.ColorJitter(0.4, 0.4, 0.4, 0.4)], p=self.jitter_prob), torchvision.transforms.RandomGrayscale(p=self.greyscale_prob), torchvision.transforms.RandomApply([Solarize()], p=self.solarize_prob), torchvision.transforms.RandomHorizontalFlip(), torchvision.transforms.ToTensor(), torchvision.transforms.Normalize(mean=(0., 0.4578275, 0.), std=(0., 0., 0.))])
        test_transform = torchvision.transforms.Compose([torchvision.transforms.Resize(size=224), torchvision.transforms.CenterCrop(size=224), torchvision.transforms.ToTensor(), torchvision.transforms.Normalize(mean=(0., 0.4578275, 0.), std=(0., 0., 0.))])
        self.id_to_class = self._load_id_to_class(root_dir)
        self._create_index_to_classes()
        self._create_prompts()
        if (stage == 'fit'):
            # Validation reuses the test split; this benchmark has no val split.
            self.train_set = JSONDataset(json_path=os.path.join(root_dir, 'split_zhou_EuroSAT.json'), data_root=os.path.join(root_dir, 'images'), split='train', transforms=train_transform)
            self.val_set = JSONDataset(json_path=os.path.join(root_dir, 'split_zhou_EuroSAT.json'), data_root=os.path.join(root_dir, 'images'), split='test', transforms=test_transform)
        if (stage == 'test'):
            self.test_set = JSONDataset(json_path=os.path.join(root_dir, 'split_zhou_EuroSAT.json'), data_root=os.path.join(root_dir, 'images'), split='test', transforms=test_transform)
    def train_dataloader(self):
        return torch.utils.data.DataLoader(self.train_set, shuffle=True, batch_size=self.train_batch_size, num_workers=self.num_workers, persistent_workers=(True if (self.num_workers > 0) else False))
    def val_dataloader(self):
        return torch.utils.data.DataLoader(self.val_set, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers, persistent_workers=(True if (self.num_workers > 0) else False))
    def test_dataloader(self):
        return torch.utils.data.DataLoader(self.test_set, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers, persistent_workers=(True if (self.num_workers > 0) else False))
    def _create_index_to_classes(self):
        # Human-readable class names ('annual_crop' -> 'annual crop'), sorted by id.
        index_to_classes = {key: value.replace('_', ' ') for (key, value) in self.id_to_class.items()}
        self.index_to_classes = dict(sorted(index_to_classes.items()))
    def _load_id_to_class(self, root_dir):
        # Instantiate a throwaway dataset purely to read its class_to_idx mapping.
        dummy_set = JSONDataset(json_path=os.path.join(root_dir, 'split_zhou_EuroSAT.json'), data_root=os.path.join(root_dir, 'images'), split='train', transforms=None)
        class_to_idx = dummy_set.class_to_idx
        return {idx: class_label for (class_label, idx) in class_to_idx.items()}
    def _create_prompts(self):
        # One prompt per class, e.g. 'This is a photo of a forest'.
        prompts = [((self.prompt_prefix + ' ') + text_label.lower()) for text_label in self.index_to_classes.values()]
        self.prompts = prompts
        # NOTE(review): leftover debug print — emits a blank line on every setup.
        print()
def simxGetDialogInput(clientID, dialogHandle, operationMode):
    """Remote-API binding: read the text the user typed into a dialog.

    Returns (ret, text). `ret` is the remote API return code; `text` is the
    decoded dialog content, and is only populated when ret == 0 (otherwise
    an empty string).
    """
    text_ptr = ct.POINTER(ct.c_char)()
    ret = c_GetDialogInput(clientID, dialogHandle, ct.byref(text_ptr), operationMode)
    chars = bytearray()
    if ret == 0:
        # Walk the NUL-terminated C string byte by byte.
        pos = 0
        while text_ptr[pos] != b'\x00':
            if sys.version_info[0] == 3:
                # py3: indexing a c_char pointer yields a 1-byte bytes object
                chars.append(int.from_bytes(text_ptr[pos], 'big'))
            else:
                chars.append(text_ptr[pos])
            pos += 1
    if sys.version_info[0] == 3:
        decoded = str(chars, 'utf-8')
    else:
        decoded = str(chars)
    return (ret, decoded)
class SimpleRecurrentSurrogate(nn.Module):
    """LSTM surrogate that maps a sequence of operation features to a score in (0, 1).

    Input to forward() is (seq_len, batch, number_input_feats); the score is
    read from the last time step's hidden output.
    """

    def __init__(self, num_hidden=100, number_input_feats=3, size_ebedding=100):
        super(SimpleRecurrentSurrogate, self).__init__()
        self.num_hidden = num_hidden
        # Per-step embedding of the raw operation features.
        self.embedding = nn.Sequential(nn.Linear(number_input_feats, size_ebedding), nn.Sigmoid())
        self.lstm = nn.LSTM(size_ebedding, num_hidden)
        self.hid2val = nn.Linear(num_hidden, 1)
        self.nonlinearity = nn.Sigmoid()
        # Deliberate non-standard init: small uniform weights, large positive
        # bias (1.8) on every Linear layer — preserved from the original.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                m.weight.data.uniform_(-0.1, 0.1)
                m.bias.data.fill_(1.8)

    def forward(self, sequence_of_operations):
        """Score a (seq_len, batch, feats) tensor; returns (batch, 1) in (0, 1)."""
        # Improvement: nn.Linear broadcasts over leading dims, so the whole
        # sequence is embedded in one call instead of a per-timestep Python
        # loop + torch.stack (identical numerics, less overhead).
        embeds = self.embedding(sequence_of_operations)
        lstm_out, hidden = self.lstm(embeds)
        # Value is computed from the final time step only.
        val_space = self.hid2val(lstm_out[-1])
        return self.nonlinearity(val_space)

    def eval_model(self, sequence_of_operations_np, device):
        """Score a single (seq_len, feats) numpy sequence; returns a scalar."""
        npseq = np.expand_dims(sequence_of_operations_np, 1)  # add batch dim of 1
        sequence_of_operations = torch.from_numpy(npseq).float().to(device)
        res = self.forward(sequence_of_operations)
        res = res.cpu().data.numpy()
        return res[(0, 0)]
# NOTE(review): this bare `_config` line is almost certainly a mangled
# `@_config` decorator whose `@` was lost in extraction — confirm against
# the original repository before relying on this file.
_config
def model_lifelong_sidetune_double_open_fcn5s_taskonomy():
    # Sacred/experiment-framework style config function: it only defines
    # `cfg` in its local scope (presumably captured by the decorator) and
    # intentionally returns nothing.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'FCN5', 'base_weights_path': '/mnt/models/curvature_encoder_student.dat', 'base_kwargs': {'eval_only': False, 'train': True, 'normalize_outputs': False}, 'use_baked_encoding': False, 'side_class': 'FCN5', 'side_weights_path': '/mnt/models/curvature_encoder_student.dat', 'side_kwargs': {'eval_only': False, 'train': True, 'normalize_outputs': False}, 'normalize_pre_transfer': True}}}
def setup_logger(name, save_dir, if_train):
    """Create a DEBUG-level logger that writes to stdout and, optionally, a file.

    Args:
        name: logger name (loggers are process-wide singletons per name, so
            calling this twice with the same name adds duplicate handlers).
        save_dir: directory for the log file; falsy skips file logging.
        if_train: True -> 'train_log.txt', False -> 'test_log.txt' (mode 'w').

    Returns:
        The configured logging.Logger.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
    ch = logging.StreamHandler(stream=sys.stdout)
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    if save_dir:
        # Fix: exist_ok avoids the check-then-create race of the original
        # `if not osp.exists(...): os.makedirs(...)` (and drops the osp alias).
        os.makedirs(save_dir, exist_ok=True)
        log_name = 'train_log.txt' if if_train else 'test_log.txt'
        fh = logging.FileHandler(os.path.join(save_dir, log_name), mode='w')
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    return logger
def execute():
    """Build train/test/dev TSVs for patent claim <-> search-report matching.

    Reads positive and negative text pairs from fixed CSV paths, normalizes
    the text (strips HTML remnants and non-alphanumerics), drops blanks,
    splits 80/10/10 without shuffling (after sorting by 'text'), reports
    cross-split leakage of claim texts, and writes tab-separated files.
    """
    path = '/mnt/data/datasets/patents/patent_matching'
    dtypes = {'application_claim_text': str, 'patent_searchReport_paragraph': str}
    positives = pd.read_csv(path + '/positives_satellite.csv', header=0, dtype=dtypes)
    negatives = pd.read_csv(path + '/negatives_satellite.csv', header=0, dtype=dtypes)
    sample_size = 1.0
    positives = positives[['application_claim_text', 'patent_searchReport_paragraph']]
    positives['label'] = '1'
    positives = positives.rename(columns={'application_claim_text': 'text', 'patent_searchReport_paragraph': 'text_b'})
    negatives = negatives[['application_claim_text', 'patent_searchReport_paragraph']]
    negatives['label'] = '0'
    negatives = negatives.rename(columns={'application_claim_text': 'text', 'patent_searchReport_paragraph': 'text_b'})
    # Fix: DataFrame.append was removed in pandas 2.0 -> pd.concat.
    allSamples = pd.concat([positives, negatives], ignore_index=True).dropna()
    # Shared cleanup applied to both text columns (the original duplicated
    # every replace for 'text' and 'text_b').
    substring_cleanup = [
        ('<\\/p', ''),          # dangling closing-paragraph fragments
        ('\\<.+?\\>', ''),      # any remaining HTML/XML tags
        ('--\\>', ''),          # comment terminators
        ('"', ''),
        ('[^A-Za-z0-9\\s.]+', ''),  # keep alphanumerics, whitespace, periods
    ]
    for col in ('text', 'text_b'):
        for pattern, repl in substring_cleanup:
            allSamples[col] = allSamples[col].str.replace(pattern, repl, regex=True)
        # Fix: assignment instead of chained `.replace(..., inplace=True)` on a
        # column selection, which pandas flags and may silently not propagate.
        allSamples[col] = allSamples[col].replace('^\\s', '', regex=True)
        allSamples[col] = allSamples[col].replace('\\B\\s+|\\s+\\B', '', regex=True)
        allSamples[col] = allSamples[col].replace('^[\\s]*$', np.nan, regex=True)
    allSamples = allSamples.sort_values(by=['text']).dropna()
    (train, test_dev) = train_test_split(allSamples, test_size=0.2, shuffle=False)
    (test, dev) = train_test_split(test_dev, test_size=0.5, shuffle=False)
    # frac=1.0 shuffles rows within each split.
    train = train.sample(frac=sample_size)
    test = test.sample(frac=sample_size)
    dev = dev.sample(frac=sample_size)
    print('Check for intersection values:')
    print('Train in Test')
    print(train['text'].isin(test['text']).value_counts())
    print('Train in Dev')
    print(train['text'].isin(dev['text']).value_counts())
    print('Test in Dev')
    print(test['text'].isin(dev['text']).value_counts())
    train.to_csv(path + '/train.tsv', sep='\t', index=False)
    test.to_csv(path + '/test.tsv', sep='\t', index=False)
    dev.to_csv(path + '/dev.tsv', sep='\t', index=False)
def build_dataloader(dataset, vocab, batch_size, max_decode, is_train, num_workers):
    """Wrap `dataset` in a DataLoader whose collate step builds a Batch.

    Training loaders are shuffled; `vocab` and `max_decode` are bound as
    default arguments so the collate closure carries its own copies.
    """
    collate = lambda data, v=vocab, t=max_decode: Batch(data=data, vocab=v, max_decode=t)
    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=bool(is_train),
        collate_fn=collate,
        num_workers=num_workers,
    )
def cents_to_bins(cents, quantize_fn=torch.floor):
    """Quantize pitch values in cents to integer bin indices.

    Indices are clamped into [0, penn.PITCH_BINS - 1] — identical to masking
    negatives to 0 and overflows to the last bin, as the original did.
    """
    raw = quantize_fn(cents / penn.CENTS_PER_BIN).long()
    return raw.clamp_(0, penn.PITCH_BINS - 1)
def _concat_dataset(cfg, default_args=None):
    """Expand a multi-annotation config into one ConcatDataset.

    cfg['ann_file'] is a list; img/seg/proposal entries are distributed
    per-dataset only when they are themselves lists or tuples.
    """
    ann_files = cfg['ann_file']
    per_dataset_fields = (
        ('img_prefix', cfg.get('img_prefix', None)),
        ('seg_prefix', cfg.get('seg_prefix', None)),
        ('proposal_file', cfg.get('proposal_file', None)),
    )
    datasets = []
    for idx, ann_file in enumerate(ann_files):
        entry_cfg = copy.deepcopy(cfg)
        entry_cfg['ann_file'] = ann_file
        for key, values in per_dataset_fields:
            if isinstance(values, (list, tuple)):
                entry_cfg[key] = values[idx]
        datasets.append(build_dataset(entry_cfg, default_args))
    return ConcatDataset(datasets)
def raw_transform(box: Box, R: Array) -> Array:
    """Map positions R through the simulation box.

    A scalar box scales uniformly; a vector scales per-dimension; a matrix
    applies a full affine transform to the last axis of R.
    """
    if jnp.isscalar(box) or box.size == 1:
        # Scalar box: uniform scaling.
        return R * box
    if box.ndim == 1:
        # Per-dimension scaling, contracted against R's last axis.
        idx = _get_free_indices(R.ndim - 1) + 'i'
        return jnp.einsum(f'i,{idx}->{idx}', box, R)
    if box.ndim == 2:
        # Full matrix transform of R's last axis.
        free = _get_free_indices(R.ndim - 1)
        return jnp.einsum(f'ij,{free}j->{free}i', box, R)
    raise ValueError(f'Box must be either: a scalar, a vector, or a matrix. Found {box}.')
def save_checkpoint_modified(state, epoch, output_directory, is_best=True, curr_step=None):
    """Persist a training checkpoint and prune the previous epoch's files.

    Saves to 'checkpoint-<epoch>_<curr_step>.pth.tar', optionally copies it
    to 'model_best.pth.tar', and removes any 'checkpoint-<epoch-1>*' files.
    """
    import glob  # local import: only needed for pruning old checkpoints

    # exist_ok avoids a race between the existence check and the mkdir.
    os.makedirs(output_directory, exist_ok=True)
    checkpoint_filename = os.path.join(
        output_directory, 'checkpoint-' + str(epoch) + '_' + str(curr_step) + '.pth.tar')
    torch.save(state, checkpoint_filename)
    print('Checkpoint Saved Successfully\n')
    if is_best:
        best_filename = os.path.join(output_directory, 'model_best.pth.tar')
        shutil.copyfile(checkpoint_filename, best_filename)
    if epoch > 0:
        # BUG FIX: the original passed a glob pattern ('checkpoint-N*.pth.tar')
        # to os.path.exists(), which never matches a literal '*', so stale
        # checkpoints were never deleted. Expand the pattern properly.
        pattern = os.path.join(output_directory, 'checkpoint-' + str(epoch - 1) + '*.pth.tar')
        for stale in glob.glob(pattern):
            os.remove(stale)
def test_double_syspool(vrblvl=0):
    """Exercise the double-precision systems pool.

    Fills three pool slots with t-1, t-2, t-3 via the working system, reads
    each slot back and prints it. Returns 0 when the pool size is 3, else 1.
    """
    initialize_double_syspool(3, vrblvl)
    dim = size_double_syspool(vrblvl)
    print('The size of the systems pool :', dim)
    # Stage each polynomial in working slot 1, then copy it into the pool.
    for slot, polsys in enumerate((['t - 1;'], ['t - 2;'], ['t - 3;']), start=1):
        set_double_system(1, polsys, vrblvl)
        copy_to_double_syspool(slot)
    # Read every slot back out of the pool.
    for slot in range(1, dim + 1):
        clear_double_system(vrblvl)
        copy_from_double_syspool(slot)
        pols = get_double_system(vrblvl)
        print('system at', slot, 'in the pool :', pols)
    clear_double_syspool(vrblvl)
    return int(dim != 3)
def data_split_evaluator(opt):
    """Evaluate a model on one ImageNet-bboxes split and append accuracies.

    Loads the weights appropriate to opt.model_type, computes single-crop and
    10-crop top-1/top-5 accuracy, and appends the results (keyed by the last
    path component of opt.localization_val_path) to opt.eval_result_savepath.
    Does nothing for datasets other than 'imagenet_bboxes'.
    """
    if opt.dataset != 'imagenet_bboxes':
        return
    model = build_model(opt)
    model = torch.nn.DataParallel(model)
    # BUG FIX: the original used two independent `if`s, so after the first
    # branch loaded DLUPI weights the trailing `else` (bound to the second
    # `if`) still ran load_pretrained_model() and clobbered them. A single
    # if/elif/else chain loads exactly one set of weights.
    if opt.model_type == ModelType.DROPOUT_FN_OF_XSTAR:
        model = load_pretrained_dlupi_model(model, opt)
    elif opt.model_type == ModelType.EVAL_DROPOUT_FN_OF_XSTAR_CURRICULUM_PHASE2:
        model = load_curriculum_learned_model(model, opt)
    else:
        model = load_pretrained_model(model, opt)
    (single_crop_top1, single_crop_top5) = eval_1_crop_accuracy(opt, model)
    (multi_crop_top1, multi_crop_top5) = eval_10_crop_accuracy(opt, model)
    split = opt.localization_val_path.split('/')[-1]
    with open(opt.eval_result_savepath, 'a+') as f:
        f.write('Split = %s \n' % split)
        f.write('Single crop top1 = {:.4f}\n'.format(single_crop_top1))
        f.write('Single crop top5 = {:.4f}\n'.format(single_crop_top5))
        f.write('Multi (10) crop top1 = {:.4f}\n'.format(multi_crop_top1))
        f.write('Multi (10) crop top5 = {:.4f}\n'.format(multi_crop_top5))
        f.write('\n')
class ErnieConfig(PretrainedConfig):
    """Configuration for ERNIE models.

    A BERT-style encoder configuration extended with `task_type_vocab_size`
    and `use_task_id` for ERNIE's task-type embedding table.
    """

    model_type = 'ernie'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, task_type_vocab_size=3, use_task_id=False, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # Store every hyper-parameter on the instance, in the same order the
        # constructor declares them (pad_token_id is handled by the base).
        for attr_name, attr_value in (
            ('vocab_size', vocab_size),
            ('hidden_size', hidden_size),
            ('num_hidden_layers', num_hidden_layers),
            ('num_attention_heads', num_attention_heads),
            ('hidden_act', hidden_act),
            ('intermediate_size', intermediate_size),
            ('hidden_dropout_prob', hidden_dropout_prob),
            ('attention_probs_dropout_prob', attention_probs_dropout_prob),
            ('max_position_embeddings', max_position_embeddings),
            ('type_vocab_size', type_vocab_size),
            ('task_type_vocab_size', task_type_vocab_size),
            ('use_task_id', use_task_id),
            ('initializer_range', initializer_range),
            ('layer_norm_eps', layer_norm_eps),
            ('position_embedding_type', position_embedding_type),
            ('use_cache', use_cache),
            ('classifier_dropout', classifier_dropout),
        ):
            setattr(self, attr_name, attr_value)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.