code stringlengths 101 5.91M |
|---|
def dataloader_creator(cfg):
    """Build the training dataloader described by ``cfg``.

    Defaults (2 samples/GPU, 0 workers, no persistent workers) are merged with
    any ``cfg.data.train_dataloader`` overrides, the overrides winning.

    Args:
        cfg: mmcv-style config with ``data``, ``gpu_ids``, ``distributed``,
            ``seed`` and optionally ``runner`` entries.

    Returns:
        The constructed training dataloader.
    """
    # Fall back to the epoch-based runner when the config does not name one.
    runner_type = cfg.runner['type'] if 'runner' in cfg else 'EpochBasedRunner'
    dataset = build_dataset(cfg.data.train)
    defaults = dict(
        samples_per_gpu=2,
        workers_per_gpu=0,
        num_gpus=len(cfg.gpu_ids),
        dist=cfg.distributed,
        seed=cfg.seed,
        runner_type=runner_type,
        persistent_workers=False,
    )
    loader_cfg = {**defaults, **cfg.data.get('train_dataloader', {})}
    return build_dataloader(dataset, **loader_cfg)
class UnitDictionary(Dictionary):
    """A fixed-size dictionary of integer "units" ``0 .. n_units-1``.

    Unlike the base fairseq ``Dictionary``, the unit symbols are registered
    FIRST and the special symbols (bos, pad, eos, unk — in that order) come
    after them, so unit ``i`` maps to index ``i`` directly.
    """

    def __init__(self, *, n_units, bos='<s>', pad='<pad>', eos='</s>', unk='<unk>', extra_special_symbols=None, clip=False):
        self.n_units = n_units
        self.bos_word = bos
        self.unk_word = unk
        self.pad_word = pad
        self.eos_word = eos
        # When enabled, out-of-range units are clamped to n_units - 1.
        self.clip = clip
        self.symbols = []
        self.count = []
        self.indices = {}
        # Register every unit before any special symbol so unit i == index i.
        for unit in range(n_units):
            self.add_symbol(str(unit))
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for symbol in extra_special_symbols:
                self.add_symbol(symbol)
        self.nspecial = len(self.symbols)

    def encode_line(self, line, append_eos=True, prepend_bos=False) -> torch.IntTensor:
        """Encode a whitespace-separated line of integer units into a tensor."""
        tokens = [int(tok) for tok in line.split()]
        if self.clip:
            ceiling = self.n_units - 1
            tokens = [min(ceiling, tok) for tok in tokens]
        if prepend_bos:
            tokens.insert(0, self.bos_index)
        if append_eos:
            tokens.append(self.eos_index)
        return torch.IntTensor(tokens)
def get_example_inputs(model_name, dataset_name='sst2'):
    """Tokenize one validation sample of *dataset_name* for *model_name*.

    The text column is ``'text'`` for lambada and ``'sentence'`` otherwise
    (e.g. sst2). The sample is padded to a fixed length of 195 tokens and
    returned as PyTorch tensors, ready to feed a transformers model.
    """
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
    sample = load_dataset(dataset_name, split='validation')[0]
    if dataset_name == 'lambada':
        text = sample['text']
    else:
        text = sample['sentence']
    return tokenizer(text, padding='max_length', max_length=195, return_tensors='pt')
def le_net_cifar(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config=None):
    """Create the LeNet CIFAR bio-model converted to the module-level MODE.

    Args:
        pretrained: load pretrained weights if available.
        progress: show a download progress bar.
        num_classes: number of output classes (CIFAR-10 default).
        layer_config: optional per-layer conversion configuration.
    """
    print('Converting LeNet CNN CIFAR to {} mode'.format(MODE_STRING))
    return create_le_net_biomodel(
        le_net.le_net_cifar, MODE, layer_config, pretrained, progress, num_classes
    )
def map_to_cuda(tensor_dict):
    """Return a new dict with every value moved to CUDA via ``.cuda()``.

    Keys are preserved; the input dict is not modified.
    """
    return {name: tensor.cuda() for name, tensor in tensor_dict.items()}
def test_orbit_setup_radec_uvw_oddunits():
    """Initialize an Orbit from RA/Dec/dist/UVW Quantities in non-default units
    (rad, pc, pc/Myr) and check they are converted correctly."""
    from galpy.orbit import Orbit
    deg_per_rad = 180.0 / numpy.pi
    pos_tol = 10.0 ** -8.0
    vel_tol = 10.0 ** -5.0
    o = Orbit(
        [
            1.0 * units.rad,
            -0.25 * units.rad,
            3000.0 * units.pc,
            -30.0 * units.pc / units.Myr,
            20.0 * units.pc / units.Myr,
            130.0 * units.pc / units.Myr,
        ],
        radec=True,
        uvw=True,
    )
    # 1 rad / -0.25 rad in degrees; 3000 pc == 3 kpc; pc/Myr ~= km/s.
    assert numpy.fabs(o.ra(quantity=False) - 1.0 * deg_per_rad) < pos_tol, 'Orbit initialization with RA as Quantity does not work as expected'
    assert numpy.fabs(o.dec(quantity=False) + 0.25 * deg_per_rad) < pos_tol, 'Orbit initialization with Dec as Quantity does not work as expected'
    assert numpy.fabs(o.dist(quantity=False) - 3.0) < pos_tol, 'Orbit initialization with distance as Quantity does not work as expected'
    assert numpy.fabs(o.U(quantity=False) + 30.0) < vel_tol, 'Orbit initialization with U as Quantity does not work as expected'
    assert numpy.fabs(o.V(quantity=False) - 20.0) < vel_tol, 'Orbit initialization with V as Quantity does not work as expected'
    assert numpy.fabs(o.W(quantity=False) - 130.0) < vel_tol, 'Orbit initialization with W as Quantity does not work as expected'
    return None
def display_few_shot_examples():
    """Display the four few-shot food-usecase example crops in a 1x4 grid.

    Reads hard-coded images from the RepMet notebook data directory and draws
    them on matplotlib figure 2 with axes hidden. Purely for visualization —
    returns nothing.
    """
    data_root = '/dccstor/jsdata1/dev/RepMet/notebooks/food_usecase_data'
    image_set = [
        'PRDS_0_192_501_589_885_top.jpg',
        'PRDS_0_119_137_523_447_top.jpg',
        'PRDS_0_118_208_470_612_top.jpg',
        'PRDS_0_571_234_923_608_top.jpg',
    ]
    nrows, ncols = 1, 4
    fig = plt.figure(2)
    scale = 2
    fig.set_size_inches((scale * 8.5, 3 * scale * 11), forward=False)
    for slot, img_basename in enumerate(image_set, start=1):
        img = mpimg.imread(os.path.join(data_root, img_basename))
        plt.subplot(nrows, ncols, slot)
        plt.imshow(img)
        plt.axis('off')
def prepare_image(pil_image, w=512, h=512):
    """Convert a PIL image into a normalized NCHW float tensor.

    The image is bicubically resized to (w, h), converted to RGB, scaled from
    [0, 255] into [-1, 1], transposed HWC -> CHW and given a leading batch
    dimension of 1.
    """
    resized = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    pixels = np.array(resized.convert('RGB'))
    # Map [0, 255] -> [-1, 1].
    pixels = pixels.astype(np.float32) / 127.5 - 1
    chw = np.transpose(pixels, [2, 0, 1])
    return torch.from_numpy(chw).unsqueeze(0)
def initialize(worker):
    """Spawn one ``worker.brain`` process per key in the global ``chunk_keys``.

    Each process receives the child end of a Pipe; after all processes are
    started, an ('INIT', key) message is sent down every parent end.

    Returns:
        (parent_conns, child_conns, processes) — dicts keyed by chunk key.
    """
    parent_conns = {}
    child_conns = {}
    processes = {}
    for key in chunk_keys:
        parent, child = Pipe()
        parent_conns[key] = parent
        child_conns[key] = child
        proc = Process(target=worker.brain, args=(child,))
        proc.start()
        processes[key] = proc
    # Only signal INIT once every worker process is running.
    for key, parent in parent_conns.items():
        parent.send(('INIT', key))
    return (parent_conns, child_conns, processes)
def match_frame_ctrl_input(data_dir, datasets, max_offset, redo_matching=False, remove_zeros=True, policy='autopilot'):
    """Collect matched frames from every session folder of every dataset.

    For each session directory under ``data_dir/<dataset>/``, runs
    ``match_frame_session`` and gathers the first element of each timestamp
    entry into a flat list.
    """
    frames = []
    for dataset in datasets:
        dataset_dir = os.path.join(data_dir, dataset)
        for folder in utils.list_dirs(dataset_dir):
            session_dir = os.path.join(dataset_dir, folder)
            frame_map = match_frame_session(session_dir, max_offset, redo_matching, remove_zeros, policy)
            frames.extend(frame_map[timestamp][0] for timestamp in list(frame_map))
    return frames
class TestDiceLoss(TestCase):
    """Timing benchmark and smoke test for three Dice-loss implementations.

    Note: ``test_mask_dice`` is a wall-clock comparison — the statement order
    (timestamps interleaved with the loops) is the point of the test.
    """
    def setUp(self) -> None:
        # Random logits for a batch of 10 three-class 256x256 segmentations,
        # plus matching integer class targets in {0, 1, 2}.
        self.predict_logit = torch.randn(10, 3, 256, 256)
        self.target = torch.randint(0, 3, (10, 256, 256))
    def test_mask_dice(self):
        # Compare wall-clock time of ThreeDimDiceLoss vs dice_batch vs MetaDice
        # on identical one-hot inputs. Loss values themselves are not asserted.
        iteration = 10
        criterion = ThreeDimDiceLoss()
        onehot_pred = logit2one_hot(self.predict_logit)
        onehot_target = class2one_hot(self.target, 3)
        start = time.time()
        for _ in range(iteration):
            loss1 = criterion(onehot_pred.float(), onehot_target)
        end1 = time.time()
        print(f'for method 1, costed time:{(end1 - start)}')
        for _ in range(iteration):
            loss2 = dice_batch(onehot_pred, onehot_target)
        end2 = time.time()
        print(f'for method2 costed time:{(end2 - end1)}')
        for _ in range(iteration):
            loss3 = MetaDice(method='3d')(onehot_pred, onehot_target)
        end3 = time.time()
        print(f'for method3 costed time:{(end3 - end2)}')
    def test_3ddice_loss(self):
        # Smoke test: ThreeDimDiceLoss on softmaxed logits returns (loss, dices).
        criterion = ThreeDimDiceLoss()
        (loss, dices) = criterion(F.softmax(self.predict_logit, 1), class2one_hot(self.target, 3))
        print()
# NOTE(review): the line below is the keyword-argument list of a decorator whose
# name was lost in extraction — it matches the CO*N*CEPT code base's
# `@cython.header(...)` local-variable type declarations. As it stands it is
# not valid Python (no decorator name, and `t='double'` appears twice — the
# duplicates were almost certainly distinct unicode names such as Δt that got
# collapsed). Restore the original decorator before running.
(a='double', autosave_time='double', bottleneck=str, component='Component', components=list, dump_index='Py_ssize_t', dump_time=object, dump_times=list, dump_times_a=set, dump_times_t=set, initial_time_step='Py_ssize_t', interaction_name=str, output_filenames=dict, output_filenames_autosave=dict, recompute_t_max='bint', static_timestepping_func=object, subtiling='Tiling', subtiling_computation_times=object, subtiling_name=str, sync_at_dump='bint', sync_time='double', t='double', tiling='Tiling', tiling_name=str, time_step='Py_ssize_t', time_step_last_sync='Py_ssize_t', time_step_previous='Py_ssize_t', time_step_type=str, t='double', t_autosave='double', t_backup='double', t_begin='double', t_begin_autosave='double', t_min='double', t_max='double', t_print='double', returns='void')
def timeloop():
    """Main simulation time loop (CO*N*CEPT style).

    Sets up initial conditions (possibly from an autosaved snapshot), then
    alternates 'init' and 'full' kick-drift-kick steps until every requested
    dump time has been produced, autosaving periodically and cleaning up the
    autosave directory at the end.

    NOTE(review): this block was damaged in extraction — two `sync_time =`
    statements have lost their right-hand side (presumably the CO*N*CEPT
    infinity constant ထ), and `machine_` is a truncated identifier (presumably
    machine epsilon, machine_ϵ). `B[...]`, `R[...]` and `with unswitch` are
    CO*N*CEPT/pyxpp compile-time constructs, not plain Python. The code is
    reproduced as-is with comments only; restore the lost tokens before use.
    """
    # Nothing to do if no output (neither a-based nor t-based) was requested.
    if (not ([nr for val in output_times['a'].values() for nr in val] + [nr for val in output_times['t'].values() for nr in val])):
        return
    masterprint(f'Domain decomposition: {domain_subdivisions[0]}{domain_subdivisions[1]}{domain_subdivisions[2]}')
    init_time()
    # Resume state from an autosave, if one exists.
    (initial_time_step, t_begin_autosave, t_autosave, output_filenames_autosave) = check_autosave()
    if (initial_time_step == 0):
        masterprint('Setting up initial conditions ...')
        components = get_initial_conditions()
    else:
        masterprint('Setting up simulation from autosaved snapshot ...')
        components = get_initial_conditions(autosave_filename)
    if (not components):
        masterprint('done')
        return
    (dump_times, output_filenames) = prepare_for_output(components, ignore_past_times=(initial_time_step > 0))
    if (initial_time_step > 0):
        # When resuming, reuse the filenames recorded in the autosave and
        # drop dump times that are already in the past.
        output_filenames = output_filenames_autosave
        dump_times_updated = []
        for dump_time in dump_times:
            time_param = dump_time.time_param
            time_value_dump = {'t': dump_time.t, 'a': dump_time.a}[time_param]
            time_value_current = {'t': universals.t, 'a': universals.a}[time_param]
            if (time_value_dump >= time_value_current):
                dump_times_updated.append(dump_time)
        dump_times = dump_times_updated
    # Partition components into active and passive sets.
    components_order[:] = [component.name for component in components]
    passive_components[:] = [component for component in components if (not component.is_active())]
    components = [component for component in components if (component not in passive_components)]
    # Realize linear variables (density, momentum, pressure trace, shear).
    for component in components:
        component.realize_if_linear(0)
        component.realize_if_linear(1, 0)
        component.realize_if_linear(2, 'trace')
        component.realize_if_linear(2, (0, 0))
    masterprint('done')
    # Possibly dump at the very initial time, before stepping.
    if ((dump_times[0].t == universals.t) or (dump_times[0].a == universals.a)):
        dump(components, output_filenames, dump_times[0])
        dump_times.pop(0)
        if (len(dump_times) == 0):
            return
    static_timestepping_func = prepare_static_timestepping()
    initial_fac_times.add(universals.t)
    # Initial base time-step size, clipped so the first dump is not overshot.
    (t_max, bottleneck) = get_base_timestep_size(components, static_timestepping_func)
    t_begin = t_max
    if (t_begin > (dump_times[0].t - universals.t)):
        t_begin = (dump_times[0].t - universals.t)
    t = t_begin
    if (initial_time_step > 0):
        t_begin = t_begin_autosave
        t = t_autosave
    t_min = (0.0001 * t_begin)
    autosave_time = time()
    get_time_step_integrals(0, 0, (components + passive_components))
    initialize_rung_populations(components, t)
    subtiling_computation_times = collections.defaultdict((lambda : collections.defaultdict(float)))
    masterprint('Beginning of main time loop')
    time_step = initial_time_step
    time_step_last_sync = initial_time_step
    time_step_previous = (time_step - 1)
    bottleneck = ''
    time_step_type = 'init'
    # NOTE(review): right-hand side lost in extraction — in CO*N*CEPT this is
    # the infinity constant (ထ), i.e. "no synchronization scheduled yet".
    sync_time =
    recompute_t_max = True
    t_backup = (- 1)
    for (dump_index, dump_time) in enumerate(dump_times):
        while True:
            # Per-time-step bookkeeping, executed once per new step number.
            if (time_step > time_step_previous):
                time_step_previous = time_step
                if (time_step_type == 'init'):
                    for component in components:
                        component.assign_rungs(t, fac_softening)
                # Accumulate per-interaction subtile computation times.
                for component in components:
                    for (subtiling_name, subtiling) in component.tilings.items():
                        match = re.search('(.*) \\(subtiles', subtiling_name)
                        if (not match):
                            continue
                        subtiling_computation_times[component][match.group(1)] += subtiling.computation_time_total
                if (time_step > initial_time_step):
                    print_timestep_footer(components)
                for component in components:
                    for tiling in component.tilings.values():
                        tiling.computation_time_total = 0
                universals.time_step = time_step
                t_print = t
                # NOTE(review): `machine_` is truncated (presumably machine
                # epsilon); the test clamps the printed step to the sync time.
                if (((universals.t + (t * (1 + t_reltol))) + (2 * machine_)) > sync_time):
                    t_print = (sync_time - universals.t)
                print_timestep_heading(time_step, t_print, (bottleneck if (time_step_type == 'init') else ''), components)
            if (time_step_type == 'init'):
                # 'init' half step: long-range kick, optional particle
                # reordering, then short-range kick.
                time_step_type = 'full'
                kick_long(components, t, sync_time, 'init')
                if B[particle_reordering]:
                    for component in components:
                        if (not subtiling_computation_times[component]):
                            continue
                        if B[(particle_reordering == 'deterministic')]:
                            # Pick the first subtiled interaction found.
                            for subtiling_name in component.tilings:
                                match = re.search('(.*) \\(subtiles', subtiling_name)
                                if (not match):
                                    continue
                                interaction_name = match.group(1)
                                break
                        else:
                            # Pick the most expensive interaction so far.
                            interaction_name = collections.Counter(subtiling_computation_times[component]).most_common(1)[0][0]
                        tiling_name = f'{interaction_name} (tiles)'
                        subtiling_name = f'{interaction_name} (subtiles)'
                        component.tile_sort(tiling_name, subtiling_name)
                        subtiling_computation_times[component].clear()
                kick_short(components, t)
                # Schedule a sync at the dump time when it is within reach.
                if ((dump_time.t - universals.t) <= (1.5 * t)):
                    sync_time = dump_time.t
                    continue
                (t_max, bottleneck) = get_base_timestep_size(components, static_timestepping_func)
                if (t > t_max):
                    sync_time = (universals.t + (0.5 * t))
                    recompute_t_max = False
                    continue
            elif (time_step_type == 'full'):
                # 'full' step: drift, short-range drift-kick, long-range kick,
                # advancing universals.t in two half steps.
                drift_fluids(components, t, sync_time)
                driftkick_short(components, t, sync_time)
                universals.t += (0.5 * t)
                if (((universals.t + (t_reltol * t)) + (2 * machine_)) > sync_time):
                    universals.t = sync_time
                universals.a = scale_factor(universals.t)
                kick_long(components, t, sync_time, 'full')
                universals.t += (0.5 * t)
                if (((universals.t + (t_reltol * t)) + (2 * machine_)) > sync_time):
                    universals.t = sync_time
                universals.a = scale_factor(universals.t)
                if (universals.t == sync_time):
                    # Synchronization point reached: reset to 'init' stepping,
                    # refresh the step size and possibly autosave/dump.
                    time_step_type = 'init'
                    # NOTE(review): right-hand side lost in extraction — in
                    # CO*N*CEPT this is the infinity constant (ထ).
                    sync_time =
                    if (t_backup != (- 1)):
                        if (t < t_backup):
                            t = t_backup
                        t_backup = (- 1)
                    if recompute_t_max:
                        (t_max, bottleneck) = get_base_timestep_size(components, static_timestepping_func)
                    recompute_t_max = True
                    (t, bottleneck) = update_base_timestep_size(t, t_min, t_max, bottleneck, time_step, time_step_last_sync, tolerate_danger=(bottleneck == bottleneck_static_timestepping))
                    time_step += 1
                    time_step_last_sync = time_step
                    with unswitch:
                        if (autosave_interval > 0):
                            if bcast(((time() - autosave_time) > R[(autosave_interval / units.s)])):
                                autosave(components, time_step, t_begin, t, output_filenames)
                                autosave_time = time()
                    if (universals.t == dump_time.t):
                        if dump(components, output_filenames, dump_time, t):
                            # The dump altered the components; refresh step size
                            # conservatively (no increase, tolerate danger).
                            initial_fac_times.add(universals.t)
                            (t_max, bottleneck) = get_base_timestep_size(components, static_timestepping_func)
                            (t, bottleneck) = update_base_timestep_size(t, t_min, t_max, bottleneck, allow_increase=False, tolerate_danger=True)
                        if (dump_index != (len(dump_times) - 1)):
                            # Do not overshoot the next dump time.
                            t_max = (dump_times[(dump_index + 1)].t - universals.t)
                            if (t > t_max):
                                t_backup = t
                                t = t_max
                        break
                    # Synchronized but not at the dump time yet: clamp the step
                    # so the upcoming dump is not overshot.
                    t_max = (dump_time.t - universals.t)
                    if (t > t_max):
                        t_backup = t
                        t = t_max
                    continue
                time_step += 1
                # Schedule the next synchronization: at the dump when close,
                # otherwise when the step size must shrink or may grow.
                if ((dump_time.t - universals.t) <= (1.5 * t)):
                    sync_time = dump_time.t
                    continue
                (t_max, bottleneck) = get_base_timestep_size(components, static_timestepping_func)
                if (t > t_max):
                    sync_time = (universals.t + t)
                    recompute_t_max = False
                    continue
                if ((t_max > (t_increase_min_factor * t)) and (((time_step + 1) - time_step_last_sync) >= t_period)):
                    sync_time = (universals.t + t)
                    recompute_t_max = False
                    continue
    # All dumps done: print the final footer/heading and clean up autosaves.
    print_timestep_footer(components)
    print_timestep_heading(time_step, t, bottleneck, components, end=True)
    if (master and os.path.isdir(autosave_subdir)):
        masterprint('Removing autosave ...')
        shutil.rmtree(autosave_subdir)
        if (not os.listdir(output_dirs['autosave'])):
            shutil.rmtree(output_dirs['autosave'])
        masterprint('done')
class MetadataCatalog():
    """Global registry mapping a dataset name to its ``Metadata`` object.

    All state lives in the class-level ``_NAME_TO_META`` dict, so the methods
    are declared ``@staticmethod``: they take no instance and are safe to call
    both as ``MetadataCatalog.get(...)`` and on an instance. (Previously they
    were plain functions, so calling them on an instance would have passed the
    instance as the ``name`` argument.)
    """
    _NAME_TO_META = {}

    @staticmethod
    def get(name):
        """Return the metadata registered under ``name``.

        Creates and registers an empty ``Metadata(name=name)`` on first use,
        so repeated calls with the same name return the same object.
        """
        assert len(name)
        if (name in MetadataCatalog._NAME_TO_META):
            return MetadataCatalog._NAME_TO_META[name]
        else:
            m = MetadataCatalog._NAME_TO_META[name] = Metadata(name=name)
            return m

    @staticmethod
    def list():
        """Return the list of registered dataset names."""
        return list(MetadataCatalog._NAME_TO_META.keys())

    @staticmethod
    def clear():
        """Remove every entry from the catalog."""
        MetadataCatalog._NAME_TO_META.clear()

    @staticmethod
    def remove(name):
        """Remove the entry for ``name``; raises KeyError if absent."""
        MetadataCatalog._NAME_TO_META.pop(name)
class FlavaPreTrainedModel(metaclass=DummyObject):
    """Auto-generated placeholder used when PyTorch is not installed.

    Instantiating it calls ``requires_backends``, which raises an informative
    ImportError telling the user to install torch.
    """
    _backends = ['torch']  # backends required by the real implementation
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_uniform_density(x_shape):
    """Build a FlowDensity with a uniform prior pushed through an inverse
    logit bijection, for inputs of shape ``x_shape``."""
    bijection = LogitBijection(x_shape=x_shape).inverse()
    prior = UniformDensity(x_shape)
    return FlowDensity(bijection=bijection, prior=prior)
def weight_reduce_loss(loss: Tensor, weight: Optional[Tensor]=None, reduction: str='mean', avg_factor: Optional[float]=None) -> Tensor:
    """Apply element-wise weights and reduce a loss tensor.

    Args:
        loss: element-wise loss values.
        weight: optional element-wise weights, multiplied into the loss.
        reduction: 'none', 'mean' or 'sum'.
        avg_factor: custom normalizer for 'mean'; incompatible with 'sum'.

    Returns:
        The (optionally weighted) reduced loss.

    Raises:
        ValueError: if ``avg_factor`` is combined with ``reduction='sum'``.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        # No custom normalizer: defer to the standard reduction helper.
        return reduce_loss(loss, reduction)
    if reduction == 'mean':
        # eps guards against division by a zero avg_factor.
        eps = torch.finfo(torch.float32).eps
        return loss.sum() / (avg_factor + eps)
    if reduction == 'none':
        return loss
    raise ValueError('avg_factor can not be used with reduction="sum"')
class LightGCN(BasicModel):
    """LightGCN collaborative-filtering model.

    Propagates user/item embeddings over the (sparse) user-item graph for
    ``n_layers`` rounds without non-linearities and averages the per-layer
    embeddings, following the LightGCN paper (He et al., SIGIR 2020).
    """
    def __init__(self, config: dict, dataset: BasicDataset):
        super(LightGCN, self).__init__()
        self.config = config
        self.dataset: dataloader.BasicDataset = dataset
        self.__init_weight()
    def __init_weight(self):
        """Create embeddings and load the normalized sparse adjacency graph."""
        self.num_users = self.dataset.n_users
        self.num_items = self.dataset.m_items
        self.latent_dim = self.config['latent_dim_rec']
        self.n_layers = self.config['lightGCN_n_layers']
        self.keep_prob = self.config['keep_prob']
        # A_split: whether the adjacency matrix is stored as a list of folds.
        self.A_split = self.config['A_split']
        self.embedding_user = torch.nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.latent_dim)
        self.embedding_item = torch.nn.Embedding(num_embeddings=self.num_items, embedding_dim=self.latent_dim)
        if (self.config['pretrain'] == 0):
            # Random init (paper uses normal with std 0.1).
            nn.init.normal_(self.embedding_user.weight, std=0.1)
            nn.init.normal_(self.embedding_item.weight, std=0.1)
            world.cprint('use NORMAL distribution initilizer')
        else:
            # Warm-start from precomputed embeddings supplied in the config.
            self.embedding_user.weight.data.copy_(torch.from_numpy(self.config['user_emb']))
            self.embedding_item.weight.data.copy_(torch.from_numpy(self.config['item_emb']))
            print('use pretarined data')
        self.f = nn.Sigmoid()
        self.Graph = self.dataset.getSparseGraph()
        print(f"lgn is already to go(dropout:{self.config['dropout']})")
    def __dropout_x(self, x, keep_prob):
        """Randomly keep each nonzero of sparse tensor ``x`` with prob
        ``keep_prob``, rescaling survivors by 1/keep_prob."""
        size = x.size()
        index = x.indices().t()
        values = x.values()
        # rand + keep_prob >= 1 with probability keep_prob -> boolean mask.
        random_index = (torch.rand(len(values)) + keep_prob)
        random_index = random_index.int().bool()
        index = index[random_index]
        values = (values[random_index] / keep_prob)
        g = torch.sparse.FloatTensor(index.t(), values, size)
        return g
    def __dropout(self, keep_prob):
        """Apply edge dropout to the graph (each fold separately if split)."""
        if self.A_split:
            graph = []
            for g in self.Graph:
                graph.append(self.__dropout_x(g, keep_prob))
        else:
            graph = self.__dropout_x(self.Graph, keep_prob)
        return graph
    def computer(self):
        """Propagate embeddings over the graph; return (users, items) as the
        mean of the 0..n_layers layer outputs."""
        users_emb = self.embedding_user.weight
        items_emb = self.embedding_item.weight
        all_emb = torch.cat([users_emb, items_emb])
        embs = [all_emb]
        if self.config['dropout']:
            # Edge dropout only while training.
            if self.training:
                g_droped = self.__dropout(self.keep_prob)
            else:
                g_droped = self.Graph
        else:
            g_droped = self.Graph
        for layer in range(self.n_layers):
            if self.A_split:
                # Multiply each fold and stack the partial results.
                temp_emb = []
                for f in range(len(g_droped)):
                    temp_emb.append(torch.sparse.mm(g_droped[f], all_emb))
                side_emb = torch.cat(temp_emb, dim=0)
                all_emb = side_emb
            else:
                all_emb = torch.sparse.mm(g_droped, all_emb)
            embs.append(all_emb)
        embs = torch.stack(embs, dim=1)
        light_out = torch.mean(embs, dim=1)
        (users, items) = torch.split(light_out, [self.num_users, self.num_items])
        return (users, items)
    def getUsersRating(self, users):
        """Sigmoid-squashed score of every item for the given users."""
        (all_users, all_items) = self.computer()
        users_emb = all_users[users.long()]
        items_emb = all_items
        rating = self.f(torch.matmul(users_emb, items_emb.t()))
        return rating
    def getEmbedding(self, users, pos_items, neg_items):
        """Return propagated and raw (ego) embeddings for a BPR triplet."""
        (all_users, all_items) = self.computer()
        users_emb = all_users[users]
        pos_emb = all_items[pos_items]
        neg_emb = all_items[neg_items]
        users_emb_ego = self.embedding_user(users)
        pos_emb_ego = self.embedding_item(pos_items)
        neg_emb_ego = self.embedding_item(neg_items)
        return (users_emb, pos_emb, neg_emb, users_emb_ego, pos_emb_ego, neg_emb_ego)
    def bpr_loss(self, users, pos, neg):
        """BPR loss plus L2 regularization on the raw (layer-0) embeddings."""
        (users_emb, pos_emb, neg_emb, userEmb0, posEmb0, negEmb0) = self.getEmbedding(users.long(), pos.long(), neg.long())
        reg_loss = (((1 / 2) * ((userEmb0.norm(2).pow(2) + posEmb0.norm(2).pow(2)) + negEmb0.norm(2).pow(2))) / float(len(users)))
        pos_scores = torch.mul(users_emb, pos_emb)
        pos_scores = torch.sum(pos_scores, dim=1)
        neg_scores = torch.mul(users_emb, neg_emb)
        neg_scores = torch.sum(neg_scores, dim=1)
        # softplus(neg - pos) == -log(sigmoid(pos - neg)), the BPR objective.
        loss = torch.mean(torch.nn.functional.softplus((neg_scores - pos_scores)))
        return (loss, reg_loss)
    def forward(self, users, items):
        """Inner-product score for each (user, item) pair."""
        (all_users, all_items) = self.computer()
        users_emb = all_users[users]
        items_emb = all_items[items]
        inner_pro = torch.mul(users_emb, items_emb)
        gamma = torch.sum(inner_pro, dim=1)
        return gamma
def subquery_range(current, pos, tokens, in_quote=False):
    """Track the token range (start, end) of the subquery enclosing ``pos``.

    Behavior by case:
      * at a ``SELECT`` (outside quotes) while a range is tracked: the
        subquery runs from here to the tracked end;
      * at an opening ``(`` (outside quotes): scan forward to the matching
        ``)``, respecting quotes via ``update_quotes``;
      * at the tracked end position: scan backward to the matching ``(`` and
        forward to the next ``)``;
      * otherwise: return ``current`` unchanged.

    NOTE: the depth bookkeeping runs once per *character* of a token (as in
    the original), so tokens containing several parens are counted per char.
    """
    if current is not None and tokens[pos] == 'SELECT' and not in_quote:
        return (pos, current[1])
    if tokens[pos] == '(' and not in_quote:
        # Forward scan for the matching closing paren.
        start, end = pos, pos + 1
        depth = 1
        sq = dq = False
        while depth > 0:
            for ch in tokens[end]:
                sq, dq = update_quotes(ch, sq, dq)
                if not (sq or dq):
                    if '(' in tokens[end]:
                        depth += 1
                    elif ')' in tokens[end]:
                        depth -= 1
            end += 1
        return (start, end)
    if current is not None and pos == current[1]:
        # Backward scan for the opening paren, then forward to the next ')'.
        start, end = pos, pos + 1
        depth = 1
        sq = dq = False
        while depth > 0 and start > 0:
            for ch in tokens[start]:
                sq, dq = update_quotes(ch, sq, dq)
                if not (sq or dq):
                    if '(' in tokens[start]:
                        depth -= 1
                    elif ')' in tokens[start]:
                        depth += 1
            start -= 1
        if start != 0:
            start += 1
        while end < len(tokens) and tokens[end] != ')':
            end += 1
        if end != len(tokens):
            end += 1
        return (start, end)
    return current
def calculate_ap_py(results):
    """Compute average precision for face detections over ``results``.

    Each result yields (gt boxes, predicted boxes with a trailing score,
    gt count) via ``get_bboxes_scores``. Predictions are greedily matched to
    unmatched ground truths at IoU >= 0.5, grouped by score, and AP is the
    step-wise precision * delta-recall sum over descending score thresholds.
    """
    def _iou(rect1, rect2):
        # Intersection-over-union of two xyxy rectangles; 0 if disjoint.
        left = max(rect1[0], rect2[0])
        top = max(rect1[1], rect2[1])
        right = min(rect1[2], rect2[2])
        bottom = min(rect1[3], rect2[3])
        if right <= left or bottom <= top:
            return 0
        intersection = (right - left) * (bottom - top)
        area1 = (rect1[2] - rect1[0]) * (rect1[3] - rect1[1])
        area2 = (rect2[2] - rect2[0]) * (rect2[3] - rect2[1])
        # Guard against degenerate boxes inflating the intersection.
        intersection = min(intersection, area1, area2)
        union = area1 + area2 - intersection
        return float(intersection) / union

    def _same_face(face_gt, face_pred):
        return _iou(face_gt, face_pred) >= 0.5

    def _eval_image(faces_gt, faces_pred):
        # Greedy one-to-one matching; returns a truthiness flag per prediction.
        matched_gt = [False] * len(faces_gt)
        flags = []
        for pred in faces_pred:
            hit = False
            for j, gt in enumerate(faces_gt):
                if not matched_gt[j]:
                    hit = _same_face(gt, pred)
                    if hit:
                        matched_gt[j] = True
                        break
            flags.append(hit)
        return flags

    score_hits = {}
    total_gt = 0
    for entry in results:
        gt_boxes, pred_boxes, n_gt = get_bboxes_scores(entry)
        total_gt += n_gt
        flags = _eval_image(gt_boxes, pred_boxes)
        for flag, box in zip(flags, pred_boxes):
            # The detection score is the last element of the box row.
            score_hits.setdefault(box[-1], []).append(int(flag))

    # Sweep score thresholds from high to low, accumulating precision/recall.
    tp = 0
    n_pred = 0
    precisions = []
    recalls = []
    for score in sorted(score_hits, reverse=True):
        hits = score_hits[score]
        n_pred += len(hits)
        tp += sum(hits)
        precisions.append(float(tp) / n_pred)
        recalls.append(float(tp) / total_gt)
    ap = precisions[0] * recalls[0]
    for i in range(1, len(precisions)):
        ap += precisions[i] * (recalls[i] - recalls[i - 1])
    return ap
def ObservationModel(symbolic, observation_size, belief_size, state_size, embedding_size, activation_function='relu'):
    """Factory for PlaNet-style observation decoders.

    Returns a SymbolicObservationModel (MLP) when ``symbolic`` is truthy,
    otherwise a VisualObservationModel (deconv), which only needs the channel
    count ``observation_size[1]``.
    """
    if symbolic:
        return SymbolicObservationModel(observation_size, belief_size, state_size, embedding_size, activation_function)
    return VisualObservationModel(belief_size, state_size, embedding_size, observation_size[1], activation_function)
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode):
    """Convert ``InputExample``s into BERT-style ``InputFeatures``.

    Builds ``[CLS] tokens_a [SEP]`` (plus ``tokens_b [SEP]`` for pairs),
    pads everything to ``max_seq_length``, and maps labels to ids
    ('classification') or floats ('regression').
    """
    label_map = {lbl: idx for idx, lbl in enumerate(label_list)}
    features = []
    for ex_index, example in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info('Writing example %d of %d' % (ex_index, len(examples)))
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Reserve room for [CLS], [SEP], [SEP].
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        elif len(tokens_a) > max_seq_length - 2:
            # Reserve room for [CLS] and [SEP].
            tokens_a = tokens_a[:max_seq_length - 2]
        tokens = ['[CLS]'] + tokens_a + ['[SEP]']
        segment_ids = [0] * len(tokens)
        if tokens_b:
            tokens = tokens + tokens_b + ['[SEP]']
            segment_ids = segment_ids + [1] * (len(tokens_b) + 1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # Mask 1 for real tokens, then zero-pad ids/mask/segments to length.
        input_mask = [1] * len(input_ids)
        pad = [0] * (max_seq_length - len(input_ids))
        input_ids = input_ids + pad
        input_mask = input_mask + pad
        segment_ids = segment_ids + pad
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        if output_mode == 'classification':
            label_id = label_map[example.label]
        elif output_mode == 'regression':
            label_id = float(example.label)
        else:
            raise KeyError(output_mode)
        # Log the first few examples for debugging.
        if ex_index < 5:
            logger.info('*** Example ***')
            logger.info('guid: %s' % example.guid)
            logger.info('tokens: %s' % ' '.join([str(x) for x in tokens]))
            logger.info('input_ids: %s' % ' '.join([str(x) for x in input_ids]))
            logger.info('input_mask: %s' % ' '.join([str(x) for x in input_mask]))
            logger.info('segment_ids: %s' % ' '.join([str(x) for x in segment_ids]))
            logger.info('label: %s (id = %d)' % (example.label, label_id))
        features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id))
    return features
def _test_feature_extractors(self, extractors, overwrite_cfgs, overwrite_in_channels):
    """Smoke-test every registered feature-extractor builder.

    For each builder: construct it (with a per-name config/in_channels
    override when given), verify it exposes ``out_channels``, and run a tiny
    forward pass checking the pooled output shape.
    """
    self.assertGreater(len(extractors), 0)
    default_in_channels = 64
    for name, builder in extractors.items():
        print('Testing {}...'.format(name))
        if name in overwrite_cfgs:
            cfg = load_config(overwrite_cfgs[name])
        else:
            cfg = copy.deepcopy(g_cfg)
        in_channels = overwrite_in_channels.get(name, default_in_channels)
        fe = builder(cfg, in_channels)
        self.assertIsNotNone(
            getattr(fe, 'out_channels', None),
            'Need to provide out_channels for feature extractor {}'.format(name),
        )
        batch, height, width = 2, 24, 32
        features = torch.rand([batch, in_channels, height, width], dtype=torch.float32)
        bboxes = [[1, 1, 10, 10], [5, 5, 8, 8], [2, 2, 3, 4]]
        img_size = [384, 512]
        box_list = BoxList(bboxes, img_size, 'xyxy')
        out = fe([features], [box_list] * batch)
        self.assertEqual(out.shape[:2], torch.Size([batch * len(bboxes), fe.out_channels]))
def imagenet_det_classes() -> list:
    """Return the 200 ImageNet-DET class names, in canonical label order."""
    return ['accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito', 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle', 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker', 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew', 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly', 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig', 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog', 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart', 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim', 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse', 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle', 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can', 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace', 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume', 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine', 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse', 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator', 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler', 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver', 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', 'squirrel', 'starfish', 
            'stethoscope', 'stove', 'strainer', 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine', 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet', 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin', 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', 'whale', 'wine_bottle', 'zebra']
class TransformerLanguageModelConfig(FairseqDataclass):
    """Dataclass of all hyper-parameters for the fairseq Transformer LM.

    Each field's CLI help text lives in its ``metadata``. Fields assigned via
    ``II(...)`` are interpolated from other parts of the top-level config
    (task/common) rather than set directly.
    """
    activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(default='relu', metadata={'help': 'activation function to use'})
    dropout: float = field(default=0.1, metadata={'help': 'dropout probability'})
    attention_dropout: float = field(default=0.0, metadata={'help': 'dropout probability for attention weights'})
    activation_dropout: float = field(default=0.0, metadata={'help': 'dropout probability after activation in FFN.'})
    relu_dropout: float = field(default=0.0, metadata={'help': 'dropout probability after activation in FFN.'})
    decoder_embed_dim: int = field(default=512, metadata={'help': 'decoder embedding dimension'})
    decoder_output_dim: int = field(default=512, metadata={'help': 'decoder output dimension'})
    decoder_input_dim: int = field(default=512, metadata={'help': 'decoder input dimension'})
    decoder_ffn_embed_dim: int = field(default=2048, metadata={'help': 'decoder embedding dimension for FFN'})
    decoder_layers: int = field(default=6, metadata={'help': 'num decoder layers'})
    decoder_attention_heads: int = field(default=8, metadata={'help': 'num decoder attention heads'})
    decoder_normalize_before: bool = field(default=False, metadata={'help': 'apply layernorm before each decoder block'})
    no_decoder_final_norm: bool = field(default=False, metadata={'help': "don't add an extra layernorm after the last decoder block"})
    adaptive_softmax_cutoff: Optional[str] = field(default=None, metadata={'help': 'comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion'})
    adaptive_softmax_dropout: float = field(default=0, metadata={'help': 'sets adaptive softmax dropout for the tail projections'})
    adaptive_softmax_factor: float = field(default=4, metadata={'help': 'adaptive input factor'})
    no_token_positional_embeddings: bool = field(default=False, metadata={'help': 'if set, disables positional embeddings (outside self attention)'})
    share_decoder_input_output_embed: bool = field(default=False, metadata={'help': 'share decoder input and output embeddings'})
    character_embeddings: bool = field(default=False, metadata={'help': 'if set, uses character embedding convolutions to produce token embeddings'})
    character_filters: str = field(default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]', metadata={'help': 'size of character embeddings'})
    character_embedding_dim: int = field(default=4, metadata={'help': 'size of character embeddings'})
    char_embedder_highway_layers: int = field(default=2, metadata={'help': 'number of highway layers for character token embeddder'})
    adaptive_input: bool = field(default=False, metadata={'help': 'if set, uses adaptive input'})
    adaptive_input_factor: float = field(default=4, metadata={'help': 'adaptive input factor'})
    adaptive_input_cutoff: Optional[str] = field(default=None, metadata={'help': 'comma separated list of adaptive input cutoff points.'})
    tie_adaptive_weights: bool = field(default=False, metadata={'help': 'if set, ties the weights of adaptive softmax and adaptive input'})
    tie_adaptive_proj: bool = field(default=False, metadata={'help': 'if set, ties the projection weights of adaptive softmax and adaptive input'})
    decoder_learned_pos: bool = field(default=False, metadata={'help': 'use learned positional embeddings in the decoder'})
    layernorm_embedding: bool = field(default=False, metadata={'help': 'add layernorm to embedding'})
    no_scale_embedding: bool = field(default=False, metadata={'help': 'if True, dont scale embeddings'})
    checkpoint_activations: bool = field(default=False, metadata={'help': 'checkpoint activations at each layer'})
    offload_activations: bool = field(default=False, metadata={'help': 'move checkpointed activations to CPU after they are used.'})
    decoder_layerdrop: float = field(default=0.0, metadata={'help': 'LayerDrop probability for decoder'})
    decoder_layers_to_keep: Optional[str] = field(default=None, metadata={'help': 'which layers to *keep* when pruning as a comma-separated list'})
    quant_noise_pq: float = field(default=0.0, metadata={'help': 'iterative PQ quantization noise at training time'})
    quant_noise_pq_block_size: int = field(default=8, metadata={'help': 'block size of quantization noise at training time'})
    quant_noise_scalar: float = field(default=0.0, metadata={'help': 'scalar quantization noise and scalar quantization at training time'})
    min_params_to_wrap: int = field(default=DEFAULT_MIN_PARAMS_TO_WRAP, metadata={'help': 'minimum number of params for a layer to be wrapped with FSDP() when training with --ddp-backend=fully_sharded. Smaller values will improve memory efficiency, but may make torch.distributed communication less efficient due to smaller input sizes. This option is set to 0 (i.e., always wrap) when --checkpoint-activations or --offload-activations are passed.'})
    base_layers: Optional[int] = field(default=0, metadata={'help': 'number of BASE layers in total'})
    base_sublayers: Optional[int] = field(default=1, metadata={'help': 'number of sublayers in each BASE layer'})
    base_shuffle: Optional[int] = field(default=1, metadata={'help': 'shuffle tokens between workers before computing assignment'})
    scale_fc: Optional[bool] = field(default=False, metadata={'help': 'Insert LayerNorm between fully connected layers'})
    scale_attn: Optional[bool] = field(default=False, metadata={'help': 'Insert LayerNorm after attention'})
    scale_heads: Optional[bool] = field(default=False, metadata={'help': 'Learn a scale coefficient for each attention head'})
    scale_resids: Optional[bool] = field(default=False, metadata={'help': 'Learn a scale coefficient for each residual connection'})
    # The following are interpolated from the task/common config sections.
    add_bos_token: bool = II('task.add_bos_token')
    tokens_per_sample: int = II('task.tokens_per_sample')
    max_target_positions: Optional[int] = II('task.max_target_positions')
    tpu: bool = II('common.tpu')
class Whole_Slide_Bag_FP_SAVE(Dataset):
    """Dataset yielding RGB patches read on the fly from a whole-slide image.

    Patch coordinates are loaded from the ``coords`` dataset of an HDF5
    file; each ``__getitem__`` reads the corresponding region from ``wsi``.
    """

    def __init__(self, file_path, wsi, pretrained=False, custom_transforms=None, custom_downsample=1, target_patch_size=(- 1), select_idx=None):
        """
        Args:
            file_path: path to the HDF5 file holding the 'coords' dataset.
            wsi: open slide object exposing ``read_region`` (e.g. OpenSlide).
            pretrained: forwarded to ``simple_transforms`` (normalization choice).
            custom_transforms: accepted for interface compatibility but unused
                (``simple_transforms`` is always applied).
            custom_downsample: integer shrink factor used when
                ``target_patch_size`` is not given.
            target_patch_size: explicit output edge length; <= 0 disables it.
            select_idx: optional index array restricting which coords are used.
        """
        self.pretrained = pretrained
        self.wsi = wsi
        self.roi_transforms = simple_transforms(pretrained=pretrained)
        self.file_path = file_path
        # Read attributes and coordinates in a single pass; the original
        # implementation opened the file twice and computed a length that
        # was immediately overwritten.
        with h5py.File(self.file_path, 'r') as f:
            dset = f['coords']
            self.patch_level = dset.attrs['patch_level']
            self.patch_size = dset.attrs['patch_size']
            coords = np.array(dset)
        if target_patch_size > 0:
            self.target_patch_size = (target_patch_size,) * 2
        elif custom_downsample > 1:
            self.target_patch_size = ((self.patch_size // custom_downsample),) * 2
        else:
            self.target_patch_size = None
        # Optionally keep only a subset of the stored coordinates.
        self.coords_new = coords[select_idx] if select_idx is not None else coords
        self.length = self.coords_new.shape[0]

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        """Return ``(patch_tensor[1, C, H, W], coord)`` for the idx-th coordinate."""
        coord = np.asarray(self.coords_new[idx])
        img = self.wsi.read_region(coord, self.patch_level, (self.patch_size, self.patch_size)).convert('RGB')
        if self.target_patch_size is not None:
            img = img.resize(self.target_patch_size)
        img = self.roi_transforms(img).unsqueeze(0)
        return (img, coord)
class DotDict(dict):
    """Dictionary subclass allowing attribute-style access (``d.key``).

    Nested dicts are recursively converted; a one-element list of dicts is
    unwrapped, and a longer list consisting only of dicts is merged (later
    entries win). Missing keys yield ``None`` instead of raising.
    """

    def __init__(self, value=None):
        if value is None:
            return
        if not isinstance(value, dict):
            raise TypeError('expected dict')
        for k, v in value.items():
            self[k] = v  # routed through __setitem__ for coercion

    def __getitem__(self, key):
        # Absent keys resolve to None rather than KeyError.
        return self.get(key, None)

    def __setitem__(self, key, value):
        coerced = value
        if isinstance(coerced, dict) and not isinstance(coerced, DotDict):
            coerced = DotDict(coerced)
        if isinstance(coerced, list):
            if len(coerced) == 1 and isinstance(coerced[0], dict):
                coerced = DotDict(coerced[0])
            elif len(coerced) > 1 and all(isinstance(v, dict) for v in coerced):
                merged = {}
                for d in coerced:
                    merged.update(d)
                coerced = DotDict(merged)
        super().__setitem__(key, coerced)

    def __getstate__(self):
        # Pickle support: state is the instance __dict__.
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)

    __setattr__ = __setitem__
    __getattr__ = __getitem__
# NOTE(review): '_module()' below looks like the mangled remains of a registry
# decorator (e.g. '@DETECTORS.register_module()') — confirm against the
# original source before relying on registration side effects.
_module()
class TextSnake(TextDetectorMixin, SingleStageTextDetector):
    """TextSnake text detector: thin wrapper that wires the single-stage
    detector pipeline together with the score-visualization mixin."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, show_score=False, init_cfg=None):
        # Both bases are initialized explicitly (no cooperative super() chain):
        # the detector first, then the mixin, which only stores show_score.
        SingleStageTextDetector.__init__(self, backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg)
        TextDetectorMixin.__init__(self, show_score)
class Solution(object):
    """Wrapper around one solution of a polynomial system.

    The solution is kept as a dictionary. When constructed from a dict, the
    diagnostic fields ('err', 'rco', 'res'), the multiplicity 'm' and the
    continuation parameter 't' are filled with defaults when absent.
    """

    def __init__(self, sol):
        """Accept either a solution string (parsed) or a ready-made dict."""
        if isinstance(sol, str):
            self.dict = strsol2dict(sol)
        elif isinstance(sol, dict):
            self.dict = sol
            defaults = (('err', 0.0), ('rco', 1.0), ('res', 0.0), ('m', 1), ('t', complex(0.0)))
            for key, default in defaults:
                self.dict.setdefault(key, default)
        else:
            print('Wrong argument type, provide string or dict.')

    def __str__(self):
        d = self.dict
        return make_solution(variables(d), numerals(d), err=d['err'], rco=d['rco'], res=d['res'], tval=d['t'], multiplicity=d['m'])

    def __repr__(self):
        return self.__str__()

    def coordinates(self):
        """Return (variable names, numeric values)."""
        return (variables(self.dict), numerals(self.dict))

    def dictionary(self):
        """Return the underlying dictionary."""
        return self.dict

    def variables(self):
        """Return the variable names of the solution."""
        return variables(self.dict)

    def numerals(self):
        """Return the numeric coordinate values of the solution."""
        return numerals(self.dict)

    def diagnostics(self):
        """Return the (err, rco, res) diagnostics triple."""
        d = self.dict
        return (d['err'], d['rco'], d['res'])

    def multiplicity(self):
        """Return the multiplicity of the solution."""
        return self.dict['m']

    def timevalue(self):
        """Return the value of the continuation parameter t."""
        return self.dict['t']
def build_freeimage(args):
    """Download, patch and build/install the FreeImage 3.18.0 library.

    On Windows the prebuilt distribution is unpacked and its headers/libs are
    copied; on macOS/Linux the sources are patched (Makefile and two MIPS DSP
    inline-asm files) and built with make.
    """
    path = os.path.join(args.build_path, 'freeimage')
    # Already unpacked/built previously: nothing to do.
    if os.path.exists(path):
        return
    if PLATFORM_IS_WINDOWS:
        # NOTE(review): the URL string literal below is truncated
        # (unterminated quote) — restore the original FreeImage 3.18.0
        # Win32/Win64 download URL; the MD5 passed below should match it.
        url = '
        archive_path = os.path.join(args.download_path, 'FreeImage3180Win32Win64.zip')
        download_zipfile(url, archive_path, args.build_path, '393d3df75b14cbcb4887da1c395596e2')
        shutil.move(os.path.join(args.build_path, 'FreeImage'), path)
        copy_file_if_not_exists(os.path.join(path, 'Dist/x64/FreeImage.h'), os.path.join(args.install_path, 'include/FreeImage.h'))
        copy_file_if_not_exists(os.path.join(path, 'Dist/x64/FreeImage.lib'), os.path.join(args.install_path, 'lib/FreeImage.lib'))
        copy_file_if_not_exists(os.path.join(path, 'Dist/x64/FreeImage.dll'), os.path.join(args.install_path, 'lib/FreeImage.dll'))
    else:
        # NOTE(review): truncated URL literal here as well — restore the
        # FreeImage 3.18.0 source distribution URL.
        url = '
        archive_path = os.path.join(args.download_path, 'FreeImage3180.zip')
        download_zipfile(url, archive_path, args.build_path, 'f8ba138a3be233a3eed9c456e42e2578')
        shutil.move(os.path.join(args.build_path, 'FreeImage'), path)
        if PLATFORM_IS_MAC:
            # Patch the Makefile: skip the .so copy and only build the
            # static library target on macOS.
            with fileinput.FileInput(os.path.join(path, 'Makefile.gnu'), inplace=True, backup='.bak') as fid:
                for line in fid:
                    if ('cp *.so Dist/' in line):
                        continue
                    if ('FreeImage: $(STATICLIB) $(SHAREDLIB)' in line):
                        line = 'FreeImage: $(STATICLIB)'
                    print(line, end='')
        elif PLATFORM_IS_LINUX:
            # Fix malformed inline-assembly operand spacing in the bundled
            # LibWebP MIPS DSP sources so newer compilers accept them.
            with fileinput.FileInput(os.path.join(path, 'Source/LibWebP/src/dsp/upsampling_mips_dsp_r2.c'), inplace=True, backup='.bak') as fid:
                for (i, line) in enumerate(fid):
                    if ((i >= 36) and (i <= 44)):
                        line = line.replace('%["', '%[" ')
                        line = line.replace('"],', ' "],')
                    print(line, end='')
            with fileinput.FileInput(os.path.join(path, 'Source/LibWebP/src/dsp/yuv_mips_dsp_r2.c'), inplace=True, backup='.bak') as fid:
                for (i, line) in enumerate(fid):
                    if ((i >= 56) and (i <= 58)):
                        line = line.replace('"#', '"# ')
                        line = line.replace('"(%', ' "(%')
                    print(line, end='')
        subprocess.call(['make', '-f', 'Makefile.gnu', '-j{}'.format(multiprocessing.cpu_count())], cwd=path)
        copy_file_if_not_exists(os.path.join(path, 'Source/FreeImage.h'), os.path.join(args.install_path, 'include/FreeImage.h'))
        copy_file_if_not_exists(os.path.join(path, 'libfreeimage.a'), os.path.join(args.install_path, 'lib/libfreeimage.a'))
def test_add_config_non_dict_raises(ing):
    """add_config must reject non-dict arguments with TypeError."""
    for bad_value in (12, True):
        with pytest.raises(TypeError):
            ing.add_config(bad_value)
# NOTE(review): the six bare lines below look like mangled pytest marks /
# parametrize decorators (e.g. '@mltest.parametrize.ml_cpu_only',
# '@pytest.mark.parametrize(...)') — confirm against the original test file.
.ml_cpu_only
_dtypes
_feature_dtypes
_functions
_functions
.parametrize('empty_point_set', [False])
def test_voxel_pooling_grad(ml, pos_dtype, feat_dtype, position_fn, feature_fn, empty_point_set):
    """Numerically check gradients of the voxel_pooling op w.r.t. features."""
    rng = np.random.RandomState(123)
    # Optionally exercise the empty-point-set edge case (N == 0).
    N = (0 if empty_point_set else 50)
    channels = 4
    positions = rng.uniform(0, 1, (N, 3)).astype(pos_dtype)
    # Distinct, shuffled feature values so pooled gradients are distinguishable.
    features = np.linspace(0, (N * channels), num=(N * channels), endpoint=False)
    np.random.shuffle(features)
    features = np.reshape(features, (N, channels)).astype(feat_dtype)
    voxel_size = 0.25
    def fn(features):
        # Forward pass returning only the pooled features tensor.
        ans = mltest.run_op(ml, ml.device, True, ml.ops.voxel_pooling, positions, features, voxel_size, position_fn, feature_fn)
        return ans.pooled_features
    def fn_grad(features_bp, features):
        # Backward pass through the 'pooled_features' output.
        return mltest.run_op_grad(ml, ml.device, True, ml.ops.voxel_pooling, features, 'pooled_features', features_bp, positions, features, voxel_size, position_fn, feature_fn)
    # NOTE(review): epsilon=1 is unusually large for finite differences —
    # possibly a mangled constant (e.g. 1e-3); confirm upstream.
    gradient_OK = check_gradients(features, fn, fn_grad, epsilon=1)
    assert gradient_OK
def compile_all():
    """Compile every LaTeX project found under args.tex_base_folder.

    For each project folder the main .tex file is located, backed up to
    '<name>.raw', and two converted variants ('.color' and '.black') are
    written. The folder -> main-tex mapping and the compile results are
    dumped as JSON.
    """
    args = parse_args()
    pdf_id2tex_file_name = {}
    all_list = os.listdir(args.tex_base_folder)
    all_list.sort()
    pbar = tqdm.tqdm(all_list)
    for base_folder in pbar:
        pbar.set_description('Processing {}'.format(base_folder))
        folder = os.path.join(args.tex_base_folder, base_folder)
        try:
            main_tex_file = get_main_tex_path(folder)
            if (main_tex_file == ''):
                continue
            # Keep a pristine backup of the main tex file.
            if (not os.path.exists((main_tex_file + '.raw'))):
                shutil.copy(main_tex_file, (main_tex_file + '.raw'))
            # Write both converted variants; 'with' guarantees the handles are
            # closed (the original leaked them via open(...).writelines(...)).
            renewed_latex_contents = convert_latex((main_tex_file + '.raw'), Insert_Colors)
            with open((main_tex_file + '.color'), 'w') as fp:
                fp.writelines([(line + '\n') for line in renewed_latex_contents])
            renewed_latex_contents = convert_latex((main_tex_file + '.raw'), Insert_Black)
            with open((main_tex_file + '.black'), 'w') as fp:
                fp.writelines([(line + '\n') for line in renewed_latex_contents])
            pdf_id2tex_file_name[folder] = main_tex_file
        except Exception:
            # Best-effort batch processing: skip folders that fail, but do not
            # swallow KeyboardInterrupt/SystemExit like the bare 'except:' did.
            continue
    with open(args.pdf_id2tex_file_name, 'w') as fp:
        json.dump(pdf_id2tex_file_name, fp, ensure_ascii=False)
    success_compiled = shell_script(pdf_id2tex_file_name)
    with open(args.success_compiled, 'w') as fp:
        json.dump(success_compiled, fp, ensure_ascii=False)
def test_tolerance_value_effect():
    """Different tolerance values must change the set of retrieved ids."""
    hash_dict, dist_func = initialize()
    searcher = BruteForce(hash_dict, dist_func)
    results_tol2 = searcher.search('5', tol=2)
    results_tol3 = searcher.search('5', tol=3)
    ids_tol2 = set([item[0] for item in results_tol2])
    ids_tol3 = set([item[0] for item in results_tol3])
    assert ids_tol2 != ids_tol3
def make_comparable_grid(*batches, nrow):
    """Stack several equally-sized image batches into one comparison grid.

    Rows of up to ``nrow`` images are taken from each batch in turn and
    rendered with ``to_grid``; a 10-pixel black bar separates successive
    groups (no trailing bar after the last group).
    """
    assert all(len(batches[0]) == len(batch) for batch in batches[1:])
    total = len(batches[0])
    grids = []
    for start in range(0, total, nrow):
        chunk = torch.cat([batch[start:(start + nrow)] for batch in batches])
        grid = to_grid(chunk, 'torch', nrow=nrow)
        grids.append(grid)
        channels, _height, width = grid.shape
        grids.append(torch.zeros(channels, 10, width))
    # Drop the separator appended after the final group.
    return torch.cat(grids[:-1], dim=1)
# NOTE(review): '_REGISTRY.register()' looks like the mangled remains of a
# registration decorator (e.g. '@EVALUATOR_REGISTRY.register()') — confirm
# against the original source.
_REGISTRY.register()
class Classification(EvaluatorBase):
    """Closed-set classification evaluator: accumulates predictions and
    reports accuracy, error rate, optional per-class accuracy and an
    optional confusion matrix."""

    def __init__(self, cfg, lab2cname=None, **kwargs):
        super().__init__(cfg)
        self._lab2cname = lab2cname  # label index -> class name mapping
        self._correct = 0
        self._total = 0
        self._per_class_res = None
        self._y_true = []
        self._y_pred = []
        if cfg.TEST.PER_CLASS_RESULT:
            # Per-class reporting needs the label -> name mapping.
            assert (lab2cname is not None)
            self._per_class_res = defaultdict(list)

    def reset(self):
        """Clear accumulated counters before a new evaluation round.

        NOTE(review): _y_true/_y_pred are NOT cleared here, so confusion
        matrices computed after multiple reset() cycles mix data from all
        rounds — confirm whether that is intended.
        """
        self._correct = 0
        self._total = 0
        if (self._per_class_res is not None):
            self._per_class_res = defaultdict(list)

    def process(self, mo, gt):
        """Accumulate one batch: mo is (batch, classes) logits, gt labels."""
        # Argmax over the class dimension.
        pred = mo.max(1)[1]
        matches = pred.eq(gt).float()
        self._correct += int(matches.sum().item())
        self._total += gt.shape[0]
        self._y_true.extend(gt.data.cpu().numpy().tolist())
        self._y_pred.extend(pred.data.cpu().numpy().tolist())
        if (self._per_class_res is not None):
            for (i, label) in enumerate(gt):
                label = label.item()
                matches_i = int(matches[i].item())
                self._per_class_res[label].append(matches_i)

    def evaluate(self):
        """Return an OrderedDict of metrics and print a human-readable summary."""
        results = OrderedDict()
        acc = ((100.0 * self._correct) / self._total)
        err = (100.0 - acc)
        results['accuracy'] = acc
        results['error_rate'] = err
        print('=> result\n* total: {:,}\n* correct: {:,}\n* accuracy: {:.2f}%\n* error: {:.2f}%'.format(self._total, self._correct, acc, err))
        if (self._per_class_res is not None):
            labels = list(self._per_class_res.keys())
            labels.sort()
            print('=> per-class result')
            accs = []
            for label in labels:
                classname = self._lab2cname[label]
                res = self._per_class_res[label]
                correct = sum(res)
                total = len(res)
                acc = ((100.0 * correct) / total)
                accs.append(acc)
                print('* class: {} ({})\ttotal: {:,}\tcorrect: {:,}\tacc: {:.2f}%'.format(label, classname, total, correct, acc))
            mean_acc = np.mean(accs)
            print('* average: {:.2f}%'.format(mean_acc))
            results['perclass_accuracy'] = mean_acc
        if self.cfg.TEST.COMPUTE_CMAT:
            # Row-normalized confusion matrix, persisted for later inspection.
            cmat = confusion_matrix(self._y_true, self._y_pred, normalize='true')
            save_path = osp.join(self.cfg.OUTPUT_DIR, 'cmat.pt')
            torch.save(cmat, save_path)
            print('Confusion matrix is saved to "{}"'.format(save_path))
        return results
def parse_precision(precision, model='bigdl-llm'):
    """Map a precision label to a 'key=value' model-args fragment.

    Args:
        precision: label of the form <letters><bits>[suffix], e.g. 'fp32',
            'bf16', 'sym_int4', 'nf4'.
        model: harness backend; 'hf-causal' uses bitsandbytes-style args,
            'bigdl-llm' uses load_in_low_bit.

    Returns:
        A 'key=value' string suitable for splicing into model_args.

    Raises:
        RuntimeError: when a sub-16-bit precision is requested for an
            unknown backend.
    """
    result = match(r'([a-zA-Z_]+)(\d+)([a-zA-Z_\d]*)', precision)
    bit = int(result.group(2))
    if bit >= 16:
        # 16-bit and wider map to plain torch dtypes, independent of backend.
        float_map = dict(bf16='bfloat16', fp16='float16', fp32='float32')
        return f'dtype={float_map[precision]}'
    if model == 'hf-causal':
        return f'bnb_type={precision}'
    if model == 'bigdl-llm':
        return f'load_in_low_bit={precision}'
    # Typo fix: message previously read 'invald precision'.
    raise RuntimeError(f'invalid precision {precision}')
class Logger(logging.Logger):
    """Application-wide singleton logger with optional colorized output.

    All call sites share one logger instance named ``Logger.NAME``; obtain
    it via ``Logger.get(...)`` instead of instantiating this class.
    """
    NAME = 'SingletonLogger'

    @classmethod
    def get(cls, file_path=None, level='info', colorize=True):
        """Return the shared logger, creating and configuring it on first use.

        Fix: this method consumes ``cls`` (it is passed to
        ``logging.setLoggerClass``) but was missing the @classmethod
        decorator, so ``Logger.get()`` would have bound the first positional
        argument to ``cls``.

        Args:
            file_path: if given, also log to this file.
            level: key into the module-level ``log_lv`` mapping.
            colorize: use ``ColorFormatter`` for ANSI-colored output.
        """
        # Temporarily install this class so getLogger creates a Logger
        # instance, then restore the default class.
        logging.setLoggerClass(cls)
        logger = logging.getLogger(cls.NAME)
        logging.setLoggerClass(logging.Logger)
        logger.setLevel(log_lv[level])
        if logger.hasHandlers():
            # Fully configured already (stream + file handler): reuse as-is.
            if (len(logger.handlers) == 2):
                return logger
            logger.handlers.clear()
        log_format = '%(levelname)s::%(asctime)s | %(message)s'
        date_format = '%m/%d %H:%M:%S'
        if colorize:
            formatter = ColorFormatter(log_format, date_format)
        else:
            formatter = logging.Formatter(log_format, date_format)
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)
        if file_path:
            file_handler = logging.FileHandler(file_path)
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
        logger.propagate = False
        return logger

    def nofmt(self, msg, *args, level='info', **kwargs):
        """Log msg once with bare '%(message)s' formatting, then restore."""
        level = log_lv[level]
        formatters = self.remove_formats()
        super().log(level, msg, *args, **kwargs)
        self.set_formats(formatters)

    def remove_formats(self):
        """Swap every handler to a bare formatter; return the old formatters."""
        formatters = []
        for handler in self.handlers:
            formatters.append(handler.formatter)
            handler.setFormatter(logging.Formatter('%(message)s'))
        return formatters

    def set_formats(self, formatters):
        """Restore formatters previously returned by remove_formats()."""
        for (handler, formatter) in zip(self.handlers, formatters):
            handler.setFormatter(formatter)

    def set_file_handler(self, file_path):
        """Attach an extra file handler reusing the first handler's formatter."""
        file_handler = logging.FileHandler(file_path)
        formatter = self.handlers[0].formatter
        file_handler.setFormatter(formatter)
        self.addHandler(file_handler)
def extract_layer(model, layer):
    """Resolve a dotted path like 'layer1.0.conv' to a submodule of model.

    A leading 'module.' segment is reconciled with whether the model is
    wrapped (e.g. by DataParallel). Numeric segments are resolved via
    indexing. If a segment cannot be resolved, the deepest module reached
    so far is returned.
    """
    parts = layer.split('.')
    module = model
    # Model is wrapped but the path does not mention the wrapper: descend.
    if hasattr(model, 'module') and parts[0] != 'module':
        module = model.module
    # Path mentions the wrapper but the model is not wrapped: drop it.
    if not hasattr(model, 'module') and parts[0] == 'module':
        parts = parts[1:]
    for name in parts:
        if not hasattr(module, name):
            return module
        module = module[int(name)] if name.isdigit() else getattr(module, name)
    return module
class FSMTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for FSMTTokenizer (BPE tokenizer with separate
    source/target vocabularies)."""
    tokenizer_class = FSMTTokenizer

    def setUp(self):
        """Write a tiny src/tgt vocab, merges file and tokenizer config to tmpdir."""
        super().setUp()
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.langs = ['en', 'ru']
        config = {'langs': self.langs, 'src_vocab_size': 10, 'tgt_vocab_size': 20}
        self.src_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['src_vocab_file'])
        self.tgt_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['tgt_vocab_file'])
        config_file = os.path.join(self.tmpdirname, 'tokenizer_config.json')
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.src_vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.tgt_vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))
        with open(config_file, 'w') as fp:
            fp.write(json.dumps(config))

    # NOTE(review): the bare '_property' lines below look like mangled
    # '@cached_property' decorators — confirm against the original test file.
    _property
    def tokenizer_ru_en(self):
        """Pretrained ru->en tokenizer (downloaded from the hub)."""
        return FSMTTokenizer.from_pretrained('facebook/wmt19-ru-en')
    _property
    def tokenizer_en_ru(self):
        """Pretrained en->ru tokenizer (downloaded from the hub)."""
        return FSMTTokenizer.from_pretrained('facebook/wmt19-en-ru')

    def test_online_tokenizer_config(self):
        """Validate tokenizer config loaded from an online tiny checkpoint."""
        tokenizer = FSMTTokenizer.from_pretrained(FSMT_TINY2)
        self.assertListEqual([tokenizer.src_lang, tokenizer.tgt_lang], ['en', 'ru'])
        self.assertEqual(tokenizer.src_vocab_size, 21)
        self.assertEqual(tokenizer.tgt_vocab_size, 21)

    def test_full_tokenizer(self):
        """BPE-tokenize a word with the local toy vocab and check ids."""
        tokenizer = FSMTTokenizer(self.langs, self.src_vocab_file, self.tgt_vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = (tokens + ['<unk>'])
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_sequence_builders(self):
        """Special tokens: single sequence gets </s> (id 2); pairs get two."""
        tokenizer = self.tokenizer_ru_en
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert (encoded_sentence == (text + [2]))
        assert (encoded_pair == (((text + [2]) + text_2) + [2]))

    def test_match_encode_decode(self):
        """Encode with en->ru tokenizer, decode with ru->en, expect round trip."""
        tokenizer_enc = self.tokenizer_en_ru
        tokenizer_dec = self.tokenizer_ru_en
        targets = [["Here's a little song I wrote. Don't worry, be happy.", [2470, 39, 11, 2349, 7222, 70, 5979, 7, 8450, 1050, 13160, 5, 26, 6445, 7, 2]], ["This is it. No more. I'm done!", [132, 21, 37, 7, 1434, 86, 7, 70, 6476, 1305, 427, 2]]]
        for (src_text, tgt_input_ids) in targets:
            encoded_ids = tokenizer_enc.encode(src_text, return_tensors=None)
            self.assertListEqual(encoded_ids, tgt_input_ids)
            decoded_text = tokenizer_dec.decode(encoded_ids, skip_special_tokens=True)
            self.assertEqual(decoded_text, src_text)

    def test_tokenizer_lower(self):
        """do_lower_case=True must lowercase before BPE."""
        tokenizer = FSMTTokenizer.from_pretrained('facebook/wmt19-ru-en', do_lower_case=True)
        tokens = tokenizer.tokenize('USA is United States of America')
        expected = ['us', 'a</w>', 'is</w>', 'un', 'i', 'ted</w>', 'st', 'ates</w>', 'of</w>', 'am', 'er', 'ica</w>']
        self.assertListEqual(tokens, expected)

    # NOTE(review): the bare string lines below look like mangled
    # '@unittest.skip("FSMTConfig.__init__ requires non-optional args")'
    # decorators — confirm against the original test file.
    ('FSMTConfig.__init__ requires non-optional args')
    def test_torch_encode_plus_sent_to_model(self):
        pass
    ('FSMTConfig.__init__ requires non-optional args')
    def test_np_encode_plus_sent_to_model(self):
        pass
def q_rnd():
    """Draw a uniformly random unit quaternion (Shoemake's method).

    Three uniform samples (u, v, w) parameterize the rotation group; the
    result is a float32 array of shape (4,) with unit norm.
    """
    u, v, w = np.random.uniform(0.0, 1.0, size=[3])
    angle_a = v * (2.0 * np.pi)
    angle_b = w * (2.0 * np.pi)
    scale_a = (1.0 - u) ** 0.5
    scale_b = u ** 0.5
    components = [
        scale_a * np.sin(angle_a),
        scale_a * np.cos(angle_a),
        scale_b * np.sin(angle_b),
        scale_b * np.cos(angle_b),
    ]
    return np.asarray(components, np.float32)
class Tester(BaseTrainer):
    """Evaluation/inference driver: runs validation epochs and writes test
    predictions to CSV. Training logic lives in BaseTrainer subclasses."""

    def __init__(self, config, model, data_loader, writer, checkpoint_dir, logger, valid_data_loader=None, test_data_loader=None, metric_ftns=None):
        super(Tester, self).__init__(config, data_loader, writer, checkpoint_dir, logger, valid_data_loader=valid_data_loader, test_data_loader=test_data_loader, metric_ftns=metric_ftns)
        # Pick CUDA only when both requested by config and available.
        if self.config.cuda:
            use_cuda = torch.cuda.is_available()
            self.device = torch.device(('cuda' if use_cuda else 'cpu'))
        else:
            self.device = torch.device('cpu')
        self.start_epoch = 1
        self.epochs = self.config.epochs
        self.valid_data_loader = valid_data_loader
        self.test_data_loader = test_data_loader
        self.do_validation = (self.valid_data_loader is not None)
        self.do_test = (self.test_data_loader is not None)
        self.log_step = self.config.log_interval
        self.model = model
        # Best monitored value so far (lower is better).
        self.mnt_best = np.inf
        self.checkpoint_dir = checkpoint_dir
        self.gradient_accumulation = config.gradient_accumulation
        # NOTE(review): metric_ftns argument is overridden with a fixed list
        # here — confirm whether the parameter should be honored instead.
        self.metric_ftns = ['loss', 'acc']
        self.valid_metrics = MetricTracker(*[m for m in self.metric_ftns], writer=self.writer, mode='validation')
        self.logger = logger

    def _valid_epoch(self, epoch, mode, loader):
        """Run one no-grad pass over `loader`, tracking loss/accuracy.

        Returns the average validation loss for the epoch.
        """
        self.model.eval()
        self.valid_sentences = []
        self.valid_metrics.reset()
        with torch.no_grad():
            for (batch_idx, (data, target)) in enumerate(loader):
                data = data.to(self.device)
                target = target.long().to(self.device)
                # Model returns (logits, loss) when given targets.
                (output, loss) = self.model(data, target)
                loss = loss.mean()
                writer_step = (((epoch - 1) * len(loader)) + batch_idx)
                prediction = torch.max(output, 1)
                acc = (np.sum((prediction[1].cpu().numpy() == target.cpu().numpy())) / target.size(0))
                self.valid_metrics.update(key='loss', value=loss.item(), n=1, writer_step=writer_step)
                self.valid_metrics.update(key='acc', value=np.sum((prediction[1].cpu().numpy() == target.cpu().numpy())), n=target.size(0), writer_step=writer_step)
                self._progress(batch_idx, epoch, metrics=self.valid_metrics, mode=mode, print_summary=True)
        val_loss = self.valid_metrics.avg('loss')
        return val_loss

    def predict(self):
        """Run inference over the test loader and dump 'target,prediction'
        rows to predictions.csv in the checkpoint directory."""
        self.model.eval()
        predictions = []
        with torch.no_grad():
            for (batch_idx, (data, target)) in enumerate(self.test_data_loader):
                data = data.to(self.device)
                # No targets at inference time: model returns logits only.
                logits = self.model(data, None)
                (maxes, prediction) = torch.max(logits, 1)
                predictions.append(f'{target[0]},{prediction.cpu().numpy()[0]}')
        self.logger.info('Inference done')
        pred_name = os.path.join(self.checkpoint_dir, f'predictions.csv')
        write_csv(predictions, pred_name)
        return predictions

    def _progress(self, batch_idx, epoch, metrics, mode='', print_summary=False):
        """Log a per-step progress line every log_step samples, or an
        epoch summary line when print_summary is set."""
        metrics_string = metrics.calc_all_metrics()
        if (((batch_idx * self.config.dataloader.train.batch_size) % self.log_step) == 0):
            if (metrics_string == None):
                self.logger.warning(f' No metrics')
            else:
                self.logger.info(f'{mode} Epoch: [{epoch:2d}/{self.epochs:2d}] Video [{(batch_idx * self.config.dataloader.train.batch_size):5d}/{self.len_epoch:5d}] {metrics_string}')
        elif print_summary:
            self.logger.info(f'{mode} summary Epoch: [{epoch}/{self.epochs}] {metrics_string}')
class GraspNetModel():
    """Wrapper around a grasp network ('vae', 'gan' or 'evaluator' arch):
    owns the network, criterion, optimizer/scheduler and checkpoint I/O."""

    def __init__(self, opt):
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.is_train = opt.is_train
        # Clamp the first requested GPU id to the number of visible devices.
        if (self.gpu_ids and (self.gpu_ids[0] >= torch.cuda.device_count())):
            self.gpu_ids[0] = (torch.cuda.device_count() - 1)
        self.device = (torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu'))
        self.save_dir = join(opt.checkpoints_dir, opt.name)
        self.optimizer = None
        self.loss = None
        self.pcs = None
        self.grasps = None
        self.net = networks.define_classifier(opt, self.gpu_ids, opt.arch, opt.init_type, opt.init_gain, self.device)
        self.criterion = networks.define_loss(opt)
        self.confidence_loss = None
        # Architecture-specific loss slots.
        if (self.opt.arch == 'vae'):
            self.kl_loss = None
            self.reconstruction_loss = None
        elif (self.opt.arch == 'gan'):
            self.reconstruction_loss = None
        else:
            self.classification_loss = None
        if self.is_train:
            self.optimizer = torch.optim.Adam(self.net.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.scheduler = networks.get_scheduler(self.optimizer, opt)
        if ((not self.is_train) or opt.continue_train):
            self.load_network(opt.which_epoch, self.is_train)

    def set_input(self, data):
        """Move one batch (dict of numpy arrays) onto the device.

        Targets are labels for the evaluator arch, control points otherwise.
        """
        input_pcs = torch.from_numpy(data['pc']).contiguous()
        input_grasps = torch.from_numpy(data['grasp_rt']).float()
        if (self.opt.arch == 'evaluator'):
            targets = torch.from_numpy(data['labels']).float()
        else:
            targets = torch.from_numpy(data['target_cps']).float()
        # Inputs require grad only during training.
        self.pcs = input_pcs.to(self.device).requires_grad_(self.is_train)
        self.grasps = input_grasps.to(self.device).requires_grad_(self.is_train)
        self.targets = targets.to(self.device)

    def generate_grasps(self, pcs, z=None):
        """Sample grasps for point clouds without tracking gradients.

        NOTE(review): accesses self.net.module — assumes the network is
        wrapped (e.g. DataParallel); confirm for single-GPU setups.
        """
        with torch.no_grad():
            return self.net.module.generate_grasps(pcs, z=z)

    def evaluate_grasps(self, pcs, gripper_pcs):
        """Return success probabilities for (point cloud, gripper) pairs."""
        (success, _) = self.net.module(pcs, gripper_pcs)
        return torch.sigmoid(success)

    def forward(self):
        """Run the network on the current batch set via set_input()."""
        return self.net(self.pcs, self.grasps, train=self.is_train)

    def backward(self, out):
        """Compute the architecture-specific loss and backpropagate."""
        if (self.opt.arch == 'vae'):
            (predicted_cp, confidence, mu, logvar) = out
            predicted_cp = utils.transform_control_points(predicted_cp, predicted_cp.shape[0], device=self.device)
            (self.reconstruction_loss, self.confidence_loss) = self.criterion[1](predicted_cp, self.targets, confidence=confidence, confidence_weight=self.opt.confidence_weight, device=self.device)
            self.kl_loss = (self.opt.kl_loss_weight * self.criterion[0](mu, logvar, device=self.device))
            self.loss = ((self.kl_loss + self.reconstruction_loss) + self.confidence_loss)
        elif (self.opt.arch == 'gan'):
            (predicted_cp, confidence) = out
            predicted_cp = utils.transform_control_points(predicted_cp, predicted_cp.shape[0], device=self.device)
            (self.reconstruction_loss, self.confidence_loss) = self.criterion(predicted_cp, self.targets, confidence=confidence, confidence_weight=self.opt.confidence_weight, device=self.device)
            self.loss = (self.reconstruction_loss + self.confidence_loss)
        elif (self.opt.arch == 'evaluator'):
            (grasp_classification, confidence) = out
            (self.classification_loss, self.confidence_loss) = self.criterion(grasp_classification.squeeze(), self.targets, confidence, self.opt.confidence_weight, device=self.device)
            self.loss = (self.classification_loss + self.confidence_loss)
        self.loss.backward()

    def optimize_parameters(self):
        """One training step: zero grads, forward, backward, optimizer step."""
        self.optimizer.zero_grad()
        out = self.forward()
        self.backward(out)
        self.optimizer.step()

    def load_network(self, which_epoch, train=True):
        """Load weights (and, when train=True, optimizer/scheduler state)
        from '<which_epoch>_net.pth' in the save directory."""
        save_filename = ('%s_net.pth' % which_epoch)
        load_path = join(self.save_dir, save_filename)
        net = self.net
        # Unwrap DataParallel so keys match the raw module's state_dict.
        if isinstance(net, torch.nn.DataParallel):
            net = net.module
        print(('loading the model from %s' % load_path))
        checkpoint = torch.load(load_path, map_location=self.device)
        if hasattr(checkpoint['model_state_dict'], '_metadata'):
            del checkpoint['model_state_dict']._metadata
        net.load_state_dict(checkpoint['model_state_dict'])
        if train:
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
            self.opt.epoch_count = checkpoint['epoch']
        else:
            net.eval()

    def save_network(self, net_name, epoch_num):
        """Save net/optimizer/scheduler state to '<net_name>_net.pth'.

        The net is moved to CPU for serialization and moved back afterwards.
        """
        save_filename = ('%s_net.pth' % net_name)
        save_path = join(self.save_dir, save_filename)
        torch.save({'epoch': (epoch_num + 1), 'model_state_dict': self.net.module.cpu().state_dict(), 'optimizer_state_dict': self.optimizer.state_dict(), 'scheduler_state_dict': self.scheduler.state_dict()}, save_path)
        if ((len(self.gpu_ids) > 0) and torch.cuda.is_available()):
            self.net.cuda(self.gpu_ids[0])

    def update_learning_rate(self):
        """Advance the LR scheduler and print the resulting learning rate."""
        self.scheduler.step()
        lr = self.optimizer.param_groups[0]['lr']
        print(('learning rate = %.7f' % lr))

    def test(self):
        """Evaluate the current batch without gradients.

        Returns (loss, 1) for generative archs or (correct, total) for the
        evaluator arch. NOTE(review): the output is unpacked as a 2-tuple
        before the arch check — confirm the 'vae' forward also returns two
        values in eval mode.
        """
        with torch.no_grad():
            out = self.forward()
            (prediction, confidence) = out
            if (self.opt.arch == 'vae'):
                predicted_cp = utils.transform_control_points(prediction, prediction.shape[0], device=self.device)
                (reconstruction_loss, _) = self.criterion[1](predicted_cp, self.targets, confidence=confidence, confidence_weight=self.opt.confidence_weight, device=self.device)
                return (reconstruction_loss, 1)
            elif (self.opt.arch == 'gan'):
                predicted_cp = utils.transform_control_points(prediction, prediction.shape[0], device=self.device)
                (reconstruction_loss, _) = self.criterion(predicted_cp, self.targets, confidence=confidence, confidence_weight=self.opt.confidence_weight, device=self.device)
                return (reconstruction_loss, 1)
            else:
                predicted = torch.round(torch.sigmoid(prediction)).squeeze()
                correct = (predicted == self.targets).sum().item()
                return (correct, len(self.targets))
def current_git_hash():
    """Return (git_hash, unstaged_changes) for the current working tree.

    git_hash is the output of 'git describe --always', or None when git is
    unavailable or fails. unstaged_changes is True when the index differs
    from the working tree (a warning is emitted), None when git is missing.
    """
    unstaged_changes = False
    try:
        # Exit code 0 = clean tree; 1 = unstaged changes present.
        subprocess.check_output(['git', 'diff-index', '--quiet', 'HEAD', '--'])
    except subprocess.CalledProcessError as grepexc:
        if (grepexc.returncode == 1):
            warnings.warn('Running experiments with unstaged changes.')
            unstaged_changes = True
    except FileNotFoundError:
        warnings.warn('Git not found')
        # Fix: bail out here — the original fell through and crashed on the
        # next check_output call with an uncaught FileNotFoundError.
        return (None, None)
    try:
        git_hash = subprocess.check_output(['git', 'describe', '--always']).strip().decode('utf-8')
        return (git_hash, unstaged_changes)
    except subprocess.CalledProcessError:
        return (None, None)
class SqueezeExcite(nn.Module):
    """Squeeze-and-Excitation channel attention block.

    Global-average-pools the input, squeezes channels through a 1x1 conv,
    applies an activation, expands back to the input width, and gates the
    input channel-wise with the resulting attention map.
    """

    def __init__(self, in_chs, rd_ratio=0.25, rd_channels=None, act_layer=nn.ReLU, gate_layer=nn.Sigmoid, force_act_layer=None, rd_round_fn=None):
        super(SqueezeExcite, self).__init__()
        # Derive the bottleneck channel count when not supplied explicitly.
        if rd_channels is None:
            round_fn = rd_round_fn or round
            rd_channels = round_fn(in_chs * rd_ratio)
        # force_act_layer takes precedence over act_layer when provided.
        chosen_act = force_act_layer or act_layer
        self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True)
        self.act1 = create_act_layer(chosen_act, inplace=True)
        self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        # Squeeze: global spatial average -> (B, C, 1, 1).
        pooled = x.mean((2, 3), keepdim=True)
        # Excite: reduce -> activate -> expand.
        attn = self.conv_reduce(pooled)
        attn = self.act1(attn)
        attn = self.conv_expand(attn)
        # Gate the input channel-wise.
        return x * self.gate(attn)
def profile_model(model, cfg):
    """Profile FLOPs and/or throughput of a point-cloud model.

    Builds random inputs matching the config (variable-length batches with
    an offsets tensor, or fixed (B, N, C) tensors), then optionally reports
    GFLOPs via the DeepSpeed profiler (cfg.flops) and inference throughput
    (cfg.timing).
    """
    model.eval()
    (B, N, C) = (1, cfg.num_points, 3)
    if cfg.variable:
        # Variable mode: all batch items are concatenated and an offsets
        # tensor marks the cumulative end index of each item.
        points = torch.randn((B * N), 3).cuda().contiguous()
        features = torch.randn((B * N), C).cuda().contiguous()
        offset = []
        count = 0
        for i in range(B):
            count += N
            offset.append(count)
        offsets = torch.IntTensor(offset).cuda()
        args = [points, features, offsets]
    else:
        points = torch.randn(B, N, 3).cuda().contiguous()
        if cfg.model.get('feature_last_dim', False):
            features = torch.randn(B, N, C).cuda().contiguous()
        else:
            features = torch.randn(B, C, N).cuda().contiguous()
        cls = torch.zeros(B, 16).long().cuda()
        if (cfg.dataset.common.NAME == 'ShapeNetPartNormal'):
            args = [points, features, cls]
        else:
            args = [{'pos': points, 'x': features}]
    print(f'test input size: ({(points.shape, features.shape)})')
    if cfg.get('flops', False):
        from deepspeed.profiling.flops_profiler import get_model_profile
        detailed = False
        (flops, macs, params) = get_model_profile(model=model, args=args, print_profile=detailed, detailed=detailed, warm_up=10, as_string=False, output_file=None, ignore_modules=None)
        print(f'Batches npoints Params.(M) GFLOPs')
        # Fix: the divisor was the bare literal '.0' (== 0.0), which always
        # raised ZeroDivisionError; GFLOPs = flops / batch / 1e9.
        print(f'{cfg.batch_size} {N} {(params / 1000000.0): .3f} {(flops / (float(B) * 1e9)): .2f}')
    else:
        warnings.warn('set flops=True to calculate flops')
    if cfg.get('timing', False):
        # Rebuild inputs at the configured batch size for timing.
        B = cfg.batch_size
        if cfg.variable:
            points = torch.randn((B * N), 3).cuda().contiguous()
            features = torch.randn((B * N), C).cuda().contiguous()
            offset = []
            count = 0
            for i in range(B):
                count += N
                offset.append(count)
            offsets = torch.IntTensor(offset).cuda().contiguous()
            args = [points, features, offsets]
        else:
            points = torch.randn(B, N, 3).cuda().contiguous()
            features = torch.randn(B, C, N).cuda().contiguous()
            cls = torch.zeros(B, 16).long().cuda().contiguous()
            if (cfg.dataset.common.NAME == 'ShapeNetPartNormal'):
                args = [points, features, cls]
            else:
                args = [features.transpose(1, 2).contiguous()]
        model = build_model_from_cfg(cfg.model).cuda()
        n_runs = cfg.get('nruns', 200)
        with torch.no_grad():
            for _ in range(10):
                model(*args)
            # Fix: wait for warm-up kernels before starting the clock so the
            # measured window covers only the timed runs.
            torch.cuda.synchronize()
            start_time = time.time()
            for _ in range(n_runs):
                model(*args)
            torch.cuda.synchronize()
            time_taken = (time.time() - start_time)
        n_batches = (n_runs * B)
        print(f'Throughput (ins./s): {(float(n_batches) / float(time_taken))}')
    else:
        warnings.warn('set timing=True to calculate inference time')
def get_default_configuration(network, task, network_trainer, plans_identifier=default_plans_identifier, search_in=(nnunet.__path__[0], 'training', 'network_training'), base_module='nnunet.training.network_training'):
    """Resolve plans file, output folder, stage, loss flavor and trainer class
    for one nnU-Net configuration.

    Args:
        network: one of '2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'.
        task: task/dataset folder name under the preprocessing output dir.
        network_trainer: trainer class name to look up under search_in.
        plans_identifier: prefix of the plans pickle to load.
        search_in / base_module: where recursive_find_python_class searches.

    Returns:
        (plans_file, output_folder_name, dataset_directory, batch_dice,
         stage, trainer_class)
    """
    # Fix: the assertion message previously listed "'3d'", which is not a
    # valid value; the accepted list starts with '2d'.
    assert (network in ['2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres']), "network can only be one of the following: '2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'"
    dataset_directory = join(preprocessing_output_dir, task)
    if (network == '2d'):
        plans_file = join(preprocessing_output_dir, task, (plans_identifier + '_plans_2D.pkl'))
    else:
        plans_file = join(preprocessing_output_dir, task, (plans_identifier + '_plans_3D.pkl'))
    plans = load_pickle(plans_file)
    possible_stages = list(plans['plans_per_stage'].keys())
    if (((network == '3d_cascade_fullres') or (network == '3d_lowres')) and (len(possible_stages) == 1)):
        raise RuntimeError('3d_lowres/3d_cascade_fullres only applies if there is more than one stage. This task does not require the cascade. Run 3d_fullres instead')
    # Low-resolution configs use the first stage; full-res uses the last.
    if ((network == '2d') or (network == '3d_lowres')):
        stage = 0
    else:
        stage = possible_stages[(- 1)]
    trainer_class = recursive_find_python_class([join(*search_in)], network_trainer, current_module=base_module)
    output_folder_name = join(network_training_output_dir, network, task, ((network_trainer + '__') + plans_identifier))
    print('')
    print(('I am running the following nnUNet: %s' % network))
    print('My trainer class is: ', trainer_class)
    print('For that I will be using the following configuration:')
    summarize_plans(plans_file)
    print(('I am using stage %d from these plans' % stage))
    # Batch dice applies to 2d and multi-stage configs, except 3d_lowres.
    if (((network == '2d') or (len(possible_stages) > 1)) and (not (network == '3d_lowres'))):
        batch_dice = True
        print('I am using batch dice + CE loss')
    else:
        batch_dice = False
        print('I am using sample dice + CE loss')
    print('\nI am using data from this folder: ', join(dataset_directory, plans['data_identifier']))
    print('')
    return (plans_file, output_folder_name, dataset_directory, batch_dice, stage, trainer_class)
class Pool(object):
_WORKER_AUGSEQ = None
_WORKER_SEED_START = None
    def __init__(self, augseq, processes=None, maxtasksperchild=None, seed=None):
        """Configure (but do not yet create) a worker pool for augmentation.

        Args:
            augseq: augmentation sequence the workers will apply.
            processes: worker count; None = all CPUs, negative = CPU count
                minus that many, must not be 0.
            maxtasksperchild: tasks a worker handles before being restarted.
            seed: base random seed; must lie within
                [ia.SEED_MIN_VALUE, ia.SEED_MAX_VALUE] when given.
        """
        assert (Pool._WORKER_AUGSEQ is None), '_WORKER_AUGSEQ was already set when calling Pool.__init__(). Did you try to instantiate a Pool within a Pool?'
        assert ((processes is None) or (processes != 0))
        self.augseq = augseq
        self.processes = processes
        self.maxtasksperchild = maxtasksperchild
        self.seed = seed
        if (self.seed is not None):
            assert (ia.SEED_MIN_VALUE <= self.seed <= ia.SEED_MAX_VALUE)
        # The multiprocessing.Pool is created lazily via the `pool` accessor.
        self._pool = None
        # Counter used to assign unique ids to dispatched batches.
        self._batch_idx = 0
def pool(self):
if (self._pool is None):
processes = self.processes
if ((processes is not None) and (processes < 0)):
try:
processes = (multiprocessing.cpu_count() - abs(processes))
processes = max(processes, 1)
except (ImportError, NotImplementedError):
processes = None
self._pool = multiprocessing.Pool(processes, initializer=_Pool_initialize_worker, initargs=(self.augseq, self.seed), maxtasksperchild=self.maxtasksperchild)
return self._pool
def map_batches(self, batches, chunksize=None):
assert isinstance(batches, list), (("Expected to get a list as 'batches', got type %s. " + 'Call imap_batches() if you use generators.') % (type(batches),))
return self.pool.map(_Pool_starworker, self._handle_batch_ids(batches), chunksize=chunksize)
def map_batches_async(self, batches, chunksize=None, callback=None, error_callback=None):
assert isinstance(batches, list), (("Expected to get a list as 'batches', got type %s. " + 'Call imap_batches() if you use generators.') % (type(batches),))
return self.pool.map_async(_Pool_starworker, self._handle_batch_ids(batches), chunksize=chunksize, callback=callback, error_callback=error_callback)
def imap_batches(self, batches, chunksize=1, output_buffer_size=None):
assert ia.is_generator(batches), (("Expected to get a generator as 'batches', got type %s. " + 'Call map_batches() if you use lists.') % (type(batches),))
output_buffer_left = _create_output_buffer_left(output_buffer_size)
gen = self.pool.imap(_Pool_starworker, self._ibuffer_batch_loading(self._handle_batch_ids_gen(batches), output_buffer_left), chunksize=chunksize)
for batch in gen:
(yield batch)
if (output_buffer_left is not None):
output_buffer_left.release()
def imap_batches_unordered(self, batches, chunksize=1, output_buffer_size=None):
assert ia.is_generator(batches), (("Expected to get a generator as 'batches', got type %s. " + 'Call map_batches() if you use lists.') % (type(batches),))
output_buffer_left = _create_output_buffer_left(output_buffer_size)
gen = self.pool.imap_unordered(_Pool_starworker, self._ibuffer_batch_loading(self._handle_batch_ids_gen(batches), output_buffer_left), chunksize=chunksize)
for batch in gen:
(yield batch)
if (output_buffer_left is not None):
output_buffer_left.release()
def __enter__(self):
assert (self._pool is None), 'Tried to __enter__ a pool that has already been initialized.'
_ = self.pool
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
if (self._pool is not None):
self._pool.close()
self._pool.join()
self._pool = None
def terminate(self):
if (self._pool is not None):
self._pool.terminate()
self._pool.join()
self._pool = None
def join(self):
if (self._pool is not None):
self._pool.join()
def _handle_batch_ids(self, batches):
ids = np.arange(self._batch_idx, (self._batch_idx + len(batches)))
inputs = list(zip(ids, batches))
self._batch_idx += len(batches)
return inputs
def _handle_batch_ids_gen(self, batches):
for batch in batches:
batch_idx = self._batch_idx
(yield (batch_idx, batch))
self._batch_idx += 1
def _ibuffer_batch_loading(cls, batches, output_buffer_left):
for batch in batches:
if (output_buffer_left is not None):
output_buffer_left.acquire()
(yield batch) |
def detect_trend_anomaly_arr(y, th_arr):
    """Return indices of points in ``y`` that fall outside the threshold band.

    Parameters
    ----------
    y : np.ndarray
        Observed values.
    th_arr : sequence of two array-likes
        ``th_arr[0]`` is the lower-bound series, ``th_arr[1]`` the upper-bound
        series, each broadcastable against ``y``.

    Returns
    -------
    list[int]
        Sorted, duplicate-free indices i where ``y[i] < th_arr[0][i]`` or
        ``y[i] > th_arr[1][i]``.
    """
    # np.flatnonzero already yields unique, sorted indices, so the original
    # zeros/scores/``list(set(...))`` round-trip was redundant -- and the set
    # made the output order implementation-defined; it is now deterministic.
    out_of_band = np.logical_or((y < th_arr[0]), (y > th_arr[1]))
    return np.flatnonzero(out_of_band).tolist()
class CIFAR10ItPrServer(ItPrServer):
    """Iterative-pruning server specialized for the CIFAR-10 experiment."""

    def init_test_loader(self):
        """Build the held-out test loader (batches of 1000, pinned memory)."""
        self.test_loader = get_data_loader(EXP_NAME, data_type='test', batch_size=1000, num_workers=8, pin_memory=True)

    def init_clients(self):
        """Randomly partition the training indices into num_slices contiguous
        shards and hand every client a reference to the shared server model."""
        shuffled = torch.randperm(NUM_TRAIN_DATA).tolist()
        shard_len = NUM_TRAIN_DATA // num_slices
        indices = [shuffled[k * shard_len:(k + 1) * shard_len] for k in range(num_slices)]
        models = [self.model] * NUM_CLIENTS
        return (models, indices)
class Algorithm(torch.nn.Module):
    """Abstract base class for training algorithms.

    Subclasses must implement ``update`` (one optimization step over a list of
    minibatches) and ``predict`` (forward inference on a batch of inputs).
    """

    def __init__(self, args):
        super(Algorithm, self).__init__()

    def update(self, minibatches):
        """Perform one update step on the given minibatches."""
        raise NotImplementedError

    def predict(self, x):
        """Return predictions for the input batch ``x``."""
        raise NotImplementedError
def word_embedding_elmo(sentence: List[str], elmo_model: ElmoEmbedder, remove_stopwords=False, avg_all_layers=True) -> np.ndarray:
    """Embed a tokenized sentence with ELMo.

    When ``avg_all_layers`` is True, returns the float32 mean over all ELMo
    layers; otherwise returns a copy of layer index 2's per-word embeddings.
    Stopwords are optionally filtered out before embedding.
    """
    if remove_stopwords:
        sentence = list(stop_words_filter(sentence))
    layer_vectors = elmo_model.embed_sentence(sentence)
    if avg_all_layers:
        return np.mean(layer_vectors, axis=0, dtype='float32')
    return layer_vectors[2][:]
def print_write(print_str, log_file):
    """Echo *print_str* (a sequence of printable items) to stdout and, when
    *log_file* is given, append the same line to that file."""
    print(*print_str)
    if log_file is not None:
        with open(log_file, 'a') as fh:
            print(*print_str, file=fh)
class TrainerFactory():
    """Maps a config's model_type to the trainer implementation that runs it."""

    def __init__(self):
        # Classic ML models trained on TF-IDF features.
        self.tfidf_experiments = [ModelType.XGBoost, ModelType.NaiveBayes, ModelType.SVM]
        # Token-sequence neural models.
        self.sequence_experiments = [ModelType.LSTM, ModelType.BiLSTM, ModelType.TRANSFORMERENCODER]
        # Graph neural models plus their pairwise-classification experiment modes.
        self.graph_experiments = [ModelType.TreeLSTM, ModelType.GCN, ModelType.GAT, ModelType.GGNN, ExperimentMode.TreeLSTM_Classify, ExperimentMode.TreeLSTM_PairwiseClassify, ExperimentMode.GCN_PairwiseClassify, ExperimentMode.GAT_PairwiseClassify, ExperimentMode.GGNN_PairwiseClassify]

    def get_trainer(self, config):
        """Return a trainer for ``config.model_type``; exits if unsupported."""
        if (config.model_type in self.tfidf_experiments):
            return TFIDFTrainer(config)
        elif ((config.model_type in self.sequence_experiments) or (config.model_type in self.graph_experiments)):
            return SequenceTrainer(config)
        else:
            # BUG FIX: the message previously interpolated config.experiment_mode,
            # but the dispatch above keys on config.model_type -- report the
            # value that was actually checked.
            raise SystemExit(NotImplementedError(('%s not found' % config.model_type)))
class ViTHybridImageProcessor(metaclass=DummyObject):
    """Placeholder class emitted when the 'vision' backend is unavailable;
    any attempt to instantiate it raises a helpful error via
    requires_backends instead of an opaque ImportError."""
    # Backends this dummy stands in for.
    _backends = ['vision']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
def _str_to_bool(value):
    """Parse common true/false spellings from the command line.

    argparse's ``type=bool`` treats any non-empty string -- including the
    string 'False' -- as True; this helper interprets the text instead.
    """
    if isinstance(value, bool):
        return value
    return str(value).lower() in ('1', 'true', 'yes', 'y', 't')

def parse_args():
    """Parse command-line arguments for testing a Visual Grounding network.

    Prints help and exits with status 1 when invoked with no arguments.
    Returns the argparse Namespace (config file stored as ``cfg_file``).
    """
    parser = argparse.ArgumentParser(description='Test a Visual Grounding network')
    parser.add_argument('--gpu_id', help='gpu_id', default=0, type=int)
    parser.add_argument('--test_split', help='test_split', default='val', type=str)
    parser.add_argument('--batchsize', help='batchsize', default=64, type=int)
    # BUG FIX: was ``type=bool`` -- bool('False') is True, so passing
    # '--vis_pred False' silently enabled visualization. _str_to_bool parses
    # the string correctly; the default stays False, so callers are unaffected.
    parser.add_argument('--vis_pred', help='visualize prediction', default=False, type=_str_to_bool)
    parser.add_argument('--test_net', help='test_net prototxt', default=None, type=str)
    parser.add_argument('--pretrained_model', help='pretrained_model', type=str)
    parser.add_argument('--cfg', dest='cfg_file', help='optional config file', type=str)
    if (len(sys.argv) == 1):
        parser.print_help()
        sys.exit(1)
    opts = parser.parse_args()
    return opts
def fixup_resnet56(**kwargs):
    """Construct a 56-layer Fixup ResNet: three stages of 9 FixupBasicBlocks.

    Extra keyword arguments are forwarded to the FixupResNet constructor.
    """
    return FixupResNet(FixupBasicBlock, [9, 9, 9], **kwargs)
class DummyBoxEnv(DummyEnv):
    """Minimal gym-style environment with Box spaces, for unit tests.

    NOTE(review): observation_space/action_space are defined as plain methods
    here but step() uses ``self.observation_space.sample()``, which only works
    if they are properties -- ``@property`` decorators were probably stripped
    during extraction; confirm against the original.
    """
    def __init__(self, random=True, obs_dim=(4,), action_dim=(2,)):
        super().__init__(random, obs_dim, action_dim)
    def observation_space(self):
        """Observations: float32 Box in [-1, 1] with shape obs_dim."""
        return gym.spaces.Box(low=(- 1), high=1, shape=self._obs_dim, dtype=np.float32)
    def action_space(self):
        """Actions: float32 Box in [-5, 5] with shape action_dim."""
        return gym.spaces.Box(low=(- 5.0), high=5.0, shape=self._action_dim, dtype=np.float32)
    def reset(self):
        # Deterministic all-ones observation.
        return np.ones(self._obs_dim, dtype=np.float32)
    def step(self, action):
        # Random observation, zero reward, never done, dummy info dict.
        return (self.observation_space.sample(), 0, False, dict(dummy='dummy'))
class DropoutQBits_(torch.autograd.Function):
    """Custom autograd dropout backed by qbits_customop kernels.

    NOTE(review): forward/backward lack the usual @staticmethod decorators and
    forward returns ``input`` rather than an explicitly masked tensor --
    possibly the custom op masks in place, or code was lost in extraction;
    confirm against the original source.
    """
    def forward(ctx, input, probability):
        # Compute the dropout mask via the custom kernel.
        mask = torch.ops.qbits_customop.dropout_fwd(input, probability)
        # Keep the mask for backward only when the input needs a gradient.
        if any(ctx.needs_input_grad[:1]):
            ctx.tensors = (mask,)
        else:
            ctx.tensors = (None,)
        return input
    def backward(ctx, grad_output):
        # probability never receives a gradient (second returned value None).
        (req_grad_input, _) = ctx.needs_input_grad
        mask = ctx.tensors[0]
        grad_input = None
        if req_grad_input:
            grad_input = torch.ops.qbits_customop.dropout_bwd(grad_output, mask)
        return (grad_input, None)
def saveFlags(path, flags):
    """Write *flags* (an iterable of strings) one per line to <path>/FLAGS.txt."""
    target = (path + '/FLAGS.txt')
    with open(target, 'w') as fh:
        fh.write('\n'.join(flags))
    print('FLAGS saved')
class ResNet_D(nn.Module):
    """Discriminator ResNet architecture (GAN discriminator).

    Maps a 3-channel image of side ``size`` to a single realness logit via an
    input conv, a stack of batchnorm-free residual blocks with average-pool
    downsampling, and a final linear layer.
    """
    def __init__(self, size=64, nfilter=64, nfilter_max=512, res_ratio=0.1):
        super().__init__()
        # Spatial side length at the end of the conv stack.
        s0 = self.s0 = 4
        nf = self.nf = nfilter
        nf_max = self.nf_max = nfilter_max
        # Number of 2x downsampling stages needed to reach s0.
        nlayers = int(np.log2((size / s0)))
        # Channel count entering the final linear layer (capped at nf_max).
        self.nf0 = min(nf_max, (nf * (2 ** nlayers)))
        nf0 = min(nf, nf_max)
        nf1 = min((nf * 2), nf_max)
        blocks = [ResNetBlock(nf0, nf0, bn=False, res_ratio=res_ratio), ResNetBlock(nf0, nf1, bn=False, res_ratio=res_ratio)]
        for i in range(1, (nlayers + 1)):
            # Each stage: halve resolution, then two residual blocks that
            # grow the channel count (doubling, capped at nf_max).
            nf0 = min((nf * (2 ** i)), nf_max)
            nf1 = min((nf * (2 ** (i + 1))), nf_max)
            blocks += [nn.AvgPool2d(3, stride=2, padding=1), ResNetBlock(nf0, nf0, bn=False, res_ratio=res_ratio), ResNetBlock(nf0, nf1, bn=False, res_ratio=res_ratio)]
        self.conv_img = nn.Conv2d(3, (1 * nf), 3, padding=1)
        self.relu = nn.LeakyReLU(0.2, inplace=True)
        self.resnet = nn.Sequential(*blocks)
        self.fc = nn.Linear(((self.nf0 * s0) * s0), 1)
    def forward(self, x):
        """Return a (batch, 1) realness score for input images ``x``."""
        batch_size = x.size(0)
        out = self.relu(self.conv_img(x))
        out = self.resnet(out)
        # Flatten the final (nf0, s0, s0) feature map for the linear head.
        out = out.view(batch_size, ((self.nf0 * self.s0) * self.s0))
        out = self.fc(out)
        return out
def generate_data_quad(rows):
    """Generate *rows* random quadratic coefficient triples (a, b, c) and, as
    targets, the absolute distance between the two real roots (0 whenever the
    discriminant is negative or a == 0).

    Returns (x, y): x is float32 of shape (rows, 3), y float32 of shape (rows,).
    """
    x_array = []
    y_array = []
    while (len(x_array) < rows):
        a = float(np.random.randint((- 10), 10))
        b = float(np.random.randint((- 10), 10))
        c = float(np.random.randint((- 10), 10))
        roots = [0, 0]
        try:
            disc_root = math.sqrt(((b * b) - ((4 * a) * c)))
            roots = [(((- b) + disc_root) / (2 * a)), (((- b) - disc_root) / (2 * a))]
        except (ValueError, ZeroDivisionError):
            # Negative discriminant (ValueError) or a == 0 (ZeroDivisionError):
            # keep the [0, 0] placeholder so the target is 0.
            pass
        x_array.append([a, b, c])
        y_array.append(abs((roots[0] - roots[1])))
    return (np.array(x_array, dtype=np.float32), np.array(y_array, dtype=np.float32))
class Segformer_b0_b1(nn.Module):
    """SegFormer-style segmentation network (B0/B1 capacity).

    Encoder: four OverlapPatchEmbed stages, each followed by a stack of
    spatial-reduction-attention Blocks and a LayerNorm, producing features at
    1/4, 1/8, 1/16 and 1/32 resolution.
    Decoder: project each stage to ``decoder_dim`` channels, upsample to the
    1/4-resolution grid, concatenate, fuse with a 1x1 conv, then classify.

    NOTE(review): forward() computes the classifier output into ``x`` but
    returns the fused feature map ``_c`` instead, discarding the prediction --
    confirm whether features or predictions are the intended output.
    """
    def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=20, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=True, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.1, norm_layer=partial(nn.LayerNorm, eps=1e-06), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], decoder_dim=256):
        super().__init__()
        self.num_classes = num_classes
        self.depths = depths
        # Four overlapping patch embeddings: stage 1 downsamples by 4,
        # each later stage by a further 2.
        self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_chans=in_chans, embed_dim=embed_dims[0])
        self.patch_embed2 = OverlapPatchEmbed(img_size=(img_size // 4), patch_size=3, stride=2, in_chans=embed_dims[0], embed_dim=embed_dims[1])
        self.patch_embed3 = OverlapPatchEmbed(img_size=(img_size // 8), patch_size=3, stride=2, in_chans=embed_dims[1], embed_dim=embed_dims[2])
        self.patch_embed4 = OverlapPatchEmbed(img_size=(img_size // 16), patch_size=3, stride=2, in_chans=embed_dims[2], embed_dim=embed_dims[3])
        # Stochastic-depth schedule: drop rate increases linearly per block.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        cur = 0
        self.block1 = nn.ModuleList([Block(dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[(cur + i)], norm_layer=norm_layer, sr_ratio=sr_ratios[0]) for i in range(depths[0])])
        self.norm1 = norm_layer(embed_dims[0])
        cur += depths[0]
        self.block2 = nn.ModuleList([Block(dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[(cur + i)], norm_layer=norm_layer, sr_ratio=sr_ratios[1]) for i in range(depths[1])])
        self.norm2 = norm_layer(embed_dims[1])
        cur += depths[1]
        self.block3 = nn.ModuleList([Block(dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[(cur + i)], norm_layer=norm_layer, sr_ratio=sr_ratios[2]) for i in range(depths[2])])
        self.norm3 = norm_layer(embed_dims[2])
        cur += depths[2]
        self.block4 = nn.ModuleList([Block(dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[(cur + i)], norm_layer=norm_layer, sr_ratio=sr_ratios[3]) for i in range(depths[3])])
        self.norm4 = norm_layer(embed_dims[3])
        # All-MLP decoder head: per-stage projections to a shared width.
        self.linear_c4 = LinearMLP(input_dim=embed_dims[3], embed_dim=decoder_dim)
        self.linear_c3 = LinearMLP(input_dim=embed_dims[2], embed_dim=decoder_dim)
        self.linear_c2 = LinearMLP(input_dim=embed_dims[1], embed_dim=decoder_dim)
        self.linear_c1 = LinearMLP(input_dim=embed_dims[0], embed_dim=decoder_dim)
        self.linear_fuse = nn.Conv2d((4 * decoder_dim), 64, 1)
        self.dropout = nn.Dropout2d(drop_rate)
        self.linear_pred = nn.Conv2d(64, num_classes, kernel_size=1)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # trunc-normal for linear layers, unit LayerNorm, He-style conv init.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if (isinstance(m, nn.Linear) and (m.bias is not None)):
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt((2.0 / fan_out)))
            if (m.bias is not None):
                m.bias.data.zero_()
    def reset_drop_path(self, drop_path_rate):
        """Recompute the per-block stochastic-depth rates in place."""
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
        cur = 0
        for i in range(self.depths[0]):
            self.block1[i].drop_path.drop_prob = dpr[(cur + i)]
        cur += self.depths[0]
        for i in range(self.depths[1]):
            self.block2[i].drop_path.drop_prob = dpr[(cur + i)]
        cur += self.depths[1]
        for i in range(self.depths[2]):
            self.block3[i].drop_path.drop_prob = dpr[(cur + i)]
        cur += self.depths[2]
        for i in range(self.depths[3]):
            self.block4[i].drop_path.drop_prob = dpr[(cur + i)]
    def freeze_patch_emb(self):
        # Freeze only the first patch embedding stage.
        self.patch_embed1.requires_grad = False
    # NOTE(review): the bare ".ignore" below looks like a mangled decorator
    # (probably "@torch.jit.ignore") -- it is syntactically invalid as
    # written; confirm against the original source.
    .ignore
    def no_weight_decay(self):
        # Parameter names excluded from weight decay by the training loop.
        return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'}
    def get_classifier(self):
        return self.head
    def reset_classifier(self, num_classes, global_pool=''):
        """Replace the classification head for a new number of classes."""
        self.num_classes = num_classes
        self.head = (nn.Linear(self.embed_dim, num_classes) if (num_classes > 0) else nn.Identity())
    def forward_features(self, x):
        """Run the 4-stage encoder; returns a list of 4 NCHW feature maps."""
        x = x[0]
        B = x.shape[0]
        outs = []
        (x, H, W) = self.patch_embed1(x)
        for (i, blk) in enumerate(self.block1):
            x = blk(x, H, W)
        x = self.norm1(x)
        # Tokens (B, H*W, C) -> feature map (B, C, H, W).
        x = x.reshape(B, H, W, (- 1)).permute(0, 3, 1, 2).contiguous()
        outs.append(x)
        (x, H, W) = self.patch_embed2(x)
        for (i, blk) in enumerate(self.block2):
            x = blk(x, H, W)
        x = self.norm2(x)
        x = x.reshape(B, H, W, (- 1)).permute(0, 3, 1, 2).contiguous()
        outs.append(x)
        (x, H, W) = self.patch_embed3(x)
        for (i, blk) in enumerate(self.block3):
            x = blk(x, H, W)
        x = self.norm3(x)
        x = x.reshape(B, H, W, (- 1)).permute(0, 3, 1, 2).contiguous()
        outs.append(x)
        (x, H, W) = self.patch_embed4(x)
        for (i, blk) in enumerate(self.block4):
            x = blk(x, H, W)
        x = self.norm4(x)
        x = x.reshape(B, H, W, (- 1)).permute(0, 3, 1, 2).contiguous()
        outs.append(x)
        return outs
    def forward(self, x):
        x = self.forward_features(x)
        (c1, c2, c3, c4) = x
        (n, _, h, w) = c4.shape
        (h_out, w_out) = (c1.size()[2], c1.size()[3])
        # Project each stage to decoder_dim and upsample to c1's resolution.
        _c4 = self.linear_c4(c4).permute(0, 2, 1).reshape(n, (- 1), c4.shape[2], c4.shape[3])
        _c4 = F.interpolate(_c4, size=c1.size()[2:], mode='bilinear', align_corners=False)
        _c3 = self.linear_c3(c3).permute(0, 2, 1).reshape(n, (- 1), c3.shape[2], c3.shape[3])
        _c3 = F.interpolate(_c3, size=c1.size()[2:], mode='bilinear', align_corners=False)
        _c2 = self.linear_c2(c2).permute(0, 2, 1).reshape(n, (- 1), c2.shape[2], c2.shape[3])
        _c2 = F.interpolate(_c2, size=c1.size()[2:], mode='bilinear', align_corners=False)
        _c1 = self.linear_c1(c1).permute(0, 2, 1).reshape(n, (- 1), c1.shape[2], c1.shape[3])
        _c = self.linear_fuse(torch.cat([_c4, _c3, _c2, _c1], dim=1))
        x = self.dropout(_c)
        # NOTE(review): prediction ``x`` is computed but ``_c`` is returned.
        x = self.linear_pred(x)
        return _c
def get_numpy_image(url_or_filepath):
    """Load an image as a numpy array from either a URL or a local file path.

    The argument is treated as a URL when it contains a URL scheme separator
    ('://') or 'www'; otherwise it is opened as a local file.
    """
    # BUG FIX: the original URL test contained a truncated string literal
    # (`(' in url_or_filepath)`), a syntax error; restored as a scheme check.
    if (('://' in url_or_filepath) or ('www' in url_or_filepath)):
        url = url_or_filepath
        response = requests.get(url)
        pim = PIL.Image.open(BytesIO(response.content))
    else:
        filepath = url_or_filepath
        pim = PIL.Image.open(filepath)
    nim = _pil_to_nparray(pim)
    return nim
def PHC_login(form, phcdb):
    """Handle PHC site login/sign-out from a CGI form.

    Legacy Python 2 code (uses the ``Cookie`` module and ``form.has_key``).

    Returns (error, Folder, Name_First, Status):
      error  -- 0 on success, 5 when no session cookie exists, 10 on
                sign-out, otherwise the code from ProcessName/phcdb.check
                or AccessFolder.
      Folder -- the user's folder id ('' when not logged in).
    """
    import Cookie
    error = 0
    Folder = ''
    Name_First = ''
    Status = None
    if form.has_key('Signout'):
        # Explicit sign-out: clear the session cookie, report code 10.
        error = 10
        UpdateCookie(form, '', '', error)
    elif form.has_key('phcaction'):
        # Fresh login attempt: validate credentials against the PHC database.
        (error, usermail, userpwd) = ProcessName(form)
        if (not error):
            (error, Folder, Name_First, Status) = phcdb.check(usermail, userpwd)
        UpdateCookie(form, Folder, Name_First, error)
    else:
        # No form action: try to restore the session from the cookie.
        (Folder, Name_First) = CookieUid()
        if (Folder == ''):
            error = 5
        else:
            Status = phcdb.check_userstatus(Folder)
    if (not error):
        # Final gate: verify the user's folder is accessible.
        error = AccessFolder(Folder)
    return (error, Folder, Name_First, Status)
def tee_log(file_name):
    """Open *file_name* for writing and return a ``logger(s)`` callable that
    sends *s* to the global ``log`` sink and also appends it to the file,
    newline-terminated and flushed. The handle stays open for the logger's
    lifetime."""
    handle = open(file_name, 'w+')
    def logger(s):
        log(s)
        handle.write(s)
        handle.write('\n')
        handle.flush()
    return logger
def change_default_args(**kwargs):
    """Decorator factory: subclass ``layer_class`` so the given keyword values
    become defaults, injected whenever the caller supplied the argument
    neither by keyword nor positionally."""
    def layer_wrapper(layer_class):
        class DefaultArgLayer(layer_class):
            def __init__(self, *args, **kw):
                # Map each named __init__ parameter to its positional index.
                pos_to_kw = get_pos_to_kw_map(layer_class.__init__)
                kw_to_pos = {kw: pos for (pos, kw) in pos_to_kw.items()}
                # Inject the default only when the name was not passed by
                # keyword and its position lies beyond the positional args.
                # NOTE(review): raises KeyError if a default is supplied for a
                # name that is not an __init__ parameter -- confirm intended.
                for (key, val) in kwargs.items():
                    if ((key not in kw) and (kw_to_pos[key] > len(args))):
                        kw[key] = val
                super().__init__(*args, **kw)
        return DefaultArgLayer
    return layer_wrapper
def test_interpolation_grad():
    """Numerical gradient check for pointnet2's three_interpolate op (CUDA).

    Builds a tiny (1, 2, 4) feature tensor and interpolates using fixed
    3-neighbor indices/weights; gradcheck compares the custom op's backward
    against finite differences with loose tolerances.
    """
    batch_size = 1
    feat_dim = 2
    m = 4
    feats = torch.randn(batch_size, feat_dim, m, requires_grad=True).float().cuda()
    def interpolate_func(inputs):
        # Fixed neighbor indices and (unnormalized) weights for determinism.
        idx = torch.from_numpy(np.array([[[0, 1, 2], [1, 2, 3]]])).int().cuda()
        weight = torch.from_numpy(np.array([[[1, 1, 1], [2, 2, 2]]])).float().cuda()
        interpolated_feats = pointnet2_utils.three_interpolate(inputs, idx, weight)
        return interpolated_feats
    assert gradcheck(interpolate_func, feats, atol=0.1, rtol=0.1)
class RMSELoss(nn.Module):
    """Root-mean-squared-error loss: sqrt(MSE(yhat, y) + eps).

    The small ``eps`` keeps the square root differentiable (finite gradient)
    when the MSE is exactly zero.
    """

    def __init__(self, eps=1e-06):
        super().__init__()
        self.mse = nn.MSELoss()
        self.eps = eps

    def forward(self, yhat, y):
        squared_error = self.mse(yhat, y)
        return torch.sqrt(squared_error + self.eps)
def _generate_common_dataloader(dataloader, framework, distributed=False):
    """Normalize a user-supplied dataloader.

    A neural_compressor DataLoader is rebuilt as the framework-specific
    dataloader (propagating the distributed flag); anything else is accepted
    as-is provided it is iterable, exposes batch_size, and distributed mode
    was not requested.
    """
    if isinstance(dataloader, DataLoader):
        # Rebuild with the framework-specific implementation, carrying over
        # every relevant setting from the generic loader.
        return DATALOADERS[framework](dataset=dataloader.dataset, batch_size=dataloader.batch_size, collate_fn=dataloader.collate_fn, last_batch=dataloader.last_batch, sampler=dataloader.sampler, batch_sampler=dataloader.batch_sampler, num_workers=dataloader.num_workers, pin_memory=dataloader.pin_memory, shuffle=dataloader.shuffle, distributed=bool((dataloader.distributed or distributed)))
    assert (hasattr(dataloader, '__iter__') and hasattr(dataloader, 'batch_size')), 'dataloader must implement __iter__ method and batch_size attribute'
    assert (not distributed), 'Please use neural_compressor.data.DataLoader to support distributed computing'
    return dataloader
def print_warning(s):
    """Print *s* as a timestamped WARN line wrapped in terminal warning colors."""
    print(f"{TerminalColors.WARNING}[{get_time()}] WARN {s}{TerminalColors.ENDC}")
def get_cuda_version() -> float:
    """Detect the installed CUDA toolkit version as a major.minor float.

    Resolution order: $CUDA_HOME (mirrored from $CUDA_PATH when absent),
    then ``version.txt`` inside it, then parsing ``nvcc -V`` output.
    Raises AssertionError when no CUDA home is set or the version is not in
    VALID_CUDA; RuntimeError when nvcc output cannot be parsed.
    """
    global VALID_CUDA  # read-only here; 'global' is redundant but harmless
    if (('CUDA_HOME' not in os.environ) and ('CUDA_PATH' in os.environ)):
        # Windows installers set CUDA_PATH; mirror it into CUDA_HOME.
        os.environ['CUDA_HOME'] = os.environ['CUDA_PATH']
    assert ('CUDA_HOME' in os.environ), 'Cannot find the $CUDA_HOME in the environments. Please manually install the CUDA >= 10.1, and set the $CUDA_HOME environment variable.'
    cuda_version_file = os.path.join(os.environ['CUDA_HOME'], 'version.txt')
    if os.path.exists(cuda_version_file):
        with open(cuda_version_file) as f:
            # version.txt format (older toolkits): "CUDA Version 10.1.243".
            version_str = f.readline().replace('\n', '').replace('\r', '')
            version = version_str.split(' ')[2]
            version = float('.'.join(version.split('.')[0:2]))
    else:
        # Newer toolkits drop version.txt; fall back to parsing `nvcc -V`.
        nvcc_out = subprocess.run('nvcc -V', shell=True, stdout=subprocess.PIPE)
        nvcc_str = nvcc_out.stdout.decode('utf-8')
        # NOTE(review): the '.' between the digit groups is unescaped, so it
        # matches any character -- works for "release 11.2," but is loose.
        nvcc_cuda = re.findall('[.]*([\\d]+.[\\d]+),[.]*', nvcc_str)
        if (len(nvcc_cuda) == 0):
            raise RuntimeError(f'nvcc -V error! {nvcc_str}')
        else:
            version = float(nvcc_cuda[0])
    assert (version in VALID_CUDA), f'CUDA Version {version} must in {VALID_CUDA}. Please manually install the CUDA meets the requirements.'
    print(f'Cuda version is {version}')
    return version
def process_record_dataset(dataset, is_training, batch_size, shuffle_buffer, parse_record_fn, dtype=None):
    """Assemble a standard tf.data input pipeline over raw records.

    Args:
        dataset: tf.data.Dataset of serialized records.
        is_training: when True, shuffle with `shuffle_buffer` and repeat
            indefinitely.
        batch_size: records per batch (also the raw-record prefetch size).
        shuffle_buffer: shuffle buffer size used during training.
        parse_record_fn: callable (value, is_training, dtype) -> parsed example.
        dtype: dtype forwarded to parse_record_fn; defaults to tf.float32.

    Returns:
        The batched, prefetched tf.data.Dataset.
    """
    if (dtype is None):
        dtype = tf.float32
    # Prefetch raw records so parsing overlaps with input I/O.
    dataset = dataset.prefetch(buffer_size=batch_size)
    if is_training:
        dataset = dataset.shuffle(buffer_size=shuffle_buffer)
        dataset = dataset.repeat()
    # Fused parse + batch for throughput.
    dataset = dataset.apply(tf.data.experimental.map_and_batch((lambda value: parse_record_fn(value, is_training, dtype)), batch_size=batch_size, num_parallel_batches=1))
    dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return dataset
class FlatNCE(nn.Module):
    """FlatNCE contrastive loss over two augmented views (SimCLR-style pairing).

    NOTE(review): forward() assigns ``loss`` on its last line but has no
    return statement, so it returns None -- a trailing ``return`` (and
    possibly the logits) appears to be missing; confirm against the original.
    """
    def __init__(self, temperature):
        # Temperature scaling applied to the similarity-difference logits.
        self.temperature = temperature
        super().__init__()
    def forward(self, z_i, z_j):
        batch_size = z_i.size(0)
        # Stack both views; sample k and k+batch_size form a positive pair.
        features = torch.cat([z_i, z_j], dim=0)
        labels = torch.cat([torch.arange(batch_size) for i in range(2)], dim=0)
        # labels[p, q] == 1 iff p and q share the same source sample.
        labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()
        features = F.normalize(features, dim=1)
        # Cosine similarities of all 2B samples against each other.
        similarity_matrix = torch.matmul(features, features.T)
        # Remove self-similarities from both labels and the matrix.
        mask = torch.eye(labels.shape[0], dtype=torch.bool)
        labels = labels[(~ mask)].view(labels.shape[0], (- 1))
        similarity_matrix = similarity_matrix[(~ mask)].view(similarity_matrix.shape[0], (- 1))
        positives = similarity_matrix[labels.bool()].view(labels.shape[0], (- 1))
        negatives = similarity_matrix[(~ labels.bool())].view(labels.shape[0], (- 1))
        labels = torch.zeros(positives.shape[0], dtype=torch.long)
        # FlatNCE logits: temperature-scaled negative-minus-positive margins.
        logits = ((negatives - positives) / self.temperature)
        clogits = torch.logsumexp(logits, dim=1, keepdim=True)
        # exp(clogits - detach(clogits)) evaluates to 1 but carries the
        # FlatNCE gradient through clogits.
        loss = torch.exp((clogits - clogits.detach()))
class Optimizer():
    """Coordinates an archive with several emitters via an ask/tell loop.

    NOTE(review): ``archive()`` and ``emitters()`` read like accessors whose
    ``@property`` decorators were stripped; confirm against the original.
    """
    def __init__(self, archive, emitters):
        # Emitters must be non-empty, unique objects sharing a solution dim.
        if (len(emitters) == 0):
            raise ValueError('Pass in at least one emitter to the optimizer.')
        emitter_ids = set((id(e) for e in emitters))
        if (len(emitter_ids) != len(emitters)):
            raise ValueError('Not all emitters passed in were unique (i.e. some emitters had the same id). If emitters were created with something like [EmitterClass(...)] * n, instead use [EmitterClass(...) for _ in range(n)] so that all emitters are unique instances.')
        self._solution_dim = emitters[0].solution_dim
        # NOTE(review): enumerate starts at 0 over emitters[1:], so the error
        # message below labels emitter idx+1 as "Emitter {idx}" -- confirm.
        for (idx, emitter) in enumerate(emitters[1:]):
            if (emitter.solution_dim != self._solution_dim):
                raise ValueError(f'All emitters must have the same solution dim, but Emitter {idx} has dimension {emitter.solution_dim}, while Emitter 0 has dimension {self._solution_dim}')
        self._archive = archive
        self._archive.initialize(self._solution_dim)
        self._emitters = emitters
        # Enforces strict ask() -> tell() alternation.
        self._asked = False
        self._solutions = []
        # Solutions produced by each emitter on the most recent ask().
        self._num_emitted = [None for _ in self._emitters]
    def archive(self):
        return self._archive
    def emitters(self):
        return self._emitters
    def ask(self, grad_estimate=False):
        """Collect candidate solutions from every emitter (concatenated)."""
        if self._asked:
            raise RuntimeError('ask() was called twice in a row.')
        self._asked = True
        self._solutions = []
        # Limit BLAS threads so emitters don't oversubscribe the CPU.
        with threadpool_limits(limits=1, user_api='blas'):
            for (i, emitter) in enumerate(self._emitters):
                emitter_sols = emitter.ask(grad_estimate=grad_estimate)
                self._solutions.append(emitter_sols)
                self._num_emitted[i] = len(emitter_sols)
        self._solutions = np.concatenate(self._solutions, axis=0)
        return self._solutions
    def tell(self, objective_values, behavior_values, jacobian=None, metadata=None):
        """Report evaluation results back, slicing them per emitter in the
        same order ask() produced the solutions."""
        if (not self._asked):
            raise RuntimeError('tell() was called without calling ask().')
        self._asked = False
        objective_values = np.asarray(objective_values)
        behavior_values = np.asarray(behavior_values)
        metadata = (np.empty(len(self._solutions), dtype=object) if (metadata is None) else np.asarray(metadata, dtype=object))
        with threadpool_limits(limits=1, user_api='blas'):
            pos = 0
            for (emitter, n) in zip(self._emitters, self._num_emitted):
                end = (pos + n)
                em_jacobian = (None if (jacobian is None) else jacobian[pos:end])
                emitter.tell(self._solutions[pos:end], objective_values[pos:end], behavior_values[pos:end], em_jacobian, metadata[pos:end])
                pos = end
class Pad(UnaryOpBase):
    """Symbolic padding operator: pads the trailing len(padding_list)/2
    dimensions of the input with (before, after) pad pairs, innermost
    dimension first."""
    num_var_param = _pad_num_var_param()
    in_dtypes = [(i,) for i in DTYPE_GEN_FLOATS]
    out_dtypes = [(i,) for i in DTYPE_GEN_FLOATS]
    def __str__(self) -> str:
        return f'{self.name()} (padding={list(self.padding_list)})'
    def __init__(self, padding_list, pad_t):
        super().__init__()
        # Flat list of pads: [before_0, after_0, before_1, after_1, ...],
        # applied from the last input dimension backwards.
        self.padding_list = padding_list
        # pad_t: padding-mode tag stored for the backend.
        self.extra_attrs['type'] = pad_t
        # Input/output rank must be at least the number of padded dims.
        self.inp_ranks = [rank_from((len(padding_list) // 2))]
        self.out_ranks = [rank_from((len(padding_list) // 2))]
        assert ((len(self.padding_list) % 2) == 0), f'padding_list must be even, got {self.padding_list}'
    def requires(self, input_shapes: List[AbsTensor]) -> List[Union[(z3.BoolRef, bool)]]:
        """SMT constraints: every padded dim stays strictly positive with each
        pad individually and with both; non-leading input dims are positive."""
        pad = self.padding_list
        isv = input_shapes[0].shape
        cons = []
        for i in range((len(pad) // 2)):
            # i-th pad pair targets dimension j, counted from the end.
            j = ((len(isv) - 1) - i)
            cons.append(nnsmith_gt(nnsmith_add(pad[(i * 2)], isv[j]), 0))
            cons.append(nnsmith_gt(nnsmith_add(pad[((i * 2) + 1)], isv[j]), 0))
            cons.append(nnsmith_gt(nnsmith_add(pad[((i * 2) + 1)], nnsmith_add(pad[(i * 2)], isv[j])), 0))
        for s in input_shapes[0].shape[1:]:
            cons.append(nnsmith_gt(s, 0))
        return cons
    def type_transfer(self, input_shapes: List[AbsTensor]) -> List[AbsTensor]:
        """Output shape: each padded dim grows by its (before + after) pads."""
        isv = input_shapes[0].shape
        pad = self.padding_list
        s = list(isv)
        for i in range((len(pad) // 2)):
            j = ((len(isv) - 1) - i)
            s[j] = nnsmith_add(nnsmith_add(s[j], pad[(i * 2)]), pad[((i * 2) + 1)])
        return [AbsTensor(s, input_shapes[0].dtype)]
    def deduct_inp_ranks_and_dtype(self, out_abs_tensor: List[AbsTensor]) -> List[Tuple[(int, DType)]]:
        # Padding preserves rank and dtype.
        return [(out_abs_tensor[0].ndims, out_abs_tensor[0].dtype)]
class GMNlayer(MessagePassing):
    """Graph Matching Network layer: message passing within each graph plus a
    cross-graph attention term, fused into node states by a shared GRU cell.

    NOTE(review): the two long bare string literals in this class are dead
    code (commented-out implementations kept as strings); left untouched.
    """
    def __init__(self, in_channels, out_channels, device):
        super(GMNlayer, self).__init__(aggr='add')
        self.device = device
        self.out_channels = out_channels
        # Message MLP over [x_i, x_j, edge_weight].
        self.fmessage = nn.Linear((3 * in_channels), out_channels)
        # GRU cell that fuses [message, cross-graph diff] into the node state.
        self.fnode = torch.nn.GRUCell((2 * out_channels), out_channels, bias=True)
        # Mirror MessagePassing's argument introspection for match().
        self.__match_args__ = getargspec(self.match)[0][1:]
        self.__special_match_args__ = [(i, arg) for (i, arg) in enumerate(self.__match_args__) if (arg in special_args)]
        self.__match_args__ = [arg for arg in self.__match_args__ if (arg not in special_args)]
    'def propagate(self, edge_index, size=None, **kwargs):\n    size = [None, None] if size is None else list(size)\n    assert len(size) == 2\n\n    i, j = (0, 1) if self.flow == \'target_to_source\' else (1, 0)\n    ij = {"_i": i, "_j": j}\n\n    message_args = []\n    for arg in self.__message_args__:\n        #print(arg)\n        if arg[-2:] in ij.keys():\n            tmp = kwargs.get(arg[:-2], None)\n            if tmp is None:  # pragma: no cover\n                message_args.append(tmp)\n            else:\n                idx = ij[arg[-2:]]\n                if isinstance(tmp, tuple) or isinstance(tmp, list):\n                    assert len(tmp) == 2\n                    if tmp[1 - idx] is not None:\n                        if size[1 - idx] is None:\n                            size[1 - idx] = tmp[1 - idx].size(0)\n                        if size[1 - idx] != tmp[1 - idx].size(0):\n                            raise ValueError(__size_error_msg__)\n                    tmp = tmp[idx]\n\n                if size[idx] is None:\n                    size[idx] = tmp.size(0)\n                if size[idx] != tmp.size(0):\n                    raise ValueError(__size_error_msg__)\n\n                tmp = torch.index_select(tmp, 0, edge_index[idx])\n                message_args.append(tmp)\n        else:\n            message_args.append(kwargs.get(arg, None))\n\n    size[0] = size[1] if size[0] is None else size[0]\n    size[1] = size[0] if size[1] is None else size[1]\n\n    kwargs[\'edge_index\'] = edge_index\n    kwargs[\'size\'] = size\n\n    for (idx, arg) in self.__special_args__:\n        if arg[-2:] in ij.keys():\n            message_args.insert(idx, kwargs[arg[:-2]][ij[arg[-2:]]])\n        else:\n            message_args.insert(idx, kwargs[arg])\n\n    update_args = [kwargs[arg] for arg in self.__update_args__]\n    out = self.message(*message_args)\n    out = scatter_(self.aggr, out, edge_index[i], dim_size=size[i])\n    #print(out.size())\n    out = self.update(out, *update_args)\n    return out'
    def propagate_match(self, edge_index, size=None, **kwargs):
        """Variant of MessagePassing.propagate that routes through match()
        instead of message(), gathering/validating arguments the same way."""
        size = ([None, None] if (size is None) else list(size))
        assert (len(size) == 2)
        (i, j) = ((0, 1) if (self.flow == 'target_to_source') else (1, 0))
        ij = {'_i': i, '_j': j}
        match_args = []
        for arg in self.__match_args__:
            # Arguments suffixed _i/_j are gathered per edge endpoint.
            if (arg[(- 2):] in ij.keys()):
                tmp = kwargs.get(arg[:(- 2)], None)
                if (tmp is None):
                    match_args.append(tmp)
                else:
                    idx = ij[arg[(- 2):]]
                    if (isinstance(tmp, tuple) or isinstance(tmp, list)):
                        assert (len(tmp) == 2)
                        if (tmp[(1 - idx)] is not None):
                            if (size[(1 - idx)] is None):
                                size[(1 - idx)] = tmp[(1 - idx)].size(0)
                            if (size[(1 - idx)] != tmp[(1 - idx)].size(0)):
                                raise ValueError(__size_error_msg__)
                        tmp = tmp[idx]
                    if (size[idx] is None):
                        size[idx] = tmp.size(0)
                    if (size[idx] != tmp.size(0)):
                        raise ValueError(__size_error_msg__)
                    tmp = torch.index_select(tmp, 0, edge_index[idx])
                    match_args.append(tmp)
            else:
                match_args.append(kwargs.get(arg, None))
        size[0] = (size[1] if (size[0] is None) else size[0])
        size[1] = (size[0] if (size[1] is None) else size[1])
        kwargs['edge_index'] = edge_index
        kwargs['size'] = size
        for (idx, arg) in self.__special_match_args__:
            if (arg[(- 2):] in ij.keys()):
                match_args.insert(idx, kwargs[arg[:(- 2)]][ij[arg[(- 2):]]])
            else:
                match_args.insert(idx, kwargs[arg])
        update_args = [kwargs[arg] for arg in self.__update_args__]
        out_attn = self.match(*match_args)
        out_attn = scatter_(self.aggr, out_attn, edge_index[i], dim_size=size[i])
        out_attn = self.update(out_attn, *update_args)
        return out_attn
    def forward(self, x1, x2, edge_index1, edge_index2, edge_weight1, edge_weight2, mode='train'):
        """Update node states of both graphs; returns (h1, h2)."""
        # Intra-graph messages for each graph separately.
        m1 = self.propagate(edge_index1, size=(x1.size(0), x1.size(0)), x=x1, edge_weight=edge_weight1)
        m2 = self.propagate(edge_index2, size=(x2.size(0), x2.size(0)), x=x2, edge_weight=edge_weight2)
        # Cross-graph soft attention between all node pairs.
        scores = torch.mm(x1, x2.t())
        attn_1 = F.softmax(scores, dim=1)
        attn_2 = F.softmax(scores, dim=0).t()
        attnsum_1 = torch.mm(attn_1, x2)
        attnsum_2 = torch.mm(attn_2, x1)
        # Difference between each node and its attention-weighted counterpart.
        u1 = (x1 - attnsum_1)
        u2 = (x2 - attnsum_2)
        m1 = torch.cat([m1, u1], dim=1)
        h1 = self.fnode(m1, x1)
        m2 = torch.cat([m2, u2], dim=1)
        h2 = self.fnode(m2, x2)
        return (h1, h2)
    def message(self, x_i, x_j, edge_index, size, edge_weight=None):
        # NOTE(review): both branches compute the same expression; the if only
        # substitutes an all-ones edge_weight when none was provided.
        if (type(edge_weight) == type(None)):
            edge_weight = torch.ones(x_i.size(0), x_i.size(1)).to(self.device)
            m = F.relu(self.fmessage(torch.cat([x_i, x_j, edge_weight], dim=1)))
        else:
            m = F.relu(self.fmessage(torch.cat([x_i, x_j, edge_weight], dim=1)))
        return m
    def match(self, edge_index_i, x_i, x_j, size_i):
        # Intentionally a no-op here; a previous implementation is preserved
        # in the string literal below.
        return
    'def match(self, edge_index_i, x_i, x_j, size_i):\n    #x_j = x_j.view(-1, 1, self.out_channels)\n    #alpha = torch.dot(x_i, x_j)\n    #print(edge_index_i.size())\n    #print(x_i.size(),x_j.size())\n    alpha=torch.sum(x_i*x_j, dim=1)\n    #alpha=torch.bmm(x_i.unsqueeze(1), x_j.unsqueeze(2))\n    #print(alpha.size())\n    size_i=x_i.size(0)\n    alpha = softmax(alpha, edge_index_i, size_i)\n    #print(alpha.size())\n    c = torch.ones(A, B) * 2\n    v = torch.randn(A, B, C)\n    print(c)\n    print(v)\n    print(c[:,:, None].size())\n    d = c[:,:, None] * v\n    return alpha[:,None]*x_j\n    #return x_j* alpha.view(-1, 1, 1)\n    #return (x_i-x_j)* alpha.view(-1, 1, 1)'
    def update(self, aggr_out):
        # Identity update: aggregation result is the new state contribution.
        return aggr_out
class FitInfo():
def __init__(self, guesses_dict):
self.fit_param_names = []
self.all_params = dict()
for key in guesses_dict:
self.all_params[key] = _Param(guesses_dict[key])
def add_uniform_fit_param(self, name, low_lim, high_lim, low_guess=None, high_guess=None):
if (name in self.fit_param_names):
raise ValueError('Already fitting for {0}'.format(name))
if (low_guess is None):
low_guess = low_lim
if (high_guess is None):
high_guess = high_lim
best_guess = self.all_params[name].best_guess
self.fit_param_names.append(name)
self.all_params[name] = _UniformParam(best_guess, low_lim, high_lim, low_guess, high_guess)
def add_gaussian_fit_param(self, name, std, low_guess=None, high_guess=None):
if (name in self.fit_param_names):
raise ValueError('Already fitting for {0}'.format(name))
mean = self.all_params[name].best_guess
if (low_guess is None):
low_guess = (mean - (2 * std))
if (high_guess is None):
high_guess = (mean + (2 * std))
self.fit_param_names.append(name)
self.all_params[name] = _GaussianParam(mean, std, low_guess, high_guess)
def _interpret_param_array(self, array):
if (len(array) != len(self.fit_param_names)):
raise ValueException('Fit array invalid')
result = dict()
for (i, key) in enumerate(self.fit_param_names):
result[key] = array[i]
for key in self.all_params:
if (key not in result):
result[key] = self.all_params[key].best_guess
return result
def _within_limits(self, array):
if (len(array) != len(self.fit_param_names)):
raise ValueException('Fit array invalid')
for (i, key) in enumerate(self.fit_param_names):
if (not self.all_params[key].within_limits(array[i])):
return False
return True
def _generate_rand_param_arrays(self, num_arrays):
result = []
for i in range(num_arrays):
row = []
for name in self.fit_param_names:
if (i == 0):
row.append(self.all_params[name].best_guess)
else:
row.append(self.all_params[name].get_random_value())
result.append(row)
return np.array(result)
def _get(self, name):
return self.all_params[name].best_guess
def _get_num_fit_params(self):
return len(self.fit_param_names)
def _from_unit_interval(self, index, u):
name = self.fit_param_names[index]
return self.all_params[name].from_unit_interval(u)
def _ln_prior(self, array):
    """Return the total log-prior of *array* (one value per fit parameter)."""
    return sum(
        self.all_params[name].ln_prior(value)
        for name, value in zip(self.fit_param_names, array)
    )
def __repr__(self):
    """Summarize which parameters are fit and the full parameter table."""
    return f'Params to fit: {self.fit_param_names}; all params: {self.all_params}'
def final():
    """Return (source key, target key) rename pairs for the classifier head."""
    return [
        ('layernorm.weight', 'norm.weight'),
        ('layernorm.bias', 'norm.bias'),
        ('classifier.weight', 'head.weight'),
        ('classifier.bias', 'head.bias'),
    ]
class DownSamplingBlock(nn.Module):
    """Downsample by 2x with a strided 3x3 conv, optionally concatenating a
    max-pooled copy of the input so the output has exactly ``nOut`` channels.

    When nIn < nOut the conv only produces the missing (nOut - nIn) channels
    and the pooled input supplies the rest; otherwise the conv produces all
    nOut channels on its own.
    """

    def __init__(self, nIn, nOut):
        super().__init__()
        self.nIn = nIn
        self.nOut = nOut
        # Channels the conv must produce (remainder comes from the pooled input).
        conv_channels = nOut - nIn if nIn < nOut else nOut
        self.conv3x3 = Conv(nIn, conv_channels, kSize=3, stride=2, padding=1)
        self.max_pool = nn.MaxPool2d(2, stride=2)
        self.bn_prelu = BNPReLU(nOut)

    def forward(self, input):
        out = self.conv3x3(input)
        if self.nIn < self.nOut:
            out = torch.cat([out, self.max_pool(input)], 1)
        return self.bn_prelu(out)
def dot_product_attention(q, k, v, bias, dropout_rate=0.0, summaries=False, image_shapes=None, name=None):
    """Unscaled dot-product attention: softmax(q @ k^T + bias) @ v.

    Args:
        q, k, v: query/key/value tensors with matching inner dimensions.
        bias: optional additive attention bias (e.g. padding/causal mask).
        dropout_rate: probability of dropping an attention weight.
        summaries: if True (and not reusing variables), emit an image summary.
        image_shapes: forwarded to attention_image_summary.
        name: optional variable-scope name.
    """
    with tf.variable_scope(name, default_name='dot_product_attention', values=[q, k, v]):
        scores = tf.matmul(q, k, transpose_b=True)
        if bias is not None:
            scores += bias
        weights = tf.nn.softmax(scores, name='attention_weights')
        # tf.nn.dropout takes a keep probability in TF1.
        keep_prob = 1.0 - dropout_rate
        weights = tf.nn.dropout(weights, keep_prob)
        if summaries and not tf.get_variable_scope().reuse:
            attention_image_summary(weights, image_shapes)
        return tf.matmul(weights, v)
def get_frame_info(level=2):
    """Return a "<filename>:<lineno>: " prefix for the frame *level* entries up the stack."""
    frame_record = inspect.stack()[level]
    frame_info = inspect.getframeinfo(frame_record[0])
    return f'{frame_info.filename}:{frame_info.lineno}: '
def add_parser_params(parser):
    """Register all segmentation-training CLI options and return parsed args.

    Adds checkpoint/model/dataset/optimizer/loss/distributed options to
    *parser*, calls ``parse_known_args`` (unknown flags are ignored), then
    post-processes the raw values: GPU ids, world size, sync-BN, per-backbone
    weight decay / BN momentum / LR multiplier, and per-dataset slow-start steps.

    Returns:
        argparse.Namespace: the fully post-processed arguments.

    Raises:
        ValueError: if --gpu_ids is not a comma-separated list of integers.
        NotImplementedError: for an unknown seg_model or dataset.
    """
    # --- checkpointing / output locations ---
    parser.add_argument('--resume', type=str, default=None, help='put the path to resuming file if needed')
    parser.add_argument('--checkname', type=str, default=None, help='the name of the checkpoint.')
    parser.add_argument('--save_ckpt_steps', type=int, default=500, help='save checkpoints per save_ckpt_steps')
    parser.add_argument('--max_ckpt_nums', type=int, default=15, help='the max numbers of checkpoints')
    parser.add_argument('--model_dir', type=str, default='/home/zhaoshuai/models/deeplabv3_cbl_2/', help='Base directory for the model.')
    parser.add_argument('--output_dir', type=str, default='/home/zhaoshuai/models/deeplabv3_cbl_2/', help='output directory of model.')
    # --- model architecture ---
    parser.add_argument('--seg_model', type=str, default='deeplabv3', choices=['deeplabv3', 'deeplabv3+', 'pspnet'], help='The segmentation model.')
    parser.add_argument('--backbone', type=str, default='resnet101', choices=['resnet50', 'resnet101', 'resnet152', 'resnet50_beta', 'resnet101_beta', 'resnet152_beta'], help='backbone name (default: resnet101)')
    parser.add_argument('--out_stride', type=int, default=16, help='network output stride (default: 16)')
    # --- batching ---
    parser.add_argument('--batch_size', type=int, default=16, metavar='N', help='input batch size for training (default: auto)')
    parser.add_argument('--accumulation_steps', type=int, default=1, metavar='N', help='Accumulation steps when calculate the gradients when training')
    parser.add_argument('--test_batch_size', type=int, default=None, metavar='N', help='input batch size for testing (default: auto)')
    # --- dataset / input pipeline ---
    parser.add_argument('--dataset', type=str, default='pascal', choices=['pascal', 'coco', 'cityscapes', 'camvid'], help='dataset name (default: pascal)')
    parser.add_argument('--train_split', type=str, default='train', choices=['train', 'trainaug', 'trainval', 'val', 'test'], help='training set name (default: train)')
    parser.add_argument('--data_dir', type=str, default='/dataset', help='Path to the directory containing the PASCAL VOC data.')
    parser.add_argument('--use_sbd', action='store_true', default=False, help='whether to use SBD dataset (default: True)')
    parser.add_argument('--workers', type=int, default=8, metavar='N', help='dataloader threads')
    parser.add_argument('--base_size', type=int, default=513, help='base image size')
    parser.add_argument('--crop_size', type=int, default=513, help='crop image size')
    # --- batch norm ---
    parser.add_argument('--sync_bn', type=bool, default=None, help='whether to use sync bn (default: auto)')
    parser.add_argument('--freeze_bn', type=bool, default=False, help='whether to freeze bn parameters (default: False)')
    parser.add_argument('--bn_mom', type=float, default=0.1, metavar='M', help='momentum (default: 0.1) for running mean and var of batch normalization')
    # --- schedule / optimizer ---
    parser.add_argument('--epochs', type=int, default=46, metavar='N', help='Number of training epochs: For 30K iteration with batch size 6, train_epoch = 17.01 (= 30K * 6 / 10,582). For 30K iteration with batch size 8, train_epoch = 22.68 (= 30K * 8 / 10,582). For 30K iteration with batch size 10, train_epoch = 25.52 (= 30K * 10 / 10,582). For 30K iteration with batch size 11, train_epoch = 31.19 (= 30K * 11 / 10,582). For 30K iteration with batch size 12, train_epoch = 34.02 (= 30K * 12 / 10,582). For 30K iteration with batch size 14, train_epoch = 39.69 (= 30K * 14 / 10,582). For 30K iteration with batch size 15, train_epoch = 42.53 (= 30K * 15 / 10,582). For 30K iteration with batch size 16, train_epoch = 45.36 (= 30K * 16 / 10,582).')
    parser.add_argument('--start_epoch', type=int, default=0, metavar='N', help='start epochs (default:0)')
    parser.add_argument('--init_global_step', type=int, default=0, help='Initial global step for controlling learning rate when fine-tuning model.')
    parser.add_argument('--use_balanced_weights', action='store_true', default=False, help='whether to use balanced weights (default: False)')
    parser.add_argument('--init_lr', type=float, default=0.007, help='learning rate (default: auto)')
    parser.add_argument('--lr_multiplier', type=float, default=1.0, help='Learning rate multiplier for the unpretrained model.')
    parser.add_argument('--slow_start_lr', type=float, default=0.0001, help='Learning rate employed during slow start.')
    parser.add_argument('--slow_start_steps', type=int, default=0, help='Training model with small learning rate for few steps.')
    parser.add_argument('--lr_scheduler', type=str, default='poly', choices=['poly', 'step', 'cos'], help='lr scheduler mode: (default: poly)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='momentum (default: 0.9)')
    parser.add_argument('--weight_decay', type=float, default=0.0001, metavar='M', help='w-decay (default: 1e-4)')
    parser.add_argument('--nesterov', action='store_true', default=False, help='whether use nesterov (default: False)')
    # --- device / seeding ---
    parser.add_argument('--no_cuda', action='store_true', default=False, help='disables CUDA training')
    parser.add_argument('--gpu_ids', type=str, default='0', help='use which gpu to train, must be a comma-separated list of integers only (default=0)')
    parser.add_argument('--main_gpu', type=int, default=0, help='The main gpu')
    parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
    # --- fine-tuning / evaluation ---
    parser.add_argument('--ft', action='store_true', default=False, help='finetuning on a different dataset')
    parser.add_argument('--eval_interval', type=int, default=2, help='evaluuation interval (default: 2)')
    parser.add_argument('--no_val', action='store_true', default=False, help='skip validation during training')
    # --- loss (RMI / CRF) options ---
    parser.add_argument('--loss_type', type=int, default=0, help='The loss type used.')
    parser.add_argument('--loss_weight_lambda', type=float, default=0.5, help='The realtive weight factor for the loss.')
    parser.add_argument('--proc_name', type=str, default='DeepLabv3', help='The name of the process.')
    parser.add_argument('--rmi_pool_way', type=int, default=1, help='The pool way when calculate RMI loss, 1 - avg pool, 0 - max pool')
    parser.add_argument('--rmi_pool_size', type=int, default=2, help='The pool size of the pool operation before calculate RMI loss')
    parser.add_argument('--rmi_pool_stride', type=int, default=2, help='The pool stride of the pool operation before calculate RMI loss')
    parser.add_argument('--rmi_radius', type=int, default=3, help='The square radius of rmi [1, 3, 5, 7], they have a center')
    parser.add_argument('--crf_iter_steps', type=int, default=1, help='The iter steps of the crf')
    # --- distributed training ---
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--world_size', default=(- 1), type=int, help='number of nodes for distributed training')
    parser.add_argument('--dist_backend', default='nccl', type=str, help='distributed backend')
    parser.add_argument('--multiprocessing_distributed', action='store_true', help='Use multi-processing distributed training to launch N processes per node, which has N GPUs. This is the fastest way to use PyTorch for either single node or multi node data parallel training')
    # parse_known_args: silently ignore flags this script does not define.
    (args, unparsed) = parser.parse_known_args()
    # NOTE(review): the RMI pool stride is forced equal to the pool size here,
    # overriding whatever was passed via --rmi_pool_stride.
    args.rmi_pool_stride = args.rmi_pool_size
    args.cuda = ((not args.no_cuda) and torch.cuda.is_available())
    if args.cuda:
        try:
            args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
            # NOTE(review): remaps the requested ids to 0..N-1, keeping only the
            # count — presumably relies on CUDA_VISIBLE_DEVICES for selection; confirm.
            args.gpu_ids = [i for i in range(0, len(args.gpu_ids))]
        except ValueError:
            raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')
    args.world_size = int(len(args.gpu_ids))
    args.distributed = ((args.world_size > 1) or args.multiprocessing_distributed)
    if (args.sync_bn is None):
        # Auto-enable sync BN only for multi-GPU CUDA training.
        args.sync_bn = (True if (args.cuda and (len(args.gpu_ids) > 1)) else False)
    # Dead branch in practice: --epochs defaults to 46, so it is never None.
    if (args.epochs is None):
        epoches = {'coco': 30, 'cityscapes': 200, 'pascal': 46}
        args.epochs = epoches[args.dataset.lower()]
    assert (args.accumulation_steps in [1, 2, 4])
    assert (args.batch_size in [4, 8, 12, 16, 32, 36, 48, 64])
    # Per-step batch size shrinks when gradients are accumulated.
    args.batch_size = (args.batch_size // args.accumulation_steps)
    if (args.test_batch_size is None):
        args.test_batch_size = args.batch_size
    # Dead branch in practice: --init_lr defaults to 0.007, so it is never None.
    if (args.init_lr is None):
        lrs = {'coco': 0.1, 'cityscapes': 0.01, 'pascal': 0.007}
        args.init_lr = ((lrs[args.dataset.lower()] / (4 * len(args.gpu_ids))) * args.batch_size)
    if (args.checkname is None):
        args.checkname = (str(args.seg_model) + str(args.backbone))
    # Backbone-specific regularization / BN momentum / head LR multiplier.
    if (args.backbone in ['resnet101']):
        args.weight_decay = 0.0001
        args.bn_mom = 0.05
        if (args.seg_model == 'deeplabv3'):
            args.lr_multiplier = 10.0
        elif (args.seg_model == 'deeplabv3+'):
            args.lr_multiplier = 5.0
        elif (args.seg_model == 'pspnet'):
            args.lr_multiplier = 10.0
        else:
            raise NotImplementedError
    else:
        args.weight_decay = 4e-05
        args.bn_mom = 0.0003
    # Dataset-specific warm-up (slow-start) schedule.
    if ('pascal' in args.dataset):
        args.slow_start_steps = 1500
    elif ('cityscapes' in args.dataset):
        args.slow_start_steps = 3000
    elif ('camvid' in args.dataset):
        args.slow_start_steps = 300
        args.init_lr = 0.025
        args.lr_multiplier = 10.0
    else:
        raise NotImplementedError
    return args
def convert_file_size_to_int(size: Union[(int, str)]):
    """Convert a human-readable size ("5GB", "3MiB", "2Kb", or a plain int) to bytes.

    Binary suffixes (GiB/MiB/KiB) use powers of 2; decimal suffixes
    (GB/MB/KB) use powers of 10. A trailing lowercase 'b' on a decimal
    suffix is interpreted as bits and divided by 8.

    Raises:
        ValueError: if the string does not end in a recognized unit.
    """
    if isinstance(size, int):
        return size
    upper = size.upper()
    # Binary (IEC) units first so 'GIB' is not mistaken for 'GB', etc.
    for suffix, factor in (('GIB', 2 ** 30), ('MIB', 2 ** 20), ('KIB', 2 ** 10)):
        if upper.endswith(suffix):
            return int(size[:(- 3)]) * factor
    for suffix, factor in (('GB', 10 ** 9), ('MB', 10 ** 6), ('KB', 10 ** 3)):
        if upper.endswith(suffix):
            total = int(size[:(- 2)]) * factor
            # Lowercase final 'b' means bits, not bytes.
            return (total // 8) if size.endswith('b') else total
    raise ValueError("`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.")
def main():
    """Command-line entry point: parse args and launch the Chainforge server."""
    parser = argparse.ArgumentParser(description='Chainforge command line tool')
    # dest='serve' => args.serve holds the chosen subcommand name (or None).
    subparsers = parser.add_subparsers(dest='serve')
    serve_parser = subparsers.add_parser('serve', help='Start Chainforge server')
    serve_parser.add_argument('--port', help='The port to run the server on. Defaults to 8000.', type=int, default=8000, nargs='?')
    serve_parser.add_argument('--host', help="The host to run the server on. Defaults to 'localhost'.", type=str, default='localhost', nargs='?')
    args = parser.parse_args()
    if not args.serve:
        # No subcommand given: show usage and exit cleanly.
        parser.print_help()
        exit(0)
    port = args.port or 8000
    host = args.host or 'localhost'
    print(f'Serving Flask server on {host} on port {port}...')
    run_server(host=host, port=port, cmd_args=args)
def plot_feature(data, label=None, y_range=None, new_fig=True, fig=None):
    """Plot a 1-D feature series against its sample index.

    Args:
        data: 1-D array-like of values to plot.
        label: optional legend label.
        y_range: optional (ymin, ymax) for the y axis.
        new_fig: when True, create a fresh figure and close it before returning
            (so it is not shown interactively but can still be saved).
        fig: existing figure to return when new_fig is False.
    """
    if new_fig:
        fig = plt.figure()
    axes = plt.gca()
    if y_range is not None:
        axes.set_ylim(y_range)
    n_points = np.shape(data)[0]
    axes.plot(np.arange(n_points), data, label=label)
    if label is not None:
        axes.legend()
    if new_fig:
        plt.close()
    return fig
def log_every_n(lvl, msg, n=1, *, name=None):
    """Log *msg* at level *lvl* only on every n-th call from the same call site.

    The first call from a given site always logs (counter % n == 1 after the
    first increment); n == 1 logs every time.
    """
    caller_module, key = _find_caller()
    _LOG_COUNTER[key] += 1
    should_log = (n == 1) or (_LOG_COUNTER[key] % n == 1)
    if should_log:
        target_logger = logging.getLogger(name or caller_module)
        target_logger.log(lvl, msg)
def add_args(parser):
    """Register all REDQ training hyperparameter options on *parser*.

    Mutates *parser* in place (returns None). Covers schedule, optimizer,
    replay buffer, evaluation, logging, and REDQ ensemble settings
    (G = critic_updates_per_step, M = random_ensemble_size,
    N = critic_ensemble_size, per the REDQ paper's notation).
    """
    # Fix: corrected user-facing typos in several help strings
    # ('repaly' -> 'replay', 'coefficeint' -> 'coefficient', 'it the' -> 'is the').
    parser.add_argument('--num_steps', type=int, default=(10 ** 6), help='Number of steps in training')
    parser.add_argument('--transitions_per_step', type=int, default=1, help='env transitions per training step. Defaults to 1, but will need to be set higher for replay ratios < 1')
    parser.add_argument('--max_episode_steps', type=int, default=100000, help='maximum steps per episode')
    parser.add_argument('--batch_size', type=int, default=512, help='training batch size')
    parser.add_argument('--tau', type=float, default=0.005, help='for model parameter % update')
    parser.add_argument('--actor_lr', type=float, default=0.0003, help='actor learning rate')
    parser.add_argument('--critic_lr', type=float, default=0.0003, help='critic learning rate')
    parser.add_argument('--gamma', type=float, default=0.99, help='gamma, the discount factor')
    parser.add_argument('--init_alpha', type=float, default=0.1, help='initial entropy regularization coefficient.')
    parser.add_argument('--alpha_lr', type=float, default=0.0001, help='alpha (entropy regularization coefficient) learning rate')
    parser.add_argument('--buffer_size', type=int, default=1000000, help='replay buffer size')
    parser.add_argument('--eval_interval', type=int, default=5000, help='how often to test the agent without exploration (in episodes)')
    parser.add_argument('--eval_episodes', type=int, default=10, help='how many episodes to run for when testing')
    parser.add_argument('--warmup_steps', type=int, default=1000, help='warmup length, in steps')
    parser.add_argument('--render', action='store_true', help='flag to enable env rendering during training')
    parser.add_argument('--actor_clip', type=float, default=None, help='gradient clipping for actor updates')
    parser.add_argument('--critic_clip', type=float, default=None, help='gradient clipping for critic updates')
    parser.add_argument('--name', type=str, default='redq_run', help='dir name for saves')
    parser.add_argument('--actor_l2', type=float, default=0.0, help='L2 regularization coeff for actor network')
    parser.add_argument('--critic_l2', type=float, default=0.0, help='L2 regularization coeff for critic network')
    parser.add_argument('--target_delay', type=int, default=2, help='How many training steps to go between target network updates')
    parser.add_argument('--save_interval', type=int, default=100000, help='How many steps to go between saving the agent params to disk')
    parser.add_argument('--verbosity', type=int, default=1, help='verbosity > 0 displays a progress bar during training')
    parser.add_argument('--critic_updates_per_step', type=int, default=20, help='how many critic gradient updates to make per training step. The REDQ paper calls this variable G.')
    parser.add_argument('--actor_updates_per_step', type=int, default=1, help='how many actor gradient updates to make per training step')
    parser.add_argument('--prioritized_replay', action='store_true', help='flag that enables use of prioritized experience replay')
    parser.add_argument('--skip_save_to_disk', action='store_true', help='flag to skip saving agent params to disk during training')
    parser.add_argument('--skip_log_to_disk', action='store_true', help='flag to skip saving agent performance logs to disk during training')
    parser.add_argument('--log_std_low', type=float, default=(- 10), help='Lower bound for log std of action distribution.')
    parser.add_argument('--log_std_high', type=float, default=2, help='Upper bound for log std of action distribution.')
    parser.add_argument('--random_ensemble_size', type=int, default=2, help='How many random critic networks to use per TD target computation. The REDQ paper calls this variable M')
    parser.add_argument('--critic_ensemble_size', type=int, default=10, help='How many critic networks to sample from on each TD target computation. This is the total size of the critic ensemble. The REDQ paper calls this variable N')
# Fix: the four lines above the function were bare tuples/strings — the
# `@patch(...)` decorators had been stripped, leaving the file unparseable
# (`('subprocess.getoutput', return_value='127.0.0.1')` is a SyntaxError as a
# plain expression). Restored as unittest.mock.patch decorators; the
# bottom-most patch supplies the FIRST mock argument, matching the signature.
from unittest.mock import patch


@patch('torch.cuda.device_count', return_value=1)
@patch('torch.cuda.set_device')
@patch('torch.distributed.init_process_group')
@patch('subprocess.getoutput', return_value='127.0.0.1')
def test_init_dist(mock_getoutput, mock_dist_init, mock_set_device, mock_device_count):
    """Exercise init_dist('slurm'): env-var setup, device selection, and that
    an explicit port sticks across subsequent calls."""
    with pytest.raises(ValueError):
        init_dist('invaliad_launcher')
    os.environ['SLURM_PROCID'] = '0'
    os.environ['SLURM_NTASKS'] = '1'
    os.environ['SLURM_NODELIST'] = '[0]'
    # Default port.
    init_dist('slurm')
    assert (os.environ['MASTER_PORT'] == '29500')
    assert (os.environ['MASTER_ADDR'] == '127.0.0.1')
    assert (os.environ['WORLD_SIZE'] == '1')
    assert (os.environ['RANK'] == '0')
    mock_set_device.assert_called_with(0)
    mock_getoutput.assert_called_with('scontrol show hostname [0] | head -n1')
    mock_dist_init.assert_called_with(backend='nccl')
    # Explicit port overrides the default.
    init_dist('slurm', port=29505)
    assert (os.environ['MASTER_PORT'] == '29505')
    assert (os.environ['MASTER_ADDR'] == '127.0.0.1')
    assert (os.environ['WORLD_SIZE'] == '1')
    assert (os.environ['RANK'] == '0')
    mock_set_device.assert_called_with(0)
    mock_getoutput.assert_called_with('scontrol show hostname [0] | head -n1')
    mock_dist_init.assert_called_with(backend='nccl')
    # A later call without a port keeps the previously exported one.
    init_dist('slurm')
    assert (os.environ['MASTER_PORT'] == '29505')
    assert (os.environ['MASTER_ADDR'] == '127.0.0.1')
    assert (os.environ['WORLD_SIZE'] == '1')
    assert (os.environ['RANK'] == '0')
    mock_set_device.assert_called_with(0)
    mock_getoutput.assert_called_with('scontrol show hostname [0] | head -n1')
    mock_dist_init.assert_called_with(backend='nccl')
class CrossEntropyLoss(_Loss):
    """Cross-entropy loss over encrypted tensors (delegates to x.cross_entropy)."""

    def forward(self, x, y):
        """Return the cross-entropy of predictions *x* against targets *y*.

        Both tensors must share the same shape.
        """
        shapes_match = x.size() == y.size()
        assert shapes_match, 'input and target must have the same size'
        return x.cross_entropy(y, skip_forward=self.skip_forward)
class PNet(nn.Layer):
    """MTCNN proposal network (P-Net) with weights loaded from './pnet.npy'.

    forward returns (bbox offsets, face probabilities).
    """

    def __init__(self):
        super(PNet, self).__init__()
        backbone = OrderedDict([
            ('conv1', nn.Conv2D(3, 10, 3, 1)),
            ('prelu1', nn.PReLU(10)),
            ('pool1', nn.MaxPool2D(2, 2, ceil_mode=True)),
            ('conv2', nn.Conv2D(10, 16, 3, 1)),
            ('prelu2', nn.PReLU(16)),
            ('conv3', nn.Conv2D(16, 32, 3, 1)),
            ('prelu3', nn.PReLU(32)),
        ])
        self.features = nn.Sequential(backbone)
        self.conv4_1 = nn.Conv2D(32, 2, 1, 1)
        self.conv4_2 = nn.Conv2D(32, 4, 1, 1)
        # NOTE: weight file path is hard-coded relative to the CWD.
        pretrained = np.load('./pnet.npy', allow_pickle=True)[()]
        for param_name, param in self.named_parameters():
            param.data = paddle.to_tensor(pretrained[param_name])

    def forward(self, x):
        feats = self.features(x)
        offsets = self.conv4_2(feats)
        probs = F.softmax(self.conv4_1(feats))
        return (offsets, probs)
class GraspSamplerGAN(GraspSampler):
    """Decoder-only (GAN-style) grasp sampler: maps uniform latents + point
    clouds to grasp poses with confidences."""

    def __init__(self, model_scale, pointnet_radius, pointnet_nclusters, latent_size=2, device='cpu'):
        super(GraspSamplerGAN, self).__init__(latent_size, device)
        # Decoder input = latent code concatenated with the 3-D point coords.
        self.create_decoder(model_scale, pointnet_radius, pointnet_nclusters, latent_size + 3)

    def sample_latent(self, batch_size):
        """Draw latent codes uniformly from [0, 1)."""
        return torch.rand(batch_size, self.latent_size).to(self.device)

    def forward(self, pc, grasps=None, train=True):
        latent = self.sample_latent(pc.shape[0])
        return self.decode(pc, latent)

    def generate_grasps(self, pc, z=None):
        latent = self.sample_latent(pc.shape[0]) if z is None else z
        qt, confidence = self.decode(pc, latent)
        return (qt, confidence, latent.squeeze())

    def generate_dense_latents(self, resolution):
        """Return a regular grid over the latent cube, flattened to rows."""
        axes = [torch.linspace(0, 1, resolution) for _ in range(self.latent_size)]
        grid = torch.meshgrid(*axes)
        columns = [axis.flatten() for axis in grid]
        return torch.stack(columns, dim=(- 1)).to(self.device)
class decoder(nn.Module):
    """Conv stack mapping a 12-channel map to a 3-channel image in [0, 1].

    Eight 3x3 conv+ELU stages (12->32->64->128->256->128->64->32->16)
    followed by a final 3x3 conv to 3 channels and a Sigmoid.
    """

    def __init__(self, d=128):
        super(decoder, self).__init__()
        widths = [12, 32, 64, 128, 256, 128, 64, 32, 16]
        layers = []
        for cin, cout in zip(widths[:-1], widths[1:]):
            layers.append(nn.Conv2d(cin, cout, kernel_size=3, padding=1))
            layers.append(nn.ELU(inplace=True))
        layers.append(nn.Conv2d(16, 3, kernel_size=3, padding=1))
        layers.append(nn.Sigmoid())
        self.features = nn.Sequential(*layers)

    def forward(self, x):
        return self.features(x)
def model2():
    """Build the second test factor graph: 4 factors, 4 marginals, 8 edges."""
    third = 1.0 / 3
    # Factors (creation order preserved from the reference construction).
    f1 = Categorical([[0.23, 0.77]])
    f3 = JointCategorical([[0.17, 0.15], [0.4, 0.28]])
    f4 = JointCategorical([[0.32, 0.12], [0.08, 0.48]])
    f2 = JointCategorical([[[0.1, 0.05, 0.05], [0.15, 0.05, 0.04]], [[0.2, 0.1, 0.05], [0.05, 0.1, 0.06]]])
    # Marginals.
    m1 = Categorical([[0.5, 0.5]])
    m2 = Categorical([[third, third, third]])
    m3 = Categorical([[0.5, 0.5]])
    m4 = Categorical([[0.5, 0.5]])
    model = FactorGraph()
    for factor in (f1, f2, f3, f4):
        model.add_factor(factor)
    for marginal in (m1, m2, m3, m4):
        model.add_marginal(marginal)
    # Edge order matters for reproducibility, so keep it explicit.
    edges = ((m1, f1), (m1, f3), (m1, f4), (m3, f3), (m4, f4), (m3, f2), (m4, f2), (m2, f2))
    for marginal, factor in edges:
        model.add_edge(marginal, factor)
    return model
def optimizer_kwargs(cfg):
    """Flatten the optimizer-related fields of *cfg* into a kwargs dict
    suitable for an optimizer builder."""
    kwargs = {
        'optim': cfg.train.optim,
        'lr': cfg.train.lr,
        'weight_decay': cfg.train.weight_decay,
        'momentum': cfg.sgd.momentum,
        'sgd_dampening': cfg.sgd.dampening,
        'sgd_nesterov': cfg.sgd.nesterov,
        'rmsprop_alpha': cfg.rmsprop.alpha,
        'adam_beta1': cfg.adam.beta1,
        'adam_beta2': cfg.adam.beta2,
        'staged_lr': cfg.train.staged_lr,
        'new_layers': cfg.train.new_layers,
        'base_lr_mult': cfg.train.base_lr_mult,
    }
    return kwargs
def read_data(fname):
    """Read a word/TAG-per-token tweet file into parallel token/tag lists.

    Each line is space-separated "token/TAG" pairs; tokens may themselves
    contain '/', so everything before the final '/' is the token.

    Returns:
        (data, label): list of token lists (after preprocess_tweet) and the
        corresponding list of standardized tag lists.
    """
    # Fix: use a context manager so the file handle is closed deterministically
    # (the original `open(fname).readlines()` left it to the GC).
    with open(fname) as f:
        lines = [x.strip() for x in f.readlines()]
    data = []
    label = []
    for line in lines:
        words = []
        tags = []
        for pair in line.split(' '):
            items = pair.split('/')
            # Rejoin everything before the tag in case the token contains '/'.
            words.append(str('/'.join(items[:(- 1)])))
            tags.append(standardize(items[(- 1)]))
        words = preprocess_tweet(' '.join(words)).split()
        data.append(words)
        label.append(tags)
    return (data, label)
# Fix: the leading line `.script_launch_mode('subprocess')` was a stripped
# decorator (a SyntaxError as written). Restored the pytest-console-scripts
# marker that selects subprocess launch mode for script_runner.
@pytest.mark.script_launch_mode('subprocess')
def test_automate_training(download_functional_test_files, script_runner):
    """Run ivadomed_automate_training end-to-end and check its result CSVs."""
    file_config = Path(__data_testing_dir__, 'automate_training_config.json')
    file_config_hyper = Path(__data_testing_dir__, 'automate_training_hyperparameter_opt.json')
    __output_dir__ = Path(__tmp_dir__, 'results')
    ret = script_runner.run('ivadomed_automate_training', '--config', f'{file_config}', '--config-hyper', f'{file_config_hyper}', '--path-data', f'{__data_testing_dir__}', '--output_dir', f'{__output_dir__}')
    logger.debug(f'{ret.stdout}')
    logger.debug(f'{ret.stderr}')
    assert ret.success
    assert Path(__output_dir__, 'detailed_results.csv').exists()
    assert Path(__output_dir__, 'temporary_results.csv').exists()
    assert Path(__output_dir__, 'average_eval.csv').exists()
    check_sha256(str(file_config))
def score_target_hypo(args, a, b, c, lenpen, target_outfile, hypo_outfile, write_hypos, normalize):
    """Rescore n-best hypotheses with weights (a, b, c) / length penalty *lenpen*,
    pick the best hypothesis per source sentence, and return the BLEU score.

    Optionally writes the ordered targets/hypotheses to *target_outfile* /
    *hypo_outfile* when all shards are present.
    """
    print('lenpen', lenpen, 'weight1', a, 'weight2', b, 'weight3', c)
    (gen_output_lst, bitext1_lst, bitext2_lst, lm_res_lst) = load_score_files(args)
    # NOTE(review): shadows the builtin `dict` for the rest of this function.
    dict = dictionary.Dictionary()
    # NOTE(review): duplicated `scorer = scorer =` assignment (harmless no-op).
    scorer = scorer = bleu.Scorer(bleu.BleuConfig(pad=dict.pad(), eos=dict.eos(), unk=dict.unk()))
    ordered_hypos = {}
    ordered_targets = {}
    for shard_id in range(len(bitext1_lst)):
        bitext1 = bitext1_lst[shard_id]
        bitext2 = bitext2_lst[shard_id]
        gen_output = gen_output_lst[shard_id]
        lm_res = lm_res_lst[shard_id]
        total = len(bitext1.rescore_source.keys())
        source_lst = []
        hypo_lst = []
        score_lst = []
        reference_lst = []
        # j counts hypotheses within the current source sentence's n-best group.
        j = 1
        best_score = (- math.inf)
        for i in range(total):
            target_len = len(bitext1.rescore_hypo[i].split())
            if (lm_res is not None):
                lm_score = lm_res.score[i]
            else:
                lm_score = 0
            if (bitext2 is not None):
                bitext2_score = bitext2.rescore_score[i]
                bitext2_backwards = bitext2.backwards
            else:
                bitext2_score = None
                bitext2_backwards = None
            # Combined rescoring objective for this hypothesis.
            score = rerank_utils.get_score(a, b, c, target_len, bitext1.rescore_score[i], bitext2_score, lm_score=lm_score, lenpen=lenpen, src_len=bitext1.source_lengths[i], tgt_len=bitext1.target_lengths[i], bitext1_backwards=bitext1.backwards, bitext2_backwards=bitext2_backwards, normalize=normalize)
            if (score > best_score):
                best_score = score
                best_hypo = bitext1.rescore_hypo[i]
            # End of this sentence's n-best group: commit the best hypothesis
            # and reset the per-group state.
            if ((j == gen_output.num_hypos[i]) or (j == args.num_rescore)):
                j = 1
                hypo_lst.append(best_hypo)
                score_lst.append(best_score)
                source_lst.append(bitext1.rescore_source[i])
                reference_lst.append(bitext1.rescore_target[i])
                best_score = (- math.inf)
                best_hypo = ''
            else:
                j += 1
        gen_keys = list(sorted(gen_output.no_bpe_target.keys()))
        # Accumulate sentence-level BLEU statistics for every selected hypothesis.
        for key in range(len(gen_keys)):
            if (args.prefix_len is None):
                assert (hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]]), ((((('pred and rescore hypo mismatch: i: ' + str(key)) + ', ') + str(hypo_lst[key])) + str(gen_keys[key])) + str(gen_output.no_bpe_hypo[key]))
                sys_tok = dict.encode_line(hypo_lst[key])
                ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]])
                scorer.add(ref_tok, sys_tok)
            else:
                # Prefix-constrained decoding: expand the prefix back to the full hypothesis.
                full_hypo = rerank_utils.get_full_from_prefix(hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]])
                sys_tok = dict.encode_line(full_hypo)
                ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]])
                scorer.add(ref_tok, sys_tok)
        if write_hypos:
            # Keep hypotheses/targets keyed by original sentence id so they can
            # be written back in source order below.
            for key in range(len(gen_output.no_bpe_target)):
                if (args.prefix_len is None):
                    assert (hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]]), (((('pred and rescore hypo mismatch:' + 'i:') + str(key)) + str(hypo_lst[key])) + str(gen_output.no_bpe_hypo[key]))
                    ordered_hypos[gen_keys[key]] = hypo_lst[key]
                    ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[gen_keys[key]]
                else:
                    full_hypo = rerank_utils.get_full_from_prefix(hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]])
                    ordered_hypos[gen_keys[key]] = full_hypo
                    ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[gen_keys[key]]
    # Only write files once every shard has been processed.
    # NOTE(review): iterating range(len(...)) assumes keys are exactly 0..N-1.
    if (args.num_shards == len(bitext1_lst)):
        with open(target_outfile, 'w') as t:
            with open(hypo_outfile, 'w') as h:
                for key in range(len(ordered_hypos)):
                    t.write(ordered_targets[key])
                    h.write(ordered_hypos[key])
    res = scorer.result_string(4)
    if write_hypos:
        print(res)
    score = rerank_utils.parse_bleu_scoring(res)
    return score
def log_validation(text_encoder, tokenizer, prior, args, accelerator, weight_dtype, epoch):
    """Generate one image per validation prompt with the current prior/text
    encoder and log them to the active trackers.

    Returns:
        list: the generated PIL images.
    """
    logger.info('Running validation... ')
    # Build a fresh pipeline around the (unwrapped) models being trained.
    pipeline = AutoPipelineForText2Image.from_pretrained(args.pretrained_decoder_model_name_or_path, prior=accelerator.unwrap_model(prior), prior_text_encoder=accelerator.unwrap_model(text_encoder), prior_tokenizer=tokenizer, torch_dtype=weight_dtype)
    pipeline = pipeline.to(accelerator.device)
    pipeline.set_progress_bar_config(disable=True)
    if (args.seed is None):
        generator = None
    else:
        # Seeded generator makes validation images reproducible across epochs.
        generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
    images = []
    for i in range(len(args.validation_prompts)):
        with torch.cuda.amp.autocast():
            image = pipeline(args.validation_prompts[i], prior_timesteps=DEFAULT_STAGE_C_TIMESTEPS, generator=generator, height=args.resolution, width=args.resolution).images[0]
        images.append(image)
    for tracker in accelerator.trackers:
        if (tracker.name == 'tensorboard'):
            np_images = np.stack([np.asarray(img) for img in images])
            tracker.writer.add_images('validation', np_images, epoch, dataformats='NHWC')
        elif (tracker.name == 'wandb'):
            tracker.log({'validation': [wandb.Image(image, caption=f'{i}: {args.validation_prompts[i]}') for (i, image) in enumerate(images)]})
        else:
            # Fix: Logger.warn is a deprecated alias of Logger.warning.
            logger.warning(f'image logging not implemented for {tracker.name}')
    # Free the pipeline's GPU memory before training resumes.
    del pipeline
    torch.cuda.empty_cache()
    return images
class Wire():
    """Lightweight view over one wire of a tile, with lazy Node creation."""

    def __init__(self, tile, index):
        self.tile = tile
        self.index = index
        # Cache the tile's wire record so accessors below are cheap.
        self.data = tile.get_wire_data(index)

    def name(self):
        """Wire name string."""
        return self.data.name

    def intent(self):
        """Routing intent code of this wire."""
        return self.data.intent

    def node(self):
        """Return the Node owning this wire, creating it on first access."""
        cache = self.tile.wire_to_node
        if self.index not in cache:
            cache[self.index] = Node(self.tile, [self])
        return cache[self.index]

    def is_gnd(self):
        """True when this is a ground wire (name contains 'GND_WIRE')."""
        return 'GND_WIRE' in self.name()

    def is_vcc(self):
        """True when this is a supply wire (name contains 'VCC_WIRE')."""
        return 'VCC_WIRE' in self.name()

    def resistance(self):
        return self.data.resistance

    def capacitance(self):
        return self.data.capacitance
def train_val_split(dataset, val_frac):
    """Randomly split *dataset* into (train, val) Subsets.

    The validation set holds round(len(dataset) * val_frac) examples; the
    split uses numpy's global RNG, so seed np.random for reproducibility.
    """
    indices = np.arange(len(dataset))
    np.random.shuffle(indices)
    n_val = int(np.round(len(dataset) * val_frac))
    val_indices = indices[:n_val]
    train_indices = indices[n_val:]
    return (Subset(dataset, train_indices), Subset(dataset, val_indices))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.