code stringlengths 101 5.91M |
|---|
def test_orbits_method_returntype_scalar():
    """Check that Orbit methods return astropy Quantities for a two-orbit Orbit.

    The checked methods are grouped by the keyword arguments they require;
    every one must return an ``astropy.units.Quantity``.
    """
    from galpy.orbit import Orbit
    from galpy.potential import MWPotential2014

    o = Orbit([
        [10.0 * units.kpc, -20.0 * units.km / units.s, 210.0 * units.km / units.s,
         500.0 * units.pc, -12.0 * units.km / units.s, 45.0 * units.deg],
        [-20.0 * units.kpc, 10.0 * units.km / units.s, 230.0 * units.km / units.s,
         -300.0 * units.pc, 12.0 * units.km / units.s, 125.0 * units.deg],
    ])
    # Methods that need a potential.
    pot_methods = ['E', 'ER', 'Ez', 'Jacobi', 'rguiding', 'rE', 'LcE']
    # Methods evaluated with the analytic estimate (also need a potential).
    analytic_methods = ['rap', 'rperi', 'zmax']
    # Action-angle quantities computed with the Staeckel approximation.
    staeckel_methods = ['jr', 'jp', 'jz', 'wr', 'wp', 'wz',
                        'Tr', 'Tp', 'Tz', 'Or', 'Op', 'Oz']
    # Methods that take no extra arguments.
    plain_methods = ['L', 'Lz', 'time', 'R', 'r', 'vR', 'vT', 'z', 'vz', 'phi',
                     'vphi', 'x', 'y', 'vx', 'vy', 'ra', 'dec', 'll', 'bb',
                     'dist', 'pmra', 'pmdec', 'pmll', 'pmbb', 'vlos', 'vra',
                     'vdec', 'vll', 'vbb', 'helioX', 'helioY', 'helioZ',
                     'U', 'V', 'W']
    checks = (
        [(m, {'pot': MWPotential2014}) for m in pot_methods]
        + [(m, {'pot': MWPotential2014, 'analytic': True}) for m in analytic_methods]
        + [(m, {'pot': MWPotential2014, 'type': 'staeckel', 'delta': 0.5})
           for m in staeckel_methods]
        + [(m, {}) for m in plain_methods]
    )
    for name, kwargs in checks:
        assert isinstance(getattr(o, name)(**kwargs), units.Quantity), (
            f'Orbit method {name} does not return Quantity when it should'
        )
    return None
class LabelSmoothingLoss(nn.Module, ABC):
    """Cross-entropy loss with label smoothing.

    Returns ``{KEY_LOSS: epsilon * smooth + (1 - epsilon) * nll}`` where
    ``smooth`` is the uniform-distribution term and ``nll`` the standard
    negative log-likelihood.
    """

    def __init__(self, epsilon, reduction='mean'):
        super().__init__()
        self.epsilon = epsilon      # smoothing factor, expected in [0, 1]
        self.reduction = reduction  # 'mean', 'sum', or anything else for no reduction

    def forward(self, output_dict, targets):
        """Compute the smoothed loss for a batch.

        BUG FIX: this was previously ``__call__``, which overrode
        ``nn.Module.__call__`` and bypassed the module call machinery
        (hooks etc.). Defining ``forward`` keeps call sites unchanged
        (instances are still invoked as ``loss(output_dict, targets)``).
        """
        assert isinstance(output_dict, dict) and (KEY_OUTPUT in output_dict.keys())
        inputs = output_dict[KEY_OUTPUT]
        n = inputs.size()[-1]  # number of classes
        log_preds = F.log_softmax(inputs, dim=-1)
        # Uniform term: -sum_k log p_k, reduced, then normalized by n below.
        loss = self.reduce_loss(-log_preds.sum(dim=-1), reduction=self.reduction)
        nll = F.nll_loss(log_preds, targets, reduction=self.reduction)
        return {KEY_LOSS: self.linear_combination(loss / n, nll, self.epsilon)}

    def reduce_loss(self, loss, reduction='mean'):
        """Apply 'mean'/'sum' reduction, or return the loss unreduced."""
        return loss.mean() if reduction == 'mean' else (loss.sum() if reduction == 'sum' else loss)

    def linear_combination(self, x, y, epsilon):
        """Return ``epsilon * x + (1 - epsilon) * y``."""
        return (epsilon * x) + ((1 - epsilon) * y)
def test_init(g1, g2):
    """Sanity-check construction of the two graph fixtures."""
    # g1: 4 vertices, 2 edges; edge (0, 1) has weight 1 and is mirrored
    # in the bidirectional edge list.
    assert g1.num_v == 4
    assert g1.num_e == 2
    for u, v, w in [(0, 1, 1)]:
        assert (u, v) in g1.e[0]
        assert g1.A[(u, v)] == w
        assert (v, u) in g1.e_both_side[0]
        assert g1.A[(v, u)] == w
    # g2: 4 vertices, 3 edges; (0, 3) is weighted 0.5 and (0, 2) weighted 1.
    assert g2.num_v == 4
    assert g2.num_e == 3
    for u, v, w in [(0, 3, 0.5), (0, 2, 1)]:
        assert (u, v) in g2.e[0]
        assert g2.A[(u, v)] == w
        assert (v, u) in g2.e_both_side[0]
        assert g2.A[(v, u)] == w
def make_attention_block(in_planes, reduction, attention_type, **kwargs):
    """Instantiate the attention module named by ``attention_type``.

    Extra ``kwargs`` are forwarded only to SqueezeAndExcitationBlock2D; the
    other block types take fixed arguments. Raises ValueError for an
    unrecognized type name.
    """
    builders = {
        'GlobalContextBlock2D': lambda: GlobalContextBlock2D(
            in_channels=in_planes, reduction=reduction),
        'SqueezeAndExcitationBlock2D': lambda: SqueezeAndExcitationBlock2D(
            in_channels=in_planes, reduction=reduction, **kwargs),
        'NonLocal2DEmbeddedGaussian': lambda: NonLocal2DEmbeddedGaussian(
            in_channels=in_planes),
        'SimplifiedNonLocal2DEmbeddedGaussian': lambda: SimplifiedNonLocal2DEmbeddedGaussian(
            in_channels=in_planes),
    }
    if attention_type not in builders:
        raise ValueError('no matching type')
    return builders[attention_type]()
class BertJapaneseTokenizer(BertTokenizer):
    """BERT tokenizer for Japanese text.

    Tokenizes in two stages: word-level splitting ('basic' or 'mecab'),
    then subword splitting ('wordpiece' or per-'character').
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, do_word_tokenize=True,
                 do_subword_tokenize=True, word_tokenizer_type='basic',
                 subword_tokenizer_type='wordpiece', never_split=None,
                 unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]',
                 cls_token='[CLS]', mask_token='[MASK]', mecab_kwargs=None,
                 **kwargs):
        # Intentionally skip BertTokenizer.__init__ (which would build the
        # English basic/wordpiece tokenizers) and initialize its parent
        # directly, forwarding the configuration for serialization.
        super(BertTokenizer, self).__init__(
            unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token,
            do_lower_case=do_lower_case, do_word_tokenize=do_word_tokenize,
            do_subword_tokenize=do_subword_tokenize,
            word_tokenizer_type=word_tokenizer_type,
            subword_tokenizer_type=subword_tokenizer_type,
            never_split=never_split, mecab_kwargs=mecab_kwargs, **kwargs)
        if not os.path.isfile(vocab_file):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.do_word_tokenize = do_word_tokenize
        self.word_tokenizer_type = word_tokenizer_type
        self.lower_case = do_lower_case
        self.never_split = never_split
        # Deep-copied so __setstate__ can rebuild the MeCab tokenizer from
        # the original constructor arguments after unpickling.
        self.mecab_kwargs = copy.deepcopy(mecab_kwargs)
        if do_word_tokenize:
            if word_tokenizer_type == 'basic':
                self.word_tokenizer = BasicTokenizer(
                    do_lower_case=do_lower_case, never_split=never_split,
                    tokenize_chinese_chars=False)
            elif word_tokenizer_type == 'mecab':
                self.word_tokenizer = MecabTokenizer(
                    do_lower_case=do_lower_case, never_split=never_split,
                    **(mecab_kwargs or {}))
            else:
                raise ValueError(f"Invalid word_tokenizer_type '{word_tokenizer_type}' is specified.")
        self.do_subword_tokenize = do_subword_tokenize
        self.subword_tokenizer_type = subword_tokenizer_type
        if do_subword_tokenize:
            if subword_tokenizer_type == 'wordpiece':
                self.subword_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
            elif subword_tokenizer_type == 'character':
                self.subword_tokenizer = CharacterTokenizer(vocab=self.vocab, unk_token=self.unk_token)
            else:
                raise ValueError(f"Invalid subword_tokenizer_type '{subword_tokenizer_type}' is specified.")

    @property
    def do_lower_case(self):
        # BUG FIX: this was a plain method, so __setstate__ passed the bound
        # method object (always truthy) as MecabTokenizer's do_lower_case
        # argument. As a property it yields the stored boolean, matching
        # upstream transformers.
        return self.lower_case

    def __getstate__(self):
        # The MeCab word tokenizer wraps a C extension and cannot be
        # pickled; drop it here and rebuild it in __setstate__.
        state = dict(self.__dict__)
        if self.word_tokenizer_type == 'mecab':
            del state['word_tokenizer']
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        if self.word_tokenizer_type == 'mecab':
            self.word_tokenizer = MecabTokenizer(
                do_lower_case=self.do_lower_case,
                never_split=self.never_split,
                **(self.mecab_kwargs or {}))

    def _tokenize(self, text):
        """Split ``text`` into words, then each word into subword tokens."""
        if self.do_word_tokenize:
            tokens = self.word_tokenizer.tokenize(text, never_split=self.all_special_tokens)
        else:
            tokens = [text]
        if self.do_subword_tokenize:
            split_tokens = [sub_token for token in tokens for sub_token in self.subword_tokenizer.tokenize(token)]
        else:
            split_tokens = tokens
        return split_tokens
class TestBinaryOp(unittest.TestCase):
    """Graph tests for a small anyop -> mul -> not -> neg node chain."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: unittest invokes setUpClass on the class itself; without
        # @classmethod the plain function was called with zero arguments and
        # raised TypeError before any test could run.
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def test_binary_op(self):
        """Build the op chain, then exercise config and rename utilities."""
        model = Graph()
        input_tensors = [Tensor(name='any', source_op=[], dest_op=['anyop'])]
        output_tensors = [Tensor(name='any_out', source_op=['anyop'], dest_op=['mul'])]
        any_node = util.construct_node('anyop', 'AnyOpTest', input_tensors=input_tensors, output_tensors=output_tensors)
        input_tensors = [Tensor(name='any_out', source_op=['anyop'], dest_op=['mul']), Tensor(name='mul_1', data=np.array(1).astype('int64'), shape=[])]
        output_tensors = [Tensor(name='mul_out', source_op=['mul'], dest_op=['not'])]
        Mul_node = util.construct_node('mul', 'Mul', input_tensors=input_tensors, output_tensors=output_tensors)
        input_tensors = [Tensor(name='mul_out', source_op=['mul'], dest_op=['not'])]
        output_tensors = [Tensor(name='not_out', source_op=['not'], dest_op=['neg'])]
        Not_node = util.construct_node('not', 'Not', input_tensors=input_tensors, output_tensors=output_tensors)
        input_tensors = [Tensor(name='not_out', source_op=['not'], dest_op=['neg'])]
        output_tensors = [Tensor(name='neg_out', source_op=['neg'], dest_op=[])]
        Neg_node = util.construct_node('neg', 'Neg', input_tensors=input_tensors, output_tensors=output_tensors)
        Mul_node.set_attr('onnxruntime', None)
        Not_node.set_attr('onnxruntime', None)
        Neg_node.set_attr('onnxruntime', None)
        model.insert_nodes(0, [any_node, Mul_node, Not_node, Neg_node])
        config = {'architecture': 'Transformers', 'layer': 3}
        model.framework_modeling_config = config
        config_1 = model.framework_modeling_config
        val_0 = model.inquire_config_item('layer')
        val_1 = config_1['layer']
        id_0 = model.get_node_id('not')
        model.rename_node('not', 'not_new')
        id_1 = model.get_node_id('not_new')
        self.assertEqual(any_node.op_type, 'AnyOpTest')
        # set_attr('onnxruntime', ...) is expected to cast the int64 scalar
        # operand of Mul to float32.
        self.assertEqual(Mul_node.input_tensors[1].data.dtype, np.float32)
        self.assertEqual(len(Not_node.input_tensors), 2)
        self.assertEqual(len(Neg_node.input_tensors), 2)
        self.assertEqual(val_0, val_1)
        self.assertEqual(id_0, id_1)
class Kukaiiwa(Robot):
    """KUKA LBR iiwa arm: configures the link ids and URDF path and defers
    everything else to the Robot base class."""

    def __init__(self, name: str, id_num: int, world, sim_step: float,
                 use_physics_sim: bool,
                 base_position: Union[(list, np.ndarray)],
                 base_orientation: Union[(list, np.ndarray)],
                 resting_angles: Union[(list, np.ndarray)],
                 control_mode: Union[(int, str)],
                 ik_xyz_delta: float = 0.005,
                 ik_rpy_delta: float = 0.005,
                 jt_joint_delta: float = 0.5,
                 joint_velocities_overwrite: Union[(float, List)] = 1,
                 joint_limits_overwrite: Union[(float, List)] = 1,
                 controlled_joints: list = None,
                 self_collision: bool = True):
        # BUG FIX: controlled_joints previously defaulted to a mutable []
        # shared by every instance; default to None and build a fresh list
        # per call (passing [] explicitly still behaves the same).
        if controlled_joints is None:
            controlled_joints = []
        super().__init__(name, id_num, world, sim_step, use_physics_sim,
                         base_position, base_orientation, resting_angles,
                         control_mode, ik_xyz_delta, ik_rpy_delta,
                         jt_joint_delta, joint_velocities_overwrite,
                         joint_limits_overwrite, controlled_joints,
                         self_collision)
        self.end_effector_link_id = 'lbr_iiwa_link_7'
        self.base_link_id = 'lbr_iiwa_link_0'
        self.urdf_path = 'robots/predefined/kuka_iiwa/model.urdf'
def osnet_x1_0_efdmix23_a0d3(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """Build an OSNet-x1.0 with EFDMix on conv2/conv3 (alpha=0.3).

    Optionally initializes the backbone from the pretrained 'osnet_x1_0'
    weights before returning the model.
    """
    model = OSNet(
        num_classes,
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[64, 256, 384, 512],
        loss=loss,
        efdmix_layers=['conv2', 'conv3'],
        efdmix_alpha=0.3,
        **kwargs,
    )
    if not pretrained:
        return model
    init_pretrained_weights(model, key='osnet_x1_0')
    return model
class GoldenFeaturesTransformerOriginal(object):
    """Generate 'golden' pairwise feature combinations (diff / ratio / sum /
    multiply) of continuous columns and keep the best-scoring ones.

    ``fit`` scores all column pairs on a subsample and records the selected
    combinations; ``transform`` materializes them as new columns.
    """

    def __init__(self, features_count=None):
        self._new_features = []   # selected combinations, one dict per feature
        self._new_columns = []    # names of the generated columns
        self._features_count = features_count  # optional cap on generated features
        self._scorer = get_logloss_score
        self._error = None        # sticky error message from a failed fit

    def fit(self, X, y):
        """Score all pairwise combinations of X's columns and select the best.

        Raises Exception when no continuous features exist or scoring fails.
        """
        if self._new_features:
            return  # already fitted
        if (self._error is not None) and self._error:
            raise Exception('Golden Features not created due to error (please check errors.md). ' + self._error)
        if X.shape[1] == 0:
            self._error = f'Golden Features not created. No continous features. Input data shape: {X.shape}, {y.shape}'
            raise Exception('Golden Features not created. No continous features.')
        start_time = time.time()
        combinations = itertools.combinations(X.columns, r=2)
        items = [i for i in combinations]
        # Cap the number of scored pairs to keep runtime bounded.
        if len(items) > 250000:
            si = np.random.choice(len(items), 250000, replace=False)
            items = [items[i] for i in si]
        (X_train, X_test, y_train, y_test) = self._subsample(X, y)
        # Append the shared scoring context so each item is a self-contained
        # task for the worker pool.
        for i in range(len(items)):
            items[i] += (X_train, y_train, X_test, y_test, self._scorer)
        scores = []
        with Pool() as p:
            scores = p.map(get_score, items)
        if not scores:
            self._error = f'Golden Features not created. Empty scores. Input data shape: {X.shape}, {y.shape}'
            raise Exception('Golden Features not created. Empty scores.')
        # Each score tuple holds (diff, ratio a/b, ratio b/a, sum, multiply).
        result = []
        for i in range(len(items)):
            if scores[i][0] is not None:
                result += [(items[i][0], items[i][1], 'diff', scores[i][0])]
            if scores[i][1] is not None:
                result += [(items[i][0], items[i][1], 'ratio', scores[i][1])]
            if scores[i][2] is not None:
                result += [(items[i][1], items[i][0], 'ratio', scores[i][2])]
            if scores[i][3] is not None:
                result += [(items[i][1], items[i][0], 'sum', scores[i][3])]
            if scores[i][4] is not None:
                result += [(items[i][1], items[i][0], 'multiply', scores[i][4])]
        df = pd.DataFrame(result, columns=['feature1', 'feature2', 'operation', 'score'])
        df.sort_values(by='score', inplace=True)
        # Heuristic: keep between 10 and 100 features, ~10% of input width.
        new_cols_cnt = np.min([100, np.max([10, int(0.1 * X.shape[1])])])
        if (self._features_count is not None) and (self._features_count > 0) and (self._features_count < df.shape[0]):
            new_cols_cnt = self._features_count
            print(self._features_count, new_cols_cnt)
        # BUG FIX: this previously stored the DataFrame itself; iterating a
        # DataFrame yields column names (strings), so new_feature['feature1']
        # below and in transform() failed, and `if self._new_features:` on a
        # DataFrame raises ValueError. Store a list of record dicts instead.
        self._new_features = df.head(new_cols_cnt).to_dict(orient='records')
        for new_feature in self._new_features:
            new_col = '_'.join([new_feature['feature1'], new_feature['operation'], new_feature['feature2']])
            self._new_columns += [new_col]
            print(f'Add Golden Feature: {new_col}')
        print(f'Created {len(self._new_features)} Golden Features in {np.round(time.time() - start_time, 2)} seconds.')

    def transform(self, X):
        """Add the fitted golden-feature columns to X (in place) and return it."""
        for new_feature in self._new_features:
            new_col = '_'.join([new_feature['feature1'], new_feature['operation'], new_feature['feature2']])
            if new_feature['operation'] == 'diff':
                X[new_col] = X[new_feature['feature1']] - X[new_feature['feature2']]
            elif new_feature['operation'] == 'ratio':
                (a, b) = (np.array(X[new_feature['feature1']], dtype=float), np.array(X[new_feature['feature2']], dtype=float))
                # Guard against division by zero: those entries become 0.
                X[new_col] = np.divide(a, b, out=np.zeros_like(a), where=(b != 0)).reshape(-1, 1)
            elif new_feature['operation'] == 'sum':
                X[new_col] = X[new_feature['feature1']] + X[new_feature['feature2']]
            elif new_feature['operation'] == 'multiply':
                X[new_col] = X[new_feature['feature1']] * X[new_feature['feature2']]
        return X

    def _subsample(self, X, y):
        """Return a stratified train/test subsample used for scoring pairs."""
        MAX_SIZE = 10000
        TRAIN_SIZE = 2500
        shuffle = True
        stratify = None
        if X.shape[0] > MAX_SIZE:
            # Two-stage subsample: cap at MAX_SIZE, then split off TRAIN_SIZE.
            stratify = y
            (X_train, _, y_train, _) = train_test_split(X, y, train_size=MAX_SIZE, shuffle=shuffle, stratify=stratify, random_state=1)
            stratify = y_train
            (X_train, X_test, y_train, y_test) = train_test_split(X_train, y_train, train_size=TRAIN_SIZE, shuffle=shuffle, stratify=stratify, random_state=1)
        else:
            stratify = y
            train_size = X.shape[0] // 4
            (X_train, X_test, y_train, y_test) = train_test_split(X, y, train_size=train_size, shuffle=shuffle, stratify=stratify, random_state=1)
        return (X_train, X_test, y_train, y_test)
def prepare_for_retracing(gm: GraphModule) -> Tuple[(GraphModule, Dict[(str, Any)])]:
    """Snapshot the module's attributes, then rewrite its arguments from
    dynamic to static form; returns the (mutated) module and the snapshot
    so the caller can restore the original state afterwards."""
    cached = _cache_attributes(gm)
    _patch_arguments_(gm, gm.dynamic2static)
    return gm, cached
class GeneratorWithLongSkipsExtraConv(torch.nn.Module):
    """Encoder / ResNet-bottleneck / decoder generator with long skip
    connections; each skip concatenation is fused by an extra conv block."""

    def __init__(self, input_dim, num_filter, output_dim, num_resnet):
        super(GeneratorWithLongSkipsExtraConv, self).__init__()
        self.pad = torch.nn.ReflectionPad2d(3)
        # Encoder: 7x7 stem followed by two channel-doubling conv blocks.
        self.conv1 = ConvBlock(input_dim, num_filter, kernel_size=7, stride=1, padding=0)
        self.conv2 = ConvBlock(num_filter, num_filter * 2)
        self.conv3 = ConvBlock(num_filter * 2, num_filter * 4)
        # Bottleneck: stack of residual blocks at 4x the base width.
        self.resnet_blocks = torch.nn.Sequential(
            *[ResnetBlock(num_filter * 4, num_filter * 4) for _ in range(num_resnet)])
        # Decoder: upsample, then fuse each long skip with an extra conv.
        self.deconv1 = DeconvBlock(num_filter * 4, num_filter * 2)
        self.conv_ad1 = ConvBlock(num_filter * 2 + num_filter * 2, num_filter * 2, stride=1, padding=1)
        self.deconv2 = DeconvBlock(num_filter * 2, num_filter)
        self.conv_ad2 = ConvBlock(num_filter + num_filter, num_filter, stride=1, padding=1)
        self.deconv3 = ConvBlock(num_filter, output_dim, kernel_size=7, stride=1, padding=0, activation='tanh', batch_norm=False)

    def forward(self, x):
        """Run the encoder, bottleneck, and skip-fusing decoder."""
        skip1 = self.conv1(self.pad(x))
        skip2 = self.conv2(skip1)
        bottleneck = self.resnet_blocks(self.conv3(skip2))
        up1 = self.conv_ad1(torch.cat((self.deconv1(bottleneck), skip2), dim=1))
        up2 = self.conv_ad2(torch.cat((self.deconv2(up1), skip1), dim=1))
        return self.deconv3(self.pad(up2))
.timeout(30)
def test_init_with_crashed_worker():
    """A worker whose policy crashes on reset() must not stop the sampler
    from collecting the requested number of samples."""
    max_path_length = 16
    n_workers = 2
    env = GarageEnv(PointEnv())
    # Scripted policy that replays a fixed random action sequence.
    scripted = [env.action_space.sample() for _ in range(max_path_length)]
    policy = FixedPolicy(env.spec, scripted_actions=scripted)
    tasks = SetTaskSampler(lambda: GarageEnv(PointEnv()))
    workers = WorkerFactory(seed=100, max_path_length=max_path_length, n_workers=n_workers)

    class CrashingPolicy:
        def reset(self, **kwargs):
            raise Exception('Intentional subprocess crash')

    # One healthy worker and one that crashes immediately.
    sampler = MultiprocessingSampler.from_worker_factory(
        workers, [policy, CrashingPolicy()], envs=tasks.sample(n_workers))
    rollouts = sampler.obtain_samples(0, 160, None)
    assert sum(rollouts.lengths) >= 160
    sampler.shutdown_worker()
    env.close()
def mkdir_p(path):
    """Create ``path`` and any missing parent directories, like ``mkdir -p``.

    A falsy (empty/None) path is a no-op. Succeeds silently when the
    directory already exists; any other OSError (permissions, path exists
    as a non-directory, ...) propagates.
    """
    if not path:
        return
    # exist_ok=True reproduces the old EEXIST-and-isdir check: an existing
    # directory is fine, an existing non-directory still raises.
    os.makedirs(path, exist_ok=True)
class MultiSparseMap3D(genpy.Message):
    """Auto-generated ROS message class for multi_map_server/MultiSparseMap3D.

    Holds a list of SparseMap3D maps and a parallel list of Pose origins.
    NOTE: generated serialization code — statement order and the wire format
    are the contract; do not hand-edit the (de)serialization bodies.
    """
    _md5sum = '2e3d76c98ee3e2b23a422f64965f6418'
    _type = 'multi_map_server/MultiSparseMap3D'
    _has_header = False
    _full_text = "SparseMap3D[] maps\ngeometry_msgs/Pose[] origins\n\n\nMSG: multi_map_server/SparseMap3D\nHeader header\nnav_msgs/MapMetaData info\nVerticalOccupancyGridList[] lists\n\n\n\nMSG: std_msgs/Header\n# Standard metadata for higher-level stamped data types.\n# This is generally used to communicate timestamped data \n# in a particular coordinate frame.\n# \n# sequence ID: consecutively increasing ID \nuint32 seq\n#Two-integer timestamp that is expressed as:\n# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\n# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\n# time-handling sugar is provided by the client library\ntime stamp\n#Frame this data is associated with\n# 0: no frame\n# 1: global frame\nstring frame_id\n\n\nMSG: nav_msgs/MapMetaData\n# This hold basic information about the characterists of the OccupancyGrid\n\n# The time at which the map was loaded\ntime map_load_time\n# The map resolution [m/cell]\nfloat32 resolution\n# Map width [cells]\nuint32 width\n# Map height [cells]\nuint32 height\n# The origin of the map [m, m, rad]. This is the real-world pose of the\n# cell (0,0) in the map.\ngeometry_msgs/Pose origin\n\nMSG: geometry_msgs/Pose\n# A representation of pose in free space, composed of postion and orientation. \nPoint position\nQuaternion orientation\n\n\nMSG: geometry_msgs/Point\n# This contains the position of a point in free space\nfloat64 x\nfloat64 y\nfloat64 z\n\n\nMSG: geometry_msgs/Quaternion\n# This represents an orientation in free space in quaternion form.\n\nfloat64 x\nfloat64 y\nfloat64 z\nfloat64 w\n\n\nMSG: multi_map_server/VerticalOccupancyGridList\nfloat32 x\nfloat32 y\nint32[] upper\nint32[] lower\nint32[] mass\n\n\n"
    __slots__ = ['maps', 'origins']
    _slot_types = ['multi_map_server/SparseMap3D[]', 'geometry_msgs/Pose[]']

    def __init__(self, *args, **kwds):
        """Constructor; any args/kwds are forwarded to genpy.Message and
        unset slots are defaulted to empty lists."""
        if (args or kwds):
            super(MultiSparseMap3D, self).__init__(*args, **kwds)
            if (self.maps is None):
                self.maps = []
            if (self.origins is None):
                self.origins = []
        else:
            self.maps = []
            self.origins = []

    def _get_types(self):
        """Return the internal message field types (genpy internal API)."""
        return self._slot_types

    def serialize(self, buff):
        """Serialize this message into the buffer *buff* (struct-based path)."""
        try:
            # maps: length-prefixed array of SparseMap3D.
            length = len(self.maps)
            buff.write(_struct_I.pack(length))
            for val1 in self.maps:
                _v1 = val1.header
                buff.write(_struct_I.pack(_v1.seq))
                _v2 = _v1.stamp
                _x = _v2
                buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
                # frame_id: length-prefixed UTF-8 string.
                _x = _v1.frame_id
                length = len(_x)
                if (python3 or (type(_x) == unicode)):
                    _x = _x.encode('utf-8')
                    length = len(_x)
                if python3:
                    buff.write(struct.pack(('<I%sB' % length), length, *_x))
                else:
                    buff.write(struct.pack(('<I%ss' % length), length, _x))
                # MapMetaData: load time, resolution, width, height, origin pose.
                _v3 = val1.info
                _v4 = _v3.map_load_time
                _x = _v4
                buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
                _x = _v3
                buff.write(_struct_f2I.pack(_x.resolution, _x.width, _x.height))
                _v5 = _v3.origin
                _v6 = _v5.position
                _x = _v6
                buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
                _v7 = _v5.orientation
                _x = _v7
                buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
                # lists: length-prefixed VerticalOccupancyGridList entries.
                length = len(val1.lists)
                buff.write(_struct_I.pack(length))
                for val2 in val1.lists:
                    _x = val2
                    buff.write(_struct_2f.pack(_x.x, _x.y))
                    length = len(val2.upper)
                    buff.write(_struct_I.pack(length))
                    pattern = ('<%si' % length)
                    buff.write(struct.pack(pattern, *val2.upper))
                    length = len(val2.lower)
                    buff.write(_struct_I.pack(length))
                    pattern = ('<%si' % length)
                    buff.write(struct.pack(pattern, *val2.lower))
                    length = len(val2.mass)
                    buff.write(_struct_I.pack(length))
                    pattern = ('<%si' % length)
                    buff.write(struct.pack(pattern, *val2.mass))
            # origins: length-prefixed array of Pose.
            length = len(self.origins)
            buff.write(_struct_I.pack(length))
            for val1 in self.origins:
                _v8 = val1.position
                _x = _v8
                buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
                _v9 = val1.orientation
                _x = _v9
                buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
        except struct.error as se:
            self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x)))))
        except TypeError as te:
            self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x)))))

    def deserialize(self, str):
        """Deserialize this message from the byte string *str* (struct path).

        Note: the parameter shadows the builtin ``str`` — generated code.
        """
        try:
            if (self.maps is None):
                self.maps = None
            if (self.origins is None):
                self.origins = None
            end = 0
            # maps array.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            self.maps = []
            for i in range(0, length):
                val1 = multi_map_server.msg.SparseMap3D()
                _v10 = val1.header
                start = end
                end += 4
                (_v10.seq,) = _struct_I.unpack(str[start:end])
                _v11 = _v10.stamp
                _x = _v11
                start = end
                end += 8
                (_x.secs, _x.nsecs) = _struct_2I.unpack(str[start:end])
                # frame_id string.
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                start = end
                end += length
                if python3:
                    _v10.frame_id = str[start:end].decode('utf-8')
                else:
                    _v10.frame_id = str[start:end]
                # MapMetaData.
                _v12 = val1.info
                _v13 = _v12.map_load_time
                _x = _v13
                start = end
                end += 8
                (_x.secs, _x.nsecs) = _struct_2I.unpack(str[start:end])
                _x = _v12
                start = end
                end += 12
                (_x.resolution, _x.width, _x.height) = _struct_f2I.unpack(str[start:end])
                _v14 = _v12.origin
                _v15 = _v14.position
                _x = _v15
                start = end
                end += 24
                (_x.x, _x.y, _x.z) = _struct_3d.unpack(str[start:end])
                _v16 = _v14.orientation
                _x = _v16
                start = end
                end += 32
                (_x.x, _x.y, _x.z, _x.w) = _struct_4d.unpack(str[start:end])
                # lists array.
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                val1.lists = []
                for i in range(0, length):
                    val2 = multi_map_server.msg.VerticalOccupancyGridList()
                    _x = val2
                    start = end
                    end += 8
                    (_x.x, _x.y) = _struct_2f.unpack(str[start:end])
                    start = end
                    end += 4
                    (length,) = _struct_I.unpack(str[start:end])
                    pattern = ('<%si' % length)
                    start = end
                    end += struct.calcsize(pattern)
                    val2.upper = struct.unpack(pattern, str[start:end])
                    start = end
                    end += 4
                    (length,) = _struct_I.unpack(str[start:end])
                    pattern = ('<%si' % length)
                    start = end
                    end += struct.calcsize(pattern)
                    val2.lower = struct.unpack(pattern, str[start:end])
                    start = end
                    end += 4
                    (length,) = _struct_I.unpack(str[start:end])
                    pattern = ('<%si' % length)
                    start = end
                    end += struct.calcsize(pattern)
                    val2.mass = struct.unpack(pattern, str[start:end])
                    val1.lists.append(val2)
                self.maps.append(val1)
            # origins array.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            self.origins = []
            for i in range(0, length):
                val1 = geometry_msgs.msg.Pose()
                _v17 = val1.position
                _x = _v17
                start = end
                end += 24
                (_x.x, _x.y, _x.z) = _struct_3d.unpack(str[start:end])
                _v18 = val1.orientation
                _x = _v18
                start = end
                end += 32
                (_x.x, _x.y, _x.z, _x.w) = _struct_4d.unpack(str[start:end])
                self.origins.append(val1)
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)

    def serialize_numpy(self, buff, numpy):
        """Serialize into *buff*, using *numpy* for the int32 array fields."""
        try:
            length = len(self.maps)
            buff.write(_struct_I.pack(length))
            for val1 in self.maps:
                _v19 = val1.header
                buff.write(_struct_I.pack(_v19.seq))
                _v20 = _v19.stamp
                _x = _v20
                buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
                _x = _v19.frame_id
                length = len(_x)
                if (python3 or (type(_x) == unicode)):
                    _x = _x.encode('utf-8')
                    length = len(_x)
                if python3:
                    buff.write(struct.pack(('<I%sB' % length), length, *_x))
                else:
                    buff.write(struct.pack(('<I%ss' % length), length, _x))
                _v21 = val1.info
                _v22 = _v21.map_load_time
                _x = _v22
                buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
                _x = _v21
                buff.write(_struct_f2I.pack(_x.resolution, _x.width, _x.height))
                _v23 = _v21.origin
                _v24 = _v23.position
                _x = _v24
                buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
                _v25 = _v23.orientation
                _x = _v25
                buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
                length = len(val1.lists)
                buff.write(_struct_I.pack(length))
                for val2 in val1.lists:
                    _x = val2
                    buff.write(_struct_2f.pack(_x.x, _x.y))
                    # int32 arrays are written directly from the numpy buffers.
                    length = len(val2.upper)
                    buff.write(_struct_I.pack(length))
                    pattern = ('<%si' % length)
                    buff.write(val2.upper.tostring())
                    length = len(val2.lower)
                    buff.write(_struct_I.pack(length))
                    pattern = ('<%si' % length)
                    buff.write(val2.lower.tostring())
                    length = len(val2.mass)
                    buff.write(_struct_I.pack(length))
                    pattern = ('<%si' % length)
                    buff.write(val2.mass.tostring())
            length = len(self.origins)
            buff.write(_struct_I.pack(length))
            for val1 in self.origins:
                _v26 = val1.position
                _x = _v26
                buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
                _v27 = val1.orientation
                _x = _v27
                buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
        except struct.error as se:
            self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x)))))
        except TypeError as te:
            self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x)))))

    def deserialize_numpy(self, str, numpy):
        """Deserialize from *str*, using *numpy* for the int32 array fields."""
        try:
            if (self.maps is None):
                self.maps = None
            if (self.origins is None):
                self.origins = None
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            self.maps = []
            for i in range(0, length):
                val1 = multi_map_server.msg.SparseMap3D()
                _v28 = val1.header
                start = end
                end += 4
                (_v28.seq,) = _struct_I.unpack(str[start:end])
                _v29 = _v28.stamp
                _x = _v29
                start = end
                end += 8
                (_x.secs, _x.nsecs) = _struct_2I.unpack(str[start:end])
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                start = end
                end += length
                if python3:
                    _v28.frame_id = str[start:end].decode('utf-8')
                else:
                    _v28.frame_id = str[start:end]
                _v30 = val1.info
                _v31 = _v30.map_load_time
                _x = _v31
                start = end
                end += 8
                (_x.secs, _x.nsecs) = _struct_2I.unpack(str[start:end])
                _x = _v30
                start = end
                end += 12
                (_x.resolution, _x.width, _x.height) = _struct_f2I.unpack(str[start:end])
                _v32 = _v30.origin
                _v33 = _v32.position
                _x = _v33
                start = end
                end += 24
                (_x.x, _x.y, _x.z) = _struct_3d.unpack(str[start:end])
                _v34 = _v32.orientation
                _x = _v34
                start = end
                end += 32
                (_x.x, _x.y, _x.z, _x.w) = _struct_4d.unpack(str[start:end])
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                val1.lists = []
                for i in range(0, length):
                    val2 = multi_map_server.msg.VerticalOccupancyGridList()
                    _x = val2
                    start = end
                    end += 8
                    (_x.x, _x.y) = _struct_2f.unpack(str[start:end])
                    # int32 arrays are viewed zero-copy via numpy.frombuffer.
                    start = end
                    end += 4
                    (length,) = _struct_I.unpack(str[start:end])
                    pattern = ('<%si' % length)
                    start = end
                    end += struct.calcsize(pattern)
                    val2.upper = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
                    start = end
                    end += 4
                    (length,) = _struct_I.unpack(str[start:end])
                    pattern = ('<%si' % length)
                    start = end
                    end += struct.calcsize(pattern)
                    val2.lower = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
                    start = end
                    end += 4
                    (length,) = _struct_I.unpack(str[start:end])
                    pattern = ('<%si' % length)
                    start = end
                    end += struct.calcsize(pattern)
                    val2.mass = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
                    val1.lists.append(val2)
                self.maps.append(val1)
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            self.origins = []
            for i in range(0, length):
                val1 = geometry_msgs.msg.Pose()
                _v35 = val1.position
                _x = _v35
                start = end
                end += 24
                (_x.x, _x.y, _x.z) = _struct_3d.unpack(str[start:end])
                _v36 = val1.orientation
                _x = _v36
                start = end
                end += 32
                (_x.x, _x.y, _x.z, _x.w) = _struct_4d.unpack(str[start:end])
                self.origins.append(val1)
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)
class Segmenter():
    """ROS node wrapper that runs semantic segmentation on incoming camera
    images and republishes the input blended with a colorized class overlay.
    """

    def __init__(self):
        # Build the segmentation network from ROS parameters.
        segm_cfg = Munch.fromDict(rospy.get_param('segmentation'))
        segm_model = segmentation_models.DRNSeg(segm_cfg.arch, segm_cfg.data.classes, None, pretrained=True)
        segm_model = torch.nn.DataParallel(segm_model).cuda()
        cudnn.benchmark = True
        # Restore trained weights from the configured checkpoint.
        resume_path = rospy.get_param('segmentation_checkpoint')
        checkpoint = torch.load(resume_path)
        segm_model.load_state_dict(checkpoint['state_dict'])
        segm_model.eval()
        rospy.loginfo("=> loaded checkpoint '{}' (epoch {})".format(resume_path, checkpoint['epoch']))
        self.segm_model = segm_model
        self.transform = transforms.ToTensor()
        self.segmentation_alpha = rospy.get_param('segmentation_alpha')
        # BUGFIX: np.array(map(...)) wraps the map iterator in a 0-d object
        # array under Python 3; materialize the list so we get an (N, 3)
        # float array usable for fancy indexing below.
        self.cmap = (255.0 * np.array([colors.to_rgb(c) for c in rospy.get_param('object_colors')]))
        self.image_sub = message_filters.Subscriber(rospy.get_param('image_sub_topic'), Image)
        self.info_sub = message_filters.Subscriber(rospy.get_param('info_sub_topic'), CameraInfo)
        self.ts = message_filters.TimeSynchronizer([self.image_sub, self.info_sub], queue_size=1)
        self.ts.registerCallback(self.callback)
        self.bridge = CvBridge()
        self.image_pub = rospy.Publisher(rospy.get_param('segmentation_pub_topic'), Image, queue_size=1)

    def callback(self, image, camera_info):
        """Segment one synchronized image message and publish the blended
        visualization; logs the elapsed time per callback."""
        start_time = time.time()
        try:
            cv_image = self.bridge.imgmsg_to_cv2(image, 'bgr8')
        except CvBridgeError as e:
            rospy.logerr(e)
            # BUGFIX: without this return the code below would reference an
            # undefined cv_image and raise a NameError after logging.
            return
        np_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
        with torch.no_grad():
            input = self.transform(np_image).unsqueeze(0)
            output = self.segm_model(input).max(1)[1]
            prediction = output.cpu().squeeze(0).numpy()
        # Alpha-blend the class color map over the original image.
        np_image = (((1 - self.segmentation_alpha) * np_image) + (self.segmentation_alpha * self.cmap[prediction]))
        cv_image = cv2.cvtColor(np_image.astype(np.uint8), cv2.COLOR_RGB2BGR)
        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, 'bgr8'))
        except CvBridgeError as e:
            rospy.logerr(e)
        rospy.loginfo('callback time: {}ms'.format(int((1000 * (time.time() - start_time)))))
def test(loader, model, criterion, epoch, noise_sd, device, writer=None, print_freq=10):
    """Evaluate `model` on `loader` with Gaussian noise of std `noise_sd`
    added to the inputs (randomized-smoothing style evaluation).

    Parameters
    ----------
    loader : iterable yielding (inputs, targets) batches.
    model : network to evaluate (switched to eval mode here).
    criterion : loss function.
    epoch : current epoch index, used as the tensorboard step.
    noise_sd : standard deviation of the additive Gaussian input noise.
    device : torch device for inputs/targets.
    writer : optional tensorboard SummaryWriter.
    print_freq : console logging period, in batches.

    Returns
    -------
    (average loss, average top-1 accuracy) over the whole loader.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    model.eval()
    with torch.no_grad():
        for i, (inputs, targets) in enumerate(loader):
            data_time.update(time.time() - end)
            inputs, targets = inputs.to(device), targets.to(device)
            # Perturb the inputs with isotropic Gaussian noise.
            inputs = inputs + torch.randn_like(inputs, device=device) * noise_sd
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            acc1, acc5 = accuracy(outputs, targets, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(acc1.item(), inputs.size(0))
            top5.update(acc5.item(), inputs.size(0))
            batch_time.update(time.time() - end)
            end = time.time()
            if i % print_freq == 0:
                # BUGFIX: the format string previously contained literal
                # '\ ' (backslash-space) runs instead of labeled fields.
                print('Test: [{0}/{1}]\tTime {batch_time.avg:.3f}\t'
                      'Data {data_time.avg:.3f}\tLoss {loss.avg:.4f}\t'
                      'Acc@1 {top1.avg:.3f}\tAcc@5 {top5.avg:.3f}'.format(
                          i, len(loader), batch_time=batch_time,
                          data_time=data_time, loss=losses, top1=top1, top5=top5))
        if writer:
            writer.add_scalar('loss/test', losses.avg, epoch)
            # BUGFIX: both accuracies were written under the identical
            # 'accuracy/' tag, so top-5 overwrote the top-1 curve.
            writer.add_scalar('accuracy/test@1', top1.avg, epoch)
            writer.add_scalar('accuracy/test@5', top5.avg, epoch)
        return (losses.avg, top1.avg)
class _ResBlockSR(nn.Module):
    """Residual block for super-resolution: two 3x3 convolutions separated
    by LeakyReLU(0.2), with an identity skip connection added on top."""

    def __init__(self, inchannel, outchannel, stride=1):
        super(_ResBlockSR, self).__init__()
        self.layers = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, 3, stride, 1, bias=True),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(outchannel, outchannel, 3, stride, 1, bias=True),
        )
        # He-style init: zero-mean normal with variance 2 / fan_out,
        # zeroed biases.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2 / fan_out))
                if module.bias is not None:
                    module.bias.data.zero_()

    def forward(self, x):
        """Return x + conv_stack(x)."""
        return torch.add(x, self.layers(x))
_module
class Compose(object):
    """Ordered chain of data-pipeline transforms.

    Each entry may be a config dict (instantiated through the PIPELINES
    registry; the 'Empty' type is silently dropped) or an already-callable
    transform taking and returning (res, info).
    """

    def __init__(self, transforms):
        assert isinstance(transforms, collections.abc.Sequence)
        self.transforms = []
        for item in transforms:
            if isinstance(item, dict):
                # 'Empty' placeholders contribute nothing to the pipeline.
                if item['type'] == 'Empty':
                    continue
                self.transforms.append(build_from_cfg(item, PIPELINES))
            elif callable(item):
                self.transforms.append(item)
            else:
                raise TypeError('transform must be callable or a dict')

    def __call__(self, res, info):
        """Run (res, info) through every transform; abort with None as soon
        as a transform returns a None result."""
        for transform in self.transforms:
            res, info = transform(res, info)
            if res is None:
                return None
        return res, info

    def __repr__(self):
        body = ''.join('\n    {0}'.format(t) for t in self.transforms)
        return '{0}({1}\n)'.format(self.__class__.__name__, body)
_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1, outputs_collections=None, scope=None):
    """ResNet v1 bottleneck unit: 1x1 -> 3x3 (strided/atrous) -> 1x1 with a
    projection (or subsampled identity) shortcut, joined by a final ReLU.

    The result is registered in `outputs_collections` under the unit's
    original name scope.
    """
    with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        # Identity shortcut when channel counts already match; otherwise
        # project with an activation-free 1x1 convolution.
        if depth == depth_in:
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(inputs, depth, [1, 1], stride=stride,
                                   activation_fn=None, scope='shortcut')
        residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1, scope='conv1')
        residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
                                            rate=rate, scope='conv2')
        residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                               activation_fn=None, scope='conv3')
        output = tf.nn.relu(shortcut + residual)
        return slim.utils.collect_named_outputs(outputs_collections, sc.original_name_scope, output)
def safe_convert_to_torch_tensor(x, device=None):
    """Recursively convert a (possibly nested) structure into torch tensors.

    Existing tensors are kept (optionally moved to `device`), RepeatedValues
    are rebuilt with converted contents, and anything else is routed through
    a writable numpy copy.  float64 results are downcast to float32.
    """

    def _convert(value):
        if torch.is_tensor(value):
            return value if device is None else value.to(device)
        if isinstance(value, RepeatedValues):
            return RepeatedValues(
                tree.map_structure(_convert, value.values),
                value.lengths,
                value.max_len,
            )
        # Copy so we never share memory with (or mutate) the caller's data,
        # and force the write flag so torch.from_numpy accepts the buffer.
        buffer = np.asarray(value).copy()
        buffer.setflags(write=1)
        result = torch.from_numpy(buffer)
        if result.dtype == torch.double:
            result = result.float()
        return result if device is None else result.to(device)

    return tree.map_structure(_convert, x)
def find_most_similar(input_str, list_strings):
    """For each \\W+-separated token of `input_str`, return the index of the
    closest entry in `list_strings` by Levenshtein distance.

    The literal query 'all' instead selects every index of the module-level
    `pde_list`.  Ties pick the earliest candidate, matching argmin order.
    """
    if input_str == 'all':
        return list(range(len(pde_list)))
    indices = []
    for token in re.split('\\W+', input_str):
        best = min(range(len(list_strings)),
                   key=lambda j: lev.distance(token, list_strings[j]))
        indices.append(best)
    return indices
(interaction_name=str, receiver='Component', supplier='Component', dt_rungs=dict, rank_supplier='int', only_supply='bint', pairing_level=str, tile_indices_receiver='Py_ssize_t[::1]', tile_indices_supplier_paired='Py_ssize_t**', tile_indices_supplier_paired_N='Py_ssize_t*', extra_args=dict, apply_to_i='bint', apply_to_j='bint', factor_i='double', factor_j='double', factors='const double*', forcex_ij='double', forcey_ij='double', forcez_ij='double', indexp_j='Py_ssize_t', indexx_i='Py_ssize_t', indexx_j='Py_ssize_t', particle_particle_t_begin='double', particle_particle_t_final='double', periodic_offset_x='double', periodic_offset_y='double', periodic_offset_z='double', r2='double', r2_index_scaling='double', r2_max='double', rung_index_i='signed char', rung_index_j='signed char', rung_index_s='signed char', rung_indices_jumped_s='signed char*', shortrange_factor='double', shortrange_index='Py_ssize_t', softening='double', subtile_contain_jumping_s='bint', subtiling_r='Tiling', table='const double*', total_factor='double', x_ji='double', y_ji='double', z_ji='double', mom_r='double*', mom_s='double*', momx='double', momy='double', momz='double', returns='void')
def gravity_pairwise_shortrange(interaction_name, receiver, supplier, dt_rungs, rank_supplier, only_supply, pairing_level, tile_indices_receiver, tile_indices_supplier_paired, tile_indices_supplier_paired_N, extra_args):
    """Apply the short-range gravitational kick between particle pairs of
    `receiver` and `supplier`, using a pre-tabulated force-factor table.

    Momentum kicks are accumulated directly into the components' momentum
    arrays.  Unless `only_supply` is set, the opposite (Newton's third law)
    kick is also applied to the supplier's particles.

    NOTE: written in the CO*N*CEPT pyxpp style -- the `unswitch` contexts,
    the `B[...]`/`R[...]` wrappers and the typed-variable decorator are
    rewritten by the transpiler for the compiled build, so loop structure
    and statement order must not be rearranged casually.
    """
    # Raw momentum arrays that receive the kicks.
    mom_r = receiver.mom
    mom_s = supplier.mom
    # Per-particle jumped rung indices of the supplier (consulted only when
    # a subtile contains rung-jumping particles).
    rung_indices_jumped_s = supplier.rung_indices_jumped
    # Combined softening length of the two components selects the table.
    softening = combine_softening_lengths(receiver.softening_length, supplier.softening_length)
    table = get_shortrange_table(softening)
    # Per-rung kick factors (time-step weights).
    factors = compute_factors(receiver, supplier, dt_rungs)
    # Squared cutoff radius and table index scaling, hoisted out of the loop.
    r2_max = R[(shortrange_range ** 2)]
    r2_index_scaling = R[((shortrange_table_size - 1) / shortrange_table_maxr2)]
    # Sentinel meaning "no pair was processed" (skips the timing update).
    indexp_j = (- 1)
    for (indexx_i, indexp_j, indexx_j, rung_index_i, rung_index_s, x_ji, y_ji, z_ji, periodic_offset_x, periodic_offset_y, periodic_offset_z, apply_to_i, apply_to_j, factor_i, subtile_contain_jumping_s, particle_particle_t_begin, subtiling_r) in particle_particle(receiver, supplier, pairing_level, tile_indices_receiver, tile_indices_supplier_paired, tile_indices_supplier_paired_N, rank_supplier, interaction_name, only_supply, factors, forcerange=shortrange_range):
        # Apply the periodic image offset, if any.
        with unswitch(6):
            if (periodic_offset_x or periodic_offset_y or periodic_offset_z):
                x_ji += periodic_offset_x
                y_ji += periodic_offset_y
                z_ji += periodic_offset_z
        r2 = (((x_ji ** 2) + (y_ji ** 2)) + (z_ji ** 2))
        # Pairs beyond the short-range cutoff contribute nothing.
        if (r2 > r2_max):
            continue
        # Look up the tabulated short-range force factor for this r^2.
        shortrange_index = int((r2 * r2_index_scaling))
        shortrange_factor = table[shortrange_index]
        # Kick the receiver particle i.
        with unswitch(3):
            if apply_to_i:
                total_factor = (factor_i * shortrange_factor)
                momx = (x_ji * total_factor)
                momy = (y_ji * total_factor)
                momz = (z_ji * total_factor)
                mom_r[(indexx_i + 0)] += momx
                mom_r[(indexx_i + 1)] += momy
                mom_r[(indexx_i + 2)] += momz
        # Kick the supplier particle j with the opposite momentum change.
        with unswitch(8):
            if B[(not only_supply)]:
                with unswitch(2):
                    if apply_to_j:
                        # Resolve j's rung, accounting for rung jumps.
                        with unswitch(4):
                            if subtile_contain_jumping_s:
                                rung_index_j = rung_indices_jumped_s[indexp_j]
                            else:
                                rung_index_j = rung_index_s
                        with unswitch(3):
                            if apply_to_i:
                                # Same rung: reuse the momentum computed for
                                # i, just negated.
                                if (rung_index_i == rung_index_j):
                                    mom_s[(indexx_j + 0)] -= momx
                                    mom_s[(indexx_j + 1)] -= momy
                                    mom_s[(indexx_j + 2)] -= momz
                                    continue
                        # Different rung: recompute with j's factor.
                        factor_j = factors[rung_index_j]
                        total_factor = (factor_j * shortrange_factor)
                        mom_s[(indexx_j + 0)] -= (x_ji * total_factor)
                        mom_s[(indexx_j + 1)] -= (y_ji * total_factor)
                        mom_s[(indexx_j + 2)] -= (z_ji * total_factor)
    # Record computation time on the receiver subtiling, but only if at
    # least one pair was actually visited.
    if (indexp_j != (- 1)):
        particle_particle_t_final = time()
        subtiling_r.computation_time += (particle_particle_t_final - particle_particle_t_begin)
class LowRankAdapter(nn.Module):
    """Bottleneck adapter whose down/up projections are low-rank linear maps.

    Projects the input down by `config.reduction_factor`, applies the
    configured non-linearity, and projects back up to the input dimension.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.input_dim = config.input_dim
        self.down_sample_size = self.input_dim // config.reduction_factor
        self.activation = Activations(config.non_linearity.lower())
        self.down_sampler = LowRankLinear(self.input_dim, self.down_sample_size,
                                          w_init=config.low_rank_w_init,
                                          rank=config.low_rank_rank)
        self.up_sampler = LowRankLinear(self.down_sample_size, self.input_dim,
                                        w_init=config.low_rank_w_init,
                                        rank=config.low_rank_rank)
        # Optionally keep the latent activation around for later inspection.
        self.track_z = config.track_z

    def forward(self, x):
        """Return up(activation(down(x))), caching z when track_z is set."""
        hidden = self.activation(self.down_sampler(x))
        if self.track_z:
            self.z = hidden
        return self.up_sampler(hidden)
def test(epoch):
    """Evaluate the global `net` on the rotation-augmented test set.

    Each batch provides four rotated views (inputs..inputs3) with matching
    rotation targets; loss is averaged over the four views and accuracy is
    computed over all 4*batch predictions.  Appends "acc:epoch" to
    ./best_rotation.txt every call and checkpoints net/acc/epoch to
    ./checkpoint/rotation.pth whenever accuracy improves.

    Relies on module-level globals: net, testloader, criterion, device,
    progress_bar, best_acc.
    """
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for (batch_idx, (inputs, inputs1, inputs2, inputs3, targets, targets1, targets2, targets3, path)) in enumerate(testloader):
            (inputs, inputs1, targets, targets1) = (inputs.to(device), inputs1.to(device), targets.to(device), targets1.to(device))
            (inputs2, inputs3, targets2, targets3) = (inputs2.to(device), inputs3.to(device), targets2.to(device), targets3.to(device))
            outputs = net(inputs)
            outputs1 = net(inputs1)
            outputs2 = net(inputs2)
            outputs3 = net(inputs3)
            loss1 = criterion(outputs, targets)
            loss2 = criterion(outputs1, targets1)
            loss3 = criterion(outputs2, targets2)
            loss4 = criterion(outputs3, targets3)
            # Average the per-view losses.
            loss = ((((loss1 + loss2) + loss3) + loss4) / 4.0)
            test_loss += loss.item()
            (_, predicted) = outputs.max(1)
            (_, predicted1) = outputs1.max(1)
            (_, predicted2) = outputs2.max(1)
            (_, predicted3) = outputs3.max(1)
            # Four predictions are scored per sample.
            total += (targets.size(0) * 4)
            correct += predicted.eq(targets).sum().item()
            correct += predicted1.eq(targets1).sum().item()
            correct += predicted2.eq(targets2).sum().item()
            correct += predicted3.eq(targets3).sum().item()
            progress_bar(batch_idx, len(testloader), ('Loss: %.3f | Acc: %.3f%% (%d/%d)' % ((test_loss / (batch_idx + 1)), ((100.0 * correct) / total), correct, total)))
    acc = ((100.0 * correct) / total)
    with open('./best_rotation.txt', 'a') as f:
        f.write((((str(acc) + ':') + str(epoch)) + '\n'))
    if (acc > best_acc):
        print('Saving..')
        state = {'net': net.state_dict(), 'acc': acc, 'epoch': epoch}
        if (not os.path.isdir('checkpoint')):
            os.mkdir('checkpoint')
        torch.save(state, './checkpoint/rotation.pth')
        best_acc = acc
def rejection_sampling(command, seed=0):
    """Run `command` and yield one facet list per '#'-delimited section of
    its stdout.

    The first output line (header) and the trailing empty line are skipped;
    every other line is parsed as whitespace-separated integers.  A line
    starting with '#' terminates the current section; the final section is
    yielded after the output ends.  `seed` is accepted for interface
    compatibility but is not used here.
    """
    completed = sp.run(command, stdout=sp.PIPE)
    lines = completed.stdout.decode().split('\n')[1:-1]
    current = []
    for line in lines:
        if line.startswith('#'):
            yield current
            current = []
        else:
            current.append([int(token) for token in line.strip().split()])
    yield current
_CODERS.register_module()
class PseudoBBoxCoder(BaseBBoxCoder):
    """Pass-through bbox coder: encoding returns the ground-truth boxes and
    decoding returns the predictions unchanged."""

    def __init__(self, **kwargs):
        # NOTE(review): super(BaseBBoxCoder, self) skips BaseBBoxCoder's own
        # __init__ and forwards kwargs to the next class in the MRO --
        # confirm this is intended rather than super(PseudoBBoxCoder, self).
        super(BaseBBoxCoder, self).__init__(**kwargs)

    def encode(self, bboxes, gt_bboxes):
        """Identity encoding: return gt_bboxes unchanged."""
        return gt_bboxes

    def decode(self, bboxes, pred_bboxes):
        """Identity decoding: return pred_bboxes unchanged."""
        return pred_bboxes
def hernquist_ppf(r, a_scale=1.0):
    """Percent-point function (inverse CDF) of a Hernquist mass profile.

    Given a cumulative mass fraction `r` in (0, 1], return the radius (in
    the same units as the scale length `a_scale`) enclosing that fraction.
    """
    root = np.sqrt(a_scale ** 2 - r * a_scale ** 2)
    return (a_scale - a_scale * r + root) / r
def main(args, override_args=None):
    """Validate fairseq checkpoint(s) on the configured validation subsets
    and print aggregated metrics per subset.

    `override_args`, when given, supplies attribute overrides plus a
    'model_overrides' dict (evaluated from its string form) applied while
    loading the checkpoint.
    """
    utils.import_user_module(args)
    assert ((args.max_tokens is not None) or (args.max_sentences is not None)), 'Must specify batch size either with --max-tokens or --max-sentences'
    use_fp16 = args.fp16
    use_cuda = (torch.cuda.is_available() and (not args.cpu))
    if (override_args is not None):
        overrides = vars(override_args)
        # NOTE(review): eval() of the model_overrides string executes
        # arbitrary code from the command line -- acceptable for a CLI tool,
        # but never feed it untrusted input.
        overrides.update(eval(getattr(override_args, 'model_overrides', '{}')))
    else:
        overrides = None
    logger.info('loading model(s) from {}'.format(args.path))
    (models, model_args, task) = checkpoint_utils.load_model_ensemble_and_task([args.path], arg_overrides=overrides, suffix=getattr(args, 'checkpoint_suffix', ''))
    model = models[0]
    # NOTE(review): the loop variable shadows `model`; after the loop it is
    # the LAST ensemble member, and that single model is what valid_step
    # evaluates below -- confirm this is intended for multi-model ensembles.
    for model in models:
        if use_fp16:
            model.half()
        if use_cuda:
            model.cuda()
    logger.info(model_args)
    # Build the criterion from the (possibly overridden) checkpoint args.
    criterion = task.build_criterion(model_args)
    criterion.eval()
    for subset in args.valid_subset.split(','):
        try:
            task.load_dataset(subset, combine=False, epoch=1)
            dataset = task.dataset(subset)
        except KeyError:
            raise Exception(('Cannot find dataset: ' + subset))
        itr = task.get_batch_iterator(dataset=dataset, max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=utils.resolve_max_positions(task.max_positions(), *[m.max_positions() for m in models]), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, seed=args.seed, num_workers=args.num_workers).next_epoch_itr(shuffle=False)
        progress = progress_bar.progress_bar(itr, log_format=args.log_format, log_interval=args.log_interval, prefix=f"valid on '{subset}' subset", default_log_format=('tqdm' if (not args.no_progress_bar) else 'simple'))
        log_outputs = []
        for (i, sample) in enumerate(progress):
            sample = (utils.move_to_cuda(sample) if use_cuda else sample)
            (_loss, _sample_size, log_output) = task.valid_step(sample, model, criterion)
            progress.log(log_output, step=i)
            log_outputs.append(log_output)
        # Aggregate per-batch logging outputs into subset-level metrics.
        with metrics.aggregate() as agg:
            task.reduce_metrics(log_outputs, criterion)
            log_output = agg.get_smoothed_values()
        progress.print(log_output, tag=subset, step=i)
class ResNetNoPadding(nn.Module):
    """ResNet variant whose stem uses an unpadded 7x7 convolution
    (Conv2d_NoPadding), followed by four standard residual stages and a
    global-average-pooled linear classifier."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNetNoPadding, self).__init__()
        self.in_planes = 64
        self.conv1 = Conv2d_NoPadding(3, 64, kernel_size=7, stride=2, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first one uses `stride`."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        """Run the stem, the four stages, global average pooling and the
        final linear classifier; returns (batch, num_classes) logits."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # BUGFIX: removed leftover debug print(out.shape) that spammed
        # stdout on every forward pass.
        out = F.avg_pool2d(out, out.shape[3])
        out = out.view(out.size(0), -1)
        return self.linear(out)
class ArcsinhFlow(Flow):
    """Invertible arcsinh flow: fk = a + b * asinh((f0 - c) / d).

    When `set_restrictions` is true, b and d pass through softplus so the
    transform stays monotonically increasing.  With `add_init_f0` the input
    is added back (residual flow), which forces set_restrictions on.
    """

    def __init__(self, init_a: float, init_b: float, init_c: float, init_d: float, add_init_f0: bool, set_restrictions: bool) -> None:
        super(ArcsinhFlow, self).__init__()
        # The four learnable scalar parameters of the transform.
        self.a = nn.Parameter(torch.tensor(init_a, dtype=cg.dtype))
        self.b = nn.Parameter(torch.tensor(init_b, dtype=cg.dtype))
        self.c = nn.Parameter(torch.tensor(init_c, dtype=cg.dtype))
        self.d = nn.Parameter(torch.tensor(init_d, dtype=cg.dtype))
        # The residual variant requires restricted (positive) b and d to
        # remain invertible.
        if add_init_f0:
            set_restrictions = True
        self.set_restrictions = set_restrictions
        self.add_init_f0 = add_init_f0

    def asinh(self, f: torch.Tensor) -> torch.Tensor:
        """Manual inverse hyperbolic sine: log(f + sqrt(f^2 + 1))."""
        return torch.log((f + (((f ** 2) + 1) ** 0.5)))

    def forward(self, f0: torch.Tensor, X: torch.Tensor=None) -> torch.Tensor:
        """Map f0 through the flow; X is accepted for interface parity and
        is unused here."""
        a = self.a
        c = self.c
        d = self.d
        b = self.b
        if self.set_restrictions:
            d = softplus(d)
            b = softplus(b)
        fk = (a + (b * self.asinh(((f0 - c) / d))))
        if self.add_init_f0:
            return (fk + f0)
        return fk

    def _forward_grad(self, x):
        # NOTE(review): this expression does not match d/df0 of forward()
        # above (which would be b / (d * sqrt(1 + ((f0 - c)/d)**2)));
        # confirm which flow parameterization this gradient belongs to.
        a = self.a
        c = self.c
        d = self.d
        b = self.b
        if self.set_restrictions:
            d = softplus(d)
            b = softplus(b)
        return ((b * torch.cosh(((b * self.asinh(x)) - a))) / torch.sqrt((1 + (x ** 2))))

    def inverse(self, f: torch.Tensor) -> torch.Tensor:
        """Invert forward (non-residual variant): c + d * sinh((f - a)/b)."""
        b = self.b
        d = self.d
        if self.set_restrictions:
            b = softplus(self.b)
            d = softplus(self.d)
        return (self.c + (d * torch.sinh(((f - self.a) / b))))
class Conv3d_wd(nn.Conv3d):
    """3D convolution with weight standardization: every output filter is
    normalized to zero mean and (approximately) unit standard deviation
    before the convolution is applied."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, bias=True):
        super(Conv3d_wd, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)

    def forward(self, x):
        """Convolve `x` with the standardized weights."""
        w = self.weight
        # Per-filter mean over (in_channels, kD, kH, kW).
        mean = w.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True).mean(dim=4, keepdim=True)
        w = w - mean
        # Per-filter std, with an epsilon for numerical stability.
        std = torch.sqrt(torch.var(w.view(w.size(0), -1), dim=1) + 1e-12).view(-1, 1, 1, 1, 1)
        w = w / std.expand_as(w)
        return F.conv3d(x, w, self.bias, self.stride, self.padding, self.dilation, self.groups)
class DataLoaderX(DataLoader):
    """DataLoader that prefetches the next batch onto a dedicated CUDA
    stream (through BackgroundGenerator) so host-to-device copies overlap
    with compute on the default stream."""

    def __init__(self, local_rank, **kwargs):
        super(DataLoaderX, self).__init__(**kwargs)
        # Dedicated copy stream on this rank's device.
        self.stream = torch.cuda.Stream(local_rank)
        self.local_rank = local_rank

    def __iter__(self):
        self.iter = super(DataLoaderX, self).__iter__()
        # Run the underlying iterator in a background thread.
        self.iter = BackgroundGenerator(self.iter, self.local_rank)
        self.preload()
        return self

    def preload(self):
        """Fetch the next batch and launch its async copy to the GPU."""
        self.batch = next(self.iter, None)
        if (self.batch is None):
            return None
        with torch.cuda.stream(self.stream):
            for k in range(len(self.batch)):
                # non_blocking copies are ordered on self.stream.
                self.batch[k] = self.batch[k].to(device=self.local_rank, non_blocking=True)

    def __next__(self):
        # Make the compute stream wait until the prefetch copies finished
        # before handing the batch out.
        torch.cuda.current_stream().wait_stream(self.stream)
        batch = self.batch
        if (batch is None):
            raise StopIteration
        self.preload()
        return batch
class PytorchRayWorker(TorchRunner):
    """TorchRunner variant for Ray that defaults to single-process local
    training and can be upgraded in place to Horovod data parallelism."""

    def __init__(self, model_creator, optimizer_creator, loss_creator=None, metrics=None, scheduler_creator=None, config=None, sync_stats=True, log_level=logging.INFO):
        super().__init__(model_creator=model_creator, optimizer_creator=optimizer_creator, loss_creator=loss_creator, metrics=metrics, scheduler_creator=scheduler_creator, config=config, sync_stats=sync_stats, log_level=log_level)
        # Local (non-distributed) defaults until setup_horovod() is called.
        self.backend = 'torch-local'
        self.rank = 0
        self.size = 0

    def setup_horovod(self):
        """Initialize Horovod and rebuild model/optimizer for distributed
        training."""
        import horovod.torch as hvd
        hvd.init()
        self.backend = 'horovod'
        self.rank = hvd.rank()
        self.size = hvd.size()
        self.setup_components_horovod()
        self.training_models = self.models
        self.setup_operator(self.training_models)

    def get_node_ip_port(self):
        """Return (node IP, free port) for worker rendezvous."""
        ip = self.get_node_ip()
        port = find_free_port()
        return (ip, port)

    def get_node_ip(self):
        """IP address of the Ray node this worker runs on."""
        return ray._private.services.get_node_ip_address()

    def setup_components_horovod(self):
        """Create the single supported model/optimizer pair and wrap the
        optimizer with Horovod's DistributedOptimizer, broadcasting initial
        state from rank 0."""
        import horovod.torch as hvd
        self.logger.debug('Creating model')
        self.models = self.model_creator(self.config)
        if (not isinstance(self.models, Iterable)):
            self.models = [self.models]
        else:
            # Multiple models are explicitly unsupported.
            invalidInputError(False, 'only support single model for now')
        invalidInputError(all((isinstance(model, nn.Module) for model in self.models)), 'All models must be PyTorch models: {}.'.format(self.models))
        self.logger.debug('Creating optimizer.')
        self.optimizers = self.optimizer_creator(self.given_models, self.config)
        if (not isinstance(self.optimizers, Iterable)):
            hvd.broadcast_parameters(self.models[0].state_dict(), root_rank=0)
            hvd.broadcast_optimizer_state(self.optimizers, root_rank=0)
            parameters = self.models[0].named_parameters()
            self.optimizers = hvd.DistributedOptimizer(self.optimizers, named_parameters=parameters)
            self.optimizers = [self.optimizers]
        else:
            # Multiple optimizers are explicitly unsupported.
            invalidInputError(False, 'only support one optimizer for now')
        self._create_schedulers_if_available()
        self._create_loss()

    def predict(self, data_creator, batch_size=32, profile=False, callbacks=None):
        """Run inference over data produced by `data_creator` (either an
        IterableDataset or a Ray object ref holding an xshards partition)."""
        config = copy.copy(self.config)
        self._toggle_profiling(profile=profile)
        shards_ref = data_creator(config, batch_size)
        if isinstance(shards_ref, IterableDataset):
            pred_stats = super().predict(partition=shards_ref, batch_size=batch_size, profile=profile, callbacks=callbacks)
            for pred_stat in pred_stats:
                # NOTE(review): updating a dict with itself is a no-op, and
                # worker_stats ends up as only the LAST element's
                # 'prediction' -- verify the intended aggregation here.
                pred_stat.update(pred_stat)
                worker_stats = pred_stat['prediction']
        else:
            if (not isinstance(shards_ref, ray.ObjectID)):
                invalidInputError(False, 'Only xshards and Ray Dataset is supported for predict')
            partition = ray.get(shards_ref)
            worker_stats = super().predict(partition=partition, batch_size=batch_size, profile=profile, callbacks=callbacks)
        return worker_stats
def edge_flip(F, FF, FFi, f0, e0, AdjMat_lil):
    """Flip edge `e0` of face `f0` in a triangle mesh, updating the face
    array F, the face-adjacency maps FF/FFi and the sparse vertex adjacency
    matrix in place.

    Conventions (as used by the indexing below):
      F[f, e]   -- vertex at corner e of face f.
      FF[f, e]  -- face adjacent to f across edge e (-1 on the boundary).
      FFi[f, e] -- index of that shared edge within the adjacent face.

    Asserts (rather than returning False) when the edge is on the boundary
    or when the flipped edge already exists.  Returns True on success.

    NOTE: the update order below is deliberate -- several assignments read
    entries rewritten just above; do not reorder.
    """
    f1 = int(FF[(f0, e0)])
    # Boundary edges cannot be flipped.
    if (f1 == (- 1)):
        assert False
    e1 = int(FFi[(f0, e0)])
    # Corner indices following e0/e1 within their faces (cyclic mod 3).
    e01 = ((e0 + 1) % 3)
    e02 = ((e0 + 2) % 3)
    e11 = ((e1 + 1) % 3)
    e12 = ((e1 + 2) % 3)
    # Neighboring faces around the quad formed by f0 and f1.
    f01 = int(FF[(f0, e01)])
    f02 = int(FF[(f0, e02)])
    f11 = int(FF[(f1, e11)])
    f12 = int(FF[(f1, e12)])
    # u0/u1 span the edge being removed; v0/v1 the diagonal being created.
    u1 = F[(f0, e01)]
    u0 = F[(f1, e11)]
    v0 = F[(f0, e02)]
    v1 = F[(f1, e12)]
    # Refuse to create an edge that already exists in the mesh.
    if (AdjMat_lil[(v0, v1)] != 0):
        assert False
    # Swap the diagonal in the vertex adjacency matrix.
    AdjMat_lil[(v0, v1)] = 1
    AdjMat_lil[(v1, v0)] = 1
    AdjMat_lil[(u0, u1)] = 0
    AdjMat_lil[(u1, u0)] = 0
    # Re-wire the two faces to use the new diagonal.
    F[(f0, e01)] = F[(f1, e12)]
    F[(f1, e11)] = F[(f0, e02)]
    FF[(f0, e0)] = f11
    FF[(f0, e01)] = f1
    FF[(f1, e1)] = f01
    FF[(f1, e11)] = f0
    # Fix the outer neighbors' back-pointers (reads pre-update FFi values).
    if (f11 != (- 1)):
        FF[(f11, FFi[(f1, e11)])] = f0
    if (f01 != (- 1)):
        FF[(f01, FFi[(f0, e01)])] = f1
    FFi[(f0, e0)] = FFi[(f1, e11)]
    FFi[(f1, e1)] = FFi[(f0, e01)]
    FFi[(f0, e01)] = e11
    FFi[(f1, e11)] = e01
    # These lookups intentionally use the just-updated FFi entries.
    if (f11 != (- 1)):
        FFi[(f11, FFi[(f0, e0)])] = e0
    if (f01 != (- 1)):
        FFi[(f01, FFi[(f1, e1)])] = e1
    return True
def get_optimizer(model, lr=0.001, wd=0.0):
    """Build an Adam optimizer over the model's trainable parameters only
    (frozen parameters, requires_grad=False, are excluded)."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    return torch.optim.Adam(trainable, lr=lr, weight_decay=wd)
def domain_encoding(loaders, args, encoder):
    """Encode every batch from each loader and compute per-domain mean
    embeddings.

    For each loader, appends a tuple (mean over all samples, mean over
    positive samples (label 1), mean over negative samples (label 0)) to
    the returned list, preserving loader order.

    NOTE(review): torch.autograd.Variable wrapping here presumably predates
    PyTorch 0.4, where it became a no-op -- confirm before relying on it.
    """
    statistics = []
    for loader in loaders:
        ind = 0
        labels = None
        S = []
        for (batch, label) in loader:
            if args.cuda:
                batch = Variable(batch.cuda())
            S.append(encoder(batch))
            # Accumulate labels across batches in order.
            if (ind == 0):
                labels = label
            else:
                labels = torch.cat((labels, label), dim=0)
            ind += 1
        S = torch.cat(S, 0)
        # Row indices of negative/positive samples, expanded column-wise so
        # torch.gather can select whole embedding rows.
        neg_index = (labels == 0).nonzero()
        pos_index = (labels == 1).nonzero()
        neg_index = Variable(neg_index.expand(neg_index.size(0), S.size(1)))
        pos_index = Variable(pos_index.expand(pos_index.size(0), S.size(1)))
        if args.cuda:
            pos_index = pos_index.cuda()
            neg_index = neg_index.cuda()
        pos_S = torch.gather(S, 0, pos_index)
        neg_S = torch.gather(S, 0, neg_index)
        pos_mu_S = torch.mean(pos_S, dim=0, keepdim=True)
        neg_mu_S = torch.mean(neg_S, dim=0, keepdim=True)
        mu_S = torch.mean(S, dim=0, keepdim=True)
        statistics.append((mu_S, pos_mu_S, neg_mu_S))
    return statistics
def main():
    """Demonstrate log-level filtering: for each severity threshold, create
    a logger configured at that level and emit one message of every
    severity, so only messages at or above the threshold appear."""
    for lvl in [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]:
        log_name = str(lvl)
        init_log(log_name, lvl)
        logger = logging.getLogger(log_name)
        print('****cur lvl:{}'.format(lvl))
        logger.debug('debug')
        logger.info('info')
        logger.warning('warning')
        logger.error('error')
        # BUGFIX: message previously read 'critiacal'.
        logger.critical('critical')
def AddSamplerLayer(x, num_samples, traj_length, feature_size, activation=None):
    """Append a dense sampling head to `x` and reshape its output to
    (num_samples, traj_length, feature_size).

    `activation`, when given, is applied between the dense layer and the
    reshape.
    """
    flat_size = num_samples * traj_length * feature_size
    x = Dense(flat_size)(x)
    if activation is not None:
        x = activation(x)
    return Reshape((num_samples, traj_length, feature_size))(x)
class Uniform(object):
    """Sampler drawing floats uniformly between two endpoints via the
    module-level `random` generator."""

    def __init__(self, a, b):
        # Interval endpoints; random.uniform tolerates a > b as well.
        self.a = a
        self.b = b

    def sample(self):
        """Draw one value with random.uniform(a, b)."""
        return random.uniform(self.a, self.b)
class CIFAR100(CIFAR10):
    """CIFAR-100 dataset: identical archive layout to CIFAR-10, differing
    only in the download metadata and the (fine/coarse) label keys."""

    base_folder = 'cifar-100-python'
    # BUGFIX: the URL string literal was truncated/unterminated (a syntax
    # error); restored to the official distribution location.
    url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
    filename = 'cifar-100-python.tar.gz'
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    train_list = [['train', '16019d7e3df5f24257cddd939b257f8d']]
    test_list = [['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc']]
    # NOTE(review): stock torchvision uses filename 'meta' for this archive
    # member; 'tiny' looks project-specific -- confirm the archive actually
    # contains a file of that name.
    meta = {'filename': 'tiny', 'key': 'fine_label_names', 'coarse_key': 'coarse_label_names', 'md5': '7973b15100ade9c7d40fb424638fde48'}
def resize():
    """Resize every image file listed in the module-level `dirs` (relative
    to the module-level `path`) to 64x64 and save it as a high-quality JPEG
    next to the original, with ' resized.jpg' appended to the stem."""
    for item in dirs:
        src = path + item
        if os.path.isfile(src):
            # BUGFIX: use a context manager so each opened image file handle
            # is closed, instead of leaking one per iteration.
            with Image.open(src) as im:
                stem, _ext = os.path.splitext(src)
                # NOTE(review): Image.ANTIALIAS was removed in Pillow 10;
                # switch to the identical Image.LANCZOS filter if upgrading.
                resized = im.resize((64, 64), Image.ANTIALIAS)
                resized.save(stem + ' resized.jpg', 'JPEG', quality=90)
(version='2.0')
def reset_non_value_to_default(obj, key, default):
    """Fetch `key` from `obj` (dict item or object attribute), returning
    `default` when the entry is missing or holds None.

    Parameters
    ----------
    obj : dict or object to read from.
    key : item key / attribute name.
    default : value returned for missing-or-None entries.

    Returns
    -------
    The stored value when present and not None, otherwise `default`.
    """
    # A single .get/getattr with a None fallback covers both the "missing"
    # and the "explicitly None" cases (idiomatic replacement for the former
    # `key not in obj.keys()` membership test).
    value = obj.get(key) if isinstance(obj, dict) else getattr(obj, key, None)
    return default if value is None else value
class Pipeline2D():
    """Training/evaluation pipeline for 2D semantic segmentation on ScanNet
    scenes (DeepLabV3+ or UNet backbones), driven by a config object."""

    def __init__(self, cfg):
        self.cfg = cfg
        print('Use visdom:', cfg.visdom.use)
        print('Use virtual view:', (not cfg.dataset.real_view))

    def train(self):
        """Train the configured model on scene 0's image set, optionally
        plotting the loss to visdom, and checkpoint every 10 epochs."""
        device = self.get_device()
        scene_dataset = self.get_scene_dataset(mode='train', use_transform=True)
        model = self.get_model().to(device)
        lr = self.cfg.optimizer.learning_rate
        if self.cfg.optimizer.name == 'Adam':
            optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=self.cfg.optimizer.weight_decay)
        else:
            # BUGFIX: an unsupported optimizer name previously left
            # `optimizer` as None and crashed later at optimizer.zero_grad().
            raise ValueError('Unsupported optimizer: {}'.format(self.cfg.optimizer.name))
        loss_function = torch.nn.CrossEntropyLoss(reduction='sum')
        model.train()
        if self.cfg.visdom.use:
            viz = Visdom(env=self.cfg.visdom.env)
        num_epoch = self.cfg.num_epoch
        for epoch in range(num_epoch):
            for scene_id in range(1):
                image_dataset = scene_dataset[scene_id]['imgset']
                image_dataloader = data.DataLoader(dataset=image_dataset, batch_size=self.cfg.data_loader.batch_size, shuffle=False, num_workers=self.cfg.data_loader.num_workers, collate_fn=collate_image)
                for (idx, batch) in enumerate(image_dataloader):
                    img = batch['color_img'].to(device)
                    semantic_label = batch['semantic_label'].to(device)
                    pred = model(img)
                    # BUGFIX: do not apply softmax before CrossEntropyLoss --
                    # the loss already combines log-softmax and NLL, so
                    # feeding probabilities double-applies softmax.
                    loss = loss_function(pred, semantic_label)
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                    if (idx % 10) == 0:
                        if self.cfg.visdom.use:
                            viz.line(X=np.array([idx]), Y=np.array([loss.item()]), win=('epoch%d scene%d' % (epoch, scene_id)), opts=dict(title=('epoch%d scene%d' % (epoch, scene_id))), update='append')
                        elif (idx % 100) == 0:
                            # Periodic console evaluation of this batch;
                            # argmax of logits equals argmax of softmax.
                            pred_label = torch.max(pred, dim=1).indices
                            mean_iou = miou_2d(pred_label.cpu(), semantic_label.cpu())
                            mean_iou = (mean_iou.sum() / len(mean_iou))
                            print(('Evaluation: epoch %d idx %d miou:%f' % (epoch, idx, mean_iou)))
                            print(('epoch %d idx %d loss:%f' % (epoch, idx, loss.item())))
            if (epoch % 10) == 0:
                self.save_model(model, (self.cfg.model.model_name + ('_epoch%d' % epoch)))
                print(('Model in epoch%u saved. ' % epoch))

    def evaluation(self):
        """Run the checkpoint at cfg.evaluation.model_path over scene 0 and
        print the per-batch mean IoU."""
        model_path = self.cfg.evaluation.model_path
        device = self.get_device()
        scene_dataset = self.get_scene_dataset(mode='train', use_transform=True)
        model = self.get_model(model_path).to(device)
        model.eval()
        if self.cfg.visdom.use:
            viz = Visdom(env=self.cfg.visdom.env)
        for scene_id in range(1):
            image_dataset = scene_dataset[scene_id]['imgset']
            image_dataloader = data.DataLoader(dataset=image_dataset, batch_size=1, shuffle=False, num_workers=self.cfg.data_loader.num_workers, collate_fn=collate_image)
            for (idx, batch) in enumerate(image_dataloader):
                with torch.no_grad():
                    img = batch['color_img'].to(device)
                    semantic_label = batch['semantic_label'].to(device)
                    pred = model(img)
                    pred_label = torch.max(pred, dim=1).indices
                    mean_iou = miou_2d(pred_label.cpu(), semantic_label.cpu())
                    print(mean_iou)

    def save_model(self, model, model_name):
        """Save the model's state_dict to
        cfg.model.save_model_path/<model_name>.pth."""
        save_path = os.path.join(self.cfg.model.save_model_path, (model_name + '.pth'))
        torch.save(model.state_dict(), save_path)

    def get_scene_dataset(self, mode='train', use_transform=True):
        """Return the real- or virtual-view ScanNet dataset per config."""
        if self.cfg.dataset.real_view:
            return RealviewScannetDataset(self.cfg, mode=mode, use_transform=use_transform)
        return VirtualviewScannetDataset(self.cfg, mode=mode, use_transform=use_transform)

    def get_model(self, model_path=None):
        """Build the configured network and load its weights.

        When `model_path` is given, those weights are loaded; otherwise the
        pretrained checkpoint from the config is used (if enabled).
        """
        if self.cfg.model.model_name == 'deeplabv3+':
            from modeling.deeplab import DeepLab
            model = DeepLab(backbone=self.cfg.model.backbone, num_classes=self.cfg.model.num_classes, output_stride=self.cfg.model.output_stride)
        elif self.cfg.model.model_name == 'unet':
            from unet import UNet
            model = UNet(n_channels=self.cfg.model.num_channels, n_classes=self.cfg.model.num_classes)
        else:
            # BUGFIX: an unknown model name previously fell through and
            # raised UnboundLocalError further down.
            raise ValueError('Unknown model name: {}'.format(self.cfg.model.model_name))
        if model_path is None:
            if self.cfg.model.pretrain:
                model.load_state_dict(torch.load(self.cfg.model.pretrained_model_path))
        else:
            model.load_state_dict(torch.load(model_path))
        print('Model name:', self.cfg.model.model_name)
        return model

    def get_device(self):
        """Return the torch.device selected by cfg.device."""
        return torch.device(self.cfg.device)
def reset(nn):
    """Re-initialize `nn` by calling reset_parameters() wherever that hook
    exists: on each child when `nn` has children, otherwise on `nn` itself.
    No-op for None."""

    def _apply(module):
        if hasattr(module, 'reset_parameters'):
            module.reset_parameters()

    if nn is None:
        return
    children = list(nn.children()) if hasattr(nn, 'children') else []
    if children:
        for child in children:
            _apply(child)
    else:
        _apply(nn)
def vote_last_response(states, vote_type, model_selectors, request: gr.Request):
    """Record a user's vote on the latest battle responses and reveal the
    two model names in the UI.

    Yields gradio updates of the form (name_A, name_B, cleared textbox,
    three disabled buttons).  For anonymous battles (no ':' in the selector)
    the same update is yielded repeatedly with a short sleep to animate the
    reveal.
    """
    # Append one JSON line per vote to the conversation log.
    with open(get_conv_log_filename(), 'a') as fout:
        data = {'tstamp': round(time.time(), 4), 'type': vote_type, 'models': [x for x in model_selectors], 'states': [x.dict() for x in states], 'ip': request.client.host}
        fout.write((json.dumps(data) + '\n'))
    # A ':' in the selector means the model names were already visible
    # (named battle); otherwise animate the reveal.
    if (':' not in model_selectors[0]):
        for i in range(15):
            names = (('### Model A: ' + states[0].model_name), ('### Model B: ' + states[1].model_name))
            (yield ((names + ('',)) + ((disable_btn,) * 3)))
            time.sleep(0.2)
    else:
        names = (('### Model A: ' + states[0].model_name), ('### Model B: ' + states[1].model_name))
        (yield ((names + ('',)) + ((disable_btn,) * 3)))
class RoIAwarePool3dFunction(Function):
    """Autograd Function bridging to the roiaware_pool3d C++/CUDA extension:
    pools per-point features into a fixed voxel grid for every RoI.

    NOTE(review): forward/backward lack the usual @staticmethod decorator
    of torch.autograd.Function subclasses -- presumably stripped during
    extraction; confirm against the original source.
    """

    def forward(ctx, rois, pts, pts_feature, out_size, max_pts_per_voxel, mode):
        """Pool `pts_feature` into per-RoI voxels via the extension.

        `out_size` is a single int (cubic grid) or an (x, y, z) triple;
        `mode` selects the pooling variant inside the extension.  Returns a
        (num_rois, out_x, out_y, out_z, C) tensor.
        """
        if isinstance(out_size, int):
            out_x = out_y = out_z = out_size
        else:
            assert (len(out_size) == 3)
            assert mmcv.is_tuple_of(out_size, int)
            (out_x, out_y, out_z) = out_size
        num_rois = rois.shape[0]
        num_channels = pts_feature.shape[(- 1)]
        num_pts = pts.shape[0]
        # Output buffers filled in-place by the extension.
        pooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels))
        argmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)
        pts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_per_voxel), dtype=torch.int)
        roiaware_pool3d_ext.forward(rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, mode)
        # Stash everything backward() needs on the context.
        ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, mode, num_pts, num_channels)
        return pooled_features

    def backward(ctx, grad_out):
        """Scatter `grad_out` back onto the per-point features; only the
        third forward argument (pts_feature) receives a gradient."""
        ret = ctx.roiaware_pool3d_for_backward
        (pts_idx_of_voxels, argmax, mode, num_pts, num_channels) = ret
        grad_in = grad_out.new_zeros((num_pts, num_channels))
        roiaware_pool3d_ext.backward(pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, mode)
        return (None, None, grad_in, None, None, None)
def test_mildnonaxi_oortA_grid_tlist():
    """oortA of a mildly non-axisymmetric evolved DF should match the initial
    axisymmetric DF (twophio is tiny), both with freshly computed and with
    pre-computed grids, over a list of times."""
    idf = dehnendf(beta=0.0)
    pot = [LogarithmicHaloPotential(normalize=1.0), EllipticalDiskPotential(twophio=0.001)]
    edf = evolveddiskdf(idf, pot=pot, to=-10.0)
    times = [0.0, -2.5, -5.0, -7.5, -10.0]
    oa, grid, dgridR, dgridphi = edf.oortA(0.9, t=times, phi=0.2, integrate_method='rk6_c', grid=True, derivRGrid=True, derivphiGrid=True, returnGrids=True, gridpoints=_GRIDPOINTS, derivGridpoints=_GRIDPOINTS)
    ioa = idf.oortA(0.9)
    assert numpy.all(numpy.fabs(oa - ioa) < 0.005), 'oortA of evolveddiskdf for axisymmetric potential is not equal to that of initial DF'
    # Re-evaluate reusing the grids returned above; result must be unchanged.
    oa = edf.oortA(0.9, t=times, phi=0.2, integrate_method='rk6_c', grid=grid, derivRGrid=dgridR, derivphiGrid=dgridphi, gridpoints=_GRIDPOINTS, derivGridpoints=_GRIDPOINTS)
    assert numpy.all(numpy.fabs(oa - ioa) < 0.005), 'oortA of evolveddiskdf for axisymmetric potential is not equal to that of initial DF when calculated with pre-computed grid'
    return None
def inTopk(scores, ans, k):
    """Return True iff any of the top-k scoring indices appears in ``ans``.

    Args:
        scores: 1-D tensor of candidate scores.
        ans: container of accepted indices (membership tested with ``in``).
        k: number of highest-scoring entries to inspect.

    The original accumulated a flag over the whole loop; ``any`` short-circuits
    on the first hit with identical results.
    """
    topk_indices = torch.topk(scores, k)[1]
    return any(idx in ans for idx in topk_indices)
class ReplaceExpression(TraverseAction):
    """Traversal action that redirects every edge targeting one expression to
    point at a replacement expression instead."""

    # Expression to be swapped out wherever an edge targets it.
    expr_to_replace: TreeNode
    # Expression the matching edges are redirected to.
    replacement_expr: TreeNode
    # Edges whose target has been rewritten. (Fixed: the class previously
    # declared `inserted_node: List[id]`, a name that was never assigned —
    # __init__ populates `inserted_edges`.)
    inserted_edges: List

    def __init__(self, expr_to_replace: TreeNode, replacement_expr: TreeNode):
        super().__init__()
        self.expr_to_replace = expr_to_replace
        self.replacement_expr = replacement_expr
        self.inserted_edges = []

    def _pre_action(self, edge) -> bool:
        # Never descend into the replacement expression itself; once spliced
        # in, revisiting it could recurse forever.
        if edge.get_target() == self.replacement_expr:
            return False
        if edge.get_target() == self.expr_to_replace:
            edge.replace_target(self.replacement_expr)
        # Continue traversal in all other cases, honouring the bool contract.
        return True
class Dataset(object):
    """Streams labelled CSV collections from a directory.

    Every ``*.csv`` file under ``path`` becomes a named collection (keyed by
    its stem via ``file_name``); rows are yielded lazily so arbitrarily large
    files can be iterated epoch after epoch.
    """

    def __init__(self, path):
        self.path = path
        files = glob.glob(path + '/*.csv')
        # collection name -> full file path
        self.collections = {file_name(file): file for file in files}

    def rows(self, collection_name, num_epochs=None):
        """Yield raw CSV rows, re-reading the file once per epoch.

        ``num_epochs=None`` (or 0) cycles forever.

        Raises:
            ValueError: if the collection does not exist.
        """
        if collection_name not in self.collections:
            raise ValueError('Collection not found: {}'.format(collection_name))
        epoch = 0
        while True:
            with open(self.collections[collection_name], 'r') as f:
                reader = csv.reader(f)
                for row in reader:
                    yield row
            epoch += 1
            if num_epochs and epoch >= num_epochs:
                # Fixed: `raise StopIteration` inside a generator is a
                # RuntimeError on Python 3.7+ (PEP 479); `return` ends cleanly.
                return

    def _batch_iter(self, collection_name, batch_size, num_epochs):
        """Group the row stream into batch_size-tuples (itertools 'grouper'
        idiom: batch_size references to the SAME generator, zipped)."""
        gen = [self.rows(collection_name, num_epochs)] * batch_size
        return itertools.zip_longest(*gen, fillvalue=None)

    def batches(self, collection_name, batch_size, num_epochs=None, shuffle=False, max_len=None, multilabel=False):
        """Yield (labels, padded token ids, sequence lengths, None, None, None)."""
        for batch in self._batch_iter(collection_name, batch_size, num_epochs):
            # Trailing None fill values from zip_longest are dropped here.
            data = [format_row(row, shuffle, multilabel) for row in batch if row]
            (y, x, seq_lengths) = zip(*data)
            if max_len is not None:
                x = pp.pad_sequences(x, maxlen=max_len, padding='post')
            else:
                x = pp.pad_sequences(x, padding='post')
            yield (np.array(y), x, np.array(seq_lengths), None, None, None)

    def batches_split(self, collection_name, batch_size, num_epochs=None, shuffle=False, max_len=None, multilabel=False, lm_sent_len=30):
        """Like batches(), but additionally splits each document into
        language-model-sized chunks via split_data_LM."""
        for batch in self._batch_iter(collection_name, batch_size, num_epochs):
            data = [format_row(row, shuffle, multilabel) for row in batch if row]
            (y, x, seq_lengths) = zip(*data)
            (x_split, y_split, seq_lengths_split, split_indices) = split_data_LM(x, y, lm_sent_len=lm_sent_len)
            if max_len is None:
                x_split = pp.pad_sequences(x_split, padding='post')
            else:
                x_split = pp.pad_sequences(x_split, maxlen=max_len, padding='post')
            yield (np.array(y_split), x_split, np.array(seq_lengths_split), split_indices, x, seq_lengths)

    def batches_nvdm_LM(self, collection_name, batch_size, vocab_size, num_epochs=None, max_len=None, multilabel=False):
        """Yield bag-of-words batches for an NVDM-style topic model.

        Each row's second column is expected to hold 'index:freq' pairs; they
        are scattered into a dense vocab_size vector per document.
        """
        for batch in self._batch_iter(collection_name, batch_size, num_epochs):
            data_batch = []
            count_batch = []
            y_batch = []
            mask = []
            for (i, row) in enumerate(batch):
                if row:
                    count = 0
                    y_batch.append(row[0].strip())
                    raw_x = row[1].strip()
                    id_freqs = u.format_doc(raw_x).split()
                    doc = np.zeros(vocab_size)
                    for value in id_freqs:
                        (index, freq) = value.strip().split(':')
                        doc[int(index)] = float(freq)
                        count += int(freq)
                    count_batch.append(count)
                    mask.append(float(1))
                    data_batch.append(doc)
            data_batch = np.array(data_batch, dtype=np.float32)
            mask = np.array(mask, dtype=np.float32)
            yield (y_batch, data_batch, count_batch, mask)
def main():
    """Fine-tune a RepCONC retrieval model.

    Parses arguments (from one JSON config file or the CLI), configures
    logging, builds the tokenizer/model/datasets, and runs the trainer,
    resuming from an existing checkpoint when present.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, RepCONCFinetuneArguments))
    # A lone '.json' argv means "load all arguments from this config file".
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Only the main process logs at INFO; other ranks stay at WARN.
    logging.basicConfig(format='%(asctime)s-%(levelname)s-%(name)s- %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
    resume_from_checkpoint = False
    # A non-empty output dir containing 'checkpoint*' entries either aborts
    # (without --overwrite_output_dir) or triggers resumption.
    if (os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and any((x.startswith('checkpoint') for x in os.listdir(training_args.output_dir)))):
        if (not training_args.overwrite_output_dir):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        else:
            resume_from_checkpoint = True
    # Bare annotations documenting the parsed dataclass types (no runtime effect).
    model_args: ModelArguments
    data_args: DataTrainingArguments
    training_args: RepCONCFinetuneArguments
    # NOTE(review): the two concatenated f-strings have no separator before
    # 'distributed training' — confirm whether a ', ' was intended.
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Model parameters %s', model_args)
    logger.info('Data parameters %s', data_args)
    logger.info('Training parameters %s', training_args)
    set_seed(training_args.seed)
    tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, use_fast=True)
    # use_constraint toggles the constrained quantization path; sk_* are its
    # solver settings (forwarded verbatim from the model arguments).
    repconc = RepCONC.from_pretrained(model_args.model_name_or_path, use_constraint=(not training_args.not_use_constraint), sk_epsilon=model_args.sk_epsilon, sk_iters=model_args.sk_iters)
    eval_dataset = load_validation_set(data_args.valid_corpus_path, data_args.valid_query_path, data_args.valid_qrel_path, sep_token=tokenizer.sep_token)
    train_set = QDRelDataset(tokenizer, qrel_path=data_args.qrel_path, query_path=data_args.query_path, corpus_path=data_args.corpus_path, max_query_len=data_args.max_query_len, max_doc_len=data_args.max_doc_len, negative=training_args.negative, negative_per_query=training_args.negative_per_query, rel_threshold=1, verbose=is_main_process(training_args.local_rank))
    data_collator = FinetuneCollator(tokenizer=tokenizer, max_query_len=data_args.max_query_len, max_doc_len=data_args.max_doc_len)
    trainer = RepCONCFinetuner(qrels=train_set.get_qrels(), model=repconc, args=training_args, train_dataset=train_set, tokenizer=tokenizer, data_collator=data_collator, eval_dataset=eval_dataset)
    trainer.train(resume_from_checkpoint=resume_from_checkpoint)
    trainer.save_model()
def get_chatgpt_response(model, prompt):
    """Stream a reply from the chat model and return the final message text.

    Each streamed chunk carries the message-so-far; only the last one is kept.
    The server-side conversation is deleted and the chat reset afterwards.
    """
    reply = ''
    for chunk in model.ask(prompt):
        reply = chunk['message']
    model.delete_conversation(model.conversation_id)
    model.reset_chat()
    return reply
class Polygon():
    """A rectangular pymunk box (beam or column) rendered with pygame sprites."""

    def __init__(self, pos, length, height, space, mass=5.0):
        moment = 1000
        body = pm.Body(mass, moment)
        body.position = Vec2d(pos)
        shape = pm.Poly.create_box(body, (length, height))
        shape.color = (0, 0, 255)
        shape.friction = 0.5
        shape.collision_type = 2
        space.add(body, shape)
        self.body = body
        self.shape = shape
        # Crop the beam/column sprites out of the shared wood sheets.
        # NOTE(review): paths are relative to the working directory — confirm.
        wood = pygame.image.load('../resources/images/wood.png').convert_alpha()
        wood2 = pygame.image.load('../resources/images/wood2.png').convert_alpha()
        rect = pygame.Rect(251, 357, 86, 22)
        self.beam_image = wood.subsurface(rect).copy()
        rect = pygame.Rect(16, 252, 22, 84)
        self.column_image = wood2.subsurface(rect).copy()

    def to_pygame(self, p):
        """Convert pymunk coordinates (y up) to pygame pixels (y down, 600 tall)."""
        return (int(p.x), int(((- p.y) + 600)))

    def getVelocity(self):
        """Absolute vertical speed of the body."""
        return abs(self.body.velocity[1])

    def _blit_rotated(self, screen, image):
        """Rotate ``image`` to the body's angle and blit it centred on the body.

        Extracted helper: the beam and column branches of draw_poly previously
        duplicated this logic verbatim.
        """
        p = Vec2d(self.to_pygame(self.body.position))
        angle_degrees = (math.degrees(self.body.angle) + 180)
        rotated_img = pygame.transform.rotate(image, angle_degrees)
        offset = (Vec2d(rotated_img.get_size()) / 2.0)
        p = (p - offset)
        screen.blit(rotated_img, (p.x, p.y))

    def draw_poly(self, element, screen):
        """Draw the collision outline and, per element kind, the sprite."""
        poly = self.shape
        ps = poly.get_vertices()
        ps.append(ps[0])  # close the outline loop
        ps = list(map(self.to_pygame, ps))
        pygame.draw.lines(screen, (255, 0, 0), False, ps)
        if (element == 'beams'):
            self._blit_rotated(screen, self.beam_image)
        if (element == 'columns'):
            self._blit_rotated(screen, self.column_image)

    def getPosition(self):
        return self.body.position

    def getVertices(self):
        return self.shape.get_vertices()

    def getRadius(self):
        # NOTE(review): relies on pymunk's private _get_radius accessor.
        return self.shape._get_radius()
# NOTE(review): '_arg_scope' below looks like a mangled decorator (likely
# '@add_arg_scope' from tf.contrib.framework) — confirm against the original.
_arg_scope
def masked_separable_convolution2d(inputs, num_outputs, kernel_size, depth_multiplier, stride=1, padding='SAME', data_format=None, rate=1, activation_fn=nn.relu, normalizer_fn=None, normalizer_params=None, weights_initializer=initializers.xavier_initializer(), weights_regularizer=None, biases_initializer=init_ops.zeros_initializer(), biases_regularizer=None, reuse=None, variables_collections=None, outputs_collections=None, trainable=True, scope=None, task_id=1):
    """Slim-style wrapper around a masked separable 2-D convolution layer.

    Builds a MaskedSeparableConv2D, registers its variables in the requested
    collections, then applies optional normalization and activation. The bias
    is only used when no normalizer_fn is given. Raises ValueError for an
    invalid data_format or when num_outputs is None.
    """
    if (data_format not in [None, 'NHWC', 'NCHW']):
        raise ValueError(('Invalid data_format: %r' % (data_format,)))
    # Rename layer-internal variables to slim's conventional names.
    layer_variable_getter = _build_variable_getter({'bias': 'biases', 'depthwise_kernel': 'depthwise_weights', 'pointwise_kernel': 'pointwise_weights'})
    with variable_scope.variable_scope(scope, 'SeparableConv2d', [inputs], reuse=reuse, custom_getter=layer_variable_getter) as sc:
        inputs = ops.convert_to_tensor(inputs)
        # Translate TF data_format strings to the Keras-layer vocabulary.
        if ((data_format is None) or (data_format == 'NHWC')):
            df = 'channels_last'
        elif (data_format == 'NCHW'):
            df = 'channels_first'
        else:
            raise ValueError('Unsupported data format', data_format)
        if (num_outputs is not None):
            # Bias is dropped when a normalizer follows (it would be redundant).
            layer = MaskedSeparableConv2D(filters=num_outputs, kernel_size=kernel_size, strides=stride, padding=padding, data_format=df, dilation_rate=utils.two_element_tuple(rate), activation=None, depth_multiplier=depth_multiplier, use_bias=((not normalizer_fn) and biases_initializer), depthwise_initializer=weights_initializer, pointwise_initializer=weights_initializer, depthwise_regularizer=weights_regularizer, pointwise_regularizer=weights_regularizer, bias_initializer=biases_initializer, bias_regularizer=biases_regularizer, activity_regularizer=None, trainable=trainable, name=sc.name, dtype=inputs.dtype.base_dtype, task_id=task_id, _scope=sc, _reuse=reuse)
            outputs = layer.apply(inputs)
            _add_variable_to_collections(layer.depthwise_kernel, variables_collections, 'weights')
            _add_variable_to_collections(layer.pointwise_kernel, variables_collections, 'weights')
            if layer.use_bias:
                _add_variable_to_collections(layer.bias, variables_collections, 'biases')
            if (normalizer_fn is not None):
                normalizer_params = (normalizer_params or {})
                # Normalizer variables are namespaced per task for multi-task use.
                with tf.variable_scope('task_{}'.format(task_id)):
                    outputs = normalizer_fn(outputs, **normalizer_params)
        else:
            raise ValueError('Num Outputs is None, Need to apply depthwise conv2d')
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return utils.collect_named_outputs(outputs_collections, sc.original_name_scope, outputs)
def infer_failure(inst) -> bool:
    """Return True when the rule for ``inst`` was judged invalid or unreadable.

    Looks up the pickled judgement for the instance's library (torch vs tf);
    any failure to read the file is treated as an invalid rule (best-effort
    read, so the broad except is deliberate).
    """
    lib = 'torch' if 'torch' in inst.name_index else 'tf'
    judge_result_dir = os.path.join(RULE_DIR, f'{lib}_rules_validity')
    try:
        with open(os.path.join(judge_result_dir, f'{inst.name_index}.pkl'), 'rb') as f:
            valid = pickle.load(f)[0]
    except Exception:
        valid = False
    # Simplified from `False if valid else True`.
    return not valid
def train_loader_creator(config, batch_size):
    """Build the training DataLoader with light geometric augmentation."""
    augmentations = A.Compose([
        A.Resize(width=128, height=128, p=1.0),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.RandomRotate90(p=0.5),
        A.ShiftScaleRotate(shift_limit=0.01, scale_limit=0.04, rotate_limit=0, p=0.25),
    ])
    dataset = BrainDataset(config['train'], augmentations)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=0)
class RandomSampler(Sampler):
    """Samples dataset indices at random, with or without replacement.

    ``num_samples`` may only be specified together with ``replacement=True``;
    otherwise a full random permutation of the data source is produced.
    """

    def __init__(self, data_source, replacement=False, num_samples=None):
        super(RandomSampler, self).__init__(data_source)
        self.replacement = replacement
        self._num_samples = num_samples
        if (not isinstance(self.replacement, bool)):
            raise ValueError('replacement should be a boolean value, but got replacement={}'.format(self.replacement))
        if ((self._num_samples is not None) and (not replacement)):
            raise ValueError('With replacement=False, num_samples should not be specified, since a random permute will be performed.')
        if ((not isinstance(self.num_samples, int)) or (self.num_samples <= 0)):
            raise ValueError('num_samples should be a positive integer value, but got num_samples={}'.format(self.num_samples))

    @property
    def num_samples(self):
        # Restored @property: __init__, __iter__ and __len__ all read
        # `self.num_samples` as an attribute; without the decorator the
        # isinstance(..., int) check above sees a bound method and always raises.
        if (self._num_samples is None):
            return len(self.data_source)
        return self._num_samples

    def __iter__(self):
        n = len(self.data_source)
        if self.replacement:
            return iter(torch.randint(high=n, size=(self.num_samples,), dtype=torch.int64).tolist())
        return iter(torch.randperm(n).tolist())

    def __len__(self):
        return self.num_samples
class VarSkipRNNBase(nn.Module):
    """Multi-layer (optionally bidirectional) RNN built from skip-connection
    cells with variational (per-batch, resampled) dropout noise.

    ``Cell`` is the cell class instantiated once per layer/direction;
    ``dropout`` is a rate pair forwarded to each cell as ``p`` (presumably
    (input, hidden) dropout — confirm against the cell implementation).
    """

    def __init__(self, Cell, input_size, hidden_size, num_layers=1, bias=True, batch_first=False, dropout=(0, 0), bidirectional=False, **kwargs):
        super(VarSkipRNNBase, self).__init__()
        self.Cell = Cell
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.bidirectional = bidirectional
        # When True, hidden state is an (h, c) pair — presumably flipped by
        # LSTM subclasses; confirm.
        self.lstm = False
        num_directions = (2 if bidirectional else 1)
        self.all_cells = []
        for layer in range(num_layers):
            for direction in range(num_directions):
                # Layers above the first consume the concatenated outputs of
                # all directions of the layer below.
                layer_input_size = (input_size if (layer == 0) else (hidden_size * num_directions))
                cell = self.Cell(layer_input_size, hidden_size, self.bias, p=dropout, **kwargs)
                self.all_cells.append(cell)
                # add_module registers parameters; all_cells itself is a plain list.
                self.add_module(('cell%d' % ((layer * num_directions) + direction)), cell)

    def reset_parameters(self):
        """Re-initialize the parameters of every cell."""
        for cell in self.all_cells:
            cell.reset_parameters()

    def reset_noise(self, batch_size):
        """Resample the variational dropout masks for a new batch."""
        for cell in self.all_cells:
            cell.reset_noise(batch_size)

    def forward(self, input, skip_connect, mask=None, hx=None):
        """Run the full sequence through all layers.

        Args:
            input: sequence tensor, batch-major iff ``batch_first``.
            skip_connect: skip-connection specification forwarded to the
                autograd runner (semantics defined by rnn_F).
            mask: optional padding mask; expanded with a trailing unit dim.
            hx: optional initial hidden state (zeros when omitted).
        Returns:
            (output, hidden) as produced by the autograd runner.
        """
        batch_size = (input.size(0) if self.batch_first else input.size(1))
        if (hx is None):
            num_directions = (2 if self.bidirectional else 1)
            hx = input.new_zeros((self.num_layers * num_directions), batch_size, self.hidden_size)
            if self.lstm:
                hx = (hx, hx)
        func = rnn_F.AutogradSkipConnectRNN(num_layers=self.num_layers, batch_first=self.batch_first, bidirectional=self.bidirectional, lstm=self.lstm)
        # Fresh dropout masks for every forward pass (variational dropout).
        self.reset_noise(batch_size)
        (output, hidden) = func(input, skip_connect, self.all_cells, hx, (None if (mask is None) else mask.view((mask.size() + (1,)))))
        return (output, hidden)

    def step(self, input, hx=None, hs=None, mask=None):
        """Advance a single time step (unidirectional models only).

        ``hs`` is the skip-connection hidden state; both states default to
        zeros when omitted.
        """
        assert (not self.bidirectional), 'step only cannot be applied to bidirectional RNN.'
        batch_size = input.size(0)
        if (hx is None):
            hx = input.new_zeros(self.num_layers, batch_size, self.hidden_size)
            if self.lstm:
                hx = (hx, hx)
        if (hs is None):
            hs = input.new_zeros(self.num_layers, batch_size, self.hidden_size)
        func = rnn_F.AutogradSkipConnectStep(num_layers=self.num_layers, lstm=self.lstm)
        (output, hidden) = func(input, self.all_cells, hx, hs, mask)
        return (output, hidden)
def build_vocab(cfg: Dict, dataset: BaseDataset=None, model_dir: Path=None) -> Tuple[(Vocabulary, Vocabulary)]:
    """Construct the source and target vocabularies.

    When a model directory is given and a side has no configured voc_file,
    the vocabulary saved alongside the trained model is used instead.
    The special-token indices must agree across both vocabularies.
    """
    if model_dir is not None:
        for side in ('src', 'trg'):
            if cfg[side].get('voc_file', None) is None:
                vocab_file = model_dir / f'{side}_vocab.txt'
                assert vocab_file.is_file()
                cfg[side]['voc_file'] = vocab_file.as_posix()
    src_vocab = _build_vocab(cfg['src'], dataset)
    trg_vocab = _build_vocab(cfg['trg'], dataset)
    for attr in ('pad_index', 'bos_index', 'eos_index'):
        assert getattr(src_vocab, attr) == getattr(trg_vocab, attr)
    return (src_vocab, trg_vocab)
def compute_hashes(X, A, H=None):
    """Hash every row of X with the biased projection A into int64 buckets.

    A reusable output tensor H may be passed in; it is zeroed before use.
    Dispatches to the CPU or CUDA kernel based on X's device.
    """
    if H is None:
        H = torch.zeros(len(X), dtype=torch.int64, device=X.device)
    else:
        H.zero_()
    # A must carry exactly one extra column, acting as the bias term.
    if A.shape[1] != X.shape[1] + 1:
        raise ValueError('The hash requires a bias')
    kernel = compute_hashes_cpu if X.device.type == 'cpu' else compute_hashes_cuda
    kernel(X, A, H)
    return H
def hard_volume(box_tensor: BoxTensor, log_scale: bool=True) -> torch.Tensor:
    """Volume of each box: the product of its side lengths over the last dim.

    In log space, sides are clamped at ``eps`` so degenerate boxes do not
    produce -inf; in linear space they are clamped at zero instead.
    """
    side_lengths = box_tensor.Z - box_tensor.z
    if log_scale:
        return torch.log(side_lengths.clamp_min(eps)).sum(dim=-1)
    return side_lengths.clamp_min(0).prod(dim=-1)
def create_log_dir(exp_prefix, exp_id=0, seed=0, base_log_dir=None, include_exp_prefix_sub_dir=True):
    """Create and return the log directory for one experiment run.

    The directory name comes from create_exp_name; a warning is printed (but
    the directory reused) when it already exists.
    """
    exp_name = create_exp_name(exp_prefix, exp_id=exp_id, seed=seed)
    base = conf.LOCAL_LOG_DIR if base_log_dir is None else base_log_dir
    if include_exp_prefix_sub_dir:
        # Group runs under a per-experiment subdirectory (dashes, not underscores).
        log_dir = osp.join(base, exp_prefix.replace('_', '-'), exp_name)
    else:
        log_dir = osp.join(base, exp_name)
    if osp.exists(log_dir):
        print('WARNING: Log directory already exists {}'.format(log_dir))
    os.makedirs(log_dir, exist_ok=True)
    return log_dir
class CLIPConfig(PretrainedConfig):
    """Composite configuration bundling a CLIPTextConfig and a CLIPVisionConfig."""

    model_type = 'clip'
    is_composition = True

    def __init__(self, text_config_dict=None, vision_config_dict=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs):
        super().__init__(text_config_dict=text_config_dict, vision_config_dict=vision_config_dict, **kwargs)
        # Missing sub-config dicts fall back to the sub-config defaults.
        if (text_config_dict is None):
            text_config_dict = {}
            logger.info('text_config_dict is None. Initializing the CLIPTextConfig with default values.')
        if (vision_config_dict is None):
            vision_config_dict = {}
            logger.info('vision_config_dict is None. initializing the CLIPVisionConfig with default values.')
        self.text_config = CLIPTextConfig(**text_config_dict)
        self.vision_config = CLIPVisionConfig(**vision_config_dict)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: CLIPTextConfig, vision_config: CLIPVisionConfig, **kwargs):
        """Alternate constructor from two sub-config objects.

        Restored @classmethod: the method receives ``cls`` and is meant to be
        called on the class, not an instance.
        """
        return cls(text_config_dict=text_config.to_dict(), vision_config_dict=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
def bit2dB(x):
    """Decode a 4-bit attenuation field into a negative dB value.

    Bit i (for i in 3..0) contributes -3 * 2**i dB when set, i.e. -24, -12,
    -6 and -3 dB respectively.
    """
    total = 0
    for position, step_db in ((3, 24), (2, 12), (1, 6), (0, 3)):
        if bit(x, position):
            total += step_db
    return -total
def evaluate_simple(eval_file, answer_dict):
    """Score generated questions against references with BLEU, METEOR and ROUGE-L.

    For each key the highest-scoring answer is taken as the prediction and
    compared with the (single) lower-cased token reference. Returns the three
    metrics scaled to percentages.
    """
    reference_corpus = []
    translation_corpus = []
    rouges = []
    meteor_scorer = Meteor()
    res, gts = [], []
    for key, answers in answer_dict.items():
        # Best answer first (answers are (score, text) pairs).
        best = sorted(answers, key=lambda pair: pair[0], reverse=True)[0]
        ground_truths = [[token.lower() for token in eval_file[key]['question_eval']]]
        prediction = best[1].lower().split()
        translation_corpus.append(prediction)
        reference_corpus.append(ground_truths)
        rouges.append(compute_rouge_L(prediction, ground_truths))
        res.append(' '.join(prediction))
        gts.append([' '.join(ground_truth) for ground_truth in ground_truths])
    bleu = compute_bleu(reference_corpus, translation_corpus)
    mete = meteor_scorer.compute_score(gts, res)
    return {'bleu': (bleu[0] * 100), 'meteor': (mete[0] * 100), 'rougeL': (np.mean(rouges) * 100)}
from dataclasses import dataclass


@dataclass
class Args:
    """Training hyper-parameters for BigBird fine-tuning on Natural Questions.

    Restored @dataclass: the class relies on generated keyword __init__ and on
    __post_init__, which only runs for dataclasses.
    """
    model_id: str = 'google/bigbird-roberta-base'
    logging_steps: int = 3000
    save_steps: int = 10500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5
    lr: float = 3e-05
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095
    save_dir: str = 'bigbird-roberta-natural-questions'
    base_dir: str = 'training-expt'
    tr_data_path: str = 'data/nq-training.jsonl'
    val_data_path: str = 'data/nq-validation.jsonl'

    def __post_init__(self):
        # Derive the full save path and the global (all-devices) batch size.
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = (self.batch_size_per_device * jax.device_count())
def print_cluster_extra(out_errors, out_context, out, text, auto_cluster_set, covered, gold_parses, gold_heads):
    """Report system (auto) mentions that were not matched by any gold cluster.

    Writes each spurious mention to three streams (plain, errors-only, and
    with surrounding context) and flags bookkeeping inconsistencies on stderr.
    """
    print('Extra:', file=out_errors)
    print('Extra:', file=out_context)
    for entity in auto_cluster_set:
        printed = 0
        for mention in entity:
            if (mention not in covered):
                print_mention(out, False, gold_parses, gold_heads, text, mention, extra=True)
                print_mention(out_errors, False, gold_parses, gold_heads, text, mention, extra=True)
                print_mention(out_context, True, gold_parses, gold_heads, text, mention, extra=True)
                printed += 1
        # Sanity check: within one entity, either all mentions are covered or
        # none are; a partial mix means `covered` was filled inconsistently.
        if ((printed > 0) and (len(entity) != printed)):
            print("Covered isn't being filled correctly (extra)", printed, len(entity), file=sys.stderr)
            print(entity, file=sys.stderr)
            for mention in entity:
                if (mention not in covered):
                    print(mention, file=sys.stderr)
        # Blank separator after each entity that produced output.
        if (printed > 0):
            print('', file=out_errors)
            print('', file=out_context)
            print('', file=out)
    print(('-' * 60), file=out_errors)
    print(('-' * 60), file=out_context)
    print('', file=out_errors)
    print('', file=out_context)
class BaseArgs(ABC):
    """Base CLI argument container.

    Construction runs the whole pipeline: define options, parse argv,
    validate, derive paths, and render a printable summary into
    ``self.str_args``. Subclasses extend ``add_args``.
    """

    def __init__(self):
        self.args = None
        self.parser = argparse.ArgumentParser()
        self.logger = logging.getLogger(self.__class__.__name__)
        self.add_args()
        self.parse()
        self.validate()
        self.process()
        self.str_args = self.log()

    def add_args(self):
        """Register the common command-line options."""
        self.parser.add_argument('--gpu', type=str, default='0')
        self.parser.add_argument('--face_detection', action='store_true')
        self.parser.add_argument('--resolution', type=int, default=256, choices=[256, 1024])
        self.parser.add_argument('--load_checkpoint')
        self.parser.add_argument('--pretrained_models_path', type=Path, required=True)
        BaseArgs.add_bool_arg(self.parser, 'const_noise')
        self.parser.add_argument('--batch_size', type=int, default=6)
        self.parser.add_argument('--reals', action='store_true', help='Use real inputs')
        BaseArgs.add_bool_arg(self.parser, 'test_real_attr')
        self.parser.add_argument('name', type=str, help='Name under which run will be saved')
        self.parser.add_argument('--results_dir', type=str, default='../results')
        self.parser.add_argument('--log_debug', action='store_true')
        self.parser.add_argument('--debug', action='store_true')

    def parse(self):
        self.args = self.parser.parse_args()

    def log(self):
        """Render all parsed arguments as a printable string."""
        out_str = 'The arguments are:\n'
        for (k, v) in self.args.__dict__.items():
            out_str += f'{k}: {v}\n'
        return out_str

    @staticmethod
    def add_bool_arg(parser, name, default=True):
        """Add a paired --<name>/--no_<name> boolean flag.

        Restored @staticmethod: the method is invoked unbound via
        ``BaseArgs.add_bool_arg(parser, ...)`` and takes no self.
        """
        group = parser.add_mutually_exclusive_group(required=False)
        group.add_argument(('--' + name), dest=name, action='store_true')
        group.add_argument(('--no_' + name), dest=name, action='store_false')
        parser.set_defaults(**{name: default})

    def validate(self):
        """Fail fast on impossible argument combinations."""
        if (self.args.load_checkpoint and (not Path(self.args.load_checkpoint).exists())):
            raise ValueError(f'Checkpoint directory {self.args.load_checkpoint} does not exist')

    def process(self):
        """Derive run paths from raw arguments and prepare the results tree."""
        self.args.results_dir = Path(self.args.results_dir).joinpath(self.args.name)
        if self.args.debug:
            self.args.log_debug = True
        # NOTE(review): `self.args.train` is not defined by add_args() above —
        # presumably registered by a subclass; confirm before refactoring.
        if (self.args.debug or (not self.args.train)):
            shutil.rmtree(self.args.results_dir, ignore_errors=True)
        self.args.results_dir.mkdir(parents=True, exist_ok=True)
        self.args.images_results = self.args.results_dir.joinpath('images')
        self.args.images_results.mkdir(exist_ok=True)
        if self.args.load_checkpoint:
            self.args.load_checkpoint = Path(self.args.load_checkpoint)
def summary(model, *inputs, batch_size=(- 1), show_input=True):
    """Print a per-layer shape/parameter summary of a PyTorch model.

    Runs one forward pass of ``model(*inputs)`` with temporary hooks attached
    to every leaf module. ``show_input=True`` reports input shapes (via
    forward pre-hooks); otherwise output shapes (via forward hooks).
    """
    def register_hook(module):
        def hook(module, input, output=None):
            # e.g. "<class 'torch.nn.modules.linear.Linear'>" -> "Linear"
            class_name = str(module.__class__).split('.')[(- 1)].split("'")[0]
            module_idx = len(summary)
            m_key = f'{class_name}-{(module_idx + 1)}'
            summary[m_key] = OrderedDict()
            summary[m_key]['input_shape'] = list(input[0].size())
            summary[m_key]['input_shape'][0] = batch_size
            if ((show_input is False) and (output is not None)):
                if isinstance(output, (list, tuple)):
                    # NOTE(review): each iteration overwrites 'output_shape',
                    # so only the last element's shape is kept — confirm intent.
                    for out in output:
                        if isinstance(out, torch.Tensor):
                            summary[m_key]['output_shape'] = [([(- 1)] + list(out.size())[1:])][0]
                        else:
                            summary[m_key]['output_shape'] = [([(- 1)] + list(out[0].size())[1:])][0]
                else:
                    summary[m_key]['output_shape'] = list(output.size())
                    summary[m_key]['output_shape'][0] = batch_size
            # Count parameters from the conventional weight/bias attributes.
            # NOTE(review): torch.prod returns a 0-d LongTensor, so nb_params
            # entries are tensors; the '{0:,}' formatting below relies on
            # tensor.__format__ — confirm on the torch version in use.
            params = 0
            if (hasattr(module, 'weight') and hasattr(module.weight, 'size')):
                params += torch.prod(torch.LongTensor(list(module.weight.size())))
                summary[m_key]['trainable'] = module.weight.requires_grad
            if (hasattr(module, 'bias') and hasattr(module.bias, 'size')):
                params += torch.prod(torch.LongTensor(list(module.bias.size())))
            summary[m_key]['nb_params'] = params
        # Hook only leaf-ish modules: skip containers and the root model.
        if ((not isinstance(module, nn.Sequential)) and (not isinstance(module, nn.ModuleList)) and (not (module == model))):
            if (show_input is True):
                hooks.append(module.register_forward_pre_hook(hook))
            else:
                hooks.append(module.register_forward_hook(hook))
    summary = OrderedDict()
    hooks = []
    model.apply(register_hook)
    model(*inputs)
    # Remove the hooks so the model is left untouched.
    for h in hooks:
        h.remove()
    print('')
    if (show_input is True):
        line_new = f"{'Layer (type)':>25} {'Input Shape':>25} {'Param #':>15}"
    else:
        line_new = f"{'Layer (type)':>25} {'Output Shape':>25} {'Param #':>15}"
    print(line_new)
    print('')
    total_params = 0
    total_output = 0
    trainable_params = 0
    for layer in summary:
        if (show_input is True):
            line_new = '{:>25} {:>25} {:>15}'.format(layer, str(summary[layer]['input_shape']), '{0:,}'.format(summary[layer]['nb_params']))
        else:
            line_new = '{:>25} {:>25} {:>15}'.format(layer, str(summary[layer]['output_shape']), '{0:,}'.format(summary[layer]['nb_params']))
        total_params += summary[layer]['nb_params']
        if (show_input is True):
            total_output += np.prod(summary[layer]['input_shape'])
        else:
            total_output += np.prod(summary[layer]['output_shape'])
        if ('trainable' in summary[layer]):
            if summary[layer]['trainable']:
                trainable_params += summary[layer]['nb_params']
        print(line_new)
    print('')
    print(f'Total params: {total_params:0,}')
    print(f'Trainable params: {trainable_params:0,}')
    print(f'Non-trainable params: {(total_params - trainable_params):0,}')
    print('')
from dataclasses import dataclass


@dataclass
class PurePursuitParam:
    """Tuning parameters for a pure-pursuit path-tracking controller.

    Restored @dataclass: from_vehicle_geo constructs instances with keyword
    arguments, which requires the generated __init__.
    """
    # (min, max) clamp applied to the dynamic look-ahead distance.
    look_ahead_minmax: tuple[(float, float)] = (3, 30)
    # Gain scaling the look-ahead distance (presumably with speed — confirm).
    k_lookahead: float = 0.8
    min_distance: float = 2
    max_extra_distance: float = 20
    # Vehicle wheelbase length and rear-axle offset.
    length: float = 3.5
    lr: float = (3.5 / 2)

    @classmethod
    def from_vehicle_geo(cls, params: VehicleGeometry) -> 'PurePursuitParam':
        """Alternate constructor taking geometry from a VehicleGeometry.

        Restored @classmethod: the method receives ``cls``.
        """
        return PurePursuitParam(length=params.wheelbase, lr=params.lr)
def res2net50_v1b_26w_4s(pretrained=False, **kwargs):
    """Build a Res2Net-50 v1b (26w x 4s) model, optionally loading pretrained weights."""
    net = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4, **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['res2net50_v1b_26w_4s'], map_location='cpu')
        net.load_state_dict(state)
    return net
class Database():
    """MongoDB-backed store of API signatures, recorded argument values and
    argument-similarity tables (used to sample values when fuzzing API calls).

    All ``== None`` comparisons were replaced with ``is None`` (PEP 8); for
    pymongo's dict-or-None return values the behaviour is identical.
    """

    # Metadata collection names.
    signature_collection = 'signature'
    similarity_collection = 'similarity'
    argdef_collection = 'api_args'

    def __init__(self) -> None:
        # The connection is deferred to database_config().
        pass

    def database_config(self, host, port, database_name):
        """Open the MongoDB connection and select the working database."""
        self.DB = pymongo.MongoClient(host=host, port=port)[database_name]

    def index_name(self, api_name, arg_name):
        """Translate a named argument into its positional key 'parameter:<i>'.

        Returns None when the API has no signature or the name is unknown.
        """
        record = self.DB[self.signature_collection].find_one({'api': api_name})
        if record is None:
            print(api_name + ' no signature')
            return None
        for idx, name in enumerate(record['args']):
            if name == arg_name:
                return f'parameter:{idx}'
        return None

    def select_rand_over_db(self, api_name, arg_name):
        """Sample a recorded value for ``arg_name`` from an API similar to ``api_name``.

        Returns (value, True) on success, (None, False) on any lookup failure.
        """
        if api_name not in self.DB.list_collection_names():
            return (None, False)
        record = self.DB[self.signature_collection].find_one({'api': api_name})
        if record is None:
            return (None, False)
        arg_names = record['args']
        # Resolve positional spellings ('parameter:<i>') to the real name.
        if arg_name.startswith('parameter:'):
            index = int(arg_name[10:])
            if index >= len(arg_names):
                return (None, False)
            arg_name = arg_names[index]
        sim_dict = self.DB[self.similarity_collection].find_one({'api': api_name, 'arg': arg_name})
        if sim_dict is None:
            return (None, False)
        APIs = sim_dict['APIs']
        probs = sim_dict['probs']
        if len(APIs) == 0:
            return (None, False)
        # Weighted pick of a similar API, then sample one of its recorded
        # calls that carries this argument by name or by position.
        target_api = choice(APIs, p=probs)
        idx_name = self.index_name(target_api, arg_name)
        if idx_name is None:
            return (None, False)
        select_data = self.DB[target_api].aggregate([{'$match': {'$or': [{arg_name: {'$exists': True}}, {idx_name: {'$exists': True}}]}}, {'$sample': {'size': 1}}])
        if not select_data.alive:
            print(f'ERROR IN SIMILARITY: {target_api}, {api_name}')
            return (None, False)
        select_data = select_data.next()
        if arg_name in select_data.keys():
            return (select_data[arg_name], True)
        return (select_data[idx_name], True)

    def get_rand_record(self, api_name):
        """Return one random recorded call for ``api_name`` (without its _id)."""
        record = self.DB[api_name].aggregate([{'$sample': {'size': 1}}])
        if not record.alive:
            print(f'NO SUCH API: {api_name}')
            assert 0
        record = record.next()
        record.pop('_id')
        assert ('_id' not in record.keys())
        return record

    def get_all_records(self, api_name):
        """Return every recorded call for ``api_name`` ([] when unknown)."""
        if api_name not in self.DB.list_collection_names():
            print(f'NO SUCH API: {api_name}')
            return []
        temp = self.DB[api_name].find({}, {'_id': 0})
        records = []
        for t in temp:
            assert ('_id' not in t.keys())
            records.append(t)
        return records

    def get_argdef(self, api_name):
        """Return the argument definitions for ``api_name`` (asserts on miss)."""
        record = self.DB[self.argdef_collection].find_one({'api': api_name}, {'_id': 0})
        if record is None:
            print(f'NO API_ARGS FOR: {api_name}')
            dump_data(f'NO API_ARGS FOR: {api_name}\n', 'db-error.txt', 'a')
            assert 0
        return record['args']

    def get_signature(self, api_name):
        """Return the recorded signature (argument names) for ``api_name``."""
        record = self.DB[self.signature_collection].find_one({'api': api_name}, {'_id': 0})
        if record is None:
            print(f'NO SIGNATURE FOR: {api_name}')
            assert 0
        return record['args']

    def add_record(self, api_name, record):
        """Insert one call record; failures are logged, not raised (best-effort)."""
        try:
            # insert_one mutates its argument (adds _id); copy to protect the caller.
            self.DB[api_name].insert_one(record.copy())
        except Exception:
            dump_data(f'{api_name} {record}\n', 'database-log.txt', 'a')

    def add_records(self, api_name, record):
        """Bulk-insert a list of call records."""
        self.DB[api_name].insert_many(record)

    def add_signature(self, api_name, signature):
        """Store the argument-name list for ``api_name``."""
        data = {'api': api_name, 'args': signature.copy()}
        self.DB[self.signature_collection].insert_one(data)

    def add_argdef(self, api_name, argdef):
        """Store the argument definitions for ``api_name``."""
        data = {'api': api_name, 'args': argdef.copy()}
        self.DB[self.argdef_collection].insert_one(data)

    def delete_all_argdef(self, api_name):
        """Remove every stored argument definition for ``api_name``."""
        self.DB[self.argdef_collection].delete_many({'api': api_name})

    def get_api_list(DB, start_str):
        """Return collection names starting with ``start_str``.

        NOTE(review): the receiver is named ``DB`` (a pymongo database handle,
        judging by ``list_collection_names``), not ``self``; kept as-is for
        compatibility with existing call sites.
        """
        api_list = []
        for name in DB.list_collection_names():
            if name.startswith(start_str):
                api_list.append(name)
        return api_list
class MolGraph():
    """Graph representation of a batch of molecules parsed from SMILES strings.

    Parses each SMILES with RDKit into project `Atom`/`Bond`/`Molecule`
    containers and can emit flat, batch-concatenated feature arrays suitable
    for a message-passing network.
    """

    def __init__(self, smiles_list, args, path_input=None, path_mask=None):
        # args is expected to expose at least `device` (used for tensor creation).
        self.smiles_list = smiles_list
        self.args = args
        self.device = args.device
        self.mols = []      # parsed Molecule objects, one per SMILES
        self.scope = []     # (atom_offset, n_atoms) per molecule in the flat batch
        self.rd_mols = []   # raw RDKit Mol objects, parallel to self.mols
        self.path_input = path_input
        self.path_mask = path_mask
        self.ap_mapping = None
        self._parse_molecules(smiles_list)
        self.n_mols = len(self.mols)

    def get_n_atoms(self):
        """Return the total number of atoms across all parsed molecules."""
        assert (self.scope != [])
        # Last molecule's offset plus its atom count equals the batch total.
        return (self.scope[(- 1)][0] + self.scope[(- 1)][1])

    def _parse_molecules(self, smiles_list):
        """Parse each SMILES into Atom/Bond/Molecule objects and fill scope.

        Each undirected RDKit bond becomes TWO directed Bond objects (one per
        direction); each directed bond is registered on its *destination* atom.
        """
        # NOTE(review): skip_atom is defined but never called in this method.
        def skip_atom(atom_idx, max):
            return ((max != 0) and (atom_idx >= max))
        a_offset = 0
        for smiles in smiles_list:
            rd_mol = Chem.MolFromSmiles(smiles)
            self.rd_mols.append(rd_mol)
            mol_atoms = []
            mol_bonds = []
            for rd_atom in rd_mol.GetAtoms():
                atom_idx = rd_atom.GetIdx()
                mol_atoms.append(Atom(idx=atom_idx, rd_atom=rd_atom))
            for rd_bond in rd_mol.GetBonds():
                atom_1_idx = rd_bond.GetBeginAtom().GetIdx()
                atom_2_idx = rd_bond.GetEndAtom().GetIdx()
                # Forward direction: atom_1 -> atom_2, stored on atom_2.
                # Bond constructor assumed (idx, out_atom_idx, in_atom_idx, rd_bond).
                bond_idx = len(mol_bonds)
                new_bond = Bond(bond_idx, atom_1_idx, atom_2_idx, rd_bond)
                mol_bonds.append(new_bond)
                mol_atoms[atom_2_idx].add_bond(new_bond)
                # Reverse direction: atom_2 -> atom_1, stored on atom_1.
                bond_idx = len(mol_bonds)
                new_bond = Bond(bond_idx, atom_2_idx, atom_1_idx, rd_bond)
                mol_bonds.append(new_bond)
                mol_atoms[atom_1_idx].add_bond(new_bond)
            new_mol = Molecule(mol_atoms, mol_bonds)
            self.mols.append(new_mol)
            # Record this molecule's slice of the flat atom array.
            self.scope.append((a_offset, len(mol_atoms)))
            a_offset += len(mol_atoms)

    def get_atom_inputs(self, output_tensors=True):
        """Return (atom feature matrix, scope); as a float tensor when requested."""
        fatoms = []
        for (mol_idx, mol) in enumerate(self.mols):
            atoms = mol.atoms
            for (atom_idx, atom) in enumerate(atoms):
                atom_features = mol_features.get_atom_features(atom)
                fatoms.append(atom_features)
        fatoms = np.stack(fatoms, axis=0)
        if output_tensors:
            fatoms = torch.tensor(fatoms, device=self.device).float()
        return (fatoms, self.scope)

    def get_graph_inputs(self, output_tensors=True):
        """Build flat message-passing inputs [fatoms, fbonds, agraph, bgraph] plus scope.

        Index 0 of fbonds/bgraph is an all-zero padding row, so a 0 entry in
        agraph/bgraph means "no neighbor"; real bond indices are shifted by
        b_offset (which therefore starts at 1).
        """
        n_atom_feats = mol_features.N_ATOM_FEATS
        n_bond_feats = mol_features.N_BOND_FEATS
        max_neighbors = mol_features.MAX_NEIGHBORS
        fatoms = []
        fbonds = [np.zeros((n_atom_feats + n_bond_feats))]  # padding row at index 0
        agraph = []
        bgraph = [np.zeros([1, max_neighbors])]  # padding row at index 0
        b_offset = 1
        for (mol_idx, mol) in enumerate(self.mols):
            (atoms, bonds) = (mol.atoms, mol.bonds)
            cur_agraph = np.zeros([len(atoms), max_neighbors])
            cur_bgraph = np.zeros([len(bonds), max_neighbors])
            for (atom_idx, atom) in enumerate(atoms):
                atom_features = mol_features.get_atom_features(atom)
                fatoms.append(atom_features)
                # agraph row lists the (shifted) indices of the atom's incoming bonds.
                for (nei_idx, bond) in enumerate(atom.bonds):
                    cur_agraph[(atom.idx, nei_idx)] = (bond.idx + b_offset)
            for bond in bonds:
                out_atom = atoms[bond.out_atom_idx]
                # A directed-bond feature is [source-atom features ++ bond features].
                bond_features = np.concatenate([mol_features.get_atom_features(out_atom), mol_features.get_bond_features(bond)], axis=0)
                fbonds.append(bond_features)
                # bgraph row lists the bonds feeding into this bond's source atom,
                # excluding this bond's own reverse edge.
                for (i, in_bond) in enumerate(out_atom.bonds):
                    if (bonds[in_bond.idx].out_atom_idx != bond.in_atom_idx):
                        cur_bgraph[(bond.idx, i)] = (in_bond.idx + b_offset)
            agraph.append(cur_agraph)
            bgraph.append(cur_bgraph)
            b_offset += len(bonds)
        fatoms = np.stack(fatoms, axis=0)
        fbonds = np.stack(fbonds, axis=0)
        agraph = np.concatenate(agraph, axis=0)
        bgraph = np.concatenate(bgraph, axis=0)
        if output_tensors:
            fatoms = torch.tensor(fatoms, device=self.device).float()
            fbonds = torch.tensor(fbonds, device=self.device).float()
            agraph = torch.tensor(agraph, device=self.device).long()
            bgraph = torch.tensor(bgraph, device=self.device).long()
        graph_inputs = [fatoms, fbonds, agraph, bgraph]
        return (graph_inputs, self.scope)
def generate_rating_matrix_test(user_seq, num_users, num_items):
    """Build a sparse binary user/item rating matrix for test-time evaluation.

    For each user, every item except the final one of the sequence (the
    held-out test target) is marked with a 1.

    Returns a scipy ``csr_matrix`` of shape (num_users, num_items).
    """
    rows = []
    cols = []
    for user_id, item_list in enumerate(user_seq):
        # Exclude the last item: it is the held-out test item.
        for item in item_list[:-1]:
            rows.append(user_id)
            cols.append(item)
    values = np.array([1] * len(rows))
    return csr_matrix((values, (np.array(rows), np.array(cols))), shape=(num_users, num_items))
def replace_oov(x, oov_char, max_words):
    """Replace every word index in *x* at or above *max_words* with *oov_char*."""
    result = []
    for w in x:
        result.append(w if w < max_words else oov_char)
    return result
def load_raw_spotting_predictions(saved_path: (Path | str), video_indexes: List[int], device: Any='cpu') -> Dict[(int, Dict[(int, Tensor)])]:
predictions = {video_index: None for video_index in video_indexes}
saved_path = Path(saved_path)
from_zip = zipfile.is_zipfile(saved_path)
if from_zip:
with zipfile.ZipFile(saved_path, 'r') as z:
for video_index in video_indexes:
with z.open(f'preds_video{video_index}.pth') as f:
predictions[video_index] = torch.load(f, map_location='cpu').to(device=device)
else:
for video_index in video_indexes:
predictions[video_index] = torch.load(str((saved_path / f'preds_video{video_index}.pth')), map_location='cpu').to(device=device)
return predictions |
def weights_init(module):
    """Recursively initialize *module*'s children.

    Children that ``init_std_modules`` does not handle are descended into and
    initialized recursively.
    """
    for child in module.children():
        handled = init_std_modules(child)
        if not handled:
            weights_init(child)
def jitter(obj: Union[(bpy.types.Object, str)], translate_range: Tuple[Tuple[float]]=((0, 0), (0, 0), (0, 0)), rotate_range: Tuple[Tuple[float]]=((0, 0), (0, 0), (0, 0)), scale_range: Tuple[Tuple[float]]=((1.0, 1.0), (1.0, 1.0), (1.0, 1.0))) -> None:
    """Apply a random translation, rotation and scale to *obj*.

    Each ``*_range`` argument is a per-axis ``((lo, hi), (lo, hi), (lo, hi))``
    tuple; one uniform sample is drawn per axis, in x/y/z order.
    """
    obj = verify(obj)

    def _draw(bounds):
        # Draws in x, y, z order so the random stream matches call order.
        return tuple(random.uniform(lo, hi) for (lo, hi) in bounds)

    translate(obj, translation=_draw(translate_range))
    rotate(obj, rotation=_draw(rotate_range))
    scale(obj, scale=_draw(scale_range))
class TransformerDecoderLayer(nn.Module):
    """One transformer decoder layer: self-attention, cross-attention and FFN.

    The `order` tuple controls the arrangement of the six sub-steps; when it
    starts with 'norm' the layer operates in pre-norm mode, in which the
    residual input is passed explicitly to each attention/FFN module.
    """

    def __init__(self, embed_dims, num_heads, feedforward_channels, dropout=0.0, order=('selfattn', 'norm', 'multiheadattn', 'norm', 'ffn', 'norm'), act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN'), num_fcs=2):
        super(TransformerDecoderLayer, self).__init__()
        assert (isinstance(order, tuple) and (len(order) == 6))
        # set() collapses the three 'norm' entries, so exactly these four
        # distinct step names must appear.
        assert (set(order) == set(['selfattn', 'norm', 'multiheadattn', 'ffn']))
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.feedforward_channels = feedforward_channels
        self.dropout = dropout
        self.order = order
        self.act_cfg = act_cfg
        self.norm_cfg = norm_cfg
        self.num_fcs = num_fcs
        # Pre-norm mode when normalization comes first in the ordering.
        self.pre_norm = (order[0] == 'norm')
        self.self_attn = MultiheadAttention(embed_dims, num_heads, dropout)
        self.multihead_attn = MultiheadAttention(embed_dims, num_heads, dropout)
        self.ffn = FFN(embed_dims, feedforward_channels, num_fcs, act_cfg, dropout)
        # One norm per 'norm' occurrence in `order` (three of them).
        self.norms = nn.ModuleList()
        for _ in range(3):
            self.norms.append(build_norm_layer(norm_cfg, embed_dims)[1])

    def forward(self, x, memory, memory_pos=None, query_pos=None, memory_attn_mask=None, target_attn_mask=None, memory_key_padding_mask=None, target_key_padding_mask=None):
        """Run the configured sub-steps in `self.order` and return the output.

        `memory` is the encoder output attended to by the cross-attention
        ('multiheadattn') step; positional encodings and masks are forwarded
        to the respective attention modules.
        """
        norm_cnt = 0
        inp_residual = x
        for layer in self.order:
            if (layer == 'selfattn'):
                query = key = value = x
                # Residual is only supplied explicitly in pre-norm mode;
                # otherwise the attention module is assumed to add it itself.
                x = self.self_attn(query, key, value, (inp_residual if self.pre_norm else None), query_pos, key_pos=query_pos, attn_mask=target_attn_mask, key_padding_mask=target_key_padding_mask)
                inp_residual = x
            elif (layer == 'norm'):
                x = self.norms[norm_cnt](x)
                norm_cnt += 1
            elif (layer == 'multiheadattn'):
                query = x
                key = value = memory
                x = self.multihead_attn(query, key, value, (inp_residual if self.pre_norm else None), query_pos, key_pos=memory_pos, attn_mask=memory_attn_mask, key_padding_mask=memory_key_padding_mask)
                inp_residual = x
            elif (layer == 'ffn'):
                x = self.ffn(x, (inp_residual if self.pre_norm else None))
        return x

    def __repr__(self):
        """Return a constructor-style summary of the layer configuration."""
        repr_str = self.__class__.__name__
        repr_str += f'(embed_dims={self.embed_dims}, '
        repr_str += f'num_heads={self.num_heads}, '
        repr_str += f'feedforward_channels={self.feedforward_channels}, '
        repr_str += f'dropout={self.dropout}, '
        repr_str += f'order={self.order}, '
        repr_str += f'act_cfg={self.act_cfg}, '
        repr_str += f'norm_cfg={self.norm_cfg}, '
        repr_str += f'num_fcs={self.num_fcs})'
        return repr_str
def _evaluatelinearPotentials(Pot, x, t=0.0):
if isinstance(Pot, list):
sum = 0.0
for pot in Pot:
sum += pot._call_nodecorator(x, t=t)
return sum
elif isinstance(Pot, linearPotential):
return Pot._call_nodecorator(x, t=t)
else:
raise PotentialError("Input to 'evaluatelinearPotentials' is neither a linearPotential-instance or a list of such instances") |
class VisdomPlotLogger(BaseVisdomLogger):
    """Visdom logger that appends (x, y) points to a scatter/line/stacked-line plot."""

    def __init__(self, plot_type, fields=None, win=None, env=None, opts=None, port=8097, server='localhost', name=None, log_to_filename=None):
        """Create a plot logger.

        Fixed: `opts` previously defaulted to a mutable `{}` shared across all
        instances; it now defaults to None and a fresh dict is created per call.
        """
        if opts is None:
            opts = {}
        super(VisdomPlotLogger, self).__init__(fields, win, env, opts, port, server, log_to_filename)
        valid_plot_types = {'scatter': self.viz.scatter, 'line': self.viz.line, 'stacked_line': self.viz.line}
        self.plot_type = plot_type
        if (plot_type not in valid_plot_types.keys()):
            raise ValueError("plot_type '{}' not found. Must be one of {}".format(plot_type, valid_plot_types.keys()))
        self.chart = valid_plot_types[plot_type]

    def log(self, *args, **kwargs):
        """Append a data point; creates the window on the first call.

        For existing windows `args` must be exactly (x, y).  'stacked_line'
        appends one point per series, using the 'legend' entries from opts as
        series names.
        """
        if ((self.win is not None) and self.viz.win_exists(win=self.win, env=self.env)):
            if (len(args) != 2):
                raise ValueError('When logging to {}, must pass in x and y values (and optionally z).'.format(type(self)))
            (x, y) = args
            (x, y) = ([x], [y])
            if (self.plot_type == 'stacked_line'):
                # NOTE(review): `name` is popped from kwargs but never used;
                # series names come from self.opts['legend'] instead — confirm.
                name = kwargs.pop('name')
                for (i, (x, y)) in enumerate(zip(*args)):
                    # NOTE(review): self.update_opts is only assigned in the
                    # window-creation branch below; a stacked_line append before
                    # creation would raise AttributeError — confirm call order.
                    self.chart(X=np.array([x]), Y=np.array([y]), update='append', name=self.opts['legend'][i], win=self.win, env=self.env, opts=self.update_opts, **kwargs)
            else:
                self.chart(X=np.array(x), Y=np.array(y), update='append', win=self.win, env=self.env, opts=self.opts, **kwargs)
        else:
            # First call: create the window with the initial data point(s).
            if (self.plot_type == 'scatter'):
                chart_args = {'X': np.array([args])}
            elif (self.plot_type == 'line'):
                chart_args = {'X': np.array([args[0]]), 'Y': np.array([args[1]])}
            elif (self.plot_type == 'stacked_line'):
                chart_args = {'X': np.array([args[0]]), 'Y': np.array([args[1]])}
                # Per-append opts must not carry 'legend' (visdom rejects it).
                self.update_opts = {k: v for (k, v) in self.opts.items()}
                self.update_opts.pop('legend')
            else:
                raise NotImplementedError('Plot type: {}'.format(self.plot_type))
            self.win = self.chart(win=self.win, env=self.env, opts=self.opts, **chart_args)
            # Re-enter to append the point into the freshly created window.
            self.log(*args, **kwargs)
def scale_arr(arr, mode='minmax'):
    """Scale *arr* to float32 with sklearn.

    mode='minmax' uses MinMaxScaler, mode='standard' uses StandardScaler;
    any other mode reports an invalid-input error.
    """
    if mode == 'minmax':
        from sklearn.preprocessing import MinMaxScaler
        scaler = MinMaxScaler()
    elif mode == 'standard':
        from sklearn.preprocessing import StandardScaler
        scaler = StandardScaler()
    else:
        from bigdl.nano.utils.common import invalidInputError
        invalidInputError(False, 'Unrecognized Mode')
    return scaler.fit_transform(arr).astype('float32')
def batchnorm(x):
    """Normalize *x* to zero mean / unit variance computed over axes 0-3."""
    mean, variance = tf.nn.moments(x, [0, 1, 2, 3])
    normalized = tf.nn.batch_normalization(x, mean=mean, variance=variance, offset=0, scale=1, variance_epsilon=1e-06)
    return normalized
class Conv1DLayer(BaseConvLayer):
    """Lasagne-style 1D convolutional layer.

    Delegates shape handling to BaseConvLayer (with n=1 for one spatial
    dimension) and performs the actual convolution with a pluggable Theano op.
    """

    def __init__(self, incoming, num_filters, filter_size, stride=1, pad=0, untie_biases=False, W=init.GlorotUniform(), b=init.Constant(0.0), nonlinearity=nonlinearities.rectify, flip_filters=True, convolution=conv.conv1d_mc0, **kwargs):
        # n=1 tells BaseConvLayer this is a 1-dimensional convolution.
        super(Conv1DLayer, self).__init__(incoming, num_filters, filter_size, stride, pad, untie_biases, W, b, nonlinearity, flip_filters, n=1, **kwargs)
        self.convolution = convolution

    def convolve(self, input, **kwargs):
        """Apply the stored convolution op to *input* and return the result."""
        # Lasagne's 'same' padding corresponds to Theano's 'half' border mode.
        border_mode = ('half' if (self.pad == 'same') else self.pad)
        conved = self.convolution(input, self.W, self.input_shape, self.get_W_shape(), subsample=self.stride, border_mode=border_mode, filter_flip=self.flip_filters)
        return conved
class nnUNetTrainerV2_SGD_fixedSchedule2(nnUNetTrainerV2):
    """nnU-Net trainer with a fixed piecewise SGD learning-rate schedule:
    full LR for epochs [0, 500), LR*0.1 for [500, 675), then polynomial decay.
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16)

    def maybe_update_lr(self, epoch=None):
        """Set the optimizer LR for the given epoch (defaults to self.epoch + 1)."""
        ep = (self.epoch + 1) if epoch is None else epoch
        if ep < 0:
            raise RuntimeError('Really unexpected things happened, ep=%d' % ep)
        if ep < 500:
            new_lr = self.initial_lr
        elif ep < 675:
            new_lr = self.initial_lr * 0.1
        else:
            # Polynomial decay over the remaining epochs, starting from 0.1x.
            new_lr = poly_lr(ep - 675, self.max_num_epochs - 675, self.initial_lr * 0.1, 0.9)
        self.optimizer.param_groups[0]['lr'] = new_lr
        self.print_to_log_file('lr:', self.optimizer.param_groups[0]['lr'])
def create_dataset_batch_queue(dataset):
    """Build the TF-slim input pipeline for pixel-link training.

    Reads samples from *dataset*, preprocesses image + oriented-bbox ground
    truth, computes per-pixel classification/link labels, batches them, and
    returns a prefetch queue of batches.  All ops are pinned to the CPU.
    """
    from preprocessing import ssd_vgg_preprocessing
    with tf.device('/cpu:0'):
        with tf.name_scope((FLAGS.dataset_name + '_data_provider')):
            provider = slim.dataset_data_provider.DatasetDataProvider(dataset, num_readers=FLAGS.num_readers, common_queue_capacity=(1000 * config.batch_size), common_queue_min=(700 * config.batch_size), shuffle=True)
            # Oriented boxes arrive as four separate x and y coordinate tensors.
            [image, glabel, gbboxes, x1, x2, x3, x4, y1, y2, y3, y4] = provider.get(['image', 'object/label', 'object/bbox', 'object/oriented_bbox/x1', 'object/oriented_bbox/x2', 'object/oriented_bbox/x3', 'object/oriented_bbox/x4', 'object/oriented_bbox/y1', 'object/oriented_bbox/y2', 'object/oriented_bbox/y3', 'object/oriented_bbox/y4'])
            # Stack per-corner coords into (num_boxes, 4) tensors.
            gxs = tf.transpose(tf.stack([x1, x2, x3, x4]))
            gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
            image = tf.identity(image, 'input_image')
            # Augmentation + resize; box coordinates are transformed alongside.
            (image, glabel, gbboxes, gxs, gys) = ssd_vgg_preprocessing.preprocess_image(image, glabel, gbboxes, gxs, gys, out_shape=config.train_image_shape, data_format=config.data_format, use_rotation=config.use_rotation, is_training=True)
            image = tf.identity(image, 'processed_image')
            # Dense per-pixel classification and link ground truth.
            (pixel_cls_label, pixel_cls_weight, pixel_link_label, pixel_link_weight) = pixel_link.tf_cal_gt_for_single_image(gxs, gys, glabel)
        with tf.name_scope((FLAGS.dataset_name + '_batch')):
            (b_image, b_pixel_cls_label, b_pixel_cls_weight, b_pixel_link_label, b_pixel_link_weight) = tf.train.batch([image, pixel_cls_label, pixel_cls_weight, pixel_link_label, pixel_link_weight], batch_size=config.batch_size_per_gpu, num_threads=FLAGS.num_preprocessing_threads, capacity=500)
        with tf.name_scope((FLAGS.dataset_name + '_prefetch_queue')):
            batch_queue = slim.prefetch_queue.prefetch_queue([b_image, b_pixel_cls_label, b_pixel_cls_weight, b_pixel_link_label, b_pixel_link_weight], capacity=50)
    return batch_queue
def main_worker(gpu, ngpus_per_node, args):
    """Per-process entry point for (distributed) MoCo training on one GPU.

    Sets up the process group, wraps the model in DistributedDataParallel,
    optionally resumes from a checkpoint, builds the FFHQ dataset/loader, and
    runs the training loop, checkpointing once per epoch from rank 0.
    """
    args.gpu = gpu
    # Silence print() on all but the first process of each node.
    if (args.multiprocessing_distributed and (args.gpu != 0)):
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    print("=> creating model '{}'".format(args.arch))
    model = moco.builder.MoCo(Encoder)
    print(model)
    if args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split the global batch size / workers across this node's GPUs.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        # NOTE(review): this branch intentionally dead-ends — MoCo's queue
        # logic requires DDP, so single-GPU non-distributed runs are rejected.
        raise NotImplementedError('Only DistributedDataParallel is supported.')
    else:
        raise NotImplementedError('Only DistributedDataParallel is supported.')
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # Optionally resume model/optimizer state from a checkpoint file.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if (args.gpu is None):
                checkpoint = torch.load(args.resume)
            else:
                # Map checkpoint tensors straight onto this process's GPU.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # FFHQ degraded-pair dataset: 'q' and 'k' views for contrastive learning.
    DATASETS = {'ffhq_encode': {'transforms': EncodeTransforms, 'train_source_root': dataset_paths['ffhq_deg_q'], 'train_target_root': dataset_paths['ffhq_deg_k']}}
    dataset_args = DATASETS['ffhq_encode']
    transforms_dict = dataset_args['transforms']().get_transforms()
    train_dataset = ImagesDataset(source_root=dataset_args['train_source_root'], target_root=dataset_args['train_target_root'], source_transform=transforms_dict['transform_source'], target_transform=transforms_dict['transform_gt_train'], opts=None)
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle the distributed sampler deterministically per epoch.
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        train(train_loader, model, criterion, optimizer, epoch, args)
        # Only the first process per node writes checkpoints.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, is_best=False, filename='checkpoint_{:04d}.pth.tar'.format(epoch))
class DensePASS13Segmentation(SegmentationDataset):
    """DensePASS panoramic segmentation dataset remapped to 13 classes.

    Labels stored with the 19 Cityscapes train ids are collapsed onto 13
    classes via `_key`; unlabeled pixels become -1 (ignored).
    """
    NUM_CLASS = 13

    def __init__(self, root='datasets/DensePASS', split='val', mode=None, transform=None, fov=360, **kwargs):
        super(DensePASS13Segmentation, self).__init__(root, split, mode, transform, **kwargs)
        assert os.path.exists(self.root), 'Please put dataset in {SEG_ROOT}/datasets/DensePASS'
        (self.images, self.mask_paths) = _get_city_pairs(self.root, self.split)
        self.crop_size = [400, 2048]
        assert (len(self.images) == len(self.mask_paths))
        self.fov = fov
        if (len(self.images) == 0):
            raise RuntimeError((('Found 0 images in subfolders of:' + root) + '\n'))
        # Index i holds the 13-class id for Cityscapes train id i; -1 = ignore.
        self._key = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11, 12, 12, 12, (- 1), 12, 12])

    def _map19to13(self, mask):
        """Remap a 19-class Cityscapes-id mask to the 13-class scheme."""
        values = np.unique(mask)
        new_mask = np.zeros_like(mask)
        new_mask -= 1  # default everything to the ignore label (-1)
        for value in values:
            if (value == 255):
                # 255 marks unlabeled pixels in the source masks.
                new_mask[(mask == value)] = (- 1)
            else:
                new_mask[(mask == value)] = self._key[value]
        mask = new_mask
        return mask

    def _val_sync_transform_resize(self, img, mask):
        """Validation-time transform: convert img/mask without augmentation."""
        (w, h) = img.size
        (img, mask) = (self._img_transform(img), self._mask_transform(mask))
        return (img, mask)

    def __getitem__(self, index):
        """Return (img, filename) in 'test' mode, else (img, mask, filename)."""
        img = Image.open(self.images[index]).convert('RGB')
        if (self.mode == 'test'):
            if (self.transform is not None):
                img = self.transform(img)
            return (img, os.path.basename(self.images[index]))
        mask = Image.open(self.mask_paths[index])
        if (self.mode == 'train'):
            (img, mask) = self._sync_transform(img, mask, resize=True)
        elif (self.mode == 'val'):
            (img, mask) = self._val_sync_transform_resize(img, mask)
        else:
            assert (self.mode == 'testval')
            (img, mask) = self._val_sync_transform_resize(img, mask)
        if (self.transform is not None):
            img = self.transform(img)
        return (img, mask, os.path.basename(self.images[index]))

    def _mask_transform(self, mask):
        """Convert a PIL mask to a remapped 13-class LongTensor."""
        target = self._map19to13(np.array(mask).astype('int32'))
        return torch.LongTensor(np.array(target).astype('int32'))

    def __len__(self):
        return len(self.images)

    # NOTE(review): pred_offset/classes are plain methods here (no @property);
    # confirm callers invoke them as methods, unlike sibling datasets that may
    # expose these as properties.
    def pred_offset(self):
        return 0

    def classes(self):
        """Category names for the 13 classes, in label-id order."""
        return ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'car')
def process_mmcif(mmcif_path: str, max_resolution: int, max_len: int, write_dir: str):
    """Parse one mmCIF file, extract chain features, and write them as a pickle.

    Returns a metadata dict describing the structure.  Raises one of the
    project's ``errors.*`` exceptions when the structure is unusable (parse
    failure, bad resolution, too long, no modeled residues, mdtraj failure).
    """
    metadata = {}
    mmcif_name = os.path.basename(mmcif_path).replace('.cif', '')
    metadata['pdb_name'] = mmcif_name
    # Shard output directories by the middle two characters of the PDB id.
    mmcif_subdir = os.path.join(write_dir, mmcif_name[1:3].lower())
    if (not os.path.isdir(mmcif_subdir)):
        os.mkdir(mmcif_subdir)
    processed_mmcif_path = os.path.join(mmcif_subdir, f'{mmcif_name}.pkl')
    processed_mmcif_path = os.path.abspath(processed_mmcif_path)
    metadata['processed_path'] = processed_mmcif_path
    try:
        with open(mmcif_path, 'r') as f:
            parsed_mmcif = mmcif_parsing.parse(file_id=mmcif_name, mmcif_string=f.read())
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit.
        raise errors.FileExistsError(f'Error file do not exist {mmcif_path}')
    metadata['raw_path'] = mmcif_path
    if parsed_mmcif.errors:
        raise errors.MmcifParsingError(f'Encountered errors {parsed_mmcif.errors}')
    parsed_mmcif = parsed_mmcif.mmcif_object
    raw_mmcif = parsed_mmcif.raw_string
    # Oligomeric annotations are optional in the mmCIF header.
    if ('_pdbx_struct_assembly.oligomeric_count' in raw_mmcif):
        raw_olig_count = raw_mmcif['_pdbx_struct_assembly.oligomeric_count']
        oligomeric_count = ','.join(raw_olig_count).lower()
    else:
        oligomeric_count = None
    if ('_pdbx_struct_assembly.oligomeric_details' in raw_mmcif):
        raw_olig_detail = raw_mmcif['_pdbx_struct_assembly.oligomeric_details']
        oligomeric_detail = ','.join(raw_olig_detail).lower()
    else:
        oligomeric_detail = None
    metadata['oligomeric_count'] = oligomeric_count
    metadata['oligomeric_detail'] = oligomeric_detail
    mmcif_header = parsed_mmcif.header
    mmcif_resolution = mmcif_header['resolution']
    metadata['resolution'] = mmcif_resolution
    metadata['structure_method'] = mmcif_header['structure_method']
    if (mmcif_resolution >= max_resolution):
        raise errors.ResolutionError(f'Too high resolution {mmcif_resolution}')
    if (mmcif_resolution == 0.0):
        # 0.0 typically means the resolution field was absent or unparseable.
        raise errors.ResolutionError(f'Invalid resolution {mmcif_resolution}')
    struct_chains = {chain.id.upper(): chain for chain in parsed_mmcif.structure.get_chains()}
    metadata['num_chains'] = len(struct_chains)
    struct_feats = []
    all_seqs = set()
    for (chain_id, chain) in struct_chains.items():
        chain_id = du.chain_str_to_int(chain_id)
        chain_prot = parsers.process_chain(chain, chain_id)
        chain_dict = dataclasses.asdict(chain_prot)
        chain_dict = du.parse_chain_feats(chain_dict)
        all_seqs.add(tuple(chain_dict['aatype']))
        struct_feats.append(chain_dict)
    # A single unique sequence across all chains means a homomer.
    if (len(all_seqs) == 1):
        metadata['quaternary_category'] = 'homomer'
    else:
        metadata['quaternary_category'] = 'heteromer'
    complex_feats = du.concat_np_features(struct_feats, False)
    complex_aatype = complex_feats['aatype']
    # aatype 20 is the unknown/unmodeled residue code.
    modeled_idx = np.where((complex_aatype != 20))[0]
    if (np.sum((complex_aatype != 20)) == 0):
        raise errors.LengthError('No modeled residues')
    min_modeled_idx = np.min(modeled_idx)
    max_modeled_idx = np.max(modeled_idx)
    metadata['seq_len'] = len(complex_aatype)
    metadata['modeled_seq_len'] = ((max_modeled_idx - min_modeled_idx) + 1)
    complex_feats['modeled_idx'] = modeled_idx
    if (complex_aatype.shape[0] > max_len):
        raise errors.LengthError(f'Too long {complex_aatype.shape[0]}')
    # Round-trip through a temporary PDB so mdtraj can compute DSSP / radius
    # of gyration.  BUG FIX: `pdb_path` is now assigned BEFORE the try block —
    # previously a failure in MMCIFParser/PDBIO (before the assignment) made
    # the `os.remove(pdb_path)` in the handler raise NameError.  The removal
    # is also guarded so a failure before the file exists cannot raise.
    pdb_path = mmcif_path.replace('.cif', '.pdb')
    try:
        p = MMCIFParser()
        struc = p.get_structure('', mmcif_path)
        io = PDBIO()
        io.set_structure(struc)
        io.save(pdb_path)
        traj = md.load(pdb_path)
        pdb_ss = md.compute_dssp(traj, simplified=True)
        pdb_dg = md.compute_rg(traj)
        os.remove(pdb_path)
    except Exception as e:
        if os.path.exists(pdb_path):
            os.remove(pdb_path)
        raise errors.DataError(f'Mdtraj failed with error {e}')
    # NOTE(review): `chain_dict` is the LAST chain from the loop above; storing
    # the whole-complex secondary structure on it looks suspicious — confirm
    # downstream consumers expect this.
    chain_dict['ss'] = pdb_ss[0]
    metadata['coil_percent'] = (np.sum((pdb_ss == 'C')) / metadata['modeled_seq_len'])
    metadata['helix_percent'] = (np.sum((pdb_ss == 'H')) / metadata['modeled_seq_len'])
    metadata['strand_percent'] = (np.sum((pdb_ss == 'E')) / metadata['modeled_seq_len'])
    metadata['radius_gyration'] = pdb_dg[0]
    du.write_pkl(processed_mmcif_path, complex_feats)
    return metadata
def test_crate():
    """Verify Open3D's CrateModel dataset downloads and extracts correctly."""
    gt_prefix = 'CrateModel'
    (gt_data_root, gt_download_dir, gt_extract_dir) = get_test_data_dirs(gt_prefix)
    # Constructing the dataset object triggers download/extraction as needed.
    crate = o3d.data.CrateModel()
    assert Path(gt_download_dir).is_dir()
    # Expected logical-name -> extracted-file mapping.
    gt_path_map = {'crate_material': (Path(gt_extract_dir) / 'crate.mtl'), 'crate_model': (Path(gt_extract_dir) / 'crate.obj'), 'texture_image': (Path(gt_extract_dir) / 'crate.jpg')}
    for file_name in crate.path_map:
        assert (Path(crate.path_map[file_name]) == gt_path_map[file_name])
        assert Path(crate.path_map[file_name]).is_file()
    # NOTE(review): `gt_extract_dir / 'crate.obj'` uses the `/` operator
    # directly, implying get_test_data_dirs returns Path objects — confirm
    # (elsewhere the code wraps it in Path()).
    assert (Path(crate.path) == (gt_extract_dir / 'crate.obj'))
    assert Path(crate.path).is_file()
    assert (crate.prefix == gt_prefix)
    assert (Path(crate.data_root) == gt_data_root)
    assert (Path(crate.download_dir) == gt_download_dir)
    assert (Path(crate.extract_dir) == gt_extract_dir)
def add_flops_counter_hook_function(module):
    """Attach the matching FLOPs-counting forward hook to *module*.

    Does nothing when the module type is unsupported or a hook is already
    installed (the handle is stored on the module as ``__flops_handle__``).
    """
    if not is_supported_instance(module):
        return
    if hasattr(module, '__flops_handle__'):
        # Hook already installed; avoid double counting.
        return
    for mod_type, counter_hook in hook_mapping.items():
        if issubclass(type(module), mod_type):
            # BUG FIX: only assign the handle when a matching hook was found.
            # Previously `handle` was set inside the loop but read after it,
            # raising NameError when no mod_type matched.
            module.__flops_handle__ = module.register_forward_hook(counter_hook)
            break
def build_fake_yaml_footprint():
    """Write a minimal tuning config (footprint objective) to fake_yaml_footprint.yaml.

    The YAML text is round-tripped through the parser to normalize formatting
    before writing.
    """
    fake_yaml = '\n model:\n name: fake_yaml\n framework: tensorflow\n inputs: x\n outputs: op_to_store\n device: cpu\n evaluation:\n accuracy:\n metric:\n topk: 1\n performance: {}\n tuning:\n objective: footprint\n strategy:\n name: fake\n accuracy_criterion:\n relative: 0.01\n workspace:\n path: saved\n '
    y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
    # The `with` block closes the file; the original's explicit f.close()
    # inside the block was redundant and has been removed.
    with open('fake_yaml_footprint.yaml', 'w', encoding='utf-8') as f:
        yaml.dump(y, f)
def get_input_data_amount(name: available_models, l: str) -> list[int]:
    """Return the candidate input-row counts for layer *l* of model *name*.

    *l* is a dotted layer path (e.g. ``layer2.0.conv1``); only its first
    component selects the row counts.  Unknown layer prefixes yield an empty
    list; unsupported model names raise ``Exception``.
    """
    if name not in ('resnet-50', 'resnet18'):
        raise Exception('Model name not supported: ', name)
    layer_loc = l.split('.', maxsplit=1)[0]
    # Dispatch table replaces the original if/elif chain.
    rows_by_layer = {
        'layer1': [1, 2, 4, 8],
        'layer2': [2, 4, 8, 16],
        'layer3': [8, 16, 32, 64],
        'layer4': [32, 64, 128, 256],
    }
    # Copy so callers cannot mutate the shared table entries.
    return list(rows_by_layer.get(layer_loc, []))
def next_varbprec_solution(wanted, maxprec, maxit, verbose):
    """Return the next variable-precision solution from the PHCpack tracker.

    Thin wrapper around the C extension ``py2c_next_varbprec_solution``;
    arguments are forwarded unchanged.
    """
    from phcpy.phcpy2c3 import py2c_next_varbprec_solution
    return py2c_next_varbprec_solution(wanted, maxprec, maxit, verbose)
def show_ae(autoencoder):
    """Plot originals, latent codes and reconstructions for 10 test images.

    Row 1: input images; row 2: stem plot of the flattened latent code;
    row 3: decoder reconstructions.  Uses the module-level ``x_test``.
    """
    encoder = autoencoder.Encoder()
    decoder = autoencoder.Decoder()
    encoded_imgs = encoder.predict(x_test)
    decoded_imgs = decoder.predict(encoded_imgs)
    n_cols = 10
    plt.figure(figsize=(20, 6))

    def _hide_axes(axis):
        axis.get_xaxis().set_visible(False)
        axis.get_yaxis().set_visible(False)

    for col in range(n_cols):
        # Top row: the original 28x28 input image.
        axis = plt.subplot(3, n_cols, col + 1)
        plt.imshow(x_test[col].reshape(28, 28))
        plt.gray()
        _hide_axes(axis)
        # Middle row: the latent code as a stem plot.
        axis = plt.subplot(3, n_cols, col + 1 + n_cols)
        plt.stem(encoded_imgs[col].reshape(-1))
        plt.gray()
        _hide_axes(axis)
        # Bottom row: the reconstruction.
        axis = plt.subplot(3, n_cols, col + 1 + 2 * n_cols)
        plt.imshow(decoded_imgs[col].reshape(28, 28))
        plt.gray()
        _hide_axes(axis)
    plt.show()
class GazeboEnv(gym.Env):
    """Gym environment wrapping a Gazebo/ROS robot navigation simulation.

    Steps the simulation by unpausing physics, collecting observations,
    pausing again, and computing a reward; supports discrete or continuous
    action spaces and optional extended evaluation statistics.
    """

    def __init__(self, ns: str, reward_fnc: str, is_action_space_discrete, safe_dist: float=None, goal_radius: float=0.1, max_steps_per_episode=100, train_mode: bool=True, debug: bool=False, task_mode: str='staged', PATHS: dict=dict(), extended_eval: bool=False, *args, **kwargs):
        super(GazeboEnv, self).__init__()
        self.ns = ns
        try:
            # Stagger startup by the numeric suffix of the namespace so
            # parallel environments do not initialize simultaneously.
            ns_int = int(ns.split('_')[1])
            time.sleep((ns_int * 2))
        except Exception:
            rospy.logwarn(f"Can't not determinate the number of the environment, training script may crash!")
        self.ns_prefix = ('' if ((ns == '') or (ns is None)) else (('/' + ns) + '/'))
        if (not debug):
            if train_mode:
                rospy.init_node(f'train_env_{self.ns}', disable_signals=False)
            else:
                rospy.init_node(f'eval_env_{self.ns}', disable_signals=False)
        self._extended_eval = extended_eval
        self._is_train_mode = rospy.get_param('/train_mode')
        self._is_action_space_discrete = is_action_space_discrete
        self.setup_by_configuration(PATHS['robot_setting'], PATHS['robot_as'])
        self.unpause = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)
        self.pause = rospy.ServiceProxy('/gazebo/pause_physics', Empty)
        self.observation_collector = ObservationCollector(self.ns, self._laser_num_beams, self._laser_max_range, external_time_sync=True)
        self.observation_space = self.observation_collector.get_observation_space()
        if (safe_dist is None):
            safe_dist = (1.6 * self._robot_radius)
        # NOTE(review): the `safe_dist` computed above is NOT passed on —
        # RewardCalculator always receives 1.6 * robot_radius, so an explicit
        # safe_dist argument is silently ignored.  Confirm whether
        # `safe_dist=safe_dist` was intended here.
        self.reward_calculator = RewardCalculator(robot_radius=self._robot_radius, safe_dist=(1.6 * self._robot_radius), goal_radius=goal_radius, rule=reward_fnc, extended_eval=self._extended_eval)
        if self._is_train_mode:
            self.agent_action_pub = rospy.Publisher(f'{self.ns_prefix}cmd_vel', Twist, queue_size=1)
        else:
            self.agent_action_pub = rospy.Publisher(f'{self.ns_prefix}cmd_vel_pub', Twist, queue_size=1)
        if self._is_train_mode:
            self._service_name_step = f'{self.ns_prefix}step_world'
        self.task = get_predefined_task(ns, mode=task_mode, start_stage=kwargs['curr_stage'], PATHS=PATHS)
        self._steps_curr_episode = 0
        self._max_steps_per_episode = max_steps_per_episode
        # Seconds of simulated time per environment step.
        self._action_frequency = (1 / rospy.get_param('/robot_action_rate'))
        # Extended-eval bookkeeping.
        self._last_robot_pose = None
        self._distance_travelled = 0
        self._safe_dist_counter = 0
        self._collisions = 0
        self._in_crash = False

    def setup_by_configuration(self, robot_xml_path: str, settings_yaml_path: str):
        """Read robot geometry from ROS params and the action space from YAML."""
        self._robot_radius = rospy.get_param('radius')
        self._laser_num_beams = rospy.get_param('laser_beams')
        self._laser_max_range = rospy.get_param('laser_range')
        with open(settings_yaml_path, 'r') as fd:
            setting_data = yaml.safe_load(fd)
            if self._is_action_space_discrete:
                self._discrete_acitons = setting_data['robot']['discrete_actions']
                self.action_space = spaces.Discrete(len(self._discrete_acitons))
            else:
                linear_range = setting_data['robot']['continuous_actions']['linear_range']
                angular_range = setting_data['robot']['continuous_actions']['angular_range']
                # NOTE(review): np.float is deprecated/removed in NumPy >= 1.24;
                # this should likely be float or np.float64.
                self.action_space = spaces.Box(low=np.array([linear_range[0], angular_range[0]]), high=np.array([linear_range[1], angular_range[1]]), dtype=np.float)

    def _pub_action(self, action):
        """Publish a (linear, angular) velocity command to the robot."""
        action_msg = Twist()
        action_msg.linear.x = action[0]
        action_msg.angular.z = action[1]
        self.agent_action_pub.publish(action_msg)

    def _translate_disc_action(self, action):
        """Map a discrete action index to its (linear, angular) pair."""
        new_action = np.array([])
        new_action = np.append(new_action, self._discrete_acitons[action]['linear'])
        new_action = np.append(new_action, self._discrete_acitons[action]['angular'])
        return new_action

    def step(self, action):
        """Apply *action*, advance the simulation, and return (obs, reward, done, info)."""
        if self._is_action_space_discrete:
            action = self._translate_disc_action(action)
        self._pub_action(action)
        self._steps_curr_episode += 1
        # Unpause physics just long enough to collect fresh observations.
        rospy.wait_for_service('/gazebo/unpause_physics')
        try:
            self.unpause()
        except rospy.ServiceException as e:
            print('/gazebo/unpause_physics service call failed')
        (merged_obs, obs_dict) = self.observation_collector.get_observations()
        rospy.wait_for_service('/gazebo/pause_physics')
        try:
            self.pause()
        except rospy.ServiceException as e:
            print('/gazebo/pause_physics service call failed')
        (reward, reward_info) = self.reward_calculator.get_reward(obs_dict['laser_scan'], obs_dict['goal_in_robot_frame'], action=action, global_plan=obs_dict['global_plan'], robot_pose=obs_dict['robot_pose'])
        done = reward_info['is_done']
        if self._extended_eval:
            self._update_eval_statistics(obs_dict, reward_info)
        info = {}
        if done:
            info['done_reason'] = reward_info['done_reason']
            info['is_success'] = reward_info['is_success']
        # Episode timeout: done_reason 0, counted as failure.
        if (self._steps_curr_episode > self._max_steps_per_episode):
            done = True
            info['done_reason'] = 0
            info['is_success'] = 0
        if (self._extended_eval and done):
            info['collisions'] = self._collisions
            info['distance_travelled'] = round(self._distance_travelled, 2)
            info['time_safe_dist'] = (self._safe_dist_counter * self._action_frequency)
            info['time'] = (self._steps_curr_episode * self._action_frequency)
        return (merged_obs, reward, done, info)

    def reset(self):
        """Reset the task/reward state and return the first observation."""
        # Unpause so the reset (and zero-velocity command) takes effect.
        rospy.wait_for_service('/gazebo/unpause_physics')
        try:
            self.unpause()
        except rospy.ServiceException as e:
            print('/gazebo/unpause_physics service call failed')
        self.agent_action_pub.publish(Twist())  # stop the robot
        self.task.reset()
        self.reward_calculator.reset()
        self._steps_curr_episode = 0
        if self._extended_eval:
            self._last_robot_pose = None
            self._distance_travelled = 0
            self._safe_dist_counter = 0
            self._collisions = 0
        (obs, _) = self.observation_collector.get_observations()
        rospy.wait_for_service('/gazebo/pause_physics')
        try:
            self.pause()
        except rospy.ServiceException as e:
            print('/gazebo/pause_physics service call failed')
        return obs

    def close(self):
        pass

    def _update_eval_statistics(self, obs_dict: dict, reward_info: dict):
        """Accumulate distance travelled, collision and safe-distance counters."""
        if (self._last_robot_pose is not None):
            self._distance_travelled += GazeboEnv.get_distance(self._last_robot_pose, obs_dict['robot_pose'])
        if ('crash' in reward_info):
            # Count a collision only on the transition into the crash state.
            if (reward_info['crash'] and (not self._in_crash)):
                self._collisions += 1
                self._in_crash = True
        else:
            self._in_crash = False
        if (('safe_dist' in reward_info) and reward_info['safe_dist']):
            self._safe_dist_counter += 1
        self._last_robot_pose = obs_dict['robot_pose']

    # NOTE(review): defined without `self` or @staticmethod; it works because
    # it is only called via the class (GazeboEnv.get_distance) — consider
    # marking it @staticmethod.
    def get_distance(pose_1: Pose2D, pose_2: Pose2D):
        """Euclidean distance between two 2D poses."""
        return math.hypot((pose_2.x - pose_1.x), (pose_2.y - pose_1.y))
# NOTE(review): this looks like a registry decorator that lost its leading
# '@' (i.e. `@_SAMPLERS.register_module()` on the class below); as written it
# registers nothing — confirm against the original source.
_SAMPLERS.register_module()
class OHEMPixelSampler(BasePixelSampler):
    """Online Hard Example Mining (OHEM) pixel sampler for segmentation.

    Produces a per-pixel weight map that selects the hardest pixels: either
    those with prediction confidence below a threshold (when `thresh` is set)
    or the pixels with the largest decode loss.
    """

    def __init__(self, context, thresh=None, min_kept=100000):
        # context: the decode head; provides ignore_index and loss_decode.
        # thresh: confidence threshold for hard-pixel selection (optional).
        # min_kept: minimum number of pixels kept PER IMAGE.
        super(OHEMPixelSampler, self).__init__()
        self.context = context
        assert (min_kept > 1)
        self.thresh = thresh
        self.min_kept = min_kept

    def sample(self, seg_logit, seg_label):
        """Return a (N, H, W) float weight map with 1.0 on selected hard pixels."""
        with torch.no_grad():
            assert (seg_logit.shape[2:] == seg_label.shape[2:])
            assert (seg_label.shape[1] == 1)
            seg_label = seg_label.squeeze(1).long()
            # Scale the per-image budget by the batch size.
            batch_kept = (self.min_kept * seg_label.size(0))
            valid_mask = (seg_label != self.context.ignore_index)
            seg_weight = seg_logit.new_zeros(size=seg_label.size())
            valid_seg_weight = seg_weight[valid_mask]
            if (self.thresh is not None):
                # Threshold mode: keep pixels whose ground-truth-class
                # probability is below max(thresh, prob of the batch_kept-th
                # most confident valid pixel).
                seg_prob = F.softmax(seg_logit, dim=1)
                tmp_seg_label = seg_label.clone().unsqueeze(1)
                # gather() cannot index with ignore_index; remap it to class 0
                # (those pixels are masked out by valid_mask anyway).
                tmp_seg_label[(tmp_seg_label == self.context.ignore_index)] = 0
                seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1)
                (sort_prob, sort_indices) = seg_prob[valid_mask].sort()
                if (sort_prob.numel() > 0):
                    min_threshold = sort_prob[min(batch_kept, (sort_prob.numel() - 1))]
                else:
                    min_threshold = 0.0
                threshold = max(min_threshold, self.thresh)
                valid_seg_weight[(seg_prob[valid_mask] < threshold)] = 1.0
            else:
                # Loss mode: keep the batch_kept pixels with the largest
                # (unreduced) decode loss.
                if (not isinstance(self.context.loss_decode, nn.ModuleList)):
                    losses_decode = [self.context.loss_decode]
                else:
                    losses_decode = self.context.loss_decode
                losses = 0.0
                for loss_module in losses_decode:
                    losses += loss_module(seg_logit, seg_label, weight=None, ignore_index=self.context.ignore_index, reduction_override='none')
                (_, sort_indices) = losses[valid_mask].sort(descending=True)
                valid_seg_weight[sort_indices[:batch_kept]] = 1.0
            seg_weight[valid_mask] = valid_seg_weight
            return seg_weight
def initialize_models(params: dict, vocab: Set[str], batch_first: bool, unk_token='UNK'):
    """Construct the evidence identifier and evidence classifier.

    Loads pretrained embeddings (required: params['embeddings']['embedding_file']),
    builds a shared word embedder, and creates two independent
    query/document encoder pairs — one for the binary evidence
    identifier and one for the multi-class evidence classifier.

    Returns a tuple of (evidence_identifier, evidence_classifier,
    word_interner, de_interner, evidence_classes).
    """
    if 'embedding_file' not in params['embeddings']:
        raise ValueError("No 'embedding_file' found in params!")
    (embeddings, word_interner, de_interner) = extract_embeddings(vocab, params['embeddings']['embedding_file'], unk_token=unk_token)
    if torch.cuda.is_available():
        embeddings = embeddings.cuda()
    word_embedder = WordEmbedder(embeddings, params['embeddings']['dropout'])

    def _encoder_pair():
        # Fresh (query, document) encoders sharing the word embedder; the
        # document encoder attends conditioned on the query encoding.
        query_enc = RNNEncoder(word_embedder, batch_first=batch_first, condition=False, attention_mechanism=BahadanauAttention(word_embedder.output_dimension))
        doc_enc = RNNEncoder(word_embedder, batch_first=batch_first, condition=True, attention_mechanism=BahadanauAttention(word_embedder.output_dimension, query_size=query_enc.output_dimension))
        return (query_enc, doc_enc)

    id_query, id_doc = _encoder_pair()
    evidence_identifier = AttentiveClassifier(id_doc, id_query, 2, params['evidence_identifier']['mlp_size'], params['evidence_identifier']['dropout'])

    cls_query, cls_doc = _encoder_pair()
    evidence_classes = {name: idx for (idx, name) in enumerate(params['evidence_classifier']['classes'])}
    evidence_classifier = AttentiveClassifier(cls_doc, cls_query, len(evidence_classes), params['evidence_classifier']['mlp_size'], params['evidence_classifier']['dropout'])
    return (evidence_identifier, evidence_classifier, word_interner, de_interner, evidence_classes)
@_ENCODERS.register_module()
class SparseEncoder(nn.Module):
    """Sparse 3D convolutional encoder for voxel features.

    Consumes a sparse voxel tensor, runs it through staged sparse conv
    blocks, downsamples along depth with a final sparse conv, and returns
    a dense BEV-style feature map with depth folded into channels.

    Args:
        in_channels (int): number of input voxel-feature channels.
        sparse_shape (list[int]): spatial shape (D, H, W) of the sparse tensor.
        order (tuple[str]): permutation of ('conv', 'norm', 'act') controlling
            sub-layer order inside each conv module.
        norm_cfg (dict): normalization config for all conv modules.
        base_channels (int): channels of the stem conv.
        output_channels (int): channels of the final downsampling conv.
        encoder_channels (tuple[tuple[int]]): per-stage output channels.
        encoder_paddings (tuple): per-stage, per-block paddings.
        block_type (str): 'conv_module' or 'basicblock'.
    """

    def __init__(self, in_channels, sparse_shape, order=('conv', 'norm', 'act'), norm_cfg=dict(type='BN1d', eps=0.001, momentum=0.01), base_channels=16, output_channels=128, encoder_channels=((16,), (32, 32, 32), (64, 64, 64), (64, 64, 64)), encoder_paddings=((1,), (1, 1, 1), (1, 1, 1), ((0, 1, 1), 1, 1)), block_type='conv_module'):
        super().__init__()
        assert (block_type in ['conv_module', 'basicblock'])
        self.sparse_shape = sparse_shape
        self.in_channels = in_channels
        self.order = order
        self.base_channels = base_channels
        self.output_channels = output_channels
        self.encoder_channels = encoder_channels
        self.encoder_paddings = encoder_paddings
        self.stage_num = len(self.encoder_channels)
        self.fp16_enabled = False
        assert (isinstance(order, tuple) and (len(order) == 3))
        assert (set(order) == {'conv', 'norm', 'act'})
        if (self.order[0] != 'conv'):
            # Non-conv-first orders: the stem applies only the conv so the
            # configured norm/act ordering starts from the first stage.
            self.conv_input = make_sparse_convmodule(in_channels, self.base_channels, 3, norm_cfg=norm_cfg, padding=1, indice_key='subm1', conv_type='SubMConv3d', order=('conv',))
        else:
            self.conv_input = make_sparse_convmodule(in_channels, self.base_channels, 3, norm_cfg=norm_cfg, padding=1, indice_key='subm1', conv_type='SubMConv3d')
        encoder_out_channels = self.make_encoder_layers(make_sparse_convmodule, norm_cfg, self.base_channels, block_type=block_type)
        # Downsample depth (D) by 2 while keeping H and W.
        self.conv_out = make_sparse_convmodule(encoder_out_channels, self.output_channels, kernel_size=(3, 1, 1), stride=(2, 1, 1), norm_cfg=norm_cfg, padding=0, indice_key='spconv_down2', conv_type='SparseConv3d')

    @_fp16(apply_to=('voxel_features',))
    def forward(self, voxel_features, coors, batch_size):
        """Encode voxel features and return a dense (N, C*D, H, W) map.

        Args:
            voxel_features: per-voxel feature tensor.
            coors: voxel coordinates (cast to int for spconv).
            batch_size (int): number of samples in the batch.
        """
        coors = coors.int()
        input_sp_tensor = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape, batch_size)
        x = self.conv_input(input_sp_tensor)
        encode_features = []
        for encoder_layer in self.encoder_layers:
            x = encoder_layer(x)
            encode_features.append(x)
        out = self.conv_out(encode_features[(- 1)])
        spatial_features = out.dense()
        (N, C, D, H, W) = spatial_features.shape
        # Fold the depth dimension into channels for 2D downstream heads.
        spatial_features = spatial_features.view(N, (C * D), H, W)
        return spatial_features

    def make_encoder_layers(self, make_block, norm_cfg, in_channels, block_type='conv_module', conv_cfg=dict(type='SubMConv3d')):
        """Build the staged sparse encoder layers; returns the final channel count."""
        assert (block_type in ['conv_module', 'basicblock'])
        self.encoder_layers = spconv.SparseSequential()
        for (i, blocks) in enumerate(self.encoder_channels):
            blocks_list = []
            for (j, out_channels) in enumerate(tuple(blocks)):
                padding = tuple(self.encoder_paddings[i])[j]
                if ((i != 0) and (j == 0) and (block_type == 'conv_module')):
                    # First block of each later stage downsamples spatially.
                    blocks_list.append(make_block(in_channels, out_channels, 3, norm_cfg=norm_cfg, stride=2, padding=padding, indice_key=f'spconv{(i + 1)}', conv_type='SparseConv3d'))
                elif (block_type == 'basicblock'):
                    if ((j == (len(blocks) - 1)) and (i != (len(self.encoder_channels) - 1))):
                        # Last block of a non-final stage does the downsampling.
                        blocks_list.append(make_block(in_channels, out_channels, 3, norm_cfg=norm_cfg, stride=2, padding=padding, indice_key=f'spconv{(i + 1)}', conv_type='SparseConv3d'))
                    else:
                        blocks_list.append(SparseBasicBlock(out_channels, out_channels, norm_cfg=norm_cfg, conv_cfg=conv_cfg))
                else:
                    blocks_list.append(make_block(in_channels, out_channels, 3, norm_cfg=norm_cfg, padding=padding, indice_key=f'subm{(i + 1)}', conv_type='SubMConv3d'))
                in_channels = out_channels
            stage_name = f'encoder_layer{(i + 1)}'
            stage_layers = spconv.SparseSequential(*blocks_list)
            self.encoder_layers.add_module(stage_name, stage_layers)
        return out_channels
def exp_rampup(rampup_length):
    """Exponential rampup from https://arxiv.org/abs/1610.02242.

    Returns a function mapping an epoch to a weight that ramps up as
    exp(-5 * (1 - epoch / rampup_length)**2) and saturates at 1.0 once
    ``epoch`` reaches ``rampup_length``.
    """
    def wrapper(epoch):
        if (epoch < rampup_length):
            # Clamp so negative epochs still yield a valid phase in [0, 1].
            epoch = np.clip(epoch, 0.0, rampup_length)
            phase = (1.0 - (epoch / rampup_length))
            return float(np.exp((((- 5.0) * phase) * phase)))
        else:
            return 1.0
    return wrapper
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.