code stringlengths 101 5.91M |
|---|
def _get_base_dist(dist):
if isinstance(dist, Independent):
return _get_base_dist(dist.base_dist)
else:
return dist |
class InferenceOptions(BaseOptions):
    """Command-line options for inference runs: source/reference paths,
    personalization flag, T-pose flag, and visdom host settings."""

    def initialize(self):
        """Extend the base option set with inference-specific arguments."""
        BaseOptions.initialize(self)
        # Long help text documenting the '|'-separated, 'key?=value' syntax for source inputs.
        src_input_desc = '\n All source paths and it supports multiple paths, uses "|" as the separator between all paths. \n The format is "src_path_1|src_path_2|src_path_3". \n Each src_input is "path?=path1,name?=name1,bg_path?=bg_path1". \n It must contain \'path\'. If \'name\' and \'bg_path\' are empty, they will be ignored.\n \n The \'path\' could be an image path, a path of a directory contains source images, and a video path.\n \n The \'name\' is the rename of this source input, if it is empty, we will ignore it, \n and use the filename of the path.\n \n The \'bg_path\' is the actual background path if provided, otherwise we will ignore it.\n \n There are several examples of formated source paths,\n \n 1. "path?=path1,name?=name1,bg_path?=bg_path1|path?=path2,name?=name2,bg_path?=bg_path2", \n this input will be parsed as [{path: path1, name: name1, bg_path:bg_path1}, \n {path: path2, name: name2, bg_path: bg_path2}];\n \n 2. "path?=path1,name?=name1|path?=path2,name?=name2", this input will be parsed as \n [{path: path1, name:name1}, {path: path2, name: name2}];\n \n 3. "path?=path1", this input will be parsed as [{path: path1}]. \n \n 4. "path1", this will be parsed as [{path: path1}].\n '
        self._parser.add_argument('--src_path', type=str, required=True, help=src_input_desc)
        # Same syntax for reference inputs, plus audio/fps and pose/camera smoothing factors.
        ref_input_desc = '\n All reference paths. It supports multiple paths, and uses "|" as the separator between all paths. \n The format is "ref_path_1|ref_path_2|ref_path_3". \n Each ref_path is "path?=path1,name?=name1,audio?=audio_path1,fps?=30,pose_fc?=400,cam_fc?=150".\n It must contain \'path\', and others could be empty, and they will be ignored.\n \n The \'path\' could be an image path, a path of a directory contains source images, and a video path.\n \n The \'name\' is the rename of this source input, if it is empty, we will ignore it,\n and use the filename of the path.\n \n The \'audio\' is the audio path, if it is empty, we will ignore it.\n \n The \'fps\' is fps of the final outputs, if it is empty, we will set it as the default fps 25.\n \n The \'pose_fc\' is the smooth factor of the temporal poses. The smaller of this value, the smoother of the \n temporal poses. If it is empty, we will set it as the default 400. In the most cases, using the default\n 400 is enough, and if you find the poses of the outputs are not stable, you can decrease this value.\n Otherwise, if you find the poses of the outputs are over stable, you can increase this value.\n \n The \'cam_fc\' is the smooth factor of the temporal cameras (locations in the image space). The smaller of\n this value, the smoother of the locations in sequences. If it is empty, we will set it as the default 150.\n In the most cases, the default 150 is enough.\n \n 1. "path?=path1,name?=name1,audio?=audio_path1,fps?=30,pose_fc?=400,cam_fc?=150|\n path?=path2,name?=name2,audio?=audio_path2,fps?=25,pose_fc?=450,cam_fc?=200", \n this input will be parsed as \n [{path: path1, name: name1, audio: audio_path1, fps: 30, pose_fc: 400, cam_fc: 150},\n {path: path2, name: name2, audio: audio_path2, fps: 25, pose_fc: 450, cam_fc: 200}]\n \n 2. "path?=path1,name?=name1, pose_fc?=450|path?=path2,name?=name2", this input will be parsed as \n [{path: path1, name: name1, fps: 25, pose_fc: 450, cam_fc: 150}, \n {path: path2, name: name2, fps: 25, pose_fc: 400, cam_fc: 150}]. \n \n 3. "path?=path1|path?=path2", this input will be parsed as \n [{path: path1, fps:25, pose_fc: 400, cam_fc: 150}, {path: path2, fps: 25, pose_fc: 400, cam_fc: 150}].\n \n 4. "path1|path2", this input will be parsed as\n [{path: path1, fps:25, pose_fc: 400, cam_fc: 150}, {path: path2, fps: 25, pose_fc: 400, cam_fc: 150}].\n \n 5. "path1", this will be parsed as [{path: path1, fps: 25, pose_fc: 400, cam_fc: 150}].\n\n '
        self._parser.add_argument('--ref_path', type=str, help=ref_input_desc)
        self._parser.add_argument('--has_personalize', action='store_true', help='has personalization or not.')
        self._parser.add_argument('--T_pose', action='store_true', default=False, help='view as T pose or not in human novel view synthesis.')
        # Visdom visualization server settings.
        self._parser.add_argument('--ip', type=str, default='', help='visdom ip')
        self._parser.add_argument('--port', type=int, default=31150, help='visdom port')
        # Marks this option set as inference (not training) for BaseOptions.parse.
        self.is_train = False
def _bounded_search(answer_pattern, text):
    """Search for answer_pattern in text, requiring the match not be preceded
    by a letter and be immediately followed by one of the SHORT_PUNCT tokens."""
    punct_alt = '|'.join([re.escape(t) for t in SHORT_PUNCT])
    pattern = '({})'.format('(?<!([A-Za-z]))' + answer_pattern + '(?=(' + punct_alt + '))')
    return re.search(pattern, text)


def _plain_search(answer_pattern, text):
    """Unanchored fallback search for answer_pattern anywhere in text."""
    return re.search('({})'.format(answer_pattern), text)


def sort_sp_doc_ids(doc1_id, doc1, doc2_id, doc2, answer, id, alt_doc1=None, alt_doc2=None):
    """Order two supporting documents by where the answer string occurs.

    Tries a punctuation-bounded search on doc1/doc2, then case-insensitively,
    then on the alternative texts; if none match, falls back to a plain
    substring search with the same cascade.

    Args:
        doc1_id, doc1: id and text of the first document.
        doc2_id, doc2: id and text of the second document.
        answer: answer string to locate (regex-escaped before searching).
        id: example identifier, used only in error reporting.
        alt_doc1, alt_doc2: optional alternative document texts.

    Returns:
        Tuple (first_doc_id, second_doc_id, count, ans_in_lower): the document
        containing the answer ordered first, count 1 when only one document
        matched (2 when both did), and ans_in_lower True when the match only
        succeeded after lower-casing.

    Raises:
        AssertionError: when the answer is found nowhere. Raised explicitly
            (instead of `assert False`) so the check survives `python -O`,
            while keeping the exception type callers may rely on.
    """
    _answer = re.escape(answer)
    _answer_lower = _answer.lower()
    ans_in_lower = False
    a = _bounded_search(_answer, doc1)
    b = _bounded_search(_answer, doc2)
    _pass = False
    if (a is None) and (b is None):
        # Retry the bounded search case-insensitively.
        a = _bounded_search(_answer_lower, doc1.lower())
        b = _bounded_search(_answer_lower, doc2.lower())
        if a or b:
            ans_in_lower = True
            _pass = True
        elif alt_doc1 and alt_doc2:
            # Fall back to the alternative document texts.
            a = _bounded_search(_answer, alt_doc1)
            b = _bounded_search(_answer, alt_doc2)
            if a or b:
                _pass = True
            else:
                a = _bounded_search(_answer_lower, alt_doc1.lower())
                b = _bounded_search(_answer_lower, alt_doc2.lower())
                if a or b:
                    ans_in_lower = True
                    _pass = True
    else:
        _pass = True
    if _pass is False:
        # No punctuation-bounded match anywhere: relax to a plain search.
        a = _plain_search(_answer, doc1)
        b = _plain_search(_answer, doc2)
        if (a is None) and (b is None):
            a = _plain_search(_answer_lower, doc1.lower())
            b = _plain_search(_answer_lower, doc2.lower())
            if a or b:
                ans_in_lower = True
            elif (alt_doc1 is None) or (alt_doc2 is None):
                raise AssertionError((answer, id))
            else:
                a = _plain_search(_answer, alt_doc1)
                b = _plain_search(_answer, alt_doc2)
                if (a is None) and (b is None):
                    a = _plain_search(_answer_lower, alt_doc1.lower())
                    b = _plain_search(_answer_lower, alt_doc2.lower())
                    if a or b:
                        ans_in_lower = True
                    else:
                        raise AssertionError((answer, id))
    if a is None:
        return (doc1_id, doc2_id, 1, ans_in_lower)
    else:
        return (doc2_id, doc1_id, (1 if (b is None) else 2), ans_in_lower)
def train_rcnn(cfg, dataset, image_set, root_path, dataset_path, frequent, kvstore, flip, shuffle, resume, ctx, pretrained, epoch, prefix, begin_epoch, end_epoch, train_shared, lr, lr_step, proposal, logger=None, output_path=None):
    """Train a Fast-RCNN-style head on precomputed proposals with MXNet.

    Builds the training symbol from the config, assembles the proposal roidb,
    wires up metrics/callbacks/LR schedule, and runs ``mod.fit``.

    Args mirror the experiment config/CLI: ``ctx`` is the list of devices,
    ``prefix`` the checkpoint path prefix, ``begin_epoch``/``end_epoch`` the
    training epoch range, and ``lr_step`` a comma-separated list of decay
    epochs. ``logger`` and ``output_path`` are optional.
    """
    # Fixed seeds for reproducible runs.
    mx.random.seed(3)
    np.random.seed(3)
    if (not logger):
        logging.basicConfig()
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
    # NOTE(review): eval() on a config-provided symbol name — fine for trusted
    # experiment configs, but never feed untrusted input here.
    sym_instance = eval(((cfg.symbol + '.') + cfg.symbol))()
    sym = sym_instance.get_symbol_rcnn(cfg, is_train=True)
    # One BATCH_IMAGES-sized sub-batch per device.
    batch_size = len(ctx)
    input_batch_size = (cfg.TRAIN.BATCH_IMAGES * batch_size)
    pprint.pprint(cfg)
    logger.info('training rcnn cfg:{}\n'.format(pprint.pformat(cfg)))
    # '+'-separated image sets are loaded separately and merged into one roidb.
    image_sets = [iset for iset in image_set.split('+')]
    roidbs = [load_proposal_roidb(dataset, image_set, root_path, dataset_path, proposal=proposal, append_gt=True, flip=flip, result_path=output_path) for image_set in image_sets]
    roidb = merge_roidb(roidbs)
    roidb = filter_roidb(roidb, cfg)
    # Normalization statistics of the bbox regression targets; undone when checkpointing.
    (means, stds) = add_bbox_regression_targets(roidb, cfg)
    train_data = ROIIter(roidb, cfg, batch_size=input_batch_size, shuffle=shuffle, ctx=ctx, aspect_grouping=cfg.TRAIN.ASPECT_GROUPING)
    # Largest possible input shape, used by the module to pre-allocate memory.
    max_data_shape = [('data', (cfg.TRAIN.BATCH_IMAGES, 3, max([v[0] for v in cfg.SCALES]), max([v[1] for v in cfg.SCALES])))]
    data_shape_dict = dict((train_data.provide_data_single + train_data.provide_label_single))
    sym_instance.infer_shape(data_shape_dict)
    if resume:
        print('continue training from ', begin_epoch)
        (arg_params, aux_params) = load_param(prefix, begin_epoch, convert=True)
    else:
        # Start from a pretrained backbone and initialize the new RCNN layers.
        (arg_params, aux_params) = load_param(pretrained, epoch, convert=True)
        sym_instance.init_weight_rcnn(cfg, arg_params, aux_params)
    sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict)
    data_names = [k[0] for k in train_data.provide_data_single]
    label_names = [k[0] for k in train_data.provide_label_single]
    # When the backbone is shared with RPN training, freeze the shared parameters.
    if train_shared:
        fixed_param_prefix = cfg.network.FIXED_PARAMS_SHARED
    else:
        fixed_param_prefix = cfg.network.FIXED_PARAMS
    mod = MutableModule(sym, data_names=data_names, label_names=label_names, logger=logger, context=ctx, max_data_shapes=[max_data_shape for _ in range(batch_size)], fixed_param_prefix=fixed_param_prefix)
    if cfg.TRAIN.RESUME:
        mod._preload_opt_states = ('%s-%04d.states' % (prefix, begin_epoch))
    # Composite metric: accuracy + classification log-loss + bbox smooth-L1 loss.
    eval_metric = metric.RCNNAccMetric(cfg)
    cls_metric = metric.RCNNLogLossMetric(cfg)
    bbox_metric = metric.RCNNL1LossMetric(cfg)
    eval_metrics = mx.metric.CompositeEvalMetric()
    for child_metric in [eval_metric, cls_metric, bbox_metric]:
        eval_metrics.add(child_metric)
    batch_end_callback = callback.Speedometer(train_data.batch_size, frequent=frequent)
    # do_checkpoint un-normalizes bbox regression weights with (means, stds) before saving.
    epoch_end_callback = [mx.callback.module_checkpoint(mod, prefix, period=1, save_optimizer_states=True), callback.do_checkpoint(prefix, means, stds)]
    # Step-decay LR schedule: drop by lr_factor at each epoch listed in lr_step.
    base_lr = lr
    lr_factor = cfg.TRAIN.lr_factor
    lr_epoch = [float(epoch) for epoch in lr_step.split(',')]
    lr_epoch_diff = [(epoch - begin_epoch) for epoch in lr_epoch if (epoch > begin_epoch)]
    # Account for decay steps already passed when resuming mid-schedule.
    lr = (base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff))))
    lr_iters = [int(((epoch * len(roidb)) / batch_size)) for epoch in lr_epoch_diff]
    print('lr', lr, 'lr_epoch_diff', lr_epoch_diff, 'lr_iters', lr_iters)
    lr_scheduler = WarmupMultiFactorScheduler(lr_iters, lr_factor, cfg.TRAIN.warmup, cfg.TRAIN.warmup_lr, cfg.TRAIN.warmup_step)
    optimizer_params = {'momentum': cfg.TRAIN.momentum, 'wd': cfg.TRAIN.wd, 'learning_rate': lr, 'lr_scheduler': lr_scheduler, 'rescale_grad': 1.0, 'clip_gradient': None}
    # Overlap data loading with computation.
    if (not isinstance(train_data, PrefetchingIter)):
        train_data = PrefetchingIter(train_data)
    mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback, batch_end_callback=batch_end_callback, kvstore=kvstore, optimizer='sgd', optimizer_params=optimizer_params, arg_params=arg_params, aux_params=aux_params, begin_epoch=begin_epoch, num_epoch=end_epoch)
def _concat_dataset(cfg, default_args=None):
    """Build one dataset per annotation file in ``cfg`` and wrap them all in a
    ConcatDataset. Per-index keys (img/seg prefix, proposal file) are applied
    only when given as a list/tuple."""
    from .dataset_wrappers import ConcatDataset
    ann_files = cfg['ann_file']
    # Keys whose values may be given per annotation file.
    indexed_fields = (
        ('img_prefix', cfg.get('img_prefix', None)),
        ('seg_prefix', cfg.get('seg_prefix', None)),
        ('proposal_file', cfg.get('proposal_file', None)),
    )
    datasets = []
    for idx, ann_file in enumerate(ann_files):
        sub_cfg = copy.deepcopy(cfg)
        sub_cfg['ann_file'] = ann_file
        for key, values in indexed_fields:
            if isinstance(values, (list, tuple)):
                sub_cfg[key] = values[idx]
        datasets.append(build_dataset(sub_cfg, default_args))
    return ConcatDataset(datasets)
def test_digits_cosine_two_stage_sparse():
    """The two-stage optimizer on the sparse cosine digits data must reproduce
    the precomputed reference ranking and gains."""
    selector = SaturatedCoverageSelection(100, 'precomputed', optimizer='two-stage')
    selector.fit(X_digits_cosine_sparse)
    assert_array_equal(selector.ranking, digits_cosine_ranking)
    assert_array_almost_equal(selector.gains, digits_cosine_gains, 4)
class TestA1Slam(GtsamTestCase):
    """ROS integration test for the A1 SLAM node: publishes synthetic IMU
    streams and compares the estimated poses/trajectory against closed-form
    expectations."""

    def setUp(self):
        """Start a test node and wire up publishers, subscribers, and services."""
        rospy.init_node('test_node', anonymous=True)
        rospy.loginfo('Initialized test_node')
        imu_topic = rospy.get_param('/imu/topic')
        # NOTE(review): Publisher without an explicit queue_size — rospy warns
        # and uses synchronous behavior; confirm this is intended.
        self.imu_pub = rospy.Publisher(imu_topic, HighState)
        rospy.Subscriber('/pose_estimate', PoseStamped, self.poses_callback)
        rospy.Subscriber('/traj_estimate', Path, self.traj_callback)
        rospy.wait_for_service('get_results_service')
        self.send_results = rospy.ServiceProxy('get_results_service', GetResults)
        rospy.wait_for_service('reset_results_service')
        self.reset_results = rospy.ServiceProxy('reset_results_service', GetResults)
        rospy.wait_for_service('reset_imu_service')
        self.reset_imu = rospy.ServiceProxy('reset_imu_service', GetResults)
        # Incrementally-received pose estimates and the latest full trajectory.
        self.poses = gtsam.Values()
        self.traj = gtsam.Values()
        # Give ROS time to establish connections before tests publish.
        rospy.sleep(1)

    def publish_msgs(self, publisher, msgs, rate):
        """Publish every message in ``msgs`` on ``publisher`` at ``rate`` Hz."""
        ros_rate = rospy.Rate(rate)
        for msg in msgs:
            publisher.publish(msg)
            ros_rate.sleep()

    def process_pose_msg(self, msg: PoseStamped):
        """Convert a PoseStamped message into a gtsam.Pose3."""
        rotation = gtsam.Rot3.Quaternion(msg.pose.orientation.w, msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z)
        translation = gtsam.Point3(msg.pose.position.x, msg.pose.position.y, msg.pose.position.z)
        pose = gtsam.Pose3(rotation, translation)
        return pose

    def poses_callback(self, msg: PoseStamped):
        """Store each incoming pose estimate under the next X(i) key."""
        pose = self.process_pose_msg(msg)
        self.poses.insert(X(self.poses.size()), pose)

    def traj_callback(self, msg: Path):
        """Replace the stored trajectory with the latest full estimate."""
        new_traj = gtsam.Values()
        for pose_msg in msg.poses:
            pose = self.process_pose_msg(pose_msg)
            new_traj.insert(X(new_traj.size()), pose)
        self.traj = new_traj

    def generate_no_acceleration_imu_data(self):
        """Yield 501 stationary IMU messages (gravity only, on z)."""
        for _ in range(501):
            highstate = HighState()
            highstate.header.stamp = rospy.Time.now()
            highstate.imu.accelerometer = [0, 0, 9.81]
            highstate.imu.gyroscope = [0, 0, 0]
            (yield highstate)

    def generate_acceleration_imu_data(self):
        """Yield 501 IMU messages with constant 1 m/s^2 acceleration along x."""
        for _ in range(501):
            highstate = HighState()
            highstate.header.stamp = rospy.Time.now()
            highstate.imu.accelerometer = [1, 0, 9.81]
            highstate.imu.gyroscope = [0, 0, 0]
            (yield highstate)

    def test_stationary_imu_poses(self):
        """A stationary IMU must produce identity poses and zero velocities."""
        self.reset_results()
        self.reset_imu()
        # 501 messages at 100 Hz ~= 5 s of data -> 6 pose/velocity states expected.
        self.publish_msgs(self.imu_pub, self.generate_no_acceleration_imu_data(), 100)
        expected_values = gtsam.Values()
        expected_poses = gtsam.Values()
        for i in range(6):
            expected_values.insert(X(i), gtsam.Pose3())
            expected_poses.insert(X(i), gtsam.Pose3())
            expected_values.insert(V(i), gtsam.Point3(0, 0, 0))
        # A single shared IMU bias estimate.
        expected_values.insert(B(0), gtsam.imuBias.ConstantBias())
        # Let callbacks catch up before querying results.
        rospy.sleep(1)
        response = self.send_results()
        actual = gtsam.Values()
        actual.deserialize(response.results)
        self.gtsamAssertEquals(actual, expected_values, 0.005)
        self.gtsamAssertEquals(self.poses, expected_poses, 0.005)
        self.gtsamAssertEquals(self.traj, expected_poses, 0.005)
        self.poses.clear()
        self.traj.clear()

    def test_acceleration_imu_poses(self):
        """Constant x-acceleration must yield x = 0.5*t^2 poses and v = t velocities."""
        self.reset_results()
        self.reset_imu()
        self.publish_msgs(self.imu_pub, self.generate_acceleration_imu_data(), 100)
        expected_values = gtsam.Values()
        expected_poses = gtsam.Values()
        for i in range(6):
            # Kinematics at t = i seconds under 1 m/s^2.
            expected_values.insert(X(i), gtsam.Pose3(gtsam.Pose2((0.5 * (i ** 2)), 0.0, 0.0)))
            expected_poses.insert(X(i), gtsam.Pose3(gtsam.Pose2((0.5 * (i ** 2)), 0.0, 0.0)))
            expected_values.insert(V(i), gtsam.Point3(i, 0, 0))
        expected_values.insert(B(0), gtsam.imuBias.ConstantBias())
        rospy.sleep(1)
        response = self.send_results()
        actual = gtsam.Values()
        actual.deserialize(response.results)
        self.gtsamAssertEquals(actual, expected_values, 0.005)
        self.gtsamAssertEquals(self.poses, expected_poses, 0.005)
        self.gtsamAssertEquals(self.traj, expected_poses, 0.005)
        self.poses.clear()
        self.traj.clear()
# NOTE(review): this bare call is almost certainly extraction residue of a
# registry decorator (e.g. `@BACKBONES.register_module()`) — confirm against
# the original repository before relying on this file.
_module()


class ResNeSt(ResNetV1d):
    """ResNeSt (split-attention ResNet) backbone built on ResNetV1d.

    ``arch_settings`` maps network depth to (block class, per-stage block counts).
    """
    arch_settings = {50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)), 200: (Bottleneck, (3, 24, 36, 3))}

    def __init__(self, groups=1, base_width=4, radix=2, reduction_factor=4, avg_down_stride=True, **kwargs):
        # Split-attention hyperparameters, consumed later by make_res_layer;
        # all remaining kwargs go to the ResNetV1d constructor.
        self.groups = groups
        self.base_width = base_width
        self.radix = radix
        self.reduction_factor = reduction_factor
        self.avg_down_stride = avg_down_stride
        super(ResNeSt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Build one residual stage, threading the stored split-attention settings."""
        return ResLayer(groups=self.groups, base_width=self.base_width, base_channels=self.base_channels, radix=self.radix, reduction_factor=self.reduction_factor, avg_down_stride=self.avg_down_stride, **kwargs)
def validate_limit_model_concurrency(value):
    """argparse type-checker: return ``value`` unchanged if non-negative,
    otherwise raise ArgumentTypeError."""
    if value >= 0:
        return value
    raise argparse.ArgumentTypeError('Limit model concurrency must be a non-negative integer.')
class WeightedSequenceTagger(SequenceTagger):
    """SequenceTagger variant that scales each sentence's CRF loss by a
    per-sentence weight stored as a 'weight' tag on the sentence's first token."""

    def _calculate_loss(self, features: torch.Tensor, sentences: List[Sentence]) -> float:
        """Compute the (weighted) tagging loss for a batch.

        Args:
            features: per-token emission scores; assumed shape
                (batch, max_len, n_tags) — TODO confirm against SequenceTagger.
            sentences: batch of Flair sentences; an optional 'weight' tag on
                the first token scales that sentence's loss.

        Returns:
            Scalar loss tensor: mean weighted CRF negative log-likelihood, or
            mean token-level cross-entropy when the CRF is disabled.
        """
        lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
        tag_list: List = []
        weight_list: List[float] = []
        for (s_id, sentence) in enumerate(sentences):
            tag_idx: List[int] = [self.tag_dictionary.get_idx_for_item(token.get_tag(self.tag_type).value) for token in sentence]
            tag = torch.tensor(tag_idx, device=flair.device)
            tag_list.append(tag)
            # Missing, empty, or non-numeric weight tags fall back to 1.0.
            # The original bare `except:` is narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            try:
                weight = sentence.tokens[0].get_tag('weight').value
                weight_list.append(float(weight))
            except Exception:
                weight_list.append(1.0)
        if self.use_crf:
            (tags, _) = pad_tensors(tag_list)
            forward_score = self._forward_alg(features, lengths)
            gold_score = self._score_sentence(features, tags, lengths)
            # Per-sentence negative log-likelihood, scaled by its weight.
            score = (forward_score - gold_score)
            weight_tensor = torch.tensor(weight_list, device=flair.device)
            score = (score * weight_tensor)
            return score.mean()
        else:
            # NOTE(review): the sentence weights are NOT applied on this
            # branch — confirm whether the softmax path should also be weighted.
            score = 0
            for (sentence_feats, sentence_tags, sentence_length) in zip(features, tag_list, lengths):
                sentence_feats = sentence_feats[:sentence_length]
                score += torch.nn.functional.cross_entropy(sentence_feats, sentence_tags)
            score /= len(features)
            return score
class TestLite(TestCase):
    """Smoke tests for the regular and spawned torch-nano training loops."""

    def setUp(self):
        # Point PYTHONPATH at the project root (five levels above this file)
        # so spawned subprocesses can import the project.
        here = os.path.dirname(__file__)
        project_root = os.path.abspath(os.path.join(here, *(['..'] * 5)))
        os.environ['PYTHONPATH'] = project_root

    def _make_training_parts(self):
        """Build the model/optimizer/loader/loss shared by both tests."""
        net = ResNet18(10, pretrained=False, include_top=False, freeze=True)
        criterion = nn.CrossEntropyLoss()
        opt = torch.optim.Adam(net.parameters(), lr=0.01)
        loader = create_data_loader(data_dir, batch_size, num_workers, data_transform)
        return net, opt, loader, criterion

    def test_torch_nano(self):
        net, opt, loader, criterion = self._make_training_parts()
        regular_train_loop(net, opt, train_loader=loader, loss_func=criterion)

    def test_torch_nano_spawn(self):
        net, opt, loader, criterion = self._make_training_parts()
        subprocess_train_loop(net, opt, train_loader=loader, loss_func=criterion)
def compute_live_dead_symbol_refs(code: Union[(str, ast.AST)]) -> Tuple[(Set[str], Set[str])]:
    """Return the (live, dead) symbol-name sets referenced by ``code``.

    String input is dedented before analysis; the per-statement results from
    the underlying helper are collapsed into plain name sets and simplified.
    """
    if isinstance(code, str):
        code = textwrap.dedent(code)
    live_refs, dead_refs = compute_live_dead_symbol_refs_with_stmts(code)
    live_names = {ref.ref for ref in live_refs}
    return (_simplify_symbol_refs(live_names), _simplify_symbol_refs(dead_refs))
def run_kmeans(x, num_clusters, temperature):
    """Cluster features with GPU faiss k-means at several granularities.

    Args:
        x: (N, D) feature matrix; faiss expects float32 — TODO confirm upstream.
        num_clusters: iterable of cluster counts; one clustering per entry.
        temperature: scale applied to the per-cluster density estimates.

    Returns:
        dict with one entry per clustering: 'im2cluster' (LongTensor of
        assignments), 'centroids' (L2-normalized Tensor), 'density' (Tensor).
    """
    print('performing kmeans clustering')
    results = {'im2cluster': [], 'centroids': [], 'density': []}
    for (seed, num_cluster) in enumerate(num_clusters):
        d = x.shape[1]
        k = int(num_cluster)
        clus = faiss.Clustering(d, k)
        clus.verbose = False
        clus.niter = 20
        clus.nredo = 5
        clus.seed = seed  # different seed per clustering granularity
        clus.max_points_per_centroid = 1000
        clus.min_points_per_centroid = 5
        res = faiss.StandardGpuResources()
        cfg = faiss.GpuIndexFlatConfig()
        cfg.useFloat16 = False
        cfg.device = 0  # assumes GPU 0 — TODO confirm for multi-GPU setups
        index = faiss.GpuIndexFlatL2(res, d, cfg)
        clus.train(x, index)
        # Nearest centroid (and its squared L2 distance) for every sample.
        (D, I) = index.search(x, 1)
        im2cluster = [int(n[0]) for n in I]
        centroids = faiss.vector_to_array(clus.centroids).reshape(k, d)
        # Collect sample-to-centroid distances per cluster.
        Dcluster = [[] for c in range(k)]
        for (im, i) in enumerate(im2cluster):
            Dcluster[i].append(D[im][0])
        # Per-cluster concentration: mean sqrt-distance damped by log(size+10).
        # (Note: this reuses the name `d`; the feature dim is not needed below.)
        density = np.zeros(k)
        for (i, dist) in enumerate(Dcluster):
            if (len(dist) > 1):
                d = ((np.asarray(dist) ** 0.5).mean() / np.log((len(dist) + 10)))
                density[i] = d
        # Clusters with <= 1 sample inherit the maximum observed density.
        dmax = density.max()
        for (i, dist) in enumerate(Dcluster):
            if (len(dist) <= 1):
                density[i] = dmax
        # Clamp outliers, then rescale so the mean density equals `temperature`.
        density = density.clip(np.percentile(density, 10), np.percentile(density, 90))
        density = ((temperature * density) / density.mean())
        centroids = torch.Tensor(centroids).cuda()
        centroids = nn.functional.normalize(centroids, p=2, dim=1)
        im2cluster = torch.LongTensor(im2cluster).cuda()
        density = torch.Tensor(density).cuda()
        results['centroids'].append(centroids)
        results['density'].append(density)
        results['im2cluster'].append(im2cluster)
    return results
def hammingSimilarity(l1=None, l2=None):
    """Return the Hamming similarity of two equal-length sequences.

    Similarity = 1 - (differing positions / total positions), rounded to two
    decimal places.

    Args:
        l1, l2: equal-length sequences of sensor readings; default to empty.

    Returns:
        float in [0, 1]; 1.0 for empty inputs (the original raised
        ZeroDivisionError in that case).
    """
    # Avoid the mutable-default-argument anti-pattern; None means "empty".
    l1 = [] if l1 is None else l1
    l2 = [] if l2 is None else l2
    nsensors = len(l1)
    if nsensors == 0:
        # Two empty readings are trivially identical.
        return 1.0
    # Count positions where the two readings disagree.
    hammingD = sum(1 for a, b in zip(l1, l2) if a != b)
    ratio = float(hammingD) / nsensors
    return round(1 - ratio, 2)
import uuid
from dataclasses import dataclass, asdict


# NOTE(review): the original source had no @dataclass decorator, yet the class
# relies on dataclass machinery (field annotations with defaults, __post_init__,
# asdict(self)); the decorator was almost certainly lost and is restored here.
@dataclass
class MultimodalConfig():
    """Training/evaluation configuration for the multimodal (image + LM) model.

    Required fields:
        batch_size: global training batch size.
        train_steps: total number of optimizer steps.
    All remaining fields are hyperparameters with defaults; __post_init__
    derives the DeepSpeed scheduler/config dicts and an auto-generated name.
    """
    batch_size: int
    train_steps: int
    optimizer_name: str = 'AdamW'
    lr: float = 0.0008
    image_enc_lr: float = None  # separate LR for the image encoder, if any
    min_lr: float = 0.0
    lr_decay_iters: int = None  # when set, switches to a decaying LR schedule
    gradient_accumulation_steps: int = 1
    image_size: int = 256
    eval_every: int = 250
    eval_steps: int = 25
    zero_stage: int = 2  # DeepSpeed ZeRO optimization stage
    gradient_clipping: float = 1.0
    warmup_num_steps: int = 100
    weight_decay: float = 0.0
    run_blind: bool = False
    fine_tune: bool = False
    load_optimizer: bool = True
    save_every: int = 2500
    save: str = None  # checkpoint save directory
    load: str = None  # checkpoint load path
    train_dataset_name: str = 'conceptual captions'
    eval_dataset_name: str = 'na'
    train_dataset_dir: str = './'
    eval_dataset_dir: str = './'
    encs_dir: str = 'vanilla'
    eval_dataset_pct: float = 0.1
    encoder_name: str = 'clip'
    tokenizer_name: str = 'gpt2'
    lm_name: str = 'EleutherAI/gpt-j-6B'
    image_seq_len: int = 2
    pretrained_img_encoder: bool = True
    seq_len: int = None
    freeze_lm: bool = True
    freeze_img_encoder: bool = True
    image_embed_dropout_prob: float = 0.0
    use_image_embed_layernorm: bool = False
    adapter_config: dict = None  # normalized to {} in __post_init__
    class_dict: dict = None  # presence turns the model into a classifier
    name: str = None  # auto-generated short uuid when omitted
    log_every: int = 1
    wandb_project: str = 'magma'

    def print(self):
        """Pretty-print the config on the main process only."""
        if is_main():
            print(('-' * 100))
            pprint(self.__dict__, indent=4)
            print(('-' * 100))

    def __post_init__(self):
        self.is_classifier = (self.class_dict is not None)
        if (self.adapter_config is None):
            self.adapter_config = {}
        if (self.lr_decay_iters is None):
            self.lr_scheduler = 'WarmupLR'
            self.scheduler_dict = {'type': self.lr_scheduler, 'params': {'warmup_min_lr': self.min_lr, 'warmup_max_lr': self.lr, 'warmup_num_steps': self.warmup_num_steps}}
        else:
            self.lr_scheduler = 'WarmupDecayLR'
            # NOTE(review): the original dict literal listed 'total_num_steps'
            # twice (self.lr_decay_iters, then 'auto'); Python keeps the later
            # 'auto', so that runtime behavior is preserved here — confirm
            # whether lr_decay_iters was meant to be used instead.
            self.scheduler_dict = {'type': self.lr_scheduler, 'params': {'warmup_min_lr': self.min_lr, 'warmup_max_lr': 'auto', 'warmup_num_steps': self.warmup_num_steps, 'total_num_steps': 'auto'}}
        self.deepspeed_config_params = {'train_batch_size': 'auto', 'gradient_accumulation_steps': self.gradient_accumulation_steps, 'gradient_clipping': self.gradient_clipping, 'bf16': {'enabled': True, 'loss_scale_window': 250}, 'scheduler': self.scheduler_dict, 'zero_optimization': {'stage': self.zero_stage, 'load_from_fp32_weights': False}}
        if (self.name is None):
            self.name = str(uuid.uuid4())[:8]

    # NOTE(review): restored @classmethod — the original called cls(...) but
    # lacked the decorator, so MultimodalConfig.from_yml(path) would have
    # received the path as `cls`.
    @classmethod
    def from_yml(cls, path):
        """Alternate constructor: load field values from a YAML config file."""
        return cls(**load_config(path))

    def to_dict(self):
        """Return the configuration as a plain dict of dataclass fields."""
        return asdict(self)
def multiple_run_tune_separate(default_params, tune_params, save_path):
    """Tune and evaluate a continual-learning model over multiple runs, then
    pickle the aggregated accuracies, chosen params, timing, and RAM usage.

    Args:
        default_params: namespace with data/model settings (data, cl_type,
            trick, num_val, num_runs, seed, train_val, model_name, ...).
        tune_params: hyperparameter search space forwarded to the tuners.
        save_path: output pickle filename; auto-generated from
            model/data/seed when falsy.
    """
    start = time.time()
    print('Setting up data stream')
    data_continuum = continuum(default_params.data, default_params.cl_type, default_params)
    data_end = time.time()
    print('data setup time: {}'.format((data_end - start)))
    # num_val == -1 means "validate on every task".
    if (default_params.num_val == (- 1)):
        default_params.num_val = data_continuum.data_object.task_nums
    # Results are written under <result>/<data>/<cl_type>[/<trick>].
    result_path = load_yaml('config/global.yml', key='path')['result']
    table_path = (((result_path + default_params.data) + '/') + default_params.cl_type)
    # If any trick flag is enabled, nest results one level deeper (first enabled wins).
    for i in default_params.trick:
        if default_params.trick[i]:
            trick_name = i
            table_path = (((((result_path + default_params.data) + '/') + default_params.cl_type) + '/') + trick_name)
            break
    print(table_path)
    os.makedirs(table_path, exist_ok=True)
    if (not save_path):
        save_path = (((((default_params.model_name + '_') + default_params.data_name) + '_') + str(default_params.seed)) + '.pkl')
    accuracy_list = []
    params_keep = []
    # num_runs may be a run count or an explicit list of run indices.
    if isinstance(default_params.num_runs, int):
        run_list = range(default_params.num_runs)
    else:
        run_list = default_params.num_runs
    for run in run_list:
        tmp_acc = []
        run_start = time.time()
        data_continuum.new_run()
        # Tune with a held-out validation split, or directly on the stream.
        if default_params.train_val:
            single_tune_train_val(data_continuum, default_params, tune_params, params_keep, tmp_acc, run)
        else:
            single_tune(data_continuum, default_params, tune_params, params_keep, tmp_acc, run)
        run_end = time.time()
        print('run {}avg_end_acc {}train time {}'.format(run, np.mean(tmp_acc[(- 1)]), (run_end - run_start)))
        accuracy_list.append(np.array(tmp_acc))
    end = time.time()
    accuracy_array = np.array(accuracy_list)
    # Aggregate everything into one pickled result record.
    result = {'seed': default_params.seed}
    result['time'] = (end - start)
    result['acc_array'] = accuracy_array
    result['ram'] = check_ram_usage()
    result['best_params'] = params_keep
    save_file = open(((table_path + '/') + save_path), 'wb')
    pickle.dump(result, save_file)
    save_file.close()
    print(' Total {} run: {}s '.format(default_params.num_runs, (end - start)))
    print(' Seed {} RAM: {}s '.format(default_params.seed, result['ram']))
def load_cdod_voc_instances(dirname: str, split: str, class_names: Union[(List[str], Tuple[(str, ...)])]):
    """Load Pascal-VOC-style detection annotations for a cross-domain split.

    Args:
        dirname: dataset root containing ImageSets/, Annotations/, JPEGImages/.
        split: split name; file ids are read from ImageSets/Main/<split>.txt.
        class_names: ordered class names; category_id is the index herein.

    Returns:
        List of detectron2-style dicts with file_name/image_id/height/width and
        'annotations' (empty when the image has no XML annotation file).
    """
    with PathManager.open(os.path.join(dirname, 'ImageSets', 'Main', (split + '.txt'))) as f:
        # Fix: np.str was a deprecated alias of the builtin str and was removed
        # in NumPy 1.24; use str directly.
        fileids = np.loadtxt(f, dtype=str)
    annotation_dirname = PathManager.get_local_path(os.path.join(dirname, 'Annotations/'))
    dicts = []
    for fileid in fileids:
        anno_file = os.path.join(annotation_dirname, (fileid + '.xml'))
        jpeg_file = os.path.join(dirname, 'JPEGImages', (fileid + '.jpg'))
        if (not os.path.isfile(anno_file)):
            # Unlabeled image: record its size with an empty annotation list.
            with Image.open(jpeg_file) as img:
                (width, height) = img.size
            r = {'file_name': jpeg_file, 'image_id': fileid, 'height': height, 'width': width}
            instances = []
            r['annotations'] = instances
            dicts.append(r)
            continue
        with PathManager.open(anno_file) as f:
            tree = ET.parse(f)
        r = {'file_name': jpeg_file, 'image_id': fileid, 'height': int(tree.findall('./size/height')[0].text), 'width': int(tree.findall('./size/width')[0].text)}
        instances = []
        for obj in tree.findall('object'):
            cls = obj.find('name').text
            if (cls not in class_names):
                continue
            # Skip objects flagged as difficult.
            difficult = int(obj.find('difficult').text)
            if (difficult == 1):
                continue
            bbox = obj.find('bndbox')
            bbox = [float(bbox.find(x).text) for x in ['xmin', 'ymin', 'xmax', 'ymax']]
            # VOC coordinates are 1-based; shift the top-left corner to 0-based.
            bbox[0] -= 1.0
            bbox[1] -= 1.0
            instances.append({'category_id': class_names.index(cls), 'bbox': bbox, 'bbox_mode': BoxMode.XYXY_ABS})
        r['annotations'] = instances
        dicts.append(r)
    return dicts
class TFDPRContextEncoder(metaclass=DummyObject):
    """Placeholder for the TensorFlow DPR context encoder, used when TF is not
    installed; instantiating it triggers the backend-availability check."""
    # Backends the real implementation requires.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Presumably raises a helpful import error when TensorFlow is missing
        # — behavior lives in requires_backends, defined elsewhere.
        requires_backends(self, ['tf'])
class LGBOptimizerOptuna(object):
    """LightGBM hyperparameter search driven by Optuna's LightGBMTunerCV."""

    def __init__(self, objective: str='binary', verbose: bool=False):
        # LightGBM objective name, passed straight through (e.g. 'binary').
        self.objective = objective
        self.verbose = verbose
        # Best parameters found by optimize(); empty until then.
        self.best: Dict[(str, Any)] = {}

    def optimize(self, dtrain: lgbDataset, deval: lgbDataset):
        """Tune on dtrain with deval as a single CV fold; stores the winning
        parameters in self.best."""
        params: Dict = {'objective': self.objective}
        if self.verbose:
            params['verbosity'] = 1
        else:
            params['verbosity'] = (- 1)
        # Concatenate train+eval into one Dataset so a single fold of explicit
        # row indices can act as the train/validation split.
        train_set = lgb.Dataset(data=pd.concat([dtrain.data, deval.data]).reset_index(drop=True), label=pd.concat([dtrain.label, deval.label]).reset_index(drop=True), categorical_feature=dtrain.categorical_feature, free_raw_data=False)
        # First len(dtrain) rows train; the remainder validate.
        train_index = range(len(dtrain.data))
        valid_index = range(len(dtrain.data), len(train_set.data))
        self.tuner = LightGBMTunerCV(params=params, train_set=train_set, folds=[(train_index, valid_index)], verbose_eval=False, num_boost_round=1000, early_stopping_rounds=50)
        self.tuner.run()
        self.best = self.tuner.best_params
        # NOTE(review): n_estimators is pinned to the tuning budget rather than
        # the tuner's best iteration — confirm this is intentional.
        self.best['n_estimators'] = 1000
def classifier(pretrained=False, **kwargs):
    """Build a Classifier, optionally restoring weights from a checkpoint path.

    ``pretrained`` is either falsy (random init) or a filesystem path to a
    checkpoint containing a 'state_dict' entry; a missing file raises
    RuntimeError.
    """
    model = Classifier(**kwargs)
    if not pretrained:
        return model
    if not os.path.isfile(pretrained):
        raise RuntimeError(('Could not find weights file: %s' % pretrained))
    checkpoint = torch.load(pretrained)
    model.load_state_dict(checkpoint['state_dict'])
    return model
def build_modelzoo(result_path: Union[(str, Path)], weights_path: Union[(str, Path)], bundle_path: Union[(str, Path)], inputs: str, outputs: str, preprocessing: list, postprocessing: list, doc: Union[(str, Path)], name: str, authors: list, algorithm: Algorithm, tf_version: str, cite: List[Dict], axes: str='byxc', files: list=None, **kwargs):
    """Export a trained self-supervised denoising model as a bioimage.io bundle.

    Args:
        result_path: where the packaged model is written.
        weights_path: path to the trained weights.
        bundle_path: root directory of the bundle sources.
        inputs/outputs: paths to the test input/output tensors.
        preprocessing/postprocessing: spec fragments for the model card.
        doc: path to the documentation file.
        name/authors/cite: model-card metadata.
        algorithm: Algorithm enum member used for tagging.
        tf_version: TensorFlow version recorded in the spec.
        axes: axis string; 5 characters (e.g. 'bzyxc') marks a 3-D model.
        files: optional extra files attached to the bundle.
        **kwargs: forwarded verbatim to build_model.
    """
    from bioimageio.core.build_spec import build_model
    # Fix: avoid the mutable-default-argument anti-pattern; None means "no
    # extra attachments".
    if files is None:
        files = []
    # Five axes (b, z, y, x, c) means a 3-D model; otherwise 2-D.
    tags_dim = ('3d' if (len(axes) == 5) else '2d')
    build_model(root=bundle_path, weight_uri=weights_path, test_inputs=[inputs], test_outputs=[outputs], input_axes=[axes], output_axes=[axes], output_path=result_path, name=name, description='Self-supervised denoising.', authors=authors, license='BSD-3-Clause', documentation=doc, tags=[tags_dim, 'unet', 'denoising', Algorithm.get_name(algorithm.value), 'tensorflow', 'napari'], preprocessing=[preprocessing], postprocessing=[postprocessing], tensorflow_version=tf_version, attachments={'files': files}, cite=cite, **kwargs)
def transform_beziers_annotations(beziers, transforms):
    """Apply image transforms to Bezier control points.

    Args:
        beziers: flat sequence of control-point coordinates, interpreted as
            (x, y) pairs.
        transforms: a TransformList-style object exposing ``apply_coords`` and
            a ``transforms`` sequence.

    Returns:
        np.ndarray: transformed control points, flattened back to 1-D float64.

    Raises:
        ValueError: if the transform list contains a net horizontal flip
            (flipping would reverse text reading order).
    """
    beziers = np.asarray(beziers, dtype='float64').reshape((- 1), 2)
    beziers = transforms.apply_coords(beziers).reshape((- 1))
    # An even number of horizontal flips cancels out; only a net flip is rejected.
    do_hflip = ((sum((isinstance(t, T.HFlipTransform) for t in transforms.transforms)) % 2) == 1)
    if do_hflip:
        # Fix: corrected the typo "disencouraged" in the original message.
        raise ValueError('Flipping text data is not supported (also discouraged).')
    return beziers
class TestTrainer(unittest.TestCase):
    """Smoke tests for SimpleTrainer on CPU and (when available) CUDA."""

    def test_simple_trainer(self, device='cpu'):
        """Train a tiny model for 10 iterations on an endless random loader."""
        device = torch.device(device)
        model = SimpleModel(nn.Linear(10, 10)).to(device)

        def data_loader():
            # Infinite stream of random batches on the target device.
            while True:
                yield torch.rand(3, 3).to(device)

        trainer = SimpleTrainer(model, data_loader(), torch.optim.SGD(model.parameters(), 0.1))
        trainer.train(0, 10)

    # Fix: the original source carried a bare tuple expression
    # `((not torch.cuda.is_available()), 'CUDA not available')` here — a no-op
    # that was almost certainly a mangled skip decorator; restored below.
    @unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
    def test_simple_trainer_cuda(self):
        self.test_simple_trainer(device='cuda')
def close_progress_bar(pbar: Union[(tqdm, Tuple[(Progress, Live)], None)], show: bool, pretty: bool) -> None:
    """Tear down a progress display.

    Does nothing when ``show`` is False. Otherwise closes the tqdm bar
    (``pretty`` False) or stops the rich Live display taken from the
    (Progress, Live) pair (``pretty`` True).
    """
    if not show:
        return
    if pretty:
        _, live_display = pbar
        live_display.stop()
    else:
        pbar.close()
class Conv_V(nn.Module):
    """Vertical convolution block: conv -> BN -> ReLU, then a max-pool that
    collapses the entire frequency axis (dim 2) before squeezing it away."""

    def __init__(self, input_channels, output_channels, filter_shape):
        super(Conv_V, self).__init__()
        # Pad only along time, so the frequency axis shrinks by the kernel height.
        self.conv = nn.Conv2d(input_channels, output_channels, filter_shape,
                              padding=(0, (filter_shape[1] // 2)))
        self.bn = nn.BatchNorm2d(output_channels)
        self.relu = nn.ReLU()

    def forward(self, x):
        activated = self.relu(self.bn(self.conv(x)))
        freq_bins = activated.size(2)
        # One max-pool spanning the whole (post-conv) frequency axis.
        pooled = nn.MaxPool2d((freq_bins, 1), stride=(freq_bins, 1))(activated)
        return pooled.squeeze(2)
def main():
    """Scan the train/valid point-cloud datasets and print the global
    min/max coordinate values (ZMIN/ZMAX)."""
    # NOTE(review): co_transform is built but never used below — confirm
    # whether it was meant to be passed to the dataset constructor.
    co_transform = pc_transforms.Compose([pc_transforms.ArrayToTensor(), transforms.Normalize(mean=[0.5, 0.5], std=[1, 1])])
    input_transforms = transforms.Compose([pc_transforms.ArrayToTensor()])
    target_transforms = transforms.Compose([pc_transforms.ArrayToTensor()])
    [train_dataset, valid_dataset] = Datasets.__dict__[args.dataName](input_root=args.data, target_root=None, split=args.split_value, net_name='auto_encoder', input_transforms=input_transforms, target_transforms=target_transforms)
    # NOTE(review): this single-sample fetch is unused — likely a leftover smoke check.
    (input, target) = train_dataset[1]
    # Running extrema seeded at 0.0 (assumes the data spans both signs — TODO confirm).
    omax = 0.0
    omin = 0.0
    for (i, (input_train, target)) in enumerate(train_dataset):
        nmax = torch.max(input_train)
        nmax = np.max([torch.Tensor.numpy(nmax), omax])
        omax = nmax
        nmin = torch.min(input_train)
        nmin = np.min([torch.Tensor.numpy(nmin), omin])
        omin = nmin
    # Continue the same running extrema over the validation split.
    for (i, (input_valid, target)) in enumerate(valid_dataset):
        nmax = torch.max(input_valid)
        nmax = np.max([torch.Tensor.numpy(nmax), omax])
        omax = nmax
        nmin = torch.min(input_valid)
        nmin = np.min([torch.Tensor.numpy(nmin), omin])
        omin = nmin
    print('ZMAX:', omax)
    print('ZMIN:', omin)
class Gym():
    """Factory that builds the base pose environment from a gym-style env id."""

    def make(self, env_id, render_save):
        # The suffix after "-v" encodes the reset type as an integer.
        version_tag = env_id.split('-v')[1]
        return Pose_Env_Base(int(version_tag), render_save=render_save)
def train(ep, sess):
    """Run one training epoch with the given TF session, logging windowed
    loss/accuracy every args.log_interval batches.

    Relies on module-level globals: batch_size, total_steps, train_x, train_y,
    n_train, update_step, predictions, loss, inputs, labels, args,
    index_generator.
    """
    global batch_size, total_steps
    total_loss = 0
    start_time = time.time()
    correct = 0
    counter = 0
    for (batch_idx, indices) in index_generator(n_train, batch_size):
        x = train_x[indices]
        y = train_y[indices]
        # Append a trailing channel dimension.
        x = np.reshape(x, (x.shape + (1,)))
        (_, p, l) = sess.run([update_step, predictions, loss], feed_dict={inputs: x, labels: y})
        correct += np.sum((p == y))
        counter += p.size
        total_loss += l.mean()
        total_steps += 1
        if ((batch_idx > 0) and ((batch_idx % args.log_interval) == 0)):
            avg_loss = (total_loss / args.log_interval)
            elapsed = (time.time() - start_time)
            print('| Steps {:5d} | Epoch {:3d} | {:5d}/{:5d} batches | lr {:2.5f} | ms/batch {:5.2f} | loss {:5.8f} | accuracy {:5.4f}'.format(total_steps, ep, batch_idx, ((n_train // batch_size) + 1), args.lr, ((elapsed * 1000) / args.log_interval), avg_loss, ((100.0 * correct) / counter)))
            # Reset the windowed statistics after each report.
            start_time = time.time()
            total_loss = 0
            correct = 0
            counter = 0
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4 channels),
    with a residual shortcut optionally projected by ``downsample``."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        out_planes = planes * 4
        # 1x1 channel reduction.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 spatial convolution; any stride applies here.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 channel expansion to 4x planes.
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Apply the three conv stages and add the (possibly projected) input."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
def disable_save_v3():
    """Undo the save_v3 monkey-patching: restore TensorFlow's original
    ``io_ops.save_v2`` and each saver builder's original ``_AddRestoreOps``.

    No-op when save_v3 is not currently enabled.  Mutates module-level
    globals, so it must mirror the enabling code exactly — presumably an
    ``enable_save_v3`` counterpart elsewhere in this module; confirm.
    """
    global _ENABLE_SAVE_V3
    global ORI_SAVE_V2
    global ORI_ADDRESTOREOPS
    if (not _ENABLE_SAVE_V3):
        # Already disabled; nothing was patched, nothing to restore.
        return
    _ENABLE_SAVE_V3 = False
    # Swap the original op implementation back in and drop our reference.
    io_ops.save_v2 = ORI_SAVE_V2
    ORI_SAVE_V2 = None
    # Restore each patched saver builder's original restore-op factory,
    # pairing builders with their saved originals positionally.
    for (saver_builder, origin_add_restore_ops) in zip(TFPLUS_SAVER_BUILDER, ORI_ADDRESTOREOPS):
        saver_builder._AddRestoreOps = origin_add_restore_ops
    ORI_ADDRESTOREOPS = None
    logging.info('disable save_v3, enable save_v2.')
def clip_grad_norm_for_ut(parameters, max_norm, norm_type=2, tp_group=None):
    """Clip gradients by their global norm, reduced across an optional
    tensor-parallel process group so every rank clips with the same norm.

    Args:
        parameters: a Tensor or iterable of Tensors; entries whose ``.grad``
            is None are skipped.
        max_norm: maximum allowed total norm (cast to float).
        norm_type: p-norm degree, or ``math.inf`` for the max-norm.
        tp_group: optional torch.distributed process group to reduce over.

    Returns:
        The total (pre-clipping) gradient norm as a Python float.

    NOTE(review): requires CUDA (uses ``torch.cuda.FloatTensor``); an empty
    parameter list raises ValueError in the inf-norm branch (``max()`` of an
    empty generator) — confirm callers never pass gradient-free models.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = list(filter((lambda p: (p.grad is not None)), parameters))
    max_norm = float(max_norm)
    norm_type = float(norm_type)
    if (norm_type == math.inf):
        # Infinity norm: largest absolute gradient entry, MAX-reduced group-wide.
        total_norm = max((p.grad.data.abs().max() for p in parameters))
        total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
        if (tp_group is not None):
            dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=tp_group)
        total_norm = total_norm_cuda[0].item()
    else:
        total_norm = 0
        for p in parameters:
            if (tp_group is not None):
                # Count each parameter exactly once across the group:
                # replicated params only on rank 0, tensor-parallel shards on
                # every rank (their partial norms are summed by the reduce).
                if ((torch.distributed.get_rank(tp_group) == 0) or is_tensor_parallel_parameter(p)):
                    param_norm = p.grad.data.norm(norm_type)
                    total_norm += (param_norm.item() ** norm_type)
            else:
                param_norm = p.grad.data.float().norm(norm_type)
                total_norm += (param_norm.item() ** norm_type)
        total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
        if (tp_group is not None):
            dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=tp_group)
        # Sum of p-th powers (possibly reduced across ranks), then p-th root.
        total_norm = (total_norm_cuda[0].item() ** (1.0 / norm_type))
    clip_coef = (max_norm / (total_norm + 1e-06))
    if (clip_coef < 1):
        # Scale every gradient in place by the common clipping factor.
        for p in parameters:
            p.grad.data.mul_(clip_coef)
    return total_norm
class ResnetBlock(nn.Module):
    """Residual block computing ``x + conv_block(x)`` where conv_block is
    pad -> conv3x3 -> norm -> ReLU [-> dropout] -> pad -> conv3x3 -> norm.

    Args:
        dim: number of input/output channels (unchanged by the block).
        padding_type: 'reflect', 'replicate', or 'zero'.
        norm_layer: normalization constructor, e.g. ``nn.BatchNorm2d``.
        use_dropout: insert Dropout(0.5) between the two convolutions.
        use_bias: whether the convolutions carry a bias term.
    """

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)

    @staticmethod
    def _padding(padding_type):
        """Return ``(pad_layers, conv_padding)`` for the given padding type.

        Raises:
            NotImplementedError: for an unknown padding_type.
        """
        if padding_type == 'reflect':
            return [nn.ReflectionPad2d(1)], 0
        if padding_type == 'replicate':
            return [nn.ReplicationPad2d(1)], 0
        if padding_type == 'zero':
            # No explicit pad layer; the conv itself pads by 1.
            return [], 1
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Assemble the residual branch as an ``nn.Sequential``."""
        # The padding-selection logic was duplicated twice in the original;
        # it now lives in the _padding helper.
        pad_layers, p = self._padding(padding_type)
        conv_block = list(pad_layers)
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]
        pad_layers, p = self._padding(padding_type)
        conv_block += list(pad_layers)
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
        return nn.Sequential(*conv_block)

    def forward(self, x):
        """Return the input plus the learned residual (skip connection)."""
        return x + self.conv_block(x)
class TestRemoveResetInZeroStateFixedPoint(QiskitTestCase):
    """Exercise RemoveResetInZeroState driven to a DAG fixed point."""

    def test_two_resets(self):
        """Two consecutive resets on a fresh qubit should both be removed."""
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr)
        for _ in range(2):
            circuit.reset(qr[0])
        # The fixed-point loop reruns the pass until the DAG stops changing.
        pm = PassManager()
        pm.append([RemoveResetInZeroState(), DAGFixedPoint()], do_while=(lambda property_set: (not property_set['dag_fixed_point'])))
        after = transpile(circuit, pass_manager=pm)
        expected = QuantumCircuit(qr)
        self.assertEqual(expected, after)
def _validate(sym: Any) -> Symbol:
    """Ensure *sym* is a Symbol.

    Raises:
        ValueError: if *sym* is not a Symbol instance.
    """
    # isinstance(None, Symbol) is already False, so the original explicit
    # `sym is None` check was redundant and has been dropped.
    if not isinstance(sym, Symbol):
        raise ValueError('unable to lookup metadata for symbol')
    return cast(Symbol, sym)
def get_opts_base():
    """Build the base configargparse parser shared by the training/eval
    entry points (dataset layout, NeRF model shape, and optimization flags).

    Returns:
        A ``configargparse.ArgParser`` with all common options registered.
    """
    parser = configargparse.ArgParser()
    parser.add_argument('--config_file', is_config_file=True)
    parser.add_argument('--dataset_type', type=str, default='filesystem', choices=['filesystem', 'memory'], help='specifies whether to hold all images in CPU memory during training, or whether to write randomized\n batches or pixels/rays to disk')
    parser.add_argument('--chunk_paths', type=str, nargs='+', default=None, help='scratch directory to write shuffled batches to when training using the filesystem dataset. \n Should be set to a non-existent path when first created, and can then be reused by subsequent training runs once all chunks are written')
    parser.add_argument('--num_chunks', type=int, default=200, help='number of shuffled chunk files to write to disk. Each chunk should be small enough to fit into CPU memory')
    # BUG FIX: the original line read `default=)` — a syntax error with the
    # value missing.  Restored a concrete default; NOTE(review): confirm the
    # intended value against project history.
    parser.add_argument('--disk_flush_size', type=int, default=10000000, help='number of entries to buffer in memory before flushing to disk when writing chunks')
    parser.add_argument('--train_every', type=int, default=1, help='if set to larger than 1, subsamples each n training images')
    parser.add_argument('--cluster_mask_path', type=str, default=None, help='directory containing pixel masks for all training images (generated by create_cluster_masks.py)')
    parser.add_argument('--ckpt_path', type=str, default=None, help='path towards serialized model checkpoint')
    parser.add_argument('--container_path', type=str, default=None, help='path towards merged Mega-NeRF model generated by merged_submodules.py')
    parser.add_argument('--near', type=float, default=1, help='ray near bounds')
    parser.add_argument('--far', type=float, default=None, help='ray far bounds. Will be automatically set if not explicitly set')
    parser.add_argument('--ray_altitude_range', nargs='+', type=float, default=None, help='constrains ray sampling to the given altitude')
    parser.add_argument('--coarse_samples', type=int, default=256, help='number of coarse samples')
    parser.add_argument('--fine_samples', type=int, default=512, help='number of additional fine samples')
    parser.add_argument('--train_scale_factor', type=int, default=1, help='downsamples training images if greater than 1')
    parser.add_argument('--val_scale_factor', type=int, default=4, help='downsamples validation images if greater than 1')
    parser.add_argument('--pos_xyz_dim', type=int, default=12, help='frequency encoding dimension applied to xyz position')
    parser.add_argument('--pos_dir_dim', type=int, default=4, help='frequency encoding dimension applied to view direction (set to 0 to disable)')
    parser.add_argument('--layers', type=int, default=8, help='number of layers in MLP')
    parser.add_argument('--skip_layers', type=int, nargs='+', default=[4], help='indices of the skip connections')
    parser.add_argument('--layer_dim', type=int, default=256, help='number of channels in foreground MLP')
    parser.add_argument('--bg_layer_dim', type=int, default=256, help='number of channels in background MLP')
    parser.add_argument('--appearance_dim', type=int, default=48, help='dimension of appearance embedding vector (set to 0 to disable)')
    parser.add_argument('--affine_appearance', default=False, action='store_true', help='set to true to use affine transformation for appearance instead of latent embedding')
    parser.add_argument('--use_cascade', default=False, action='store_true', help='use separate MLPs to query coarse and fine samples')
    parser.add_argument('--train_mega_nerf', type=str, default=None, help='directory train a Mega-NeRF architecture (point this towards the params.pt file generated by create_cluster_masks.py)')
    parser.add_argument('--boundary_margin', type=float, default=1.15, help='overlap factor between different spatial cells')
    parser.add_argument('--all_val', default=False, action='store_true', help='use all pixels for validation images instead of those specified in cluster masks')
    parser.add_argument('--cluster_2d', default=False, action='store_true', help='cluster without altitude dimension')
    parser.add_argument('--sh_deg', type=int, default=None, help='use spherical harmonics (pos_dir_dim should be set to 0)')
    parser.add_argument('--no_center_pixels', dest='center_pixels', default=True, action='store_false', help='do not shift pixels by +0.5 when computing ray directions')
    parser.add_argument('--no_shifted_softplus', dest='shifted_softplus', default=True, action='store_false', help='use ReLU instead of shifted softplus activation')
    parser.add_argument('--batch_size', type=int, default=1024, help='batch size')
    parser.add_argument('--image_pixel_batch_size', type=int, default=(64 * 1024), help='number of pixels to evaluate per split when rendering validation images')
    parser.add_argument('--model_chunk_size', type=int, default=(32 * 1024), help='chunk size to split the input to avoid OOM')
    parser.add_argument('--perturb', type=float, default=1.0, help='factor to perturb depth sampling points')
    parser.add_argument('--noise_std', type=float, default=1.0, help='std dev of noise added to regularize sigma')
    parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
    parser.add_argument('--lr_decay_factor', type=float, default=0.1, help='learning rate decay factor')
    parser.add_argument('--no_bg_nerf', dest='bg_nerf', default=True, action='store_false', help='do not use background MLP')
    parser.add_argument('--ellipse_scale_factor', type=float, default=1.1, help='Factor to scale foreground bounds')
    parser.add_argument('--no_ellipse_bounds', dest='ellipse_bounds', default=True, action='store_false', help='use spherical foreground bounds instead of ellipse')
    parser.add_argument('--train_iterations', type=int, default=183000, help='training iterations')
    parser.add_argument('--val_interval', type=int, default=500001, help='validation interval')
    parser.add_argument('--ckpt_interval', type=int, default=10000, help='checkpoint interval')
    parser.add_argument('--no_resume_ckpt_state', dest='resume_ckpt_state', default=True, action='store_false')
    parser.add_argument('--no_amp', dest='amp', default=True, action='store_false')
    parser.add_argument('--detect_anomalies', default=False, action='store_true')
    parser.add_argument('--random_seed', type=int, default=42)
    return parser
class BaseWrapperDataset(FairseqDataset):
    """Transparent dataset wrapper: every hook delegates to the wrapped
    dataset, so subclasses only need to override the behavior they change."""

    def __init__(self, dataset):
        super().__init__()
        self.dataset = dataset

    def __getitem__(self, index):
        return self.dataset[index]

    def __len__(self):
        return len(self.dataset)

    def collater(self, samples):
        # Use the wrapped dataset's collater when it defines one; otherwise
        # fall back to the generic default collation.
        collate = getattr(self.dataset, 'collater', None)
        if collate is not None:
            return collate(samples)
        return default_collate(samples)

    def sizes(self):
        return self.dataset.sizes

    def num_tokens(self, index):
        return self.dataset.num_tokens(index)

    def size(self, index):
        return self.dataset.size(index)

    def ordered_indices(self):
        return self.dataset.ordered_indices()

    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        self.dataset.prefetch(indices)

    def get_batch_shapes(self):
        return self.dataset.get_batch_shapes()

    def batch_by_size(self, indices, max_tokens=None, max_sentences=None, required_batch_size_multiple=1):
        return self.dataset.batch_by_size(indices, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple)

    def set_epoch(self, epoch):
        # Notify both the base class and (when supported) the wrapped dataset.
        super().set_epoch(epoch)
        epoch_hook = getattr(self.dataset, 'set_epoch', None)
        if epoch_hook is not None:
            epoch_hook(epoch)
class IMQSteinKernel(torch.nn.Module):
    """Inverse multi-quadratic Stein kernel:
    K(x, y) = (alpha + sum((x - y)^2 / h))^beta, where h is either a fixed
    bandwidth or a per-dimension median heuristic."""

    def __init__(self, alpha=0.5, beta=(- 0.5), bandwidth=None):
        super(IMQSteinKernel, self).__init__()
        assert (alpha > 0.0), 'alpha must be positive.'
        assert (beta < 0.0), 'beta must be negative.'
        self.alpha = alpha
        self.beta = beta
        # None selects the median heuristic in _bandwidth.
        self.bandwidth = bandwidth

    def _bandwidth(self, norm_sq):
        """Return the fixed bandwidth, or the median heuristic computed from
        the strictly-upper-triangular pairwise squared differences."""
        if self.bandwidth is not None:
            return self.bandwidth
        num_particles = norm_sq.size(0)
        idx = torch.arange(num_particles)
        # Keep only distinct unordered particle pairs (upper triangle).
        pairwise = norm_sq[(idx > idx.unsqueeze((- 1)), ...)]
        med = pairwise.median(dim=0)[0]
        assert med.shape == norm_sq.shape[(- 1):]
        return med / math.log(num_particles + 1)

    def forward(self, X, Y):
        """Evaluate the kernel matrix between particle sets X and Y."""
        diff_sq = (X.unsqueeze(0) - Y.unsqueeze(1)) ** 2
        assert diff_sq.dim() == 3
        h = self._bandwidth(diff_sq)
        base = self.alpha + (diff_sq / h).sum(- 1)
        # (base)^beta computed in log space for the negative exponent.
        return (self.beta * base.log()).exp()
_module
class FastRCNN(TwoStageDetector):
    """Fast R-CNN: a two-stage detector that consumes precomputed proposals
    instead of generating them with an RPN."""

    def __init__(self, backbone, neck, bbox_roi_extractor, bbox_head, train_cfg, test_cfg, mask_roi_extractor=None, mask_head=None, pretrained=None):
        super(FastRCNN, self).__init__(backbone=backbone, neck=neck, bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, mask_roi_extractor=mask_roi_extractor, mask_head=mask_head, pretrained=pretrained)

    def forward_test(self, imgs, img_metas, proposals, **kwargs):
        """Validate the augmented inputs and dispatch to simple or
        augmentation-based testing.

        Raises:
            TypeError: if imgs or img_metas is not a list.
            ValueError: if the augmentation counts disagree.
        """
        for (var, name) in ((imgs, 'imgs'), (img_metas, 'img_metas')):
            if not isinstance(var, list):
                raise TypeError('{} must be a list, but got {}'.format(name, type(var)))
        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError('num of augmentations ({}) != num of image meta ({})'.format(len(imgs), len(img_metas)))
        # Inference only supports a single image per GPU.
        assert imgs[0].size(0) == 1
        if num_augs == 1:
            return self.simple_test(imgs[0], img_metas[0], proposals[0], **kwargs)
        return self.aug_test(imgs, img_metas, proposals, **kwargs)
class Timer():
    """Wall-clock stopwatch that reports elapsed time as a short string."""

    def __init__(self):
        # Record the start instant.
        self.o = time.time()

    def measure(self, p=1):
        """Return (elapsed seconds / p) formatted as '…s', '…m', or '…h'."""
        elapsed = int((time.time() - self.o) / p)
        if elapsed >= 3600:
            return '{:.1f}h'.format(elapsed / 3600)
        if elapsed >= 60:
            return '{}m'.format(round(elapsed / 60))
        return '{}s'.format(elapsed)
def get_peft_kwargs(rank_dict, network_alpha_dict, peft_state_dict, is_unet=True):
    """Derive LoraConfig keyword arguments from per-module rank/alpha maps.

    The base rank `r` is the most common rank; modules that deviate are
    recorded in `rank_pattern` (likewise for alphas in `alpha_pattern`).
    Note: when ranks vary but no alphas are given, `lora_alpha` keeps the
    first rank value, mirroring the original assignment order.
    """
    rank_pattern = {}
    alpha_pattern = {}
    r = lora_alpha = next(iter(rank_dict.values()))
    if len(set(rank_dict.values())) > 1:
        # Most frequent rank wins; record the exceptions keyed by module path.
        r = collections.Counter(rank_dict.values()).most_common()[0][0]
        exceptions = {k: v for (k, v) in rank_dict.items() if v != r}
        rank_pattern = {k.split('.lora_B.')[0]: v for (k, v) in exceptions.items()}
    if network_alpha_dict is not None and len(network_alpha_dict) > 0:
        if len(set(network_alpha_dict.values())) > 1:
            lora_alpha = collections.Counter(network_alpha_dict.values()).most_common()[0][0]
            alpha_exceptions = {k: v for (k, v) in network_alpha_dict.items() if v != lora_alpha}
            if is_unet:
                alpha_pattern = {'.'.join(k.split('.lora_A.')[0].split('.')).replace('.alpha', ''): v for (k, v) in alpha_exceptions.items()}
            else:
                alpha_pattern = {'.'.join(k.split('.down.')[0].split('.')[:(- 1)]): v for (k, v) in alpha_exceptions.items()}
        else:
            lora_alpha = set(network_alpha_dict.values()).pop()
    # Every module that owns a '.lora*' weight becomes a target module.
    target_modules = list({name.split('.lora')[0] for name in peft_state_dict.keys()})
    return {'r': r, 'lora_alpha': lora_alpha, 'rank_pattern': rank_pattern, 'alpha_pattern': alpha_pattern, 'target_modules': target_modules}
class OptionNamespace():
    """Attribute bag whose lookups treat '-' and '_' as interchangeable."""

    def __init__(self):
        pass

    def get_value(self, name):
        """Return the stored attribute for *name* (dashes become underscores).

        Raises:
            Exception: when no such attribute has been set.
        """
        attr = name.replace('-', '_')
        if attr not in self.__dict__:
            raise Exception('Option attribute: ' + attr + ' does not exist')
        return self.__dict__[attr]

    def __contains__(self, key):
        return key.replace('-', '_') in self.__dict__
def SparseDenseNet161(sparse_func, sparsities):
    """Build the DenseNet-161 configuration of SparseDenseNet
    (block sizes 6/12/36/24, growth rate 48) with the given sparsity setup."""
    block_config = [6, 12, 36, 24]
    return SparseDenseNet(SparseBottleneck, block_config, sparse_func, sparsities, growth_rate=48)
def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=True, pad_type='zero', norm_type=None, act_type='relu'):
    """Assemble a pad -> conv -> norm -> activation pipeline.

    'zero' padding is folded into the Conv2d itself; any other pad_type
    inserts an explicit padding layer in front of the convolution.
    """
    padding = get_valid_padding(kernel_size, dilation)
    needs_pad_layer = bool(pad_type) and pad_type != 'zero'
    pad_layer = pad(pad_type, padding) if needs_pad_layer else None
    conv_padding = padding if pad_type == 'zero' else 0
    conv = nn.Conv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=conv_padding, dilation=dilation, bias=bias, groups=groups)
    act = activation(act_type) if act_type else None
    norm_layer = norm(norm_type, out_nc) if norm_type else None
    return sequential(pad_layer, conv, norm_layer, act)
class AdaptiveFactorizationNetwork(torch.nn.Module):
    """AFN: linear term plus an LNN over feature embeddings fed into an MLP,
    squashed to a probability with a sigmoid."""

    def __init__(self, field_dims, embed_dim, LNN_dim, mlp_dims, dropouts):
        super().__init__()
        self.num_fields = len(field_dims)
        self.linear = FeaturesLinear(field_dims)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.LNN_dim = LNN_dim
        # The LNN flattens to LNN_dim * embed_dim features for the MLP.
        self.LNN_output_dim = LNN_dim * embed_dim
        self.LNN = LNN(self.num_fields, embed_dim, LNN_dim)
        self.mlp = MultiLayerPerceptron(self.LNN_output_dim, mlp_dims, dropouts[0])

    def forward(self, x):
        """Score a batch of categorical feature indices.

        x: presumably a long tensor of shape (batch, num_fields) — confirm
        with the embedding layer's contract.
        """
        lnn_out = self.LNN(self.embedding(x))
        logits = self.linear(x) + self.mlp(lnn_out)
        return torch.sigmoid(logits.squeeze(1))
def do_train(cur_step, optimizer, sim, net):
    """Optimization loop: repeatedly simulate towards a randomized goal and
    backpropagate through the simulation.

    Runs 200 epochs, then terminates the whole interpreter via quit().
    Relies on module-level globals: spf, reset_sim, run_sim, f (an open log
    file handle), and torch_model_path.  `cur_step` is accepted but unused.
    """
    epoch = 0
    while True:
        # Rollout length in simulation steps (spf steps per unit time —
        # presumably "steps per frame"; confirm where spf is defined).
        steps = int(((1 * 20) * spf))
        reset_sim(sim)
        sigma = 0.1
        # Random lateral target: uniform jitter of width sigma around +/-1.
        x = ((((np.random.random() * sigma) - (0.5 * sigma)) + (np.random.randint(2) * 2)) - 1)
        goal = torch.tensor([0.0, 0.0, 0.0, x, 0, ((2 + (np.random.random() * sigma)) - (0.5 * sigma))], dtype=torch.float64)
        st = time.time()
        (loss, ans) = run_sim(steps, sim, net, goal)
        en0 = time.time()
        optimizer.zero_grad()
        loss.backward()
        en1 = time.time()
        print('')
        # Mirror the progress line to the module-level log file and stdout.
        f.write('epoch {}: loss={}\n ans = {}\n goal = {}\n'.format(epoch, loss.data, ans.data, goal.data))
        print('epoch {}: loss={}\n ans = {}\n goal = {}\n'.format(epoch, loss.data, ans.data, goal.data))
        print('forward tim = {}'.format((en0 - st)))
        print('backward time = {}'.format((en1 - en0)))
        if ((epoch % 5) == 0):
            # Checkpoint every 5 epochs.  NOTE(review): this saves BEFORE this
            # epoch's optimizer.step(), so the checkpoint lags the gradients
            # just computed — confirm whether that ordering is intentional.
            torch.save(net.state_dict(), torch_model_path)
        optimizer.step()
        epoch = (epoch + 1)
        if (epoch >= 200):
            # HACK: quit() exits the entire interpreter, not just this loop.
            quit()
class grammardefaultParser(Parser):
    def __init__(self, whitespace=re.compile('(?!.*)'), nameguard=None, comments_re=None, eol_comments_re=None, ignorecase=None, left_recursion=True, parseinfo=True, keywords=None, namechars='', buffer_class=grammardefaultBuffer, **kwargs):
        """Configure the generated parser.

        The default `whitespace` pattern '(?!.*)' never matches, so no
        characters are skipped implicitly; `keywords` falls back to the
        module-level KEYWORDS.  The lists initialized below accumulate
        state during parsing — presumably consumed by a downstream
        semantics pass; confirm with the grammar's semantics class.
        """
        if (keywords is None):
            keywords = KEYWORDS
        super(grammardefaultParser, self).__init__(whitespace=whitespace, nameguard=nameguard, comments_re=comments_re, eol_comments_re=eol_comments_re, ignorecase=ignorecase, left_recursion=left_recursion, parseinfo=parseinfo, keywords=keywords, namechars=namechars, buffer_class=buffer_class, **kwargs)
        # Parse-time accumulators (semantics unverified from this chunk):
        self.new_id_list = []
        self.new_func_list = []
        self.builtin_list = []
        self.const_e = False
('Start')
def _start_(self):
def block0():
def block1():
self._separator_with_space_()
self._closure(block1)
def block2():
self._hspace_()
self._closure(block2)
self._valid_block_()
self.add_last_node_to_name('vblock')
def block4():
self._separator_with_space_()
self._closure(block4)
self._positive_closure(block0)
def block5():
self._blank_()
self._closure(block5)
self._check_eof()
self.ast._define([], ['vblock'])
()
def _BUILTIN_KEYWORDS_(self):
with self._choice():
with self._option():
self._DERIVATIVE_()
with self._option():
self._WHERE_()
with self._option():
self._GIVEN_()
with self._option():
self._SUM_()
with self._option():
self._MIN_()
with self._option():
self._MAX_()
with self._option():
self._ARGMIN_()
with self._option():
self._ARGMAX_()
with self._option():
self._INT_()
with self._option():
self._IF_()
with self._option():
self._OTHERWISE_()
with self._option():
self._IN_()
with self._option():
self._EXP_()
with self._option():
self._LOG_()
with self._option():
self._LN_()
with self._option():
self._SQRT_()
with self._option():
self._SUBJECT_TO_()
with self._option():
self._FROM_()
with self._option():
self._PI_()
with self._option():
self._token('|')
with self._option():
self._pattern('R')
with self._option():
self._pattern('Z')
with self._option():
self._pattern('T')
with self._option():
self._WITH_()
with self._option():
self._INITIAL_()
with self._option():
self._AND_()
with self._option():
self._OR_()
self._error('no available options')
()
def _TRACE_(self):
self._pattern('trace')
()
def _TR_(self):
self._pattern('tr')
()
def _VEC_(self):
self._pattern('vec')
()
def _DIAG_(self):
self._pattern('diag')
()
def _INV_(self):
self._pattern('inv')
()
def _DET_(self):
self._pattern('det')
()
def _RANK_(self):
self._pattern('rank')
()
def _NULL_(self):
self._pattern('null')
()
def _ORTH_(self):
self._pattern('orth')
()
def _QR_(self):
self._pattern('qr')
()
def _DERIVATIVE_(self):
self._pattern('')
()
def _WHERE_(self):
self._pattern('where')
()
def _GIVEN_(self):
self._pattern('given')
()
def _SUM_(self):
with self._choice():
with self._option():
self._pattern('sum')
with self._option():
self._pattern('')
self._error('no available options')
()
def _MIN_(self):
self._pattern('min')
()
def _MAX_(self):
self._pattern('max')
()
def _ARGMIN_(self):
self._pattern('argmin')
()
def _ARGMAX_(self):
self._pattern('argmax')
()
def _INT_(self):
self._pattern('int')
()
def _SPARSE_(self):
self._pattern('sparse')
()
def _IF_(self):
self._pattern('if')
()
def _OTHERWISE_(self):
self._pattern('otherwise')
()
def _IN_(self):
self._pattern('')
()
def _SIN_(self):
self._pattern('sin')
()
def _ASIN_(self):
self._pattern('asin')
()
def _ARCSIN_(self):
self._pattern('arcsin')
()
def _COS_(self):
self._pattern('cos')
()
def _ACOS_(self):
self._pattern('acos')
()
def _ARCCOS_(self):
self._pattern('arccos')
()
def _TAN_(self):
self._pattern('tan')
()
def _ATAN_(self):
self._pattern('atan')
()
def _ARCTAN_(self):
self._pattern('arctan')
()
def _SINH_(self):
self._pattern('sinh')
()
def _ASINH_(self):
self._pattern('asinh')
()
def _ARSINH_(self):
self._pattern('arsinh')
()
def _COSH_(self):
self._pattern('cosh')
()
def _ACOSH_(self):
self._pattern('acosh')
()
def _ARCOSH_(self):
self._pattern('arcosh')
()
def _TANH_(self):
self._pattern('tanh')
()
def _ATANH_(self):
self._pattern('atanh')
()
def _ARTANH_(self):
self._pattern('artanh')
()
def _COT_(self):
self._pattern('cot')
()
def _SEC_(self):
self._pattern('sec')
()
def _CSC_(self):
self._pattern('csc')
()
def _ATAN2_(self):
self._pattern('atan2')
()
def _EXP_(self):
self._pattern('exp')
()
def _LOG_(self):
self._pattern('log')
()
def _LN_(self):
self._pattern('ln')
()
def _SQRT_(self):
self._pattern('sqrt')
()
def _SUBJECT_TO_(self):
with self._choice():
with self._option():
self._pattern('s.t.')
with self._option():
self._pattern('subject to')
self._error('no available options')
()
def _FROM_(self):
self._pattern('from')
()
def _PI_(self):
self._pattern('')
()
def _WITH_(self):
self._pattern('with')
()
def _INITIAL_(self):
self._pattern('initial')
()
def _AND_(self):
self._pattern('and')
()
def _OR_(self):
self._pattern('or')
('Exponent')
def _exponent_(self):
self._pattern('[E][+-]?')
self.name_last_node('exp')
def block2():
self._digit_()
self._positive_closure(block2)
self.name_last_node('pow')
self.ast._define(['exp', 'pow'], [])
('Mantissa')
def _mantissa_(self):
with self._choice():
with self._option():
with self._group():
def block1():
self._digit_()
self._closure(block1)
self.name_last_node('d')
self._token('.')
def block3():
self._digit_()
self._positive_closure(block3)
self.name_last_node('f')
with self._option():
with self._group():
def block5():
self._digit_()
self._positive_closure(block5)
self.name_last_node('d')
self._token('.')
self._error('no available options')
self.ast._define(['d', 'f'], [])
('Float')
def _floating_point_(self):
self._mantissa_()
self.name_last_node('m')
with self._optional():
self._exponent_()
self.name_last_node('e')
self.ast._define(['e', 'm'], [])
('Double')
def _double_(self):
with self._choice():
with self._option():
self._integer_()
self.name_last_node('i')
self._exponent_()
self.name_last_node('exp')
with self._option():
self._floating_point_()
self.name_last_node('f')
self._error('no available options')
self.ast._define(['exp', 'f', 'i'], [])
('Fraction')
def _fraction_(self):
self._pattern('[\\u00BC-\\u00BE\\u2150-\\u215E]')
self.name_last_node('value')
self.ast._define(['value'], [])
()
def _number_(self):
with self._choice():
with self._option():
self._double_()
with self._option():
self._fraction_()
with self._option():
self._integer_()
self._error('no available options')
()
def _operations_(self):
with self._choice():
with self._option():
self._solver_operator_()
with self._option():
self._norm_operator_()
with self._option():
self._power_operator_()
with self._option():
self._inner_product_operator_()
with self._option():
self._frobenius_product_operator_()
with self._option():
self._hadamard_product_operator_()
with self._option():
self._cross_product_operator_()
with self._option():
self._kronecker_product_operator_()
with self._option():
self._sum_operator_()
with self._option():
self._integral_operator_()
with self._option():
self._trans_operator_()
with self._option():
self._sqrt_operator_()
with self._option():
self._function_operator_()
with self._option():
self._builtin_operators_()
with self._option():
self._pseudoinverse_operator_()
self._error('no available options')
('Add')
def _addition_(self):
self._expression_()
self.name_last_node('left')
def block1():
self._hspace_()
self._closure(block1)
self._token('+')
self.name_last_node('op')
def block3():
self._hspace_()
self._closure(block3)
self._term_()
self.name_last_node('right')
self.ast._define(['left', 'op', 'right'], [])
('Subtract')
def _subtraction_(self):
self._expression_()
self.name_last_node('left')
def block1():
self._hspace_()
self._closure(block1)
self._token('-')
self.name_last_node('op')
def block3():
self._hspace_()
self._closure(block3)
self._term_()
self.name_last_node('right')
self.ast._define(['left', 'op', 'right'], [])
('AddSub')
def _add_sub_operator_(self):
self._expression_()
self.name_last_node('left')
def block1():
self._hspace_()
self._closure(block1)
with self._group():
with self._choice():
with self._option():
self._token('+-')
with self._option():
self._token('')
self._error('no available options')
self.name_last_node('op')
def block4():
self._hspace_()
self._closure(block4)
self._term_()
self.name_last_node('right')
self.ast._define(['left', 'op', 'right'], [])
('Multiply')
def _multiplication_(self):
with self._choice():
with self._option():
self._term_()
self.name_last_node('left')
def block1():
self._hspace_()
self._closure(block1)
self._token('')
self.name_last_node('op')
def block3():
self._hspace_()
self._closure(block3)
self._factor_()
self.name_last_node('right')
with self._option():
self._term_()
self.name_last_node('left')
def block6():
self._hspace_()
self._closure(block6)
self._factor_()
self.name_last_node('right')
self._error('no available options')
self.ast._define(['left', 'op', 'right'], [])
('Divide')
def _division_(self):
self._term_()
self.name_last_node('left')
def block1():
self._hspace_()
self._closure(block1)
with self._group():
with self._choice():
with self._option():
self._token('/')
with self._option():
self._token('')
self._error('no available options')
self.name_last_node('op')
def block4():
self._hspace_()
self._closure(block4)
self._factor_()
self.name_last_node('right')
self.ast._define(['left', 'op', 'right'], [])
('Power')
def _power_operator_(self):
with self._choice():
with self._option():
self._factor_()
self.name_last_node('base')
self._token('^T')
self.name_last_node('t')
with self._option():
self._factor_()
self.name_last_node('base')
with self._group():
with self._choice():
with self._option():
self._token('^(-1)')
with self._option():
self._token('1')
self._error('no available options')
self.name_last_node('r')
with self._option():
self._factor_()
self.name_last_node('base')
self._token('^')
self._factor_()
self.name_last_node('power')
with self._option():
self._factor_()
self.name_last_node('base')
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self.ast._define(['base', 'power', 'r', 't'], [])
('Solver')
def _solver_operator_(self):
with self._choice():
with self._option():
self._factor_()
self.name_last_node('left')
def block1():
self._hspace_()
self._closure(block1)
self._token('\\')
def block2():
self._hspace_()
self._closure(block2)
self._factor_()
self.name_last_node('right')
with self._option():
self._factor_()
self.name_last_node('left')
def block5():
self._hspace_()
self._closure(block5)
with self._group():
with self._choice():
with self._option():
self._token('^(-1)')
with self._option():
self._token('1')
self._error('no available options')
self.name_last_node('p')
def block8():
self._hspace_()
self._closure(block8)
self._factor_()
self.name_last_node('right')
self._error('no available options')
self.ast._define(['left', 'p', 'right'], [])
('Summation')
def _sum_operator_(self):
with self._choice():
with self._option():
self._SUM_()
self._token('_')
self._identifier_alone_()
self.name_last_node('sub')
def block1():
self._hspace_()
self._positive_closure(block1)
self._term_()
self.name_last_node('exp')
with self._option():
self._SUM_()
self._token('_')
self._identifier_alone_()
self.name_last_node('sub')
with self._if():
self._token('(')
def block4():
self._hspace_()
self._closure(block4)
self._term_()
self.name_last_node('exp')
with self._option():
self._SUM_()
self._token('_(')
def block6():
self._hspace_()
self._closure(block6)
self._identifier_alone_()
self.name_last_node('id')
def block8():
self._hspace_()
self._closure(block8)
self._token('for')
def block9():
self._hspace_()
self._closure(block9)
self._if_condition_()
self.name_last_node('cond')
def block11():
self._hspace_()
self._closure(block11)
self._token(')')
def block12():
self._hspace_()
self._positive_closure(block12)
self._term_()
self.name_last_node('exp')
with self._option():
self._SUM_()
self._token('_(')
def block14():
self._hspace_()
self._closure(block14)
self._identifier_alone_()
self.add_last_node_to_name('enum')
def block16():
def block17():
self._hspace_()
self._closure(block17)
self._token(',')
def block18():
self._hspace_()
self._closure(block18)
self._identifier_alone_()
self.add_last_node_to_name('enum')
self._closure(block16)
def block20():
self._hspace_()
self._closure(block20)
self._IN_()
def block21():
self._hspace_()
self._closure(block21)
with self._group():
with self._choice():
with self._option():
self._function_operator_()
with self._option():
self._identifier_alone_()
self._error('no available options')
self.name_last_node('range')
def block24():
self._hspace_()
self._closure(block24)
self._token(')')
def block25():
self._hspace_()
self._positive_closure(block25)
self._term_()
self.name_last_node('exp')
self._error('no available options')
self.ast._define(['cond', 'exp', 'id', 'range', 'sub'], ['enum'])
# NOTE(review): TatSu-style auto-generated parser rule — regenerate from the
# grammar rather than hand-editing. Indentation was lost in this copy; nesting
# follows the `def blockN():` ... `self._closure(blockN)` pairs. The bare
# ('Optimize') line is presumably a stripped `@tatsumasu('Optimize')` decorator
# — confirm against the original generated file.
# Rule: optimization construct — optional `with initial <stmt>[; <stmt>...]`
# prelude ('init'), a min/max/argmin/argmax head, `_(` bound-variable defs `)`,
# an objective expression ('exp'), and an optional "subject to" condition list
# ('cond').
('Optimize')
def _optimize_operator_(self):
def block0():
self._token('with')
def block1():
self._hspace_()
self._closure(block1)
self._token('initial')
def block2():
self._hspace_()
self._closure(block2)
self._statement_()
self.add_last_node_to_name('init')
def block4():
def block5():
self._hspace_()
self._closure(block5)
self._token(';')
def block6():
self._hspace_()
self._closure(block6)
self._statement_()
self.add_last_node_to_name('init')
self._closure(block4)
def block8():
self._hspace_()
self._closure(block8)
self._token('\n')
self._closure(block0)
with self._group():
with self._choice():
with self._option():
self._MIN_()
self.name_last_node('min')
with self._option():
self._MAX_()
self.name_last_node('max')
with self._option():
self._ARGMIN_()
self.name_last_node('amin')
with self._option():
self._ARGMAX_()
self.name_last_node('amax')
self._error('no available options')
self._token('_(')
def block14():
self._hspace_()
self._closure(block14)
self._where_condition_terse_()
self.add_last_node_to_name('defs')
def block16():
def block17():
self._hspace_()
self._closure(block17)
self._token(',')
def block18():
self._hspace_()
self._closure(block18)
self._where_condition_terse_()
self.add_last_node_to_name('defs')
self._closure(block16)
def block20():
self._hspace_()
self._closure(block20)
self._token(')')
def block21():
self._hspace_()
self._closure(block21)
self._expression_()
self.name_last_node('exp')
def block23():
def block24():
def block25():
self._hspace_()
self._closure(block25)
def block26():
self._separator_()
self._closure(block26)
def block27():
self._hspace_()
self._closure(block27)
self._closure(block24)
self._SUBJECT_TO_()
def block28():
def block29():
self._hspace_()
self._closure(block29)
def block30():
self._separator_()
self._closure(block30)
def block31():
self._hspace_()
self._closure(block31)
self._closure(block28)
self._multi_cond_()
self.name_last_node('cond')
self._closure(block23)
self.ast._define(['amax', 'amin', 'cond', 'exp', 'max', 'min'], ['defs', 'init'])
# NOTE(review): auto-generated parser rule (TatSu runtime); indentation lost in
# this copy — regenerate from the grammar instead of hand-editing.
# Rule: left-recursive, separator-delimited list of atomic conditions;
# 'm_cond' holds the preceding list, 'cond' the trailing atomic condition.
('MultiCond')
def _multi_cond_(self):
with self._choice():
with self._option():
def block0():
self._hspace_()
self._closure(block0)
self._multi_cond_()
self.name_last_node('m_cond')
self._separator_with_space_()
self._atom_condition_()
self.name_last_node('cond')
def block3():
self._hspace_()
self._closure(block3)
with self._option():
def block4():
self._hspace_()
self._closure(block4)
self._atom_condition_()
self.name_last_node('cond')
def block6():
self._hspace_()
self._closure(block6)
self._error('no available options')
self.ast._define(['cond', 'm_cond'], [])
# NOTE(review): auto-generated parser rule (TatSu runtime); indentation lost in
# this copy. The empty _token('') literals were presumably non-ASCII glyphs
# (likely the '∫' symbol and the differential 'd'-like glyph) stripped by a
# lossy encoding — confirm against the grammar before relying on them.
# Rule: integral — INT keyword or symbol, then either a `[lo, up]` domain ('d')
# or `_lower^upper` bounds, the integrand expression ('exp'), and the
# integration variable ('id').
('Integral')
def _integral_operator_(self):
with self._group():
with self._choice():
with self._option():
self._INT_()
with self._option():
self._token('')
self._error('no available options')
self._token('_')
with self._group():
with self._choice():
with self._option():
self._domain_()
self.name_last_node('d')
with self._option():
with self._group():
self._sub_factor_()
self.name_last_node('lower')
def block3():
self._hspace_()
self._closure(block3)
self._token('^')
def block4():
self._hspace_()
self._closure(block4)
self._sub_factor_()
self.name_last_node('upper')
self._error('no available options')
def block7():
self._hspace_()
self._closure(block7)
self._expression_()
self.name_last_node('exp')
def block9():
self._hspace_()
self._closure(block9)
self._token('')
self._identifier_alone_()
self.name_last_node('id')
self.ast._define(['d', 'exp', 'id', 'lower', 'upper'], [])
# NOTE(review): auto-generated parser rule (TatSu runtime); indentation lost in
# this copy.
# Rule: integration domain `[ lower , upper ]`.
('Domain')
def _domain_(self):
self._token('[')
def block0():
self._hspace_()
self._closure(block0)
self._expression_()
self.name_last_node('lower')
def block2():
self._hspace_()
self._closure(block2)
self._token(',')
def block3():
self._hspace_()
self._closure(block3)
self._expression_()
self.name_last_node('upper')
self._token(']')
self.ast._define(['lower', 'upper'], [])
# NOTE(review): auto-generated parser rule (TatSu runtime); indentation lost in
# this copy. The empty _token('') literals were presumably the Unicode
# double-bar '‖' norm delimiters, stripped by a lossy encoding — confirm
# against the grammar.
# Rule: norm — `||v||` ('double'), `‖v‖` ('double'), or `|v|` ('single'),
# followed by an optional subscript ('sub': integer, '*', Frobenius glyph, or
# identifier, bare or in `_(...)`) and an optional power ('power': `^factor`
# or superscript integer), accepted in either order.
('Norm')
def _norm_operator_(self):
with self._group():
with self._choice():
with self._option():
self._token('||')
self.name_last_node('double')
def block1():
self._hspace_()
self._closure(block1)
self._expression_()
self.name_last_node('value')
def block3():
self._hspace_()
self._closure(block3)
self._token('||')
with self._option():
self._token('')
self.name_last_node('double')
def block5():
self._hspace_()
self._closure(block5)
self._expression_()
self.name_last_node('value')
def block7():
self._hspace_()
self._closure(block7)
self._token('')
with self._option():
self._token('|')
self.name_last_node('single')
def block9():
self._hspace_()
self._closure(block9)
self._expression_()
self.name_last_node('value')
def block11():
self._hspace_()
self._closure(block11)
self._token('|')
self._error('no available options')
with self._optional():
with self._choice():
with self._option():
with self._group():
with self._group():
with self._choice():
with self._option():
self._token('_')
with self._group():
with self._choice():
with self._option():
self._integer_()
with self._option():
self._token('*')
with self._option():
self._token('')
with self._option():
self._identifier_alone_()
self._error('no available options')
self.name_last_node('sub')
with self._option():
self._sub_integer_()
self.name_last_node('sub')
self._error('no available options')
with self._optional():
with self._choice():
with self._option():
self._token('^')
self._factor_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
with self._option():
with self._group():
self._token('_(')
with self._group():
with self._choice():
with self._option():
self._integer_()
with self._option():
self._token('*')
with self._option():
self._token('')
with self._option():
self._identifier_()
self._error('no available options')
self.name_last_node('sub')
self._token(')')
with self._optional():
with self._choice():
with self._option():
self._token('^')
self._factor_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
with self._option():
with self._group():
with self._group():
with self._choice():
with self._option():
self._token('^')
self._factor_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
with self._optional():
with self._choice():
with self._option():
self._token('_')
with self._group():
with self._choice():
with self._option():
self._integer_()
with self._option():
self._token('*')
with self._option():
self._token('')
with self._option():
self._identifier_alone_()
self._error('no available options')
self.name_last_node('sub')
with self._option():
self._sub_integer_()
self.name_last_node('sub')
self._error('no available options')
self._error('no available options')
self.ast._define(['double', 'power', 'single', 'sub', 'value'], [])
# NOTE(review): auto-generated parser rule (TatSu runtime); indentation lost in
# this copy. The empty _token('') literals were presumably the Unicode angle
# brackets '⟨' and '⟩', stripped by a lossy encoding — confirm against the
# grammar.
# Rule: inner product — `<left, right>` (ASCII) or `⟨left, right⟩` (Unicode),
# followed by an optional repeated `_identifier` subscript ('sub').
('InnerProduct')
def _inner_product_operator_(self):
with self._group():
with self._choice():
with self._option():
with self._group():
self._token('<')
def block0():
self._hspace_()
self._closure(block0)
self._expression_()
self.name_last_node('left')
def block2():
self._hspace_()
self._closure(block2)
self._token(',')
def block3():
self._hspace_()
self._closure(block3)
self._expression_()
self.name_last_node('right')
def block5():
self._hspace_()
self._closure(block5)
self._token('>')
with self._option():
with self._group():
self._token('')
def block6():
self._hspace_()
self._closure(block6)
self._expression_()
self.name_last_node('left')
def block8():
self._hspace_()
self._closure(block8)
self._token(',')
def block9():
self._hspace_()
self._closure(block9)
self._expression_()
self.name_last_node('right')
def block11():
self._hspace_()
self._closure(block11)
self._token('')
self._error('no available options')
def block13():
self._token('_')
self._identifier_()
self.name_last_node('sub')
self._closure(block13)
self.ast._define(['left', 'right', 'sub'], [])
# NOTE(review): auto-generated parser rules (TatSu runtime); indentation lost
# in this copy. Binary product rules: Frobenius `a : b`, Hadamard, cross, and
# Kronecker products, plus postfix transpose/pseudoinverse and prefix square
# root. The empty _token('')/_pattern('') literals were presumably the Unicode
# operator glyphs (∘/⊙, ×, ⊗, √) and _pattern('T')/_pattern('+') presumably
# superscript ᵀ/⁺ — all stripped or ASCII-folded by a lossy encoding; confirm
# against the grammar.
('FroProduct')
def _frobenius_product_operator_(self):
self._factor_()
self.name_last_node('left')
def block1():
self._hspace_()
self._closure(block1)
self._token(':')
def block2():
self._hspace_()
self._closure(block2)
self._factor_()
self.name_last_node('right')
self.ast._define(['left', 'right'], [])
('HadamardProduct')
def _hadamard_product_operator_(self):
self._factor_()
self.name_last_node('left')
def block1():
self._hspace_()
self._closure(block1)
self._token('')
def block2():
self._hspace_()
self._closure(block2)
self._factor_()
self.name_last_node('right')
self.ast._define(['left', 'right'], [])
('CrossProduct')
def _cross_product_operator_(self):
self._factor_()
self.name_last_node('left')
def block1():
self._hspace_()
self._closure(block1)
self._token('')
def block2():
self._hspace_()
self._closure(block2)
self._factor_()
self.name_last_node('right')
self.ast._define(['left', 'right'], [])
('KroneckerProduct')
def _kronecker_product_operator_(self):
self._factor_()
self.name_last_node('left')
def block1():
self._hspace_()
self._closure(block1)
self._token('')
def block2():
self._hspace_()
self._closure(block2)
self._factor_()
self.name_last_node('right')
self.ast._define(['left', 'right'], [])
('Transpose')
def _trans_operator_(self):
self._factor_()
self.name_last_node('f')
self._pattern('T')
self.ast._define(['f'], [])
('PseudoInverse')
def _pseudoinverse_operator_(self):
self._factor_()
self.name_last_node('f')
self._pattern('+')
self.ast._define(['f'], [])
('Squareroot')
def _sqrt_operator_(self):
self._pattern('')
self._factor_()
self.name_last_node('f')
self.ast._define(['f'], [])
# NOTE(review): auto-generated parser rule (TatSu runtime); indentation lost in
# this copy.
# Rule: user-defined function call — `name(param[, param...])`, collecting each
# argument into 'params' and each delimiter into 'separators'.
('Function')
def _function_operator_(self):
self._func_id_()
self.name_last_node('name')
self._token('(')
def block1():
def block2():
self._hspace_()
self._closure(block2)
self._expression_()
self.add_last_node_to_name('params')
def block4():
def block5():
self._hspace_()
self._closure(block5)
self._params_separator_()
self.add_last_node_to_name('separators')
def block7():
self._hspace_()
self._closure(block7)
self._expression_()
self.add_last_node_to_name('params')
self._closure(block4)
self._closure(block1)
def block9():
self._hspace_()
self._closure(block9)
self._token(')')
self.ast._define(['name'], ['params', 'separators'])
# NOTE(review): auto-generated parser rules (TatSu runtime); indentation lost
# in this copy. Dispatch over the predefined built-in functions (exp, log, ln,
# sqrt), then one rule per function. `log` accepts subscript-2/-10 either as
# Unicode subscript digits (\u2082, \u2081\u2080) or ASCII `_2`/`_10`, named
# 'f' (base-2) / 's' (base-10).
()
def _predefined_built_operators_(self):
with self._choice():
with self._option():
self._exp_func_()
with self._option():
self._log_func_()
with self._option():
self._ln_func_()
with self._option():
self._sqrt_func_()
self._error('no available options')
('ExpFunc')
def _exp_func_(self):
self._EXP_()
self._token('(')
def block0():
self._hspace_()
self._closure(block0)
self._expression_()
self.name_last_node('param')
def block2():
self._hspace_()
self._closure(block2)
self._token(')')
self.ast._define(['param'], [])
('LogFunc')
def _log_func_(self):
with self._choice():
with self._option():
with self._group():
with self._group():
with self._choice():
with self._option():
self._pattern('log[\\u2082]')
self.name_last_node('f')
with self._option():
self._pattern('log[\\u2081][\\u2080]')
self.name_last_node('s')
self._error('no available options')
self._token('(')
def block3():
self._hspace_()
self._closure(block3)
self._expression_()
self.name_last_node('param')
def block5():
self._hspace_()
self._closure(block5)
self._token(')')
with self._option():
with self._group():
self._LOG_()
with self._optional():
with self._choice():
with self._option():
self._token('_2')
self.name_last_node('f')
with self._option():
self._token('_10')
self.name_last_node('s')
self._error('no available options')
self._token('(')
def block9():
self._hspace_()
self._closure(block9)
self._expression_()
self.name_last_node('param')
def block11():
self._hspace_()
self._closure(block11)
self._token(')')
self._error('no available options')
self.ast._define(['f', 'param', 's'], [])
('LnFunc')
def _ln_func_(self):
self._LN_()
self._token('(')
def block0():
self._hspace_()
self._closure(block0)
self._expression_()
self.name_last_node('param')
def block2():
self._hspace_()
self._closure(block2)
self._token(')')
self.ast._define(['param'], [])
('SqrtFunc')
def _sqrt_func_(self):
self._SQRT_()
self._token('(')
def block0():
self._hspace_()
self._closure(block0)
self._expression_()
self.name_last_node('param')
def block2():
self._hspace_()
self._closure(block2)
self._token(')')
self.ast._define(['param'], [])
# NOTE(review): auto-generated parser rules (TatSu runtime); indentation lost
# in this copy. Matrix literal `[rows]` (or Unicode-bracket form — the empty
# _token('') literals were presumably Unicode matrix brackets stripped by a
# lossy encoding; confirm against the grammar) and vector literal
# `(exp, exp, ...)` with at least two comma-separated elements.
('Matrix')
def _matrix_(self):
with self._choice():
with self._option():
self._token('[')
def block0():
self._hspace_()
self._closure(block0)
self._rows_()
self.name_last_node('value')
def block2():
self._hspace_()
self._closure(block2)
self._token(']')
with self._option():
self._token('')
def block3():
self._hspace_()
self._closure(block3)
self._rows_()
self.name_last_node('value')
def block5():
self._hspace_()
self._closure(block5)
self._token('')
self._error('no available options')
self.ast._define(['value'], [])
('Vector')
def _vector_(self):
self._token('(')
def block0():
self._hspace_()
self._closure(block0)
self._expression_()
self.add_last_node_to_name('exp')
def block2():
def block3():
self._hspace_()
self._closure(block3)
self._token(',')
def block4():
self._hspace_()
self._closure(block4)
self._expression_()
self.add_last_node_to_name('exp')
self._positive_closure(block2)
def block6():
self._hspace_()
self._closure(block6)
self._token(')')
self.ast._define([], ['exp'])
# NOTE(review): auto-generated parser rules (TatSu runtime); indentation lost
# in this copy. Piecewise expression `{ cond-branches [otherwise-branch]`:
# _multi_cond_expr_ gathers the if-branches ('ifs') plus an optional
# "otherwise" expression ('other'); _multi_if_conditions_ is the left-recursive
# branch list; _single_if_condition_ accepts both `expr if cond` and
# `cond : expr` orderings.
('MultiCondExpr')
def _multi_cond_expr_(self):
self._token('{')
def block0():
self._hspace_()
self._closure(block0)
self._multi_if_conditions_()
self.name_last_node('ifs')
with self._optional():
def block2():
self._separator_with_space_()
self._positive_closure(block2)
def block3():
self._hspace_()
self._closure(block3)
self._expression_()
self.name_last_node('other')
def block5():
self._hspace_()
self._closure(block5)
self._OTHERWISE_()
self.ast._define(['ifs', 'other'], [])
('MultiIfs')
def _multi_if_conditions_(self):
with self._choice():
with self._option():
self._multi_if_conditions_()
self.name_last_node('ifs')
def block1():
self._separator_with_space_()
self._positive_closure(block1)
self._single_if_condition_()
self.name_last_node('value')
with self._option():
self._single_if_condition_()
self.name_last_node('value')
self._error('no available options')
self.ast._define(['ifs', 'value'], [])
('SingleIf')
def _single_if_condition_(self):
with self._choice():
with self._option():
self._expression_()
self.name_last_node('stat')
def block1():
self._hspace_()
self._closure(block1)
self._IF_()
def block2():
self._hspace_()
self._closure(block2)
self._if_condition_()
self.name_last_node('cond')
with self._option():
self._if_condition_()
self.name_last_node('cond')
def block5():
self._hspace_()
self._closure(block5)
self._token(':')
def block6():
self._hspace_()
self._closure(block6)
self._expression_()
self.name_last_node('stat')
self._error('no available options')
self.ast._define(['cond', 'stat'], [])
# NOTE(review): auto-generated parser rules (TatSu runtime); indentation lost
# in this copy. Matrix-body rules: _rows_ is the left-recursive,
# separator-delimited row list ('rs' = preceding rows, 'r' = trailing row);
# _row_ is one row, optionally wrapped in `| ... |`; _row_with_commas_ is the
# left-recursive sequence of in-matrix expressions separated by commas or
# runs of horizontal space.
('MatrixRows')
def _rows_(self):
with self._choice():
with self._option():
self._rows_()
self.name_last_node('rs')
def block1():
self._separator_with_space_()
self._positive_closure(block1)
self._row_()
self.name_last_node('r')
def block3():
self._hspace_()
self._closure(block3)
with self._option():
self._rows_()
self.name_last_node('rs')
def block5():
self._separator_with_space_()
self._positive_closure(block5)
with self._option():
self._row_()
self.name_last_node('r')
def block7():
self._hspace_()
self._closure(block7)
self._error('no available options')
self.ast._define(['r', 'rs'], [])
('MatrixRow')
def _row_(self):
with self._choice():
with self._option():
self._token('|')
def block0():
self._hspace_()
self._closure(block0)
self._row_()
self.add_last_node_to_name('value')
def block2():
self._hspace_()
self._closure(block2)
self._token('|')
with self._option():
self._row_with_commas_()
self.name_last_node('rc')
def block4():
self._hspace_()
self._closure(block4)
self._expr_in_matrix_()
self.name_last_node('exp')
with self._option():
self._row_with_commas_()
self.name_last_node('rc')
with self._option():
self._expr_in_matrix_()
self.name_last_node('exp')
self._error('no available options')
self.ast._define(['exp', 'rc'], ['value'])
('MatrixRowCommas')
def _row_with_commas_(self):
with self._choice():
with self._option():
self._row_with_commas_()
self.name_last_node('value')
def block1():
self._hspace_()
self._closure(block1)
self._expr_in_matrix_()
self.name_last_node('exp')
with self._group():
with self._choice():
with self._option():
def block3():
self._hspace_()
self._closure(block3)
self._token(',')
with self._option():
def block4():
self._hspace_()
self._positive_closure(block4)
self._error('no available options')
with self._option():
def block6():
self._hspace_()
self._closure(block6)
self._expr_in_matrix_()
self.name_last_node('exp')
with self._group():
with self._choice():
with self._option():
def block8():
self._hspace_()
self._closure(block8)
self._token(',')
with self._option():
def block9():
self._hspace_()
self._positive_closure(block9)
self._error('no available options')
self._error('no available options')
self.ast._define(['exp', 'value'], [])
# NOTE(review): auto-generated parser rules (TatSu runtime); indentation lost
# in this copy. Arithmetic inside matrix literals: expression (add/sub/signed
# term), left-recursive addition and subtraction, term (mul/div/factor),
# multiplication (explicit-operator or juxtaposition), and division. The
# empty _token('') literals were presumably the Unicode '⋅' and '÷' operator
# glyphs stripped by a lossy encoding — confirm against the grammar.
('ExpInMatrix')
def _expr_in_matrix_(self):
with self._choice():
with self._option():
self._addition_in_matrix_()
self.name_last_node('value')
with self._option():
self._subtraction_in_matrix_()
self.name_last_node('value')
with self._option():
with self._optional():
self._token('-')
self.name_last_node('sign')
self._term_in_matrix_()
self.name_last_node('value')
self._error('no available options')
self.ast._define(['sign', 'value'], [])
('Add')
def _addition_in_matrix_(self):
self._expr_in_matrix_()
self.name_last_node('left')
self._token('+')
self.name_last_node('op')
self._term_in_matrix_()
self.name_last_node('right')
self.ast._define(['left', 'op', 'right'], [])
('Subtract')
def _subtraction_in_matrix_(self):
self._expr_in_matrix_()
self.name_last_node('left')
self._token('-')
self.name_last_node('op')
self._term_in_matrix_()
self.name_last_node('right')
self.ast._define(['left', 'op', 'right'], [])
()
def _term_in_matrix_(self):
with self._choice():
with self._option():
self._multiplication_in_matrix_()
with self._option():
self._division_in_matrix_()
with self._option():
self._factor_in_matrix_()
self._error('no available options')
('Multiply')
def _multiplication_in_matrix_(self):
with self._choice():
with self._option():
self._term_in_matrix_()
self.name_last_node('left')
self._token('')
self.name_last_node('op')
self._factor_in_matrix_()
self.name_last_node('right')
with self._option():
self._term_in_matrix_()
self.name_last_node('left')
self._factor_in_matrix_()
self.name_last_node('right')
self._error('no available options')
self.ast._define(['left', 'op', 'right'], [])
('Divide')
def _division_in_matrix_(self):
self._term_in_matrix_()
self.name_last_node('left')
with self._group():
with self._choice():
with self._option():
self._token('/')
with self._option():
self._token('')
self._error('no available options')
self.name_last_node('op')
self._factor_in_matrix_()
self.name_last_node('right')
self.ast._define(['left', 'op', 'right'], [])
# NOTE(review): auto-generated parser rule (TatSu runtime); indentation lost in
# this copy.
# Rule: special numeric matrices — a 0/1/ones head ('left') with dimensions
# ('id1', optional 'id2') given as `_n[,m]`, Unicode subscript digits (the
# \u1D7D9 pattern is the double-struck '𝟙'), or `_(n, m)`. The duplicated
# _token('1') options and the empty _token('') were presumably distinct
# Unicode glyphs ('𝟙', '×') ASCII-folded/stripped by a lossy encoding —
# confirm against the grammar.
('NumMatrix')
def _number_matrix_(self):
with self._choice():
with self._option():
with self._group():
with self._choice():
with self._option():
self._token('0')
with self._option():
self._token('1')
with self._option():
self._token('1')
self._error('no available options')
self.name_last_node('left')
self._token('_')
with self._group():
with self._choice():
with self._option():
self._integer_()
with self._option():
self._identifier_()
self._error('no available options')
self.name_last_node('id1')
def block4():
self._token(',')
with self._group():
with self._choice():
with self._option():
self._integer_()
with self._option():
self._identifier_()
self._error('no available options')
self.name_last_node('id2')
self._closure(block4)
with self._option():
self._pattern('[01\\u1D7D9]')
self.name_last_node('left')
self._sub_integer_()
self.name_last_node('id1')
def block9():
self._token(',')
self._sub_integer_()
self.name_last_node('id2')
self._closure(block9)
with self._option():
with self._group():
with self._choice():
with self._option():
self._token('0')
with self._option():
self._token('1')
with self._option():
self._token('1')
self._error('no available options')
self.name_last_node('left')
self._token('_')
self._token('(')
def block13():
self._hspace_()
self._closure(block13)
with self._group():
with self._choice():
with self._option():
self._integer_()
with self._option():
self._identifier_()
self._error('no available options')
self.name_last_node('id1')
def block16():
def block17():
self._hspace_()
self._closure(block17)
with self._group():
with self._choice():
with self._option():
self._token(',')
with self._option():
self._token('')
self._error('no available options')
def block19():
self._hspace_()
self._closure(block19)
with self._group():
with self._choice():
with self._option():
self._integer_()
with self._option():
self._identifier_()
self._error('no available options')
self.name_last_node('id2')
self._closure(block16)
def block22():
self._hspace_()
self._closure(block22)
self._token(')')
self._error('no available options')
self.ast._define(['id1', 'id2', 'left'], [])
# NOTE(review): auto-generated parser rules (TatSu runtime); indentation lost
# in this copy. _factor_in_matrix_ is the atom rule inside matrix literals
# (operator expression, parenthesized subexpression, numeric matrix,
# identifier, number, nested matrix/vector, constant); _operations_in_matrix_
# dispatches over the operator rules allowed there (ordered — earlier options
# win in PEG choice).
('Factor')
def _factor_in_matrix_(self):
with self._choice():
with self._option():
self._operations_in_matrix_()
self.name_last_node('op')
with self._option():
self._subexpression_()
self.name_last_node('sub')
with self._option():
self._number_matrix_()
self.name_last_node('nm')
with self._option():
self._identifier_()
self.name_last_node('id0')
with self._option():
self._number_()
self.name_last_node('num')
with self._option():
self._matrix_()
self.name_last_node('m')
with self._option():
self._vector_()
self.name_last_node('v')
with self._option():
self._constant_()
self.name_last_node('c')
self._error('no available options')
self.ast._define(['c', 'id0', 'm', 'nm', 'num', 'op', 'sub', 'v'], [])
()
def _operations_in_matrix_(self):
with self._choice():
with self._option():
self._solver_in_matrix_operator_()
with self._option():
self._norm_operator_()
with self._option():
self._power_in_matrix_operator_()
with self._option():
self._inner_product_operator_()
with self._option():
self._frobenius_product_in_matrix_operator_()
with self._option():
self._hadamard_product_in_matrix_operator_()
with self._option():
self._cross_product_in_matrix_operator_()
with self._option():
self._kronecker_product_in_matrix_operator_()
with self._option():
self._sum_in_matrix_operator_()
with self._option():
self._integral_operator_()
with self._option():
self._trans_in_matrix_operator_()
with self._option():
self._sqrt_in_matrix_operator_()
with self._option():
self._function_operator_()
with self._option():
self._builtin_operators_()
with self._option():
self._pseudoinverse_in_matrix_operator_()
self._error('no available options')
# NOTE(review): auto-generated parser rules (TatSu runtime); indentation lost
# in this copy. In-matrix variants of the operator rules: power (`^T`
# transpose, `^(-1)` inverse, `^factor`, or superscript-integer power),
# Frobenius/Hadamard/cross/Kronecker products, transpose, pseudoinverse,
# square root, and linear solve (`A\b` or `A^(-1) b`). The empty
# _token('')/_pattern('') literals and the stray _token('1') options were
# presumably Unicode glyphs (⊙, ×, ⊗, ᵀ, ⁺, √, ⁻¹) stripped/ASCII-folded by
# a lossy encoding — confirm against the grammar.
('Power')
def _power_in_matrix_operator_(self):
with self._choice():
with self._option():
self._factor_in_matrix_()
self.name_last_node('base')
self._token('^T')
self.name_last_node('t')
with self._option():
self._factor_in_matrix_()
self.name_last_node('base')
with self._group():
with self._choice():
with self._option():
self._token('^(-1)')
with self._option():
self._token('1')
self._error('no available options')
self.name_last_node('r')
with self._option():
self._factor_in_matrix_()
self.name_last_node('base')
self._token('^')
self._factor_in_matrix_()
self.name_last_node('power')
with self._option():
self._factor_in_matrix_()
self.name_last_node('base')
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self.ast._define(['base', 'power', 'r', 't'], [])
('FroProduct')
def _frobenius_product_in_matrix_operator_(self):
self._factor_in_matrix_()
self.name_last_node('left')
self._token(':')
self._factor_in_matrix_()
self.name_last_node('right')
self.ast._define(['left', 'right'], [])
('HadamardProduct')
def _hadamard_product_in_matrix_operator_(self):
self._factor_in_matrix_()
self.name_last_node('left')
self._token('')
self._factor_in_matrix_()
self.name_last_node('right')
self.ast._define(['left', 'right'], [])
('CrossProduct')
def _cross_product_in_matrix_operator_(self):
self._factor_in_matrix_()
self.name_last_node('left')
self._token('')
self._factor_in_matrix_()
self.name_last_node('right')
self.ast._define(['left', 'right'], [])
('KroneckerProduct')
def _kronecker_product_in_matrix_operator_(self):
self._factor_in_matrix_()
self.name_last_node('left')
self._token('')
self._factor_in_matrix_()
self.name_last_node('right')
self.ast._define(['left', 'right'], [])
('Transpose')
def _trans_in_matrix_operator_(self):
self._factor_in_matrix_()
self.name_last_node('f')
self._pattern('T')
self.ast._define(['f'], [])
('PseudoInverse')
def _pseudoinverse_in_matrix_operator_(self):
self._factor_in_matrix_()
self.name_last_node('f')
self._pattern('+')
self.ast._define(['f'], [])
('Squareroot')
def _sqrt_in_matrix_operator_(self):
self._pattern('')
self._factor_in_matrix_()
self.name_last_node('f')
self.ast._define(['f'], [])
('Solver')
def _solver_in_matrix_operator_(self):
with self._choice():
with self._option():
self._factor_in_matrix_()
self.name_last_node('left')
self._token('\\')
self._factor_in_matrix_()
self.name_last_node('right')
with self._option():
self._factor_in_matrix_()
self.name_last_node('left')
with self._group():
with self._choice():
with self._option():
self._token('^(-1)')
with self._option():
self._token('1')
self._error('no available options')
self.name_last_node('p')
self._factor_in_matrix_()
self.name_last_node('right')
self._error('no available options')
self.ast._define(['left', 'p', 'right'], [])
# NOTE(review): auto-generated parser rule (TatSu runtime); indentation lost in
# this copy.
# Rule: summation inside a matrix — three forms: `sum_i <term>` (with a
# positive lookahead `_if` rejecting a following '('), `sum_(i for cond)
# <term>`, and `sum_(i[, j...] in range) <term>`, mirroring the plain
# summation rule elsewhere in the file.
('Summation')
def _sum_in_matrix_operator_(self):
with self._choice():
with self._option():
self._SUM_()
self._token('_')
self._identifier_alone_()
self.name_last_node('sub')
with self._if():
self._token('(')
def block1():
self._hspace_()
self._closure(block1)
self._term_in_matrix_()
self.name_last_node('exp')
with self._option():
self._SUM_()
self._token('_(')
def block3():
self._hspace_()
self._closure(block3)
self._identifier_alone_()
self.name_last_node('id')
def block5():
self._hspace_()
self._closure(block5)
self._token('for')
def block6():
self._hspace_()
self._closure(block6)
self._if_condition_()
self.name_last_node('cond')
def block8():
self._hspace_()
self._closure(block8)
self._token(')')
self._term_in_matrix_()
self.name_last_node('exp')
with self._option():
self._SUM_()
self._token('_(')
def block10():
self._hspace_()
self._closure(block10)
self._identifier_alone_()
self.add_last_node_to_name('enum')
def block12():
def block13():
self._hspace_()
self._closure(block13)
self._token(',')
def block14():
self._hspace_()
self._closure(block14)
self._identifier_alone_()
self.add_last_node_to_name('enum')
self._closure(block12)
def block16():
self._hspace_()
self._closure(block16)
self._IN_()
def block17():
self._hspace_()
self._closure(block17)
with self._group():
with self._choice():
with self._option():
self._function_operator_()
with self._option():
self._identifier_alone_()
self._error('no available options')
self.name_last_node('range')
def block20():
self._hspace_()
self._closure(block20)
self._token(')')
self._term_()
self.name_last_node('exp')
self._error('no available options')
self.ast._define(['cond', 'exp', 'id', 'range', 'sub'], ['enum'])
# NOTE(review): auto-generated lexical helper rules (TatSu runtime);
# indentation lost in this copy. Horizontal space, line breaks, identifiers
# (with optional subscripts, ASCII `_i,j` or Unicode subscript digits),
# backtick-quoted identifiers, descriptions, and separators. In
# _params_separator_ the _token('x') / _token('') options were presumably
# Unicode glyphs ('×' and another separator) ASCII-folded/stripped by a lossy
# encoding, and the `[A-Za-z][[A-Za-z0-9]*` pattern in _desc_identifier_
# contains a doubled '[' that looks like the same corruption — confirm both
# against the grammar.
()
def _hspace_(self):
with self._choice():
with self._option():
self._token(' ')
with self._option():
self._token('\t')
self._error('no available options')
()
def _line_(self):
with self._choice():
with self._option():
self._token('\n')
with self._option():
self._token('\r')
with self._option():
self._token('\x0c')
self._error('no available options')
()
def _lines_(self):
def block0():
self._line_()
self._positive_closure(block0)
()
def _identifier_(self):
with self._choice():
with self._option():
self._identifier_with_subscript_()
with self._option():
self._identifier_alone_()
self._error('no available options')
('IdentifierSubscript')
def _identifier_with_subscript_(self):
with self._choice():
with self._option():
with self._group():
self._identifier_alone_()
self.name_last_node('left')
self._token('_')
with self._group():
with self._choice():
with self._option():
self._integer_()
with self._option():
self._token('*')
with self._option():
self._identifier_alone_()
self._error('no available options')
self.add_last_node_to_name('right')
def block3():
with self._choice():
with self._option():
with self._group():
self._token(',')
self._token('*')
self.add_last_node_to_name('right')
with self._option():
with self._group():
def block5():
self._token(',')
self._closure(block5)
with self._group():
with self._choice():
with self._option():
self._integer_()
with self._option():
self._identifier_alone_()
self._error('no available options')
self.add_last_node_to_name('right')
self._error('no available options')
self._closure(block3)
with self._option():
with self._group():
self._identifier_alone_()
self.name_last_node('left')
self._sub_integer_()
self.add_last_node_to_name('right')
def block11():
with self._choice():
with self._option():
with self._group():
self._token(',')
self._token('*')
self.add_last_node_to_name('right')
with self._option():
with self._group():
def block13():
self._token(',')
self._closure(block13)
with self._group():
self._sub_integer_()
self.add_last_node_to_name('right')
self._error('no available options')
self._closure(block11)
self._error('no available options')
self.ast._define(['left'], ['right'])
()
def _keyword_str_(self):
self._pattern('[A-Za-z][A-Za-z0-9]*')
('IdentifierAlone')
def _multi_str_(self):
with self._group():
with self._choice():
with self._option():
self._pattern('[A-Za-z\\p{Ll}\\p{Lu}\\p{Lo}]\\p{M}*([A-Z0-9a-z\\p{Ll}\\p{Lu}\\p{Lo}]\\p{M}*)*')
self.name_last_node('value')
with self._option():
self._token('`')
self._pattern('[^`]*')
self.name_last_node('id')
self._token('`')
self._error('no available options')
self.ast._define(['id', 'value'], [])
()
def _description_(self):
self._pattern('[^;\\n\\r\\f]*')
()
def _desc_identifier_(self):
with self._ifnot():
self._KEYWORDS_()
self._token('`')
self._pattern('[A-Za-z][[A-Za-z0-9]*')
self._token('`')
()
def _separator_(self):
with self._choice():
with self._option():
self._line_()
with self._option():
self._token(';')
self._error('no available options')
()
def _separator_with_space_(self):
def block0():
self._hspace_()
self._closure(block0)
self._separator_()
def block1():
self._hspace_()
self._closure(block1)
()
def _blank_(self):
def block0():
with self._group():
with self._choice():
with self._option():
self._hspace_()
with self._option():
self._separator_()
self._error('no available options')
self._closure(block0)
()
def _params_separator_(self):
with self._choice():
with self._option():
self._token(',')
with self._option():
self._token(';')
with self._option():
self._token('x')
with self._option():
self._token('')
self._error('no available options')
# NOTE(review): auto-generated parser rules (TatSu runtime); indentation lost
# in this copy. Constants pi (the empty _pattern('') was presumably the 'π'
# glyph stripped by a lossy encoding — confirm against the grammar) and e,
# plus `sin[^power](param)` with the power given as `^int` or a superscript
# integer.
('Pi')
def _pi_(self):
self._pattern('')
('E')
def _e_(self):
self._pattern('e')
('SinFunc')
def _sin_func_(self):
self._SIN_()
def block0():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block0)
self._token('(')
def block4():
self._hspace_()
self._closure(block4)
self._expression_()
self.name_last_node('param')
def block6():
self._hspace_()
self._closure(block6)
self._token(')')
self.ast._define(['param', 'power'], [])
# NOTE(review): auto-generated parser rules (TatSu runtime); indentation lost
# in this copy. One rule per trig/inverse-trig spelling — asin/arcsin,
# cos/acos/arccos, tan/atan — each following the same template: keyword, an
# optional power (`^int` or superscript integer, 'power'), then
# `( param )`. The inverse variants also capture the matched keyword as
# 'name' (asin vs arcsin etc. share an AST class, per the repeated
# ('AsinFunc')/('AcosFunc')/('AtanFunc') decorator remnants).
('AsinFunc')
def _asin_func_(self):
self._ASIN_()
self.name_last_node('name')
def block1():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block1)
self._token('(')
def block5():
self._hspace_()
self._closure(block5)
self._expression_()
self.name_last_node('param')
def block7():
self._hspace_()
self._closure(block7)
self._token(')')
self.ast._define(['name', 'param', 'power'], [])
('AsinFunc')
def _arcsin_func_(self):
self._ARCSIN_()
self.name_last_node('name')
def block1():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block1)
self._token('(')
def block5():
self._hspace_()
self._closure(block5)
self._expression_()
self.name_last_node('param')
def block7():
self._hspace_()
self._closure(block7)
self._token(')')
self.ast._define(['name', 'param', 'power'], [])
('CosFunc')
def _cos_func_(self):
self._COS_()
def block0():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block0)
self._token('(')
def block4():
self._hspace_()
self._closure(block4)
self._expression_()
self.name_last_node('param')
def block6():
self._hspace_()
self._closure(block6)
self._token(')')
self.ast._define(['param', 'power'], [])
('AcosFunc')
def _acos_func_(self):
self._ACOS_()
self.name_last_node('name')
def block1():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block1)
self._token('(')
def block5():
self._hspace_()
self._closure(block5)
self._expression_()
self.name_last_node('param')
def block7():
self._hspace_()
self._closure(block7)
self._token(')')
self.ast._define(['name', 'param', 'power'], [])
('AcosFunc')
def _arccos_func_(self):
self._ARCCOS_()
self.name_last_node('name')
def block1():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block1)
self._token('(')
def block5():
self._hspace_()
self._closure(block5)
self._expression_()
self.name_last_node('param')
def block7():
self._hspace_()
self._closure(block7)
self._token(')')
self.ast._define(['name', 'param', 'power'], [])
('TanFunc')
def _tan_func_(self):
self._TAN_()
def block0():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block0)
self._token('(')
def block4():
self._hspace_()
self._closure(block4)
self._expression_()
self.name_last_node('param')
def block6():
self._hspace_()
self._closure(block6)
self._token(')')
self.ast._define(['param', 'power'], [])
('AtanFunc')
def _atan_func_(self):
self._ATAN_()
self.name_last_node('name')
def block1():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block1)
self._token('(')
def block5():
self._hspace_()
self._closure(block5)
self._expression_()
self.name_last_node('param')
def block7():
self._hspace_()
self._closure(block7)
self._token(')')
self.ast._define(['name', 'param', 'power'], [])
('AtanFunc')
def _arctan_func_(self):
self._ARCTAN_()
self.name_last_node('name')
def block1():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block1)
self._token('(')
def block5():
self._hspace_()
self._closure(block5)
self._expression_()
self.name_last_node('param')
def block7():
self._hspace_()
self._closure(block7)
self._token(')')
self.ast._define(['name', 'param', 'power'], [])
('SinhFunc')
def _sinh_func_(self):
self._SINH_()
def block0():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block0)
self._token('(')
def block4():
self._hspace_()
self._closure(block4)
self._expression_()
self.name_last_node('param')
def block6():
self._hspace_()
self._closure(block6)
self._token(')')
self.ast._define(['param', 'power'], [])
('AsinhFunc')
def _asinh_func_(self):
self._ASINH_()
self.name_last_node('name')
def block1():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block1)
self._token('(')
def block5():
self._hspace_()
self._closure(block5)
self._expression_()
self.name_last_node('param')
def block7():
self._hspace_()
self._closure(block7)
self._token(')')
self.ast._define(['name', 'param', 'power'], [])
('AsinhFunc')
def _arsinh_func_(self):
self._ARSINH_()
self.name_last_node('name')
def block1():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block1)
self._token('(')
def block5():
self._hspace_()
self._closure(block5)
self._expression_()
self.name_last_node('param')
def block7():
self._hspace_()
self._closure(block7)
self._token(')')
self.ast._define(['name', 'param', 'power'], [])
('CoshFunc')
def _cosh_func_(self):
self._COSH_()
def block0():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block0)
self._token('(')
def block4():
self._hspace_()
self._closure(block4)
self._expression_()
self.name_last_node('param')
def block6():
self._hspace_()
self._closure(block6)
self._token(')')
self.ast._define(['param', 'power'], [])
('AcoshFunc')
def _acosh_func_(self):
self._ACOSH_()
self.name_last_node('name')
def block1():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block1)
self._token('(')
def block5():
self._hspace_()
self._closure(block5)
self._expression_()
self.name_last_node('param')
def block7():
self._hspace_()
self._closure(block7)
self._token(')')
self.ast._define(['name', 'param', 'power'], [])
('AcoshFunc')
def _arcosh_func_(self):
self._ARCOSH_()
self.name_last_node('name')
def block1():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block1)
self._token('(')
def block5():
self._hspace_()
self._closure(block5)
self._expression_()
self.name_last_node('param')
def block7():
self._hspace_()
self._closure(block7)
self._token(')')
self.ast._define(['name', 'param', 'power'], [])
('TanhFunc')
def _tanh_func_(self):
self._TANH_()
def block0():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block0)
self._token('(')
def block4():
self._hspace_()
self._closure(block4)
self._expression_()
self.name_last_node('param')
def block6():
self._hspace_()
self._closure(block6)
self._token(')')
self.ast._define(['param', 'power'], [])
('AtanhFunc')
def _atanh_func_(self):
self._ATANH_()
self.name_last_node('name')
def block1():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block1)
self._token('(')
def block5():
self._hspace_()
self._closure(block5)
self._expression_()
self.name_last_node('param')
def block7():
self._hspace_()
self._closure(block7)
self._token(')')
self.ast._define(['name', 'param', 'power'], [])
('AtanhFunc')
def _artanh_func_(self):
self._ARTANH_()
self.name_last_node('name')
def block1():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block1)
self._token('(')
def block5():
self._hspace_()
self._closure(block5)
self._expression_()
self.name_last_node('param')
def block7():
self._hspace_()
self._closure(block7)
self._token(')')
self.ast._define(['name', 'param', 'power'], [])
('CotFunc')
def _cot_func_(self):
self._COT_()
def block0():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block0)
self._token('(')
def block4():
self._hspace_()
self._closure(block4)
self._expression_()
self.name_last_node('param')
def block6():
self._hspace_()
self._closure(block6)
self._token(')')
self.ast._define(['param', 'power'], [])
('SecFunc')
def _sec_func_(self):
self._SEC_()
def block0():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block0)
self._token('(')
def block4():
self._hspace_()
self._closure(block4)
self._expression_()
self.name_last_node('param')
def block6():
self._hspace_()
self._closure(block6)
self._token(')')
self.ast._define(['param', 'power'], [])
('CscFunc')
def _csc_func_(self):
self._CSC_()
def block0():
with self._choice():
with self._option():
with self._group():
self._token('^')
self._integer_()
self.name_last_node('power')
with self._option():
self._sup_integer_()
self.name_last_node('power')
self._error('no available options')
self._closure(block0)
self._token('(')
def block4():
self._hspace_()
self._closure(block4)
self._expression_()
self.name_last_node('param')
def block6():
self._hspace_()
self._closure(block6)
self._token(')')
self.ast._define(['param', 'power'], [])
('Atan2Func')
def _atan2_func_(self):
    # atan2 '(' expression separator expression ')': two-argument arctangent
    self._ATAN2_()
    self._token('(')

    def block0():
        self._hspace_()
    self._closure(block0)
    self._expression_()
    self.name_last_node('param')

    def block2():
        self._hspace_()
    self._closure(block2)
    self._params_separator_()
    self.name_last_node('separator')

    def block4():
        self._hspace_()
    self._closure(block4)
    self._expression_()
    self.name_last_node('second')

    def block6():
        self._hspace_()
    self._closure(block6)
    self._token(')')
    self.ast._define(['param', 'second', 'separator'], [])

('TraceFunc')
def _trace_func_(self):
    # trace '(' expression ')': matrix trace, long spelling
    self._TRACE_()
    self.name_last_node('name')
    self._token('(')

    def block1():
        self._hspace_()
    self._closure(block1)
    self._expression_()
    self.name_last_node('param')

    def block3():
        self._hspace_()
    self._closure(block3)
    self._token(')')
    self.ast._define(['name', 'param'], [])

('TraceFunc')
def _tr_func_(self):
    # tr '(' expression ')': short spelling of trace; same AST node
    self._TR_()
    self.name_last_node('name')
    self._token('(')

    def block1():
        self._hspace_()
    self._closure(block1)
    self._expression_()
    self.name_last_node('param')

    def block3():
        self._hspace_()
    self._closure(block3)
    self._token(')')
    self.ast._define(['name', 'param'], [])
('DiagFunc')
def _diag_func_(self):
    # diag '(' expression ')'
    self._DIAG_()
    self._token('(')

    def block0():
        self._hspace_()
    self._closure(block0)
    self._expression_()
    self.name_last_node('param')

    def block2():
        self._hspace_()
    self._closure(block2)
    self._token(')')
    self.ast._define(['param'], [])

('VecFunc')
def _vec_func_(self):
    # vec '(' expression ')'
    self._VEC_()
    self._token('(')

    def block0():
        self._hspace_()
    self._closure(block0)
    self._expression_()
    self.name_last_node('param')

    def block2():
        self._hspace_()
    self._closure(block2)
    self._token(')')
    self.ast._define(['param'], [])

('DetFunc')
def _det_func_(self):
    # det '(' expression ')'
    self._DET_()
    self._token('(')

    def block0():
        self._hspace_()
    self._closure(block0)
    self._expression_()
    self.name_last_node('param')

    def block2():
        self._hspace_()
    self._closure(block2)
    self._token(')')
    self.ast._define(['param'], [])

('RankFunc')
def _rank_func_(self):
    # rank '(' expression ')'
    self._RANK_()
    self._token('(')

    def block0():
        self._hspace_()
    self._closure(block0)
    self._expression_()
    self.name_last_node('param')

    def block2():
        self._hspace_()
    self._closure(block2)
    self._token(')')
    self.ast._define(['param'], [])

('NullFunc')
def _null_func_(self):
    # null '(' expression ')'
    self._NULL_()
    self._token('(')

    def block0():
        self._hspace_()
    self._closure(block0)
    self._expression_()
    self.name_last_node('param')

    def block2():
        self._hspace_()
    self._closure(block2)
    self._token(')')
    self.ast._define(['param'], [])

('OrthFunc')
def _orth_func_(self):
    # orth '(' expression ')'
    self._ORTH_()
    self._token('(')

    def block0():
        self._hspace_()
    self._closure(block0)
    self._expression_()
    self.name_last_node('param')

    def block2():
        self._hspace_()
    self._closure(block2)
    self._token(')')
    self.ast._define(['param'], [])

('InvFunc')
def _inv_func_(self):
    # inv '(' expression ')'
    self._INV_()
    self._token('(')

    def block0():
        self._hspace_()
    self._closure(block0)
    self._expression_()
    self.name_last_node('param')

    def block2():
        self._hspace_()
    self._closure(block2)
    self._token(')')
    self.ast._define(['param'], [])
()
def _Directive_(self):
    # A directive is currently only an import statement.
    self._import_()

('Import')
def _import_(self):
    # name (',' name)* from package [ '(' param (sep param)* ')' ]
    self._multi_str_()
    self.add_last_node_to_name('names')

    def block1():
        def block2():
            self._hspace_()
        self._closure(block2)
        self._token(',')

        def block3():
            self._hspace_()
        self._closure(block3)
        self._multi_str_()
        self.add_last_node_to_name('names')
    self._closure(block1)

    def block5():
        self._hspace_()
    self._closure(block5)
    self._FROM_()

    def block6():
        self._hspace_()
    self._positive_closure(block6)
    self._multi_str_()
    self.name_last_node('package')

    def block8():
        self._hspace_()
    self._closure(block8)

    # Optional parenthesized parameter list after the package name.
    def block9():
        self._token('(')

        def block10():
            def block11():
                self._hspace_()
            self._closure(block11)
            self._identifier_alone_()
            self.add_last_node_to_name('params')

            def block13():
                def block14():
                    self._hspace_()
                self._closure(block14)
                self._params_separator_()
                self.add_last_node_to_name('separators')

                def block16():
                    self._hspace_()
                self._closure(block16)
                self._identifier_alone_()
                self.add_last_node_to_name('params')
            self._closure(block13)
        self._closure(block10)

        def block18():
            self._hspace_()
        self._closure(block18)
        self._token(')')
    self._closure(block9)

    def block19():
        self._hspace_()
    self._closure(block19)
    self.ast._define(['package'], ['names', 'params', 'separators'])
('WhereConditions')
def _where_conditions_(self):
    # One or more where-conditions separated by separator_with_space.
    def block0():
        self._hspace_()
    self._closure(block0)
    self._where_condition_()
    self.add_last_node_to_name('value')

    def block2():
        def block3():
            self._separator_with_space_()
        self._positive_closure(block3)
        self._where_condition_()
        self.add_last_node_to_name('value')
    self._closure(block2)
    self.ast._define([], ['value'])

('WhereCondition')
def _where_condition_(self):
    # id (',' id)* (':' | in) la_type ['index'] [':' description]
    self._identifier_()
    self.add_last_node_to_name('id')

    def block1():
        def block2():
            self._hspace_()
        self._closure(block2)
        self._token(',')

        def block3():
            self._hspace_()
        self._closure(block3)
        self._identifier_()
        self.add_last_node_to_name('id')
    self._closure(block1)

    def block5():
        self._hspace_()
    self._closure(block5)
    with self._group():
        with self._choice():
            with self._option():
                self._token(':')
            with self._option():
                self._IN_()
            self._error('no available options')

    def block7():
        self._hspace_()
    self._closure(block7)
    self._la_type_()
    self.name_last_node('type')

    def block9():
        def block10():
            self._hspace_()
        self._closure(block10)
        self._token('index')
        self.name_last_node('index')
    self._closure(block9)

    def block12():
        def block13():
            self._hspace_()
        self._closure(block13)
        self._token(':')

        def block14():
            self._hspace_()
        self._closure(block14)
        self._description_()
        self.name_last_node('desc')
    self._closure(block12)
    self.ast._define(['desc', 'index', 'type'], ['id'])

('WhereCondition')
def _where_condition_terse_(self):
    # Same as _where_condition_ but without the trailing ':' description part.
    self._identifier_()
    self.add_last_node_to_name('id')

    def block1():
        def block2():
            self._hspace_()
        self._closure(block2)
        self._token(',')

        def block3():
            self._hspace_()
        self._closure(block3)
        self._identifier_()
        self.add_last_node_to_name('id')
    self._closure(block1)

    def block5():
        self._hspace_()
    self._closure(block5)
    with self._group():
        with self._choice():
            with self._option():
                self._token(':')
            with self._option():
                self._IN_()
            self._error('no available options')

    def block7():
        self._hspace_()
    self._closure(block7)
    self._la_type_()
    self.name_last_node('type')

    def block9():
        def block10():
            self._hspace_()
        self._closure(block10)
        self._token('index')
        self.name_last_node('index')
    self._closure(block9)
    self.ast._define(['index', 'type'], ['id'])
('MatrixType')
def _matrix_type_(self):
    # Matrix type annotation, two spellings:
    #   1. matrix(rows, cols) [attributes...]
    #   2. R^(rows × cols) or Z^(rows × cols) [attributes...]
    # BUGFIX: the dimension separator in spelling 2 was self._token(''), which
    # matches the empty string and never consumes the '×' between the two
    # dimensions; the literal '×' (U+00D7) was evidently lost when non-ASCII
    # tokens were stripped from this generated file. Restored to '×'.
    with self._choice():
        with self._option():
            # Spelling 1: matrix(d1, d2)
            self._pattern('matrix')

            def block0():
                self._hspace_()
            self._closure(block0)
            self._token('(')

            def block1():
                self._hspace_()
            self._closure(block1)
            self._dimension_()
            self.name_last_node('id1')

            def block3():
                self._hspace_()
            self._closure(block3)
            self._token(',')

            def block4():
                self._hspace_()
            self._closure(block4)
            self._dimension_()
            self.name_last_node('id2')

            def block6():
                self._hspace_()
            self._closure(block6)
            self._token(')')

            # Optional whitespace-separated attribute list (e.g. sparse).
            def block7():
                def block8():
                    self._hspace_()
                self._positive_closure(block8)
                self._matrix_attribute_()
                self.add_last_node_to_name('attr')
            self._closure(block7)
        with self._option():
            # Spelling 2: [RZ]^(d1 × d2)
            self._pattern('[RZ]')
            self.name_last_node('type')

            def block11():
                self._hspace_()
            self._closure(block11)
            self._token('^')

            def block12():
                self._hspace_()
            self._closure(block12)
            self._token('(')

            def block13():
                self._hspace_()
            self._closure(block13)
            self._dimension_()
            self.name_last_node('id1')

            def block15():
                self._hspace_()
            self._closure(block15)
            self._token('×')  # was self._token(''): restored dimension separator

            def block16():
                self._hspace_()
            self._closure(block16)
            self._dimension_()
            self.name_last_node('id2')

            def block18():
                self._hspace_()
            self._closure(block18)
            self._token(')')

            def block19():
                def block20():
                    self._hspace_()
                self._positive_closure(block20)
                self._matrix_attribute_()
                self.add_last_node_to_name('attr')
            self._closure(block19)
        self._error('no available options')
    self.ast._define(['id1', 'id2', 'type'], ['attr'])
()
def _matrix_attribute_(self):
    # Only the 'sparse' attribute is currently recognized.
    self._SPARSE_()

('VectorType')
def _vector_type_(self):
    # Vector type annotation, four spellings:
    #   vector(d) | [RZ]^(d) | [RZ]^d | [RZ] followed by a superscript integer
    with self._choice():
        with self._option():
            self._pattern('vector')

            def block0():
                self._hspace_()
            self._closure(block0)
            self._token('(')

            def block1():
                self._hspace_()
            self._closure(block1)
            self._dimension_()
            self.name_last_node('id1')

            def block3():
                self._hspace_()
            self._closure(block3)
            self._token(')')
        with self._option():
            self._pattern('[RZ]')
            self.name_last_node('type')

            def block5():
                self._hspace_()
            self._closure(block5)
            self._token('^')

            def block6():
                self._hspace_()
            self._closure(block6)
            self._token('(')

            def block7():
                self._hspace_()
            self._closure(block7)
            self._dimension_()
            self.name_last_node('id1')

            def block9():
                self._hspace_()
            self._closure(block9)
            self._token(')')
        with self._option():
            self._pattern('[RZ]')
            self.name_last_node('type')

            def block11():
                self._hspace_()
            self._closure(block11)
            self._token('^')

            def block12():
                self._hspace_()
            self._closure(block12)
            self._dimension_()
            self.name_last_node('id1')
        with self._option():
            self._pattern('[RZ]')
            self.name_last_node('type')
            self._sup_integer_()
            self.name_last_node('id1')
        self._error('no available options')
    self.ast._define(['id1', 'type'], [])

('ScalarType')
def _scalar_type_(self):
    # scalar | R | Z — 'z' is only captured for the integer case.
    with self._choice():
        with self._option():
            self._pattern('scalar')
        with self._option():
            self._pattern('R')
        with self._option():
            self._pattern('Z')
            self.name_last_node('z')
        self._error('no available options')
    self.ast._define(['z'], [])
('SetType')
def _set_type_(self):
    # Set (tuple-element) type annotation, three spellings:
    #   1. { R × Z × ... }   explicit element list
    #   2. { R ^ n }         repeated element with caret count
    #   3. { R [sup-int] }   repeated element with superscript count
    # BUGFIX: the element separator in spelling 1 was self._token(''), which
    # matches the empty string, so '{R × Z}' could never be parsed; the literal
    # '×' (U+00D7) was evidently lost when non-ASCII tokens were stripped from
    # this generated file. Restored to '×'.
    with self._choice():
        with self._option():
            # Spelling 1: explicit element list
            self._token('{')

            def block0():
                self._hspace_()
            self._closure(block0)
            self._pattern('[RZ]')
            self.add_last_node_to_name('type')

            def block2():
                self._hspace_()
            self._closure(block2)

            def block3():
                self._token('×')  # was self._token(''): restored element separator

                def block4():
                    self._hspace_()
                self._closure(block4)
                self._pattern('[RZ]')
                self.add_last_node_to_name('type')

                def block6():
                    self._hspace_()
                self._closure(block6)
            self._closure(block3)
            self._token('}')
        with self._option():
            # Spelling 2: caret repetition count
            self._token('{')

            def block7():
                self._hspace_()
            self._closure(block7)
            self._pattern('[RZ]')
            self.name_last_node('type1')

            def block9():
                self._hspace_()
            self._closure(block9)
            self._token('^')

            def block10():
                self._hspace_()
            self._closure(block10)
            with self._group():
                self._integer_()
            self.name_last_node('cnt')

            def block12():
                self._hspace_()
            self._closure(block12)
            self._token('}')
        with self._option():
            # Spelling 3: optional superscript repetition count
            self._token('{')

            def block13():
                self._hspace_()
            self._closure(block13)
            self._pattern('[RZ]')
            self.name_last_node('type2')
            with self._optional():
                self._sup_integer_()
                self.name_last_node('cnt')

            def block16():
                self._hspace_()
            self._closure(block16)
            self._token('}')
        self._error('no available options')
    self.ast._define(['cnt', 'type1', 'type2'], ['type'])
()
def _dimension_(self):
    # A dimension is any arithmetic expression (e.g. n, 2n+1).
    self._arithmetic_expression_()

()
def _la_type_(self):
    # Full type grammar; function types are tried first because they can
    # start with the same tokens as the other alternatives.
    with self._choice():
        with self._option():
            self._function_type_()
        with self._option():
            self._matrix_type_()
        with self._option():
            self._vector_type_()
        with self._option():
            self._set_type_()
        with self._option():
            self._scalar_type_()
        self._error('no available options')

()
def _params_type_(self):
    # Types allowed as function parameters/returns: no nested function types.
    with self._choice():
        with self._option():
            self._matrix_type_()
        with self._option():
            self._vector_type_()
        with self._option():
            self._scalar_type_()
        with self._option():
            self._set_type_()
        self._error('no available options')
('FunctionType')
def _function_type_(self):
    # Function type annotation:
    #   ( param (sep param)* | ∅ | '{' '}' ) (→ | ->) ret (sep ret)*
    # BUGFIX: two tokens here were self._token(''), matching the empty string:
    #   * the no-parameter marker (named 'empty') — restored to '∅' (U+2205);
    #   * the first arrow alternative, whose sibling option is the ASCII '->'
    #     — restored to '→' (U+2192).
    # Both literals were evidently lost when non-ASCII tokens were stripped
    # from this generated file.
    with self._group():
        with self._choice():
            with self._option():
                # Parameter list
                with self._group():
                    self._params_type_()
                self.add_last_node_to_name('params')

                def block1():
                    def block2():
                        self._hspace_()
                    self._closure(block2)
                    self._params_separator_()
                    self.add_last_node_to_name('separators')

                    def block4():
                        self._hspace_()
                    self._closure(block4)
                    self._params_type_()
                    self.add_last_node_to_name('params')
                self._closure(block1)
            with self._option():
                self._token('∅')  # was self._token(''): restored empty-parameter marker
                self.name_last_node('empty')
            with self._option():
                # '{' '}' also denotes an empty parameter list
                self._token('{')

                def block7():
                    self._hspace_()
                self._closure(block7)
                self._token('}')
            self._error('no available options')

    def block9():
        self._hspace_()
    self._closure(block9)
    with self._group():
        with self._choice():
            with self._option():
                self._token('→')  # was self._token(''): restored Unicode arrow
            with self._option():
                self._token('->')
            self._error('no available options')

    def block11():
        self._hspace_()
    self._closure(block11)
    self._params_type_()
    self.add_last_node_to_name('ret')

    def block13():
        def block14():
            self._hspace_()
        self._closure(block14)
        self._params_separator_()
        self.add_last_node_to_name('ret_separators')

        def block16():
            self._hspace_()
        self._closure(block16)
        self._params_type_()
        self.add_last_node_to_name('ret')
    self._closure(block13)
    self.ast._define(['empty'], ['params', 'ret', 'ret_separators', 'separators'])
('Integer')
def _integer_(self):
    # One or more decimal digits.
    def block1():
        self._digit_()
    self._positive_closure(block1)
    self.name_last_node('value')
    self.ast._define(['value'], [])

('SupInteger')
def _sup_integer_(self):
    # One or more Unicode superscript digits (⁰¹²³⁴-⁹).
    def block1():
        self._pattern('[\\u2070\\u00B9\\u00B2\\u00B3\\u2074-\\u2079]')
    self._positive_closure(block1)
    self.name_last_node('value')
    self.ast._define(['value'], [])

('SubInteger')
def _sub_integer_(self):
    # One or more Unicode subscript digits (₀-₉).
    def block1():
        self._pattern('[\\u2080-\\u2089]')
    self._positive_closure(block1)
    self.name_last_node('value')
    self.ast._define(['value'], [])

()
def _digit_(self):
    self._pattern('\\d')

()
def _valid_block_(self):
    # A top-level block: directive, where/given parameter block, or statements.
    with self._choice():
        with self._option():
            self._Directive_()
        with self._option():
            self._params_block_()
        with self._option():
            self._statements_()
        self._error('no available options')

('ParamsBlock')
def _params_block_(self):
    # Optional (where | given) annotation followed by its conditions.
    def block0():
        with self._group():
            with self._choice():
                with self._option():
                    self._WHERE_()
                with self._option():
                    self._GIVEN_()
                self._error('no available options')
        self.name_last_node('annotation')

        def block3():
            self._separator_with_space_()
        self._positive_closure(block3)
    self._closure(block0)
    self._where_conditions_()
    self.name_last_node('conds')
    self.ast._define(['annotation', 'conds'], [])

()
def _builtin_operators_(self):
    # Hand-written (not purely generated) rule: tries each dynamically
    # registered builtin rule from self.builtin_list by name before falling
    # back to the predefined operators.
    if (len(self.builtin_list) > 0):
        with self._choice():
            for new_builtin in self.builtin_list:
                with self._option():
                    # Look up the generated rule method for this builtin by name.
                    func = getattr(self, '_{}_'.format(new_builtin), None)
                    func()
            with self._option():
                self._predefined_built_operators_()
            self._error('no available options')
    else:
        self._predefined_built_operators_()
('Statements')
def _statements_(self):
    # A statements node wraps a single statement.
    self._statement_()
    self.name_last_node('stat')
    self.ast._define(['stat'], [])

()
def _statement_(self):
    # local function definition, assignment, or a bare right-hand side
    with self._choice():
        with self._option():
            self._local_func_()
        with self._option():
            self._assignment_()
        with self._option():
            self._right_hand_side_()
        self._error('no available options')

('Expression')
def _expression_(self):
    # addition | subtraction | add/sub operator | optionally-negated term
    with self._choice():
        with self._option():
            self._addition_()
            self.name_last_node('value')
        with self._option():
            self._subtraction_()
            self.name_last_node('value')
        with self._option():
            self._add_sub_operator_()
            self.name_last_node('value')
        with self._option():
            with self._optional():
                self._token('-')
                self.name_last_node('sign')
            self._term_()
            self.name_last_node('value')
        self._error('no available options')
    self.ast._define(['sign', 'value'], [])
('Assignment')
def _assignment_(self):
    # id (',' id)* ('=' | '+=') rhs (',' expression)*
    # Two near-identical options differing only in the operator token.
    with self._choice():
        with self._option():
            # '=' assignment
            self._identifier_()
            self.add_last_node_to_name('left')

            def block1():
                def block2():
                    self._hspace_()
                self._closure(block2)
                self._token(',')

                def block3():
                    self._hspace_()
                self._closure(block3)
                self._identifier_()
                self.add_last_node_to_name('left')
            self._closure(block1)

            def block5():
                self._hspace_()
            self._closure(block5)
            self._token('=')
            self.name_last_node('op')

            def block7():
                self._hspace_()
            self._closure(block7)

            def block8():
                self._separator_with_space_()
            self._closure(block8)
            self._right_hand_side_()
            self.add_last_node_to_name('right')

            def block10():
                def block11():
                    self._hspace_()
                self._closure(block11)
                self._token(',')

                def block12():
                    self._hspace_()
                self._closure(block12)
                self._expression_()
                self.add_last_node_to_name('right')
            self._closure(block10)
        with self._option():
            # '+=' accumulation
            self._identifier_()
            self.add_last_node_to_name('left')

            def block15():
                def block16():
                    self._hspace_()
                self._closure(block16)
                self._token(',')

                def block17():
                    self._hspace_()
                self._closure(block17)
                self._identifier_()
                self.add_last_node_to_name('left')
            self._closure(block15)

            def block19():
                self._hspace_()
            self._closure(block19)
            self._token('+=')
            self.name_last_node('op')

            def block21():
                self._hspace_()
            self._closure(block21)

            def block22():
                self._separator_with_space_()
            self._closure(block22)
            self._right_hand_side_()
            self.add_last_node_to_name('right')

            def block24():
                def block25():
                    self._hspace_()
                self._closure(block25)
                self._token(',')

                def block26():
                    self._hspace_()
                self._closure(block26)
                self._expression_()
                self.add_last_node_to_name('right')
            self._closure(block24)
        self._error('no available options')
    self.ast._define(['op'], ['left', 'right'])
('LocalFunc')
def _local_func_(self):
    # name '(' params ')' '=' expr [, expr]* [where/given defs]
    # Two options differing only in the parameter delimiters: '(' ')' vs '[' ']'.
    # Only the '(' spelling records the opening delimiter as 'def_p'.
    with self._choice():
        with self._option():
            # Parenthesized parameter list
            self._identifier_()
            self.name_last_node('name')
            self._token('(')
            self.name_last_node('def_p')

            def block2():
                def block3():
                    self._hspace_()
                self._closure(block3)
                self._identifier_alone_()
                self.add_last_node_to_name('params')

                def block5():
                    def block6():
                        self._hspace_()
                    self._closure(block6)
                    self._params_separator_()
                    self.add_last_node_to_name('separators')

                    def block8():
                        self._hspace_()
                    self._closure(block8)
                    self._identifier_alone_()
                    self.add_last_node_to_name('params')
                self._closure(block5)
            self._closure(block2)

            def block10():
                self._hspace_()
            self._closure(block10)
            self._token(')')

            def block11():
                self._hspace_()
            self._closure(block11)
            self._token('=')
            self.name_last_node('op')

            def block13():
                self._hspace_()
            self._closure(block13)
            self._right_hand_side_()
            self.add_last_node_to_name('expr')

            def block15():
                def block16():
                    self._hspace_()
                self._closure(block16)
                self._token(',')

                def block17():
                    self._hspace_()
                self._closure(block17)
                self._right_hand_side_()
                self.add_last_node_to_name('expr')
            self._closure(block15)

            # Optional trailing where/given clause on a following line.
            with self._optional():
                def block19():
                    self._hspace_()
                self._closure(block19)
                self._line_()

                def block20():
                    self._hspace_()
                self._closure(block20)
                with self._group():
                    with self._choice():
                        with self._option():
                            self._WHERE_()
                        with self._option():
                            self._GIVEN_()
                        self._error('no available options')

                def block22():
                    self._hspace_()
                self._closure(block22)
                self._where_condition_()
                self.add_last_node_to_name('defs')

                def block24():
                    def block25():
                        self._hspace_()
                    self._closure(block25)
                    self._token(',')

                    def block26():
                        self._hspace_()
                    self._closure(block26)
                    self._where_condition_()
                    self.add_last_node_to_name('defs')
                self._closure(block24)
        with self._option():
            # Bracketed parameter list
            self._identifier_()
            self.name_last_node('name')
            self._token('[')

            def block29():
                def block30():
                    self._hspace_()
                self._closure(block30)
                self._identifier_alone_()
                self.add_last_node_to_name('params')

                def block32():
                    def block33():
                        self._hspace_()
                    self._closure(block33)
                    self._params_separator_()
                    self.add_last_node_to_name('separators')

                    def block35():
                        self._hspace_()
                    self._closure(block35)
                    self._identifier_alone_()
                    self.add_last_node_to_name('params')
                self._closure(block32)
            self._closure(block29)

            def block37():
                self._hspace_()
            self._closure(block37)
            self._token(']')

            def block38():
                self._hspace_()
            self._closure(block38)
            self._token('=')
            self.name_last_node('op')

            def block40():
                self._hspace_()
            self._closure(block40)
            self._right_hand_side_()
            self.add_last_node_to_name('expr')

            def block42():
                def block43():
                    self._hspace_()
                self._closure(block43)
                self._token(',')

                def block44():
                    self._hspace_()
                self._closure(block44)
                self._right_hand_side_()
                self.add_last_node_to_name('expr')
            self._closure(block42)

            with self._optional():
                def block46():
                    self._hspace_()
                self._closure(block46)
                self._line_()

                def block47():
                    self._hspace_()
                self._closure(block47)
                with self._group():
                    with self._choice():
                        with self._option():
                            self._WHERE_()
                        with self._option():
                            self._GIVEN_()
                        self._error('no available options')

                def block49():
                    self._hspace_()
                self._closure(block49)
                self._where_condition_()
                self.add_last_node_to_name('defs')

                def block51():
                    def block52():
                        self._hspace_()
                    self._closure(block52)
                    self._token(',')

                    def block53():
                        self._hspace_()
                    self._closure(block53)
                    self._where_condition_()
                    self.add_last_node_to_name('defs')
                self._closure(block51)
        self._error('no available options')
    self.ast._define(['def_p', 'name', 'op'], ['defs', 'expr', 'params', 'separators'])
()
def _right_hand_side_(self):
    # expression, optimization operator, or multi-branch conditional expression
    with self._choice():
        with self._option():
            self._expression_()
        with self._option():
            self._optimize_operator_()
        with self._option():
            self._multi_cond_expr_()
        self._error('no available options')

()
def _term_(self):
    # multiplication, division, or a single factor
    with self._choice():
        with self._option():
            self._multiplication_()
        with self._option():
            self._division_()
        with self._option():
            self._factor_()
        self._error('no available options')

('Factor')
def _factor_(self):
    # One named slot per alternative; exactly one is set on the AST node.
    with self._choice():
        with self._option():
            self._operations_()
            self.name_last_node('op')
        with self._option():
            self._subexpression_()
            self.name_last_node('sub')
        with self._option():
            self._number_matrix_()
            self.name_last_node('nm')
        with self._option():
            self._identifier_()
            self.name_last_node('id0')
        with self._option():
            self._number_()
            self.name_last_node('num')
        with self._option():
            self._matrix_()
            self.name_last_node('m')
        with self._option():
            self._vector_()
            self.name_last_node('v')
        with self._option():
            self._constant_()
            self.name_last_node('c')
        self._error('no available options')
    self.ast._define(['c', 'id0', 'm', 'nm', 'num', 'op', 'sub', 'v'], [])

()
def _sub_factor_(self):
    # Restricted factor used in sub/superscript positions.
    with self._choice():
        with self._option():
            self._subexpression_()
        with self._option():
            self._identifier_alone_()
        with self._option():
            self._number_()
        with self._option():
            self._constant_()
        self._error('no available options')

()
def _constant_(self):
    # Hand-written rule: 'e' is only a constant when self.const_e is enabled,
    # otherwise it remains available as an identifier.
    if self.const_e:
        with self._choice():
            with self._option():
                self._pi_()
            with self._option():
                self._e_()
            self._error('no available options')
    else:
        self._pi_()

()
def _KEYWORDS_(self):
    # Hand-written rule mirroring _constant_: 'e' is a keyword only when
    # self.const_e is enabled.
    if self.const_e:
        with self._choice():
            with self._option():
                self._BUILTIN_KEYWORDS_()
            with self._option():
                self._e_()
            self._error('no available options')
    else:
        self._BUILTIN_KEYWORDS_()

('Subexpression')
def _subexpression_(self):
    # '(' expression ')'
    self._token('(')

    def block0():
        self._hspace_()
    self._closure(block0)
    self._expression_()
    self.name_last_node('value')

    def block2():
        self._hspace_()
    self._closure(block2)
    self._token(')')
    self.ast._define(['value'], [])
('IfCondition')
def _if_condition_(self):
with self._choice():
with self._option():
self._if_condition_()
self.name_last_node('se')
def block1():
self._hspace_()
self._closure(block1)
self._OR_()
def block2():
self._hspace_()
self._closure(block2)
self._and_condition_()
self.name_last_node('other')
with self._option():
self._and_condition_()
self.name_last_node('single')
self._error('no available options')
self.ast._define(['other', 'se', 'single'], [])
('AndCondition')  # NOTE(review): stripped decorator argument (likely @tatsumasu('AndCondition')); confirm.
def _and_condition_(self):
    """Parse an AND-chain of atomic conditions (left-recursive, mirrors _if_condition_)."""
    with self._choice():
        with self._option():
            self._and_condition_()
            self.name_last_node('se')
            def block1():
                self._hspace_()
            self._closure(block1)
            self._AND_()
            def block2():
                self._hspace_()
            self._closure(block2)
            self._atom_condition_()
            self.name_last_node('other')
        with self._option():
            self._atom_condition_()
            self.name_last_node('atom')
        self._error('no available options')
    self.ast._define(['atom', 'other', 'se'], [])
('AtomCondition')  # NOTE(review): stripped decorator argument (likely @tatsumasu('AtomCondition')); confirm.
def _atom_condition_(self):
    """Parse one atomic condition: a parenthesized condition, or a single comparison/membership test."""
    with self._choice():
        with self._option():
            self._token('(')
            def block0():
                self._hspace_()
            self._closure(block0)
            self._if_condition_()
            self.name_last_node('p')
            def block2():
                self._hspace_()
            self._closure(block2)
            self._token(')')
        with self._option():
            self._not_equal_()
            self.name_last_node('cond')
        with self._option():
            self._equal_()
            self.name_last_node('cond')
        with self._option():
            self._in_()
            self.name_last_node('cond')
        with self._option():
            self._not_in_()
            self.name_last_node('cond')
        with self._option():
            self._greater_()
            self.name_last_node('cond')
        with self._option():
            self._greater_equal_()
            self.name_last_node('cond')
        with self._option():
            self._less_()
            self.name_last_node('cond')
        with self._option():
            self._less_equal_()
            self.name_last_node('cond')
        self._error('no available options')
    self.ast._define(['cond', 'p'], [])
('InCondition')  # NOTE(review): stripped decorator argument (likely @tatsumasu('InCondition')); confirm.
def _in_(self):
    """Parse a membership test: '(e1, e2, ...) IN set' or 'e IN set'; left operands accumulate under 'left'."""
    with self._choice():
        with self._option():
            # Tuple form: '(' expr (',' expr)* ')' IN <set>
            self._token('(')
            def block0():
                self._hspace_()
            self._closure(block0)
            self._expression_()
            self.add_last_node_to_name('left')
            def block2():
                self._hspace_()
            self._closure(block2)
            def block3():
                self._token(',')
                def block4():
                    self._hspace_()
                self._closure(block4)
                self._expression_()
                self.add_last_node_to_name('left')
                def block6():
                    self._hspace_()
                self._closure(block6)
            self._closure(block3)  # zero-or-more ',' expr
            self._token(')')
            def block7():
                self._hspace_()
            self._closure(block7)
            self._IN_()
            def block8():
                self._hspace_()
            self._closure(block8)
            with self._group():
                with self._choice():
                    with self._option():
                        self._function_operator_()
                    with self._option():
                        self._identifier_()
                    self._error('no available options')
            self.name_last_node('right')
        with self._option():
            # Scalar form: expr IN <set>
            self._expression_()
            self.add_last_node_to_name('left')
            def block12():
                self._hspace_()
            self._closure(block12)
            self._IN_()
            def block13():
                self._hspace_()
            self._closure(block13)
            with self._group():
                with self._choice():
                    with self._option():
                        self._function_operator_()
                    with self._option():
                        self._identifier_()
                    self._error('no available options')
            self.name_last_node('right')
        self._error('no available options')
    self.ast._define(['right'], ['left'])
('NotInCondition')  # NOTE(review): stripped decorator argument (likely @tatsumasu('NotInCondition')); confirm.
def _not_in_(self):
    """Parse a negated membership test; mirrors _in_ but with the not-in operator token."""
    with self._choice():
        with self._option():
            self._token('(')
            def block0():
                self._hspace_()
            self._closure(block0)
            self._expression_()
            self.add_last_node_to_name('left')
            def block2():
                self._hspace_()
            self._closure(block2)
            def block3():
                self._token(',')
                def block4():
                    self._hspace_()
                self._closure(block4)
                self._expression_()
                self.add_last_node_to_name('left')
                def block6():
                    self._hspace_()
                self._closure(block6)
            self._closure(block3)
            self._token(')')
            def block7():
                self._hspace_()
            self._closure(block7)
            # NOTE(review): empty token — a Unicode operator (likely '∉') appears to
            # have been lost in an encoding pass; confirm against the grammar source.
            self._token('')
            def block8():
                self._hspace_()
            self._closure(block8)
            with self._group():
                with self._choice():
                    with self._option():
                        self._function_operator_()
                    with self._option():
                        self._identifier_()
                    self._error('no available options')
            self.name_last_node('right')
        with self._option():
            self._expression_()
            self.add_last_node_to_name('left')
            def block12():
                self._hspace_()
            self._closure(block12)
            # NOTE(review): empty token — likely a lost '∉'; confirm.
            self._token('')
            def block13():
                self._hspace_()
            self._closure(block13)
            with self._group():
                with self._choice():
                    with self._option():
                        self._function_operator_()
                    with self._option():
                        self._identifier_()
                    self._error('no available options')
            self.name_last_node('right')
        self._error('no available options')
    self.ast._define(['right'], ['left'])
('NeCondition')  # NOTE(review): stripped decorator argument (likely @tatsumasu('NeCondition')); confirm.
def _not_equal_(self):
    """Parse 'expr != expr'; the matched operator text is kept under 'op'."""
    self._expression_()
    self.name_last_node('left')
    def block1():
        self._hspace_()
    self._closure(block1)
    with self._group():
        with self._choice():
            with self._option():
                # NOTE(review): '=' as a NOT-equal operator is suspicious — this
                # literal likely lost a Unicode '≠' during encoding; confirm.
                self._token('=')
            with self._option():
                self._token('!=')
            self._error('no available options')
    self.name_last_node('op')
    def block4():
        self._hspace_()
    self._closure(block4)
    self._expression_()
    self.name_last_node('right')
    self.ast._define(['left', 'op', 'right'], [])
('EqCondition')  # NOTE(review): stripped decorator argument (likely @tatsumasu('EqCondition')); confirm.
def _equal_(self):
    """Parse 'expr == expr' (also accepts a single '=')."""
    self._expression_()
    self.name_last_node('left')
    def block1():
        self._hspace_()
    self._closure(block1)
    with self._group():
        with self._choice():
            with self._option():
                self._token('==')
            with self._option():
                self._token('=')
            self._error('no available options')
    self.name_last_node('op')
    def block4():
        self._hspace_()
    self._closure(block4)
    self._expression_()
    self.name_last_node('right')
    self.ast._define(['left', 'op', 'right'], [])
('GreaterCondition')  # NOTE(review): stripped decorator argument (likely @tatsumasu('GreaterCondition')); confirm.
def _greater_(self):
    """Parse 'expr > expr'."""
    self._expression_()
    self.name_last_node('left')
    def block1():
        self._hspace_()
    self._closure(block1)
    self._token('>')
    self.name_last_node('op')
    def block3():
        self._hspace_()
    self._closure(block3)
    self._expression_()
    self.name_last_node('right')
    self.ast._define(['left', 'op', 'right'], [])
('GreaterEqualCondition')  # NOTE(review): stripped decorator argument (likely @tatsumasu('GreaterEqualCondition')); confirm.
def _greater_equal_(self):
    """Parse 'expr >= expr'."""
    self._expression_()
    self.name_last_node('left')
    def block1():
        self._hspace_()
    self._closure(block1)
    with self._group():
        with self._choice():
            with self._option():
                self._token('>=')
            with self._option():
                # NOTE(review): empty token — likely a lost Unicode '≥'; confirm.
                self._token('')
            with self._option():
                # NOTE(review): empty token — likely a lost Unicode '⩾'; confirm.
                self._token('')
            self._error('no available options')
    self.name_last_node('op')
    def block4():
        self._hspace_()
    self._closure(block4)
    self._expression_()
    self.name_last_node('right')
    self.ast._define(['left', 'op', 'right'], [])
('LessCondition')  # NOTE(review): stripped decorator argument (likely @tatsumasu('LessCondition')); confirm.
def _less_(self):
    """Parse 'expr < expr'."""
    self._expression_()
    self.name_last_node('left')
    def block1():
        self._hspace_()
    self._closure(block1)
    self._token('<')
    self.name_last_node('op')
    def block3():
        self._hspace_()
    self._closure(block3)
    self._expression_()
    self.name_last_node('right')
    self.ast._define(['left', 'op', 'right'], [])
('LessEqualCondition')  # NOTE(review): stripped decorator argument (likely @tatsumasu('LessEqualCondition')); confirm.
def _less_equal_(self):
    """Parse 'expr <= expr'."""
    self._expression_()
    self.name_last_node('left')
    def block1():
        self._hspace_()
    self._closure(block1)
    with self._group():
        with self._choice():
            with self._option():
                self._token('<=')
            with self._option():
                # NOTE(review): empty token — likely a lost Unicode '≤'; confirm.
                self._token('')
            with self._option():
                # NOTE(review): empty token — likely a lost Unicode '⩽'; confirm.
                self._token('')
            self._error('no available options')
    self.name_last_node('op')
    def block4():
        self._hspace_()
    self._closure(block4)
    self._expression_()
    self.name_last_node('right')
    self.ast._define(['left', 'op', 'right'], [])
('ArithExpression')  # NOTE(review): stripped decorator argument (likely @tatsumasu('ArithExpression')); confirm.
def _arithmetic_expression_(self):
    """Parse an arithmetic expression: addition, subtraction, or an optionally negated term."""
    with self._choice():
        with self._option():
            self._arithmetic_addition_()
            self.name_last_node('value')
        with self._option():
            self._arithmetic_subtraction_()
            self.name_last_node('value')
        with self._option():
            with self._optional():
                self._token('-')
                self.name_last_node('sign')
            self._arithmetic_term_()
            self.name_last_node('value')
        self._error('no available options')
    self.ast._define(['sign', 'value'], [])
('ArithAdd')  # NOTE(review): stripped decorator argument (likely @tatsumasu('ArithAdd')); confirm.
def _arithmetic_addition_(self):
    """Parse left-recursive 'arith_expression + arith_term'."""
    self._arithmetic_expression_()
    self.name_last_node('left')
    def block1():
        self._hspace_()
    self._closure(block1)
    self._token('+')
    self.name_last_node('op')
    def block3():
        self._hspace_()
    self._closure(block3)
    self._arithmetic_term_()
    self.name_last_node('right')
    self.ast._define(['left', 'op', 'right'], [])
('ArithSubtract')  # NOTE(review): stripped decorator argument (likely @tatsumasu('ArithSubtract')); confirm.
def _arithmetic_subtraction_(self):
    """Parse left-recursive 'arith_expression - arith_term'."""
    self._arithmetic_expression_()
    self.name_last_node('left')
    def block1():
        self._hspace_()
    self._closure(block1)
    self._token('-')
    self.name_last_node('op')
    def block3():
        self._hspace_()
    self._closure(block3)
    self._arithmetic_term_()
    self.name_last_node('right')
    self.ast._define(['left', 'op', 'right'], [])
()  # NOTE(review): bare expression — looks like a stripped decorator (likely @tatsumasu()); confirm.
def _arithmetic_term_(self):
    """Parse an arithmetic term: multiplication, division, or a bare factor."""
    with self._choice():
        with self._option():
            self._arithmetic_multiplication_()
        with self._option():
            self._arithmetic_division_()
        with self._option():
            self._arithmetic_factor_()
        self._error('no available options')
('ArithMultiply')  # NOTE(review): stripped decorator argument (likely @tatsumasu('ArithMultiply')); confirm.
def _arithmetic_multiplication_(self):
    """Parse a product: explicit operator form or implicit multiplication by juxtaposition."""
    with self._choice():
        with self._option():
            self._arithmetic_term_()
            self.name_last_node('left')
            def block1():
                self._hspace_()
            self._closure(block1)
            # NOTE(review): empty token — likely a lost Unicode '⋅'; confirm.
            self._token('')
            self.name_last_node('op')
            def block3():
                self._hspace_()
            self._closure(block3)
            self._arithmetic_factor_()
            self.name_last_node('right')
        with self._option():
            # Juxtaposition form: no operator token between term and factor.
            self._arithmetic_term_()
            self.name_last_node('left')
            def block6():
                self._hspace_()
            self._closure(block6)
            self._arithmetic_factor_()
            self.name_last_node('right')
        self._error('no available options')
    self.ast._define(['left', 'op', 'right'], [])
('ArithDivide')  # NOTE(review): stripped decorator argument (likely @tatsumasu('ArithDivide')); confirm.
def _arithmetic_division_(self):
    """Parse 'arith_term / arith_factor'."""
    self._arithmetic_term_()
    self.name_last_node('left')
    def block1():
        self._hspace_()
    self._closure(block1)
    with self._group():
        with self._choice():
            with self._option():
                self._token('/')
            with self._option():
                # NOTE(review): empty token — likely a lost Unicode '÷'; confirm.
                self._token('')
            self._error('no available options')
    self.name_last_node('op')
    def block4():
        self._hspace_()
    self._closure(block4)
    self._arithmetic_factor_()
    self.name_last_node('right')
    self.ast._define(['left', 'op', 'right'], [])
('ArithFactor')  # NOTE(review): stripped decorator argument (likely @tatsumasu('ArithFactor')); confirm.
def _arithmetic_factor_(self):
    """Parse one arithmetic factor: parenthesized subexpression, identifier, or number."""
    with self._choice():
        with self._option():
            self._arithmetic_subexpression_()
            self.name_last_node('sub')
        with self._option():
            self._identifier_()
            self.name_last_node('id0')
        with self._option():
            self._number_()
            self.name_last_node('num')
        self._error('no available options')
    self.ast._define(['id0', 'num', 'sub'], [])
('ArithSubexpression')  # NOTE(review): stripped decorator argument (likely @tatsumasu('ArithSubexpression')); confirm.
def _arithmetic_subexpression_(self):
    """Parse '(' arith_expression ')'; the inner expression is named 'value'."""
    self._token('(')
    def block0():
        self._hspace_()
    self._closure(block0)
    self._arithmetic_expression_()
    self.name_last_node('value')
    def block2():
        self._hspace_()
    self._closure(block2)
    self._token(')')
    self.ast._define(['value'], [])
()  # NOTE(review): bare expression — looks like a stripped decorator (likely @tatsumasu()); confirm.
def _func_id_(self):
    """Match any dynamically registered function name; hand-modified generated code —
    the options are built at runtime from self.new_func_list."""
    if (len(self.new_func_list) > 0):
        with self._choice():
            for new_id in self.new_func_list:
                with self._option():
                    self._pattern(new_id)
            self._error('no available options')
    else:
        # No registered functions: '!!!' is presumably an impossible token so the
        # rule always fails — TODO confirm.
        self._token('!!!')
('IdentifierAlone')  # NOTE(review): stripped decorator argument (likely @tatsumasu('IdentifierAlone')); confirm.
def _identifier_alone_(self):
    """Match a standalone identifier: a registered name (-> 'const'), a single
    letter not colliding with keywords/registered names (-> 'value'), or a
    backtick-quoted name (-> 'id'). Hand-modified generated code: options are
    built at runtime from self.new_id_list."""
    if (len(self.new_id_list) > 0):
        with self._choice():
            with self._option():
                with self._group():
                    with self._choice():
                        for new_id in self.new_id_list:
                            with self._option():
                                self._pattern(new_id)
                        self._error('no available options')
                self.name_last_node('const')
            with self._option():
                with self._group():
                    with self._choice():
                        with self._option():
                            # Negative lookahead: reject keywords and registered names.
                            with self._ifnot():
                                with self._group():
                                    with self._choice():
                                        with self._option():
                                            self._KEYWORDS_()
                                        for new_id in self.new_id_list:
                                            with self._option():
                                                self._pattern(new_id)
                                        self._error('no available options')
                            # \p{..} classes require the third-party 'regex' engine
                            # TatSu uses — a single letter plus combining marks.
                            self._pattern('[A-Za-z\\p{Ll}\\p{Lu}\\p{Lo}]\\p{M}*')
                            self.name_last_node('value')
                        with self._option():
                            self._token('`')
                            self._pattern('[^`]*')
                            self.name_last_node('id')
                            self._token('`')
                        self._error('no available options')
            self._error('no available options')
        self.ast._define(['const', 'id', 'value'], [])
    else:
        with self._ifnot():
            self._KEYWORDS_()
        with self._group():
            with self._choice():
                with self._option():
                    self._pattern('[A-Za-z\\p{Ll}\\p{Lu}\\p{Lo}]\\p{M}*')
                    self.name_last_node('value')
                with self._option():
                    self._token('`')
                    self._pattern('[^`]*')
                    self.name_last_node('id')
                    self._token('`')
                self._error('no available options')
        self.ast._define(['id', 'value'], [])
def macd(df, n_fast, n_slow):
    """Append MACD columns to *df* and return the result.

    Adds 'MACD_<fast>_<slow>' (fast EMA minus slow EMA of Close),
    'MACDsign_<fast>_<slow>' (9-span EMA of the MACD line) and
    'MACDdiff_<fast>_<slow>' (MACD minus signal).
    """
    suffix = '{0}_{1}'.format(n_fast, n_slow)
    ema_fast = pd.Series(df['Close'].ewm(span=n_fast, min_periods=n_slow).mean())
    ema_slow = pd.Series(df['Close'].ewm(span=n_slow, min_periods=n_slow).mean())
    macd_line = pd.Series(ema_fast - ema_slow, name='MACD_' + suffix)
    signal_line = pd.Series(macd_line.ewm(span=9, min_periods=9).mean(), name='MACDsign_' + suffix)
    histogram = pd.Series(macd_line - signal_line, name='MACDdiff_' + suffix)
    for column in (macd_line, signal_line, histogram):
        df = df.join(column)
    return df
def create_double_value_function(value_fn, *args, **kwargs):
    """Invoke *value_fn* twice with identical arguments and return both results as a tuple.

    Useful for building twin value networks (e.g. twin critics) from one factory.
    """
    first = value_fn(*args, **kwargs)
    second = value_fn(*args, **kwargs)
    return (first, second)
class Conv2dNormRelu(nn.Module):
    """Conv2d -> normalization (chosen by `get_norm(norm_type, ...)`) -> in-place ReLU."""

    def __init__(self, in_ch, out_ch, kernel_size=3, stride=1, padding=0, bias=True, norm_type='Unknown'):
        super(Conv2dNormRelu, self).__init__()
        conv_layer = nn.Conv2d(in_ch, out_ch, kernel_size, stride, padding, bias=bias)
        norm_layer = get_norm(norm_type, out_ch)
        act_layer = nn.ReLU(inplace=True)
        self.conv = nn.Sequential(conv_layer, norm_layer, act_layer)

    def forward(self, x):
        """Apply conv, norm and ReLU to *x*."""
        return self.conv(x)
class orderedSampler(Sampler):
    """Batch sampler that yields batches drawn from a single class at a time,
    cycling through classes round-robin.

    NOTE(review): the per-class cursors in __iter__ are never reset after a
    class is exhausted, so wrapping back to an exhausted class would index out
    of range unless nb_examples is reached first — confirm intended usage.
    """

    def __init__(self, data_source, batch_size, nb_classes=10, shuffle=True):
        self.data_source = data_source
        # Map each label to the list of dataset indices carrying that label.
        target_lists = collections.defaultdict(list)
        for (i, (data, label)) in enumerate(self.data_source):
            target_lists[label].append(i)
        self.target_lists = target_lists
        self.nb_examples = len(data_source)
        self.shuffle = shuffle
        self.cur_class = 0
        self.nb_classes = nb_classes
        self.batch_size = batch_size

    def __iter__(self):
        batch = []
        count = 0
        # Per-class read cursor into self.target_lists.
        idices = collections.defaultdict(int)
        while (count < self.nb_examples):
            batch.append(self.target_lists[self.cur_class][idices[self.cur_class]])
            idices[self.cur_class] += 1
            # Current class exhausted: optionally reshuffle its indices for the
            # next pass, flush the (possibly short) batch, move to next class.
            if (idices[self.cur_class] >= len(self.target_lists[self.cur_class])):
                if self.shuffle:
                    np.random.shuffle(self.target_lists[self.cur_class])
                (yield batch)
                batch = []
                self.cur_class = ((self.cur_class + 1) % self.nb_classes)
            # Batch full: flush and also advance to the next class.
            if (len(batch) == self.batch_size):
                (yield batch)
                batch = []
                self.cur_class = ((self.cur_class + 1) % self.nb_classes)
            count += 1

    def __len__(self):
        # Number of examples, not number of batches.
        return self.nb_examples
def actionAngleFreqAngleStaeckel_c(pot, delta, R, vR, vT, z, vz, phi, u0=None, order=10):
    """Compute Staeckel actions (jr, jz), frequencies and angles for a set of
    phase-space points by calling the compiled actionAngleStaeckel C extension.

    Returns (jr, jz, Omegar, Omegaphi, Omegaz, Angler, Anglephi, Anglez, err)
    where err is the C error flag.
    """
    if (u0 is None):
        # Derive the u coordinate of each point in prolate spheroidal coords.
        (u0, dummy) = coords.Rz_to_uv(R, z, delta=numpy.atleast_1d(delta))
    # Local imports avoid a circular dependency with the orbit modules.
    from ..orbit.integrateFullOrbit import _parse_pot
    from ..orbit.integratePlanarOrbit import _prep_tfuncs
    (npot, pot_type, pot_args, pot_tfuncs) = _parse_pot(pot, potforactions=True)
    pot_tfuncs = _prep_tfuncs(pot_tfuncs)
    delta = numpy.atleast_1d(delta)
    ndelta = len(delta)
    # Output buffers, filled in place by the C routine.
    jr = numpy.empty(len(R))
    jz = numpy.empty(len(R))
    Omegar = numpy.empty(len(R))
    Omegaphi = numpy.empty(len(R))
    Omegaz = numpy.empty(len(R))
    Angler = numpy.empty(len(R))
    Anglephi = numpy.empty(len(R))
    Anglez = numpy.empty(len(R))
    err = ctypes.c_int(0)
    ndarrayFlags = ('C_CONTIGUOUS', 'WRITEABLE')
    actionAngleStaeckel_actionsFunc = _lib.actionAngleStaeckel_actionsFreqsAngles
    actionAngleStaeckel_actionsFunc.argtypes = [ctypes.c_int, ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ctypes.c_int, ndpointer(dtype=numpy.int32, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ctypes.c_void_p, ctypes.c_int, ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ctypes.c_int, ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ctypes.POINTER(ctypes.c_int)]
    # Remember which inputs were Fortran-contiguous so their layout can be
    # restored after the (C-ordered) call.
    f_cont = [R.flags['F_CONTIGUOUS'], vR.flags['F_CONTIGUOUS'], vT.flags['F_CONTIGUOUS'], z.flags['F_CONTIGUOUS'], vz.flags['F_CONTIGUOUS'], u0.flags['F_CONTIGUOUS'], delta.flags['F_CONTIGUOUS']]
    R = numpy.require(R, dtype=numpy.float64, requirements=['C', 'W'])
    vR = numpy.require(vR, dtype=numpy.float64, requirements=['C', 'W'])
    vT = numpy.require(vT, dtype=numpy.float64, requirements=['C', 'W'])
    z = numpy.require(z, dtype=numpy.float64, requirements=['C', 'W'])
    vz = numpy.require(vz, dtype=numpy.float64, requirements=['C', 'W'])
    u0 = numpy.require(u0, dtype=numpy.float64, requirements=['C', 'W'])
    delta = numpy.require(delta, dtype=numpy.float64, requirements=['C', 'W'])
    jr = numpy.require(jr, dtype=numpy.float64, requirements=['C', 'W'])
    jz = numpy.require(jz, dtype=numpy.float64, requirements=['C', 'W'])
    Omegar = numpy.require(Omegar, dtype=numpy.float64, requirements=['C', 'W'])
    Omegaphi = numpy.require(Omegaphi, dtype=numpy.float64, requirements=['C', 'W'])
    Omegaz = numpy.require(Omegaz, dtype=numpy.float64, requirements=['C', 'W'])
    Angler = numpy.require(Angler, dtype=numpy.float64, requirements=['C', 'W'])
    Anglephi = numpy.require(Anglephi, dtype=numpy.float64, requirements=['C', 'W'])
    Anglez = numpy.require(Anglez, dtype=numpy.float64, requirements=['C', 'W'])
    actionAngleStaeckel_actionsFunc(len(R), R, vR, vT, z, vz, u0, ctypes.c_int(npot), pot_type, pot_args, pot_tfuncs, ctypes.c_int(ndelta), delta, ctypes.c_int(order), jr, jz, Omegar, Omegaphi, Omegaz, Angler, Anglephi, Anglez, ctypes.byref(err))
    # Restore Fortran ordering where the caller supplied it.
    if f_cont[0]:
        R = numpy.asfortranarray(R)
    if f_cont[1]:
        vR = numpy.asfortranarray(vR)
    if f_cont[2]:
        vT = numpy.asfortranarray(vT)
    if f_cont[3]:
        z = numpy.asfortranarray(z)
    if f_cont[4]:
        vz = numpy.asfortranarray(vz)
    if f_cont[5]:
        u0 = numpy.asfortranarray(u0)
    if f_cont[6]:
        delta = numpy.asfortranarray(delta)
    # NOTE(review): despite the name, this mask selects entries that did NOT
    # fail (9999.99 is the C-side failure sentinel) — confirm before renaming.
    badAngle = (Anglephi != 9999.99)
    # Shift the returned azimuthal angle by the input phi, wrapped to [0, 2pi).
    Anglephi[badAngle] = ((Anglephi[badAngle] + (phi[badAngle] % (2.0 * numpy.pi))) % (2.0 * numpy.pi))
    Anglephi[(Anglephi < 0.0)] += (2.0 * numpy.pi)
    return (jr, jz, Omegar, Omegaphi, Omegaz, Angler, Anglephi, Anglez, err.value)
def objects365v1_classes() -> list:
    """Return the 365 Objects365-v1 category names, in dataset label order.

    NOTE(review): several entries intentionally keep the dataset's original
    quirks (e.g. the trailing space in 'tissue ') — do not "clean" them, they
    must match the annotation files.
    """
    return ['person', 'sneakers', 'chair', 'hat', 'lamp', 'bottle', 'cabinet/shelf', 'cup', 'car', 'glasses',
            'picture/frame', 'desk', 'handbag', 'street lights', 'book', 'plate', 'helmet', 'leather shoes', 'pillow', 'glove',
            'potted plant', 'bracelet', 'flower', 'tv', 'storage box', 'vase', 'bench', 'wine glass', 'boots', 'bowl',
            'dining table', 'umbrella', 'boat', 'flag', 'speaker', 'trash bin/can', 'stool', 'backpack', 'couch', 'belt',
            'carpet', 'basket', 'towel/napkin', 'slippers', 'barrel/bucket', 'coffee table', 'suv', 'toy', 'tie', 'bed',
            'traffic light', 'pen/pencil', 'microphone', 'sandals', 'canned', 'necklace', 'mirror', 'faucet', 'bicycle', 'bread',
            'high heels', 'ring', 'van', 'watch', 'sink', 'horse', 'fish', 'apple', 'camera', 'candle',
            'teddy bear', 'cake', 'motorcycle', 'wild bird', 'laptop', 'knife', 'traffic sign', 'cell phone', 'paddle', 'truck',
            'cow', 'power outlet', 'clock', 'drum', 'fork', 'bus', 'hanger', 'nightstand', 'pot/pan', 'sheep',
            'guitar', 'traffic cone', 'tea pot', 'keyboard', 'tripod', 'hockey', 'fan', 'dog', 'spoon', 'blackboard/whiteboard',
            'balloon', 'air conditioner', 'cymbal', 'mouse', 'telephone', 'pickup truck', 'orange', 'banana', 'airplane', 'luggage',
            'skis', 'soccer', 'trolley', 'oven', 'remote', 'baseball glove', 'paper towel', 'refrigerator', 'train', 'tomato',
            'machinery vehicle', 'tent', 'shampoo/shower gel', 'head phone', 'lantern', 'donut', 'cleaning products', 'sailboat', 'tangerine', 'pizza',
            'kite', 'computer box', 'elephant', 'toiletries', 'gas stove', 'broccoli', 'toilet', 'stroller', 'shovel', 'baseball bat',
            'microwave', 'skateboard', 'surfboard', 'surveillance camera', 'gun', 'life saver', 'cat', 'lemon', 'liquid soap', 'zebra',
            'duck', 'sports car', 'giraffe', 'pumpkin', 'piano', 'stop sign', 'radiator', 'converter', 'tissue ', 'carrot',
            'washing machine', 'vent', 'cookies', 'cutting/chopping board', 'tennis racket', 'candy', 'skating and skiing shoes', 'scissors', 'folder', 'baseball',
            'strawberry', 'bow tie', 'pigeon', 'pepper', 'coffee machine', 'bathtub', 'snowboard', 'suitcase', 'grapes', 'ladder',
            'pear', 'american football', 'basketball', 'potato', 'paint brush', 'printer', 'billiards', 'fire hydrant', 'goose', 'projector',
            'sausage', 'fire extinguisher', 'extension cord', 'facial mask', 'tennis ball', 'chopsticks', 'electronic stove and gas stove', 'pie', 'frisbee', 'kettle',
            'hamburger', 'golf club', 'cucumber', 'clutch', 'blender', 'tong', 'slide', 'hot dog', 'toothbrush', 'facial cleanser',
            'mango', 'deer', 'egg', 'violin', 'marker', 'ship', 'chicken', 'onion', 'ice cream', 'tape',
            'wheelchair', 'plum', 'bar soap', 'scale', 'watermelon', 'cabbage', 'router/modem', 'golf ball', 'pine apple', 'crane',
            'fire truck', 'peach', 'cello', 'notepaper', 'tricycle', 'toaster', 'helicopter', 'green beans', 'brush', 'carriage',
            'cigar', 'earphone', 'penguin', 'hurdle', 'swing', 'radio', 'CD', 'parking meter', 'swan', 'garlic',
            'french fries', 'horn', 'avocado', 'saxophone', 'trumpet', 'sandwich', 'cue', 'kiwi fruit', 'bear', 'fishing rod',
            'cherry', 'tablet', 'green vegetables', 'nuts', 'corn', 'key', 'screwdriver', 'globe', 'broom', 'pliers',
            'volleyball', 'hammer', 'eggplant', 'trophy', 'dates', 'board eraser', 'rice', 'tape measure/ruler', 'dumbbell', 'hamimelon',
            'stapler', 'camel', 'lettuce', 'goldfish', 'meat balls', 'medal', 'toothpaste', 'antelope', 'shrimp', 'rickshaw',
            'trombone', 'pomegranate', 'coconut', 'jellyfish', 'mushroom', 'calculator', 'treadmill', 'butterfly', 'egg tart', 'cheese',
            'pig', 'pomelo', 'race car', 'rice cooker', 'tuba', 'crosswalk sign', 'papaya', 'hair drier', 'green onion', 'chips',
            'dolphin', 'sushi', 'urinal', 'donkey', 'electric drill', 'spring rolls', 'tortoise/turtle', 'parrot', 'flute', 'measuring cup',
            'shark', 'steak', 'poker card', 'binoculars', 'llama', 'radish', 'noodles', 'yak', 'mop', 'crab',
            'microscope', 'barbell', 'bread/bun', 'baozi', 'lion', 'red cabbage', 'polar bear', 'lighter', 'seal', 'mangosteen',
            'comb', 'eraser', 'pitaya', 'scallop', 'pencil case', 'saw', 'table tennis paddle', 'okra', 'starfish', 'eagle',
            'monkey', 'durian', 'game board', 'rabbit', 'french horn', 'ambulance', 'asparagus', 'hoverboard', 'pasta', 'target',
            'hotair balloon', 'chainsaw', 'lobster', 'iron', 'flashlight']
def get_dataset_distributed(name, world_size, rank, batch_size, **kwargs):
    """Instantiate the dataset class named *name* from module globals and wrap it
    in a DataLoader driven by a DistributedSampler.

    Returns (dataloader, 3); the trailing 3 is the number of image channels
    expected by callers.
    """
    dataset_cls = globals()[name]
    dataset = dataset_cls(**kwargs)
    sampler = torch.utils.data.distributed.DistributedSampler(
        dataset, num_replicas=world_size, rank=rank)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        sampler=sampler,
        batch_size=batch_size,
        shuffle=False,  # the sampler already controls ordering
        drop_last=True,
        pin_memory=True,
        num_workers=4,
    )
    return (dataloader, 3)
def nfsp_measure_exploitability_nonlstm(rllib_policies: List[Policy], poker_game_version: str, open_spiel_env_config: dict=None):
    """Measure the exploitability of a joint policy (one RLlib policy per player)
    in an OpenSpiel poker game.

    Converts each RLlib policy to an OpenSpiel policy, combines them into a
    JointPlayerPolicy, and returns OpenSpiel's exploitability value.
    """
    if (open_spiel_env_config is None):
        # Sensible defaults: 2-player for the small poker games, empty otherwise.
        if (poker_game_version in ['kuhn_poker', 'leduc_poker']):
            open_spiel_env_config = {'players': pyspiel.GameParameter(2)}
        else:
            open_spiel_env_config = {}
    # Normalize all values to pyspiel.GameParameter.
    open_spiel_env_config = {k: (pyspiel.GameParameter(v) if (not isinstance(v, pyspiel.GameParameter)) else v) for (k, v) in open_spiel_env_config.items()}
    openspiel_game = pyspiel.load_game(poker_game_version, open_spiel_env_config)
    if (poker_game_version == 'oshi_zumo'):
        # oshi_zumo is simultaneous-move; exploitability needs a turn-based game.
        openspiel_game = pyspiel.convert_to_turn_based(openspiel_game)
    opnsl_policies = []
    for rllib_policy in rllib_policies:
        openspiel_policy = openspiel_policy_from_nonlstm_rllib_policy(openspiel_game=openspiel_game, rllib_policy=rllib_policy, game_version=poker_game_version, game_parameters=open_spiel_env_config)
        opnsl_policies.append(openspiel_policy)
    nfsp_policy = JointPlayerPolicy(game=openspiel_game, policies=opnsl_policies)
    if (poker_game_version == 'universal_poker'):
        print('Measuring exploitability for universal_poker policy. This will take a while...')
    exploitability_result = exploitability(game=openspiel_game, policy=nfsp_policy)
    return exploitability_result
def get_env_infos(env, env_config):
    """Probe an environment to collect its agent count and observation/action spaces.

    Arena envs are instantiated via ArenaRllibEnv; anything else goes through
    gym.make with a single agent. The throwaway env is closed before returning.
    """
    infos = {}
    if is_arena_env(env):
        probe_env = ArenaRllibEnv(env=env, env_config=env_config)
        infos['number_agents'] = dcopy(probe_env.number_agents)
    else:
        probe_env = gym.make(env)
        infos['number_agents'] = 1
    infos['obs_space'] = dcopy(probe_env.observation_space)
    infos['act_space'] = dcopy(probe_env.action_space)
    probe_env.close()
    return infos
class NormalDataset(Dataset):
    """Dataset of 'normal' images; each item pairs an image with a second random
    image via `pii` to synthesize an anomaly image and its mask.
    """

    def __init__(self, files: List, config: Namespace):
        self.files = files
        # When True, outputs are rescaled from [0, 1] to [-1, 1].
        self.center = config.center
        self.transforms = T.Compose([T.Resize(config.image_size, T.InterpolationMode.LANCZOS), T.CenterCrop(config.image_size), T.ToTensor()])
        # Eagerly load and transform every file in parallel; the whole dataset
        # is kept in memory.
        with Pool(cpu_count()) as pool:
            self.preload = pool.map(partial(self.load_file), files)

    def load_file(self, file):
        """Open one image file and apply the resize/crop/tensor transform."""
        image = Image.open(file)
        image = self.transforms(image)
        return image

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        img = self.preload[idx]
        # Pick a second random image to blend with (may equal idx).
        idx2 = np.random.randint(0, len(self))
        img2 = self.preload[idx2]
        # pii returns the synthesized image and its anomaly mask.
        (img, mask) = pii(img.numpy(), img2.numpy(), is_mri=False)
        if self.center:
            img = ((img - 0.5) * 2)
        return (torch.FloatTensor(img), torch.FloatTensor(mask))
def group_norm(input, group, running_mean, running_var, weight=None, bias=None, use_input_stats=True, momentum=0.1, eps=1e-05):
    """Group normalization implemented on top of F.batch_norm by reshaping so
    each (sample, channel-group) pair becomes one batch-norm channel.

    NOTE(review): per-sample affine/statistics are emulated by repeating the
    per-channel tensors batch-size times — confirm this matches the intended
    GroupNorm semantics before reuse.
    """
    if ((not use_input_stats) and ((running_mean is None) or (running_var is None))):
        raise ValueError('Expected running_mean and running_var to be not None when use_input_stats=False')
    (b, c) = (input.size(0), input.size(1))
    # Repeat affine params per sample so the reshaped batch-norm sees them.
    if (weight is not None):
        weight = weight.repeat(b)
    if (bias is not None):
        bias = bias.repeat(b)

    def _instance_norm(input, group, running_mean=None, running_var=None, weight=None, bias=None, use_input_stats=None, momentum=None, eps=None):
        # Keep the originals so updated running stats can be copied back,
        # averaged over the batch dimension.
        if (running_mean is not None):
            running_mean_orig = running_mean
            running_mean = running_mean_orig.repeat(b)
        if (running_var is not None):
            running_var_orig = running_var
            running_var = running_var_orig.repeat(b)
        # Fold (batch, channels) into batch-norm channels of size b*c/group.
        input_reshaped = input.contiguous().view(1, int(((b * c) / group)), group, *input.size()[2:])
        out = F.batch_norm(input_reshaped, running_mean, running_var, weight=weight, bias=bias, training=use_input_stats, momentum=momentum, eps=eps)
        # Write averaged running statistics back into the caller's tensors.
        if (running_mean is not None):
            running_mean_orig.copy_(running_mean.view(b, int((c / group))).mean(0, keepdim=False))
        if (running_var is not None):
            running_var_orig.copy_(running_var.view(b, int((c / group))).mean(0, keepdim=False))
        return out.view(b, c, *input.size()[2:])
    return _instance_norm(input, group, running_mean=running_mean, running_var=running_var, weight=weight, bias=bias, use_input_stats=use_input_stats, momentum=momentum, eps=eps)
def check_model_contexts(config_dir, nnet_edits=None, existing_model=None):
    """Verify that the network in init.config does not require more left/right
    context than the one in ref.config.

    Compiles each available config with nnet3-init, reads the contexts from
    nnet3-info output, and raises if init needs more context than ref.

    Args:
        config_dir: directory containing init.config / ref.config.
        nnet_edits: optional nnet3-copy --edits string applied to non-init models.
        existing_model: optional model prepended to the nnet3-init command.

    Raises:
        Exception: if init.config requires greater context than ref.config.
    """
    contexts = {}
    for file_name in ['init', 'ref']:
        if os.path.exists('{0}/{1}.config'.format(config_dir, file_name)):
            contexts[file_name] = {}
            common_lib.execute_command('nnet3-init {0} {1}/{2}.config {1}/{2}.raw'.format((existing_model if (existing_model is not None) else ''), config_dir, file_name))
            model = '{0}/{1}.raw'.format(config_dir, file_name)
            if ((nnet_edits is not None) and (file_name != 'init')):
                # Apply edits through a pipe; the trailing '|' makes Kaldi read
                # the command's stdout as the model.
                model = "nnet3-copy --edits='{0}' {1} - |".format(nnet_edits, model)
            out = common_lib.get_command_stdout('nnet3-info "{0}"'.format(model))
            # The context values appear in the first few lines of nnet3-info.
            for line in out.split('\n')[:4]:
                parts = line.split(':')
                if (len(parts) != 2):
                    continue
                key = parts[0].strip()
                value = int(parts[1].strip())
                if (key in ['left-context', 'right-context']):
                    contexts[file_name][key] = value
    if ('init' in contexts):
        assert ('ref' in contexts)
        if (('left-context' in contexts['init']) and ('left-context' in contexts['ref'])):
            if ((contexts['init']['left-context'] > contexts['ref']['left-context']) or (contexts['init']['right-context'] > contexts['ref']['right-context'])):
                # BUG FIX: .format(config_dir) was missing, so the message was
                # raised with literal '{0}' placeholders.
                raise Exception('Model specified in {0}/init.config requires greater context than the model specified in {0}/ref.config. This might be due to use of label-delay at the output in ref.config. Please use delay=$label_delay in the initial fixed-affine-layer of the network, to avoid this issue.'.format(config_dir))
class TFBertMainLayer(metaclass=DummyObject):
    """Import-time placeholder used when TensorFlow is not installed.

    Any attempt to instantiate it raises a helpful error via requires_backends.
    """
    # Backends this dummy stands in for.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Always raises: the 'tf' backend is unavailable in this environment.
        requires_backends(self, ['tf'])
class nnUNetTrainerV2_independentScalePerAxis(nnUNetTrainerV2):
    """nnU-Net v2 trainer variant whose scale augmentation draws an independent
    factor for each spatial axis."""

    def setup_DA_params(self):
        # Keep the parent's augmentation defaults, then enable per-axis scaling.
        super().setup_DA_params()
        self.data_aug_params['independent_scale_factor_for_each_axis'] = True
def take_while_two(pred_first: Callable[([T], bool)], pred: Callable[([T, T], bool)], iterable: Iterable[T]) -> tuple[(list[T], Iterator[T])]:
    """Split *iterable* into a greedy prefix and an iterator over the remainder.

    The prefix starts only if `pred_first` accepts the first element, and is
    extended while `pred(previous, current)` holds for each consecutive pair.
    The second return value yields every element not consumed into the prefix.
    """
    source = iter(iterable)
    try:
        head = next(source)
    except StopIteration:
        # Empty input: nothing taken, nothing left.
        return ([], iter([]))
    if not pred_first(head):
        # First element rejected: push it back onto the remainder.
        return ([], itertools.chain([head], source))
    taken: list[T] = [head]
    remainder: Iterator[T] = source
    previous = head
    for current in source:
        if not pred(previous, current):
            # `current` was already pulled from the iterator; re-prepend it.
            remainder = itertools.chain([current], source)
            break
        taken.append(current)
        previous = current
    return (taken, remainder)
class SEU(object):
    """SEU gearbox fault-diagnosis dataset wrapper.

    Builds either a test dataset, or an 80/20 stratified train/val split from
    the files found under ``data_dir``.
    """
    # Fixed properties of the SEU benchmark.
    num_classes = 20
    inputchannel = 1

    def __init__(self, data_dir, normlizetype):
        self.data_dir = data_dir
        self.normlizetype = normlizetype

    def data_preprare(self, test=False):
        """Return a test dataset when *test* is True, else (train_set, val_set)."""
        list_data = get_files(self.data_dir, test)
        if test:
            return dataset(list_data=list_data, test=True, transform=None)
        data_pd = pd.DataFrame({'data': list_data[0], 'label': list_data[1]})
        # Stratify on the label so both splits keep the class balance.
        (train_pd, val_pd) = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd['label'])
        train_set = dataset(list_data=train_pd, transform=data_transforms('train', self.normlizetype))
        val_set = dataset(list_data=val_pd, transform=data_transforms('val', self.normlizetype))
        return (train_set, val_set)
def plotFile(filename):
    """Plot the actual vs. predicted spectrum stored in *filename*.

    The file is parsed by read_file into (name, actual, predicted); both curves
    are drawn over wavelengths 400–798 nm in 2 nm steps and shown interactively.
    """
    (name, actual, predicted) = read_file(filename)
    legend = [str(name) + '_actual', str(name) + '_predicted']
    wavelengths = range(400, 800, 2)
    plt.plot(wavelengths, actual)
    plt.plot(wavelengths, predicted)
    plt.title('Comparing spectrums')
    plt.ylabel('Cross Scattering Amplitude')
    plt.xlabel('Wavelength (nm)')
    # BUG FIX: 'top left' is not a valid matplotlib legend location and raises
    # (or warns and ignores, depending on version); the valid spelling is
    # 'upper left'.
    plt.legend(legend, loc='upper left')
    plt.show()
def vgg11_bn(num_classes=1000, pretrained='imagenet'):
    """Build torchvision's VGG-11 with batch norm, optionally loading weights.

    When *pretrained* names an entry in pretrained_settings['vgg11_bn'], the
    corresponding weights are loaded via load_pretrained; pass None to skip.
    """
    model = models.vgg11_bn(pretrained=False)
    if pretrained is None:
        return model
    settings = pretrained_settings['vgg11_bn'][pretrained]
    model = load_pretrained(model, num_classes, settings)
    return model
class Path(Enum):
    """Well-known filesystem locations of the installed SDK and its test assets.

    NOTE: inside an Enum body a previously assigned name (SDK, TEST) still
    refers to its raw string value, so it can be reused to build later members.
    """
    SDK = qiskit_path[0]
    TEST = os.path.normpath(os.path.join(SDK, '..', 'test', 'python'))
    EXAMPLES = os.path.normpath(os.path.join(SDK, '..', 'examples'))
    SCHEMAS = os.path.normpath(os.path.join(SDK, 'schemas'))
    CASSETTES = os.path.normpath(os.path.join(TEST, '..', 'cassettes'))
    QASMS = os.path.normpath(os.path.join(TEST, 'qasm'))
def get_act_fn(name='relu'):
    """Resolve an activation name to a callable.

    Preference order: memory-efficient implementations (when neither JIT export
    nor scripting is active), then JIT implementations (when JIT is allowed),
    then the plain default table. Returns None for a falsy *name*.
    """
    if not name:
        return None
    exporting_or_scripting = is_no_jit() or is_exportable() or is_scriptable()
    if not exporting_or_scripting and name in _ACT_FN_ME:
        return _ACT_FN_ME[name]
    if not is_no_jit() and name in _ACT_FN_JIT:
        return _ACT_FN_JIT[name]
    return _ACT_FN_DEFAULT[name]
class ImagesSpotClipSampler(SpotClipSampler):
    """Sampler yielding single-frame 'clips' (video_idx, frame, frame) from a Spot
    dataset — every frame, or a fixed random subset per video."""

    def __init__(self, data_source: Spot, images_per_video: (int | None)=None, shuffle: bool=False) -> None:
        super().__init__(data_source, shuffle=shuffle)
        # None means "use every frame of every video".
        self.images_per_video = images_per_video

    def __iter__(self) -> List[Any]:
        # Seeded per epoch so distributed workers agree on the sample order.
        g = torch.Generator()
        g.manual_seed((self.seed + self.epoch))
        indices = [None for i in range(len(self))]
        global_idx = 0
        for idx in range(self.data_source.num_videos):
            video_metadata = self.data_source.get_video_metadata(idx)
            num_frames = video_metadata['num_frames']
            if (self.images_per_video is None):
                for i in range(num_frames):
                    # Start frame == end frame: each clip is one image.
                    indices[global_idx] = (idx, i, i)
                    global_idx += 1
            else:
                # Sample without replacement, then keep temporal order.
                random_frames = torch.randperm(num_frames, generator=g)[:self.images_per_video]
                random_frames = torch.sort(random_frames)[0].tolist()
                for i in random_frames:
                    indices[global_idx] = (idx, i, i)
                    global_idx += 1
        if self._shuffle:
            indices = [indices[idx] for idx in torch.randperm(len(indices), generator=g)]
        return iter(indices)

    def __len__(self) -> int:
        return (self.data_source._annotated_videos.cum_num_frames_per_video[(- 1)] if (self.images_per_video is None) else (self.images_per_video * self.data_source.num_videos))

    def __repr__(self) -> str:
        return f'{__class__.__name__}(images_per_video={self.images_per_video}, shuffle={self._shuffle}, seed={self.seed})'
class Viz_WSOL(object):
def __init__(self):
    """Set up colors, colormaps and rendering defaults for WSOL visualizations."""
    super(Viz_WSOL, self).__init__()
    self.gt_col = _GT_COLOR      # color for ground-truth boxes
    self.pred_col = _PRED_COLOR  # color for predicted boxes
    self.dpi = 50
    self.alpha = 128             # overlay transparency (0-255 scale)
    self.heatmap_cmap = plt.get_cmap('jet')
    self.mask_cmap_seg = get_bin_mask_colormap_segm()
    self.mask_cmap_bbox = get_bin_mask_colormap_bbx()
def tagax(self, ax, text: str):
assert isinstance(text, str)
if text:
ax.text(3, 40, text, bbox={'facecolor': 'white', 'pad': 1, 'alpha': 0.8})
def get_acc(self, gt_mask: np.ndarray, pred_mask: np.ndarray) -> float:
_gt_mask = gt_mask.flatten()
_pred_mask = pred_mask.flatten()
assert (_gt_mask.size == _pred_mask.size)
diff = np.abs((_gt_mask - _pred_mask))
return (diff == 0).mean()
def convert_bbox(self, bbox_xyxy: np.ndarray):
check_box_convention(bbox_xyxy, 'x0y0x1y1')
assert (bbox_xyxy.shape == (1, 4))
(x0, y0, x1, y1) = bbox_xyxy.flatten()
width = (x1 - x0)
height = (y1 - y0)
anchor = (x0, y1)
return (anchor, width, height)
def _plot_bbox(self, ax, img, gt_bbox, pred_bbox: Optional[np.ndarray]=None, cam: Optional[np.ndarray]=None, tag='', camcolormap=None, alpha=None):
if (camcolormap is None):
camcolormap = self.heatmap_cmap
if (alpha is None):
alpha = self.alpha
ax.imshow(img)
gt_info = self.convert_bbox(gt_bbox)
rect_gt = patches.Rectangle(gt_info[0], gt_info[1], (- gt_info[2]), linewidth=1.5, edgecolor=self.gt_col, facecolor='none')
if (pred_bbox is not None):
pred_info = self.convert_bbox(pred_bbox)
rect_pred = patches.Rectangle(pred_info[0], pred_info[1], (- pred_info[2]), linewidth=1.5, edgecolor=self.pred_col, facecolor='none')
if (cam is not None):
if (cam.dtype in [np.float32, np.float64]):
ax.imshow(cam, interpolation='bilinear', cmap=camcolormap, alpha=alpha)
elif (cam.dtype == np.bool_):
cam_ = (cam * 1.0)
masked_cam = np.ma.masked_where((cam_ == 0), cam_)
ax.imshow(masked_cam, interpolation=None, cmap=self.mask_cmap_bbox, vmin=0.0, vmax=255.0, alpha=self.alpha)
ax.add_patch(rect_gt)
if (pred_bbox is not None):
ax.add_patch(rect_pred)
self.tagax(ax, tag)
def _plot_mask(self, ax, img, gt_mask, cam, tag=''):
ax.imshow(img)
if (cam.dtype in [np.float32, np.float64]):
ax.imshow(cam, interpolation='bilinear', cmap=self.heatmap_cmap, alpha=self.alpha)
elif (cam.dtype in [np.bool_, np.uint8]):
cam_ = (cam * 1.0)
_gt_mask = gt_mask.astype(np.float32)
tmp_gt = np.copy(_gt_mask)
tmp_cam = np.copy(cam_)
tmp_cam[(tmp_cam == 1)] = 2.0
show_mask = (tmp_gt + tmp_cam)
show_mask = np.ma.masked_where((show_mask == 0), show_mask)
ax.imshow(show_mask, interpolation=None, cmap=self.mask_cmap_seg, vmin=0.0, vmax=255.0, alpha=self.alpha)
self.tagax(ax, tag)
def plot_single(self, datum: dict, outf: str, orient: str=constants.PLOT_VER):
assert (orient in constants.PLOT_ORIENTATIONS)
if (orient == constants.PLOT_HOR):
(fig, axes) = plt.subplots(nrows=1, ncols=2, squeeze=False)
elif (orient == constants.PLOT_VER):
(fig, axes) = plt.subplots(nrows=2, ncols=1, squeeze=False)
else:
raise ValueError(f'Orientiation {orient}')
if ('gt_bbox' in datum.keys()):
self._plot_bbox(axes[(0, 0)], img=datum['img'], gt_bbox=datum['gt_bbox'], pred_bbox=datum['pred_bbox'], cam=datum['cam'], tag=self.get_tag(datum))
mask = (datum['cam'] >= datum['tau'])
next_ax = (axes[(0, 1)] if (orient == constants.PLOT_HOR) else axes[(1, 0)])
self._plot_bbox(next_ax, img=datum['img'], gt_bbox=datum['gt_bbox'], pred_bbox=datum['pred_bbox'], cam=mask, tag=self.get_tag(datum))
elif ('gt_mask' in datum.keys()):
cam = datum['cam']
pred_mask = (datum['cam'] >= datum['tau'])
acc = self.get_acc(gt_mask=datum['gt_mask'], pred_mask=pred_mask.astype(np.float32))
self._plot_mask(axes[(0, 0)], img=datum['img'], gt_mask=datum['gt_mask'], cam=cam, tag=self.get_tag(datum, acc=acc))
next_ax = (axes[(0, 1)] if (orient == constants.PLOT_HOR) else axes[(1, 0)])
self._plot_mask(next_ax, img=datum['img'], gt_mask=datum['gt_mask'], cam=pred_mask, tag=self.get_tag(datum, acc=acc))
else:
raise NotImplementedError
self.closing(fig, outf)
def plot_single_cam_on_img(self, datum: dict, outf: str, tagit: bool):
(fig, axes) = plt.subplots(nrows=1, ncols=1, squeeze=False)
if ('gt_bbox' in datum.keys()):
tag = (self.get_tag(datum) if tagit else '')
self._plot_bbox(axes[(0, 0)], img=datum['img'], gt_bbox=datum['gt_bbox'], pred_bbox=datum['pred_bbox'], cam=datum['cam'], tag=tag)
elif ('gt_mask' in datum.keys()):
cam = datum['cam']
pred_mask = (datum['cam'] >= datum['tau'])
acc = self.get_acc(gt_mask=datum['gt_mask'], pred_mask=pred_mask.astype(np.float32))
tag = (self.get_tag(datum, acc=acc) if tagit else '')
self._plot_mask(axes[(0, 0)], img=datum['img'], gt_mask=datum['gt_mask'], cam=cam, tag=tag)
else:
raise NotImplementedError
self.closing(fig, outf)
def plot_single_gt_on_img(self, datum: dict, outf: str):
(fig, axes) = plt.subplots(nrows=1, ncols=1, squeeze=False)
if ('gt_bbox' in datum.keys()):
self._plot_bbox(axes[(0, 0)], img=datum['img'], gt_bbox=datum['gt_bbox'], pred_bbox=None, cam=None, tag='')
elif ('gt_mask' in datum.keys()):
self._plot_mask(axes[(0, 0)], img=datum['img'], gt_mask=datum['gt_mask'], cam=datum['gt_mask'], tag='')
else:
raise NotImplementedError
self.closing(fig, outf)
def plot_multiple(self, data: list, outf: str):
nrows = 2
ncols = len(data)
(him, wim) = data[0]['img'].shape[:2]
r = (him / float(wim))
fw = 10
r_prime = (r * (nrows / float(ncols)))
fh = (r_prime * fw)
(fig, axes) = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, sharey=False, squeeze=False, figsize=(fw, fh))
if ('gt_bbox' in data[0].keys()):
for (i, datum) in enumerate(data):
self._plot_bbox(axes[(0, i)], img=datum['img'], gt_bbox=datum['gt_bbox'], pred_bbox=datum['pred_bbox'], cam=datum['cam'], tag=self.get_tag(datum))
mask = (datum['cam'] >= datum['tau'])
self._plot_bbox(axes[(1, i)], img=datum['img'], gt_bbox=datum['gt_bbox'], pred_bbox=datum['pred_bbox'], cam=mask, tag=self.get_tag(datum))
elif ('gt_mask' in data[0].keys()):
for (i, datum) in enumerate(data):
cam = datum['cam']
pred_mask = (datum['cam'] >= datum['tau'])
acc = self.get_acc(gt_mask=datum['gt_mask'], pred_mask=pred_mask.astype(np.float32))
self._plot_mask(axes[(0, i)], img=datum['img'], gt_mask=datum['gt_mask'], cam=cam, tag=self.get_tag(datum, acc=acc))
self._plot_mask(axes[(1, i)], img=datum['img'], gt_mask=datum['gt_mask'], cam=pred_mask, tag=self.get_tag(datum, acc=acc))
else:
raise NotImplementedError
self.closing(fig, outf)
def plot_cam_raw(self, cam: np.ndarray, outf: str, interpolation: str):
(fig, ax) = plt.subplots(nrows=1, ncols=1, squeeze=False)
ax[(0, 0)].imshow(cam, interpolation=interpolation, cmap=self.heatmap_cmap, alpha=self.alpha)
self.closing(fig, outf)
def get_tag(self, datum, acc=0.0):
if ('gt_bbox' in datum.keys()):
tag = 'IoU={:.3f}, $\\tau$={:.2f}$\\sigma$={:.2f}'.format(datum['iou'], datum['tau'], datum['sigma'])
elif ('gt_mask' in datum.keys()):
z = ('*' if datum['best_tau'] else '')
tag = 'acc={:.3f}, $\\tau$={:.2f}{}'.format(acc, datum['tau'], z)
else:
raise NotImplementedError
return tag
def closing(self, fig, outf):
plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
for ax in fig.axes:
ax.axis('off')
ax.margins(0, 0)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
fig.savefig(outf, pad_inches=0, bbox_inches='tight', dpi=self.dpi, optimize=True)
plt.close(fig)
def _watch_plot_entropy(self, data: dict, outf: str):
nrows = 1
ncols = (len(list(data['visu'].keys())) + 1)
(him, wim) = data['raw_img'].shape[:2]
r = (him / float(wim))
fw = 10
r_prime = (r * (nrows / float(ncols)))
fh = (r_prime * fw)
(fig, axes) = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, sharey=False, squeeze=False, figsize=(fw, fh))
if ('gt_bbox' in data.keys()):
self._plot_bbox(axes[(0, 0)], img=data['raw_img'], gt_bbox=data['gt_bbox'], pred_bbox=None, cam=None, tag='Input')
for (i, datumkey) in enumerate(list(data['visu'].keys())):
self._plot_bbox(axes[(0, (i + 1))], img=data['raw_img'], gt_bbox=data['gt_bbox'], pred_bbox=None, cam=data['visu'][datumkey], tag=data['tags'][datumkey])
elif ('gt_mask' in data[0].keys()):
for (i, datum) in enumerate(data):
cam = datum['cam']
pred_mask = (datum['cam'] >= datum['tau'])
acc = self.get_acc(gt_mask=datum['gt_mask'], pred_mask=pred_mask.astype(np.float32))
self._plot_mask(axes[(0, i)], img=datum['img'], gt_mask=datum['gt_mask'], cam=cam, tag=self.get_tag(datum, acc=acc))
self._plot_mask(axes[(1, i)], img=datum['img'], gt_mask=datum['gt_mask'], cam=pred_mask, tag=self.get_tag(datum, acc=acc))
else:
raise NotImplementedError
self.closing(fig, outf)
def _watch_plot_histogram_activations(self, density: np.ndarray, bins: np.ndarray, outf: str, split: str):
(fig, axes) = plt.subplots(nrows=1, ncols=1, squeeze=False)
widths = (bins[:(- 1)] - bins[1:])
widths = 10.0
x = range(density.size)
axes[(0, 0)].bar(x, medfilt(volume=density, kernel_size=5), width=widths)
axes[(0, 0)].set_xlabel('Normalized CAM activations')
axes[(0, 0)].set_ylabel('Percentage from total {} set.'.format(split))
scale_x = 1000.0
ticks_x = ticker.FuncFormatter((lambda xx, pos: '{0:g}'.format((xx / scale_x))))
axes[(0, 0)].xaxis.set_major_formatter(ticks_x)
fig.savefig(outf, bbox_inches='tight', dpi=self.dpi, optimize=True)
plt.close(fig)
def _plot_meter(self, metrics: dict, fout: str, perfs_keys: list, title: str='', xlabel: str='', best_iter: int=None):
ncols = 4
ks = perfs_keys
if (len(ks) > ncols):
nrows = math.ceil((len(ks) / float(ncols)))
else:
nrows = 1
ncols = len(ks)
(fig, axes) = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, sharey=False, squeeze=False)
t = 0
for i in range(nrows):
for j in range(ncols):
if (t >= len(ks)):
axes[(i, j)].set_visible(False)
t += 1
continue
val = metrics[ks[t]]['value_per_epoch']
x = list(range(len(val)))
axes[(i, j)].plot(x, val, color='tab:orange')
axes[(i, j)].set_title(ks[t], fontsize=4)
axes[(i, j)].xaxis.set_tick_params(labelsize=4)
axes[(i, j)].yaxis.set_tick_params(labelsize=4)
axes[(i, j)].set_xlabel('#{}'.format(xlabel), fontsize=4)
axes[(i, j)].grid(True)
if (best_iter is not None):
axes[(i, j)].plot([x[best_iter]], [val[best_iter]], marker='o', markersize=5, color='red')
t += 1
fig.suptitle(title, fontsize=4)
plt.tight_layout()
fig.savefig(fout, bbox_inches='tight', dpi=300)
def _clean_metrics(self, metric: dict) -> dict:
_metric = deepcopy(metric)
l = []
for k in _metric.keys():
cd = (_metric[k]['value_per_epoch'] == [])
cd |= (_metric[k]['value_per_epoch'] == [np.inf])
cd |= (_metric[k]['value_per_epoch'] == [(- np.inf)])
if cd:
l.append(k)
for k in l:
_metric.pop(k, None)
return _metric
def _watch_plot_perfs_meter(self, meters: dict, split: str, perfs: list, fout: str):
xlabel = 'epochs'
best_epoch = meters[constants.VALIDSET]['localization']['best_epoch']
title = 'Split: {}. Best iter.: {} {}'.format(split, best_epoch, xlabel)
self._plot_meter(self._clean_metrics(meters[split]), fout=fout, perfs_keys=perfs, title=title, xlabel=xlabel, best_iter=best_epoch)
out = dict()
out[split] = dict()
for k in perfs:
val = self._clean_metrics(meters[split])[k]['value_per_epoch']
out[split][k] = dict()
out[split][k] = {'vals': val, 'best_epoch': best_epoch}
return out
def _watch_plot_thresh(self, data: dict, outf: str):
nrows = 1
ncols = (len(list(data['visu'].keys())) + 1)
(him, wim) = data['raw_img'].shape[:2]
r = (him / float(wim))
fw = 10
r_prime = (r * (nrows / float(ncols)))
fh = (r_prime * fw)
(fig, axes) = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, sharey=False, squeeze=False, figsize=(fw, fh))
if ('gt_bbox' in data.keys()):
self._plot_bbox(axes[(0, 0)], img=data['raw_img'], gt_bbox=data['gt_bbox'], pred_bbox=None, cam=None, tag='Input')
for (i, datumkey) in enumerate(list(data['visu'].keys())):
if (datumkey == 'density'):
(density, bins) = data['visu'][datumkey]
widths = (bins[:(- 1)] - bins[1:])
axes[(0, (i + 1))].bar(bins[1:], density, width=widths)
axes[(0, (i + 1))].axvline(data['otsu_thresh'], label='otsu_thresh', color='r')
axes[(0, (i + 1))].axvline(data['li_thres'], label='li_thres', color='b')
axes[(0, (i + 1))].legend()
elif (datumkey == 'discrete_cam'):
axes[(0, (i + 1))].imshow(data['visu'][datumkey], cmap=cm.gray)
elif (datumkey in ['bin_otsu', 'bin_li', 'otsu_bin_eroded', 'li_bin_eroded', 'fg_auto']):
gt_info = self.convert_bbox(data['gt_bbox'])
rect_gt = patches.Rectangle(gt_info[0], gt_info[1], (- gt_info[2]), linewidth=1.5, edgecolor=self.gt_col, facecolor='none')
axes[(0, (i + 1))].imshow(data['visu'][datumkey], cmap=cm.gray)
axes[(0, (i + 1))].add_patch(rect_gt)
self.tagax(axes[(0, (i + 1))], data['tags'][datumkey])
else:
self._plot_bbox(axes[(0, (i + 1))], img=data['raw_img'], gt_bbox=data['gt_bbox'], pred_bbox=None, cam=data['visu'][datumkey], tag=data['tags'][datumkey])
elif ('gt_mask' in data.keys()):
axes[(0, 0)].imshow(data['raw_img'])
show_mask = data['gt_mask']
show_mask = np.ma.masked_where((data['gt_mask'] == 0), show_mask)
axes[(0, 0)].imshow(show_mask, interpolation=None, cmap=get_simple_bin_mask_colormap_mask(), vmin=0.0, vmax=255.0, alpha=self.alpha)
self.tagax(axes[(0, 0)], 'Input')
for (i, datumkey) in enumerate(list(data['visu'].keys())):
if (datumkey == 'density'):
(density, bins) = data['visu'][datumkey]
widths = (bins[:(- 1)] - bins[1:])
axes[(0, (i + 1))].bar(bins[1:], density, width=widths)
axes[(0, (i + 1))].axvline(data['otsu_thresh'], label='otsu_thresh', color='r')
axes[(0, (i + 1))].axvline(data['li_thres'], label='li_thres', color='b')
axes[(0, (i + 1))].legend()
elif (datumkey == 'discrete_cam'):
axes[(0, (i + 1))].imshow(data['visu'][datumkey], cmap=cm.gray)
elif (datumkey in ['bin_otsu', 'bin_li', 'otsu_bin_eroded', 'li_bin_eroded', 'fg_auto']):
axes[(0, (i + 1))].imshow(data['visu'][datumkey], cmap=cm.gray)
self.tagax(axes[(0, (i + 1))], data['tags'][datumkey])
elif (datumkey in ['cam', 'cam_normalized']):
axes[(0, (i + 1))].imshow(data['visu'][datumkey], interpolation='bilinear', cmap=self.heatmap_cmap, alpha=self.alpha)
self.tagax(axes[(0, (i + 1))], data['tags'][datumkey])
else:
raise NotImplementedError
else:
raise NotImplementedError
self.closing(fig, outf) |
class MistralModel(BaseModel):
    """Model adapter for Mistral checkpoints."""

    def match(self, model_path: str):
        """Return True when the path looks like a Mistral model."""
        lowered = model_path.lower()
        return 'mistral' in lowered

    def get_default_conv_template(self, model_path: str) -> Conversation:
        """Mistral models always use the 'mistral' conversation template."""
        return get_conv_template('mistral')
def create_sentence_vectors(body_copy):
    """Train a Doc2Vec model on the given sentences and return one vector per sentence.

    Each sentence is tokenized by whitespace and tagged with its position in
    `body_copy`; the trained document vectors are returned in the same order.
    """
    Analyzed = namedtuple('Analyzed', 'words tags')
    tagged_docs = [Analyzed(sentence.split(), [position]) for (position, sentence) in enumerate(body_copy)]
    # Parameters kept as-is (gensim < 4 API: `size`, `docvecs.vectors_docs`).
    model = doc2vec.Doc2Vec(tagged_docs, size=100, window=300, min_count=1, workers=4)
    return list(model.docvecs.vectors_docs)
class ElectronicSpatialExtent(OutputModel):
    """Output head predicting a per-atom scalar weighted by |pos|^2.

    A two-layer MLP maps hidden features to one scalar per node, which is
    then multiplied by the squared distance of the node from the origin.
    """

    def __init__(self, hidden_channels, activation='silu'):
        super(ElectronicSpatialExtent, self).__init__(allow_prior_model=False)
        act_class = act_class_mapping[activation]
        half = hidden_channels // 2
        self.output_network = nn.Sequential(
            nn.Linear(hidden_channels, half),
            act_class(),
            nn.Linear(half, 1),
        )
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialize both linear layers and zero their biases."""
        for layer in (self.output_network[0], self.output_network[2]):
            nn.init.xavier_uniform_(layer.weight)
            layer.bias.data.fill_(0)

    def pre_reduce(self, x, v: Optional[torch.Tensor], z, pos, batch):
        """Return MLP(x) scaled by the squared norm of each position."""
        squared_norm = torch.norm(pos, dim=1, keepdim=True) ** 2
        return squared_norm * self.output_network(x)
class Discriminator(nn.Module):
    """DCGAN-style convolutional discriminator producing a single-channel logit map.

    Five conv stages with LeakyReLU(0.2) activations; BatchNorm on all but
    the first and last stage. `ngf` and `nz` are accepted for signature
    compatibility but unused here; `ngpu` is stored for callers.
    """

    def __init__(self, ngpu, nc=3, ndf=160, ngf=160, nz=100):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu
        stages = [
            nn.Conv2d(nc, ndf, 4, 4, 6, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf, ndf * 2, 4, 3, 3, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
        ]
        self.main = nn.Sequential(*stages)

    def forward(self, input):
        """Run the conv stack on `input`."""
        return self.main(input)
def CHECKNAN(tensor, name):
    """Log an error if `tensor` contains any NaN or infinite values.

    Args:
        tensor: torch tensor to inspect.
        name: human-readable identifier included in the log message.
    """
    # Element-wise checks instead of probing max()/min(): the old
    # `isinf(tensor.min())` test only fired for -inf and missed +inf, and the
    # NaN check relied on NaN propagating through max().
    if torch.isnan(tensor).any():
        logging.error(('NaN found in tensor: %s' % name))
    if torch.isinf(tensor).any():
        logging.error(('Inf found in tensor: %s' % name))
def find_forward_params(x_input: torch.tensor, y_ouput: torch.tensor, random_flow_fn: typing.Callable=None, num_restarts: int=1, optimizer_fn=None, num_epochs=None, seed=0, verbose=0, verbose_level=0) -> Flow:
    """Fit a flow's parameters so that flow.forward(x_input) approximates y_ouput.

    Runs `num_restarts` independent optimizations, each from a fresh random
    flow produced by `random_flow_fn`, and keeps the restart with the lowest
    final MSE; restarts whose final loss is NaN are discarded.

    Args:
        x_input: input array/tensor fed to the flow each step.
        y_ouput: regression target (parameter name keeps the original
            spelling for call compatibility).
        random_flow_fn: zero-argument factory returning a randomly
            initialized flow; required.
        num_restarts: number of independent random restarts.
        optimizer_fn: maps a parameter list to an optimizer; defaults to
            Adam(lr=0.01) with a warning.
        num_epochs: optimization steps per restart; defaults to 100 with a warning.
        seed: numpy RNG seed (affects random_flow_fn if it uses numpy).
        verbose, verbose_level: progress-printing controls.

    Returns:
        NOTE(review): annotated `-> Flow` but the function actually returns a
        2-tuple (best_flow, loss_history_of_best_flow) -- confirm callers.
    """
    if (random_flow_fn is None):
        raise RuntimeError('random_flow_fn must be specified')
    if (optimizer_fn is None):
        warnings.warn('Using default optimizer (optim.Adam(trainable_params, lr=0.01))', Warning)
        optimizer_fn = (lambda trainable_params: optim.Adam(trainable_params, lr=0.01))
    if (num_epochs is None):
        warnings.warn('Using default number of epochs (100)', Warning)
        num_epochs = 100
    np.random.seed(seed)
    found_flows = []
    found_min_losses = []
    found_losses = []
    for r in range(num_restarts):
        _flow = random_flow_fn()
        # Closure over the flow of the current restart.
        def mse_loss(x_input_t, y_ouput_t):
            return torch.mean(((_flow.forward(x_input_t) - y_ouput_t) ** 2))
        trainable_params = []
        for (n, p) in _flow.named_parameters():
            trainable_params.append(p)
        optimizer = optimizer_fn(trainable_params)
        if verbose:
            print('Restart {r}'.format(r=r), end='\r')
        loss_arr = []
        for e in range(num_epochs):
            optimizer.zero_grad()
            # Fresh tensors every step; assumes x_input / y_ouput are array-like.
            loss = mse_loss(torch.tensor(x_input), torch.tensor(y_ouput))
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            if verbose:
                # Carriage return keeps progress on one line except at the end
                # or when verbose_level requests one line per step.
                _end = '\r'
                if (e >= (num_epochs - 1)):
                    _end = '\n'
                if (verbose_level != 1):
                    _end = '\n'
                print('Restart {} Step {} - {}'.format(r, e, loss.detach().item()), end=_end)
            loss_arr.append(loss.detach().to('cpu').numpy())
        found_flows.append(_flow)
        found_min_losses.append(loss_arr[(- 1)])
        found_losses.append(loss_arr)
    # Drop restarts that diverged to NaN before selecting the best one.
    # NOTE(review): this function mixes the `np` and `numpy` aliases -- both
    # must be imported at module level.
    idx = numpy.logical_not(numpy.isnan(found_min_losses))
    found_min_losses = numpy.array(found_min_losses)[idx]
    found_losses = numpy.array(found_losses)[idx].tolist()
    found_flows = numpy.asarray(found_flows, dtype=object)[idx].tolist()
    return (found_flows[np.argmin(found_min_losses)], found_losses[np.argmin(found_min_losses)])
class FlaxTimesteps(nn.Module):
    """Flax module producing sinusoidal timestep embeddings.

    Thin wrapper around `get_sinusoidal_embeddings`; `dim` is forwarded as
    the embedding dimension and `flip_sin_to_cos` / `freq_shift` are passed
    through unchanged.
    """
    # dim: embedding dimension forwarded as `embedding_dim`.
    dim: int = 32
    # flip_sin_to_cos: forwarded flag controlling sin/cos ordering.
    flip_sin_to_cos: bool = False
    # freq_shift: forwarded frequency shift.
    freq_shift: float = 1

    def __call__(self, timesteps):
        """Return the sinusoidal embeddings for `timesteps`."""
        return get_sinusoidal_embeddings(timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift)
class TestAddEmbeddings(unittest.TestCase):
def setUpClass(self):
pass
def tearDownClass(self):
pass
def test_add_embeddings_with_seq_len_first(self):
graph = Graph()
graph.framework_modeling_config['framework'] = 'onnxruntime'
input_data_node = OPERATORS['Input']()
input_tensors = []
output_tensors = [Tensor(), Tensor(), Tensor()]
input_data_node.construct('input_data', 'Input', input_tensors=input_tensors, output_tensors=output_tensors)
add_node = OPERATORS['Add']()
input_tensors = [Tensor(name='add_src0'), Tensor(name='add_src1')]
output_tensors = [Tensor(name='add:0', source_op=['add'], dest_op=['transpose'])]
add_node.construct('add', 'Add', input_tensors=input_tensors, output_tensors=output_tensors)
transpose_node = OPERATORS['Transpose']()
input_tensors = [Tensor(name='add:0', source_op=['add'], dest_op=['transpose'])]
output_tensors = [Tensor(name='transpose:0', source_op=['transpose'], dest_op=['layernorm'])]
transpose_node.construct('transpose', 'Transpose', input_tensors=input_tensors, output_tensors=output_tensors, attr=OrderedDict({'src_perm': '0,1,2', 'dst_perm': '1,0,2'}))
ln_node = OPERATORS['LayerNorm']()
input_tensors = [Tensor(name='transpose:0', source_op=['transpose'], dest_op=['layernorm']), Tensor(name='scale:0', data=np.random.randn(1024).astype('float32'), shape=[1024]), Tensor(name='shift:0', data=np.random.randn(1024).astype('float32'), shape=[1024])]
output_tensors = [Tensor(name='layernorm:0', source_op=['layernorm'], dest_op=[])]
ln_node.construct('layernorm', 'LayerNorm', input_tensors=input_tensors, output_tensors=output_tensors, attr=OrderedDict({'epsilon': 0.009}))
graph.insert_nodes(len(graph.nodes), [input_data_node, add_node, transpose_node, ln_node])
graph.add_config_item('hidden_size', 1024)
graph = AddEmbeddings()(graph)
self.assertEqual(5, len(graph.nodes))
self.assertEqual('-1,1024', graph.nodes[3].attr['dst_shape'])
self.assertEqual('1,0,2', graph.nodes[2].attr['dst_perm'])
self.assertEqual(0.009, graph.nodes[4].attr['epsilon']) |
class ActorVae(nn.Module):
    """Transformer VAE with Actor-style agnostic encoder/decoder.

    Encodes variable-length feature sequences to a latent (sampled when
    `is_vae` is True) and decodes latents back to feature sequences.
    """

    def __init__(self, ablation, nfeats: int, latent_dim: list=[1, 256], ff_size: int=1024, num_layers: int=9, num_heads: int=4, dropout: float=0.1, is_vae: bool=True, activation: str='gelu', position_embedding: str='learned', **kwargs) -> None:
        super().__init__()
        # latent_dim packs [latent_size, latent_dim]; unpack both components.
        self.latent_size = latent_dim[0]
        self.latent_dim = latent_dim[(- 1)]
        self.is_vae = is_vae
        input_feats = nfeats
        output_feats = nfeats
        self.encoder = ActorAgnosticEncoder(nfeats=input_feats, vae=True, latent_dim=self.latent_dim, ff_size=ff_size, num_layers=num_layers, num_heads=num_heads, dropout=dropout, activation=activation, **kwargs)
        self.decoder = ActorAgnosticDecoder(nfeats=output_feats, vae=True, latent_dim=self.latent_dim, ff_size=ff_size, num_layers=num_layers, num_heads=num_heads, dropout=dropout, activation=activation, **kwargs)

    def forward(self, features: Tensor, lengths: Optional[List[int]]=None):
        """Encode then decode; the print suggests this path is not meant to be used."""
        print('Should Not enter here')
        (z, dist) = self.encode(features, lengths)
        feats_rst = self.decode(z, lengths)
        return (feats_rst, z, dist)

    def encode(self, features: Tensor, lengths: Optional[List[int]]=None) -> "tuple[Tensor, Distribution]":
        """Encode `features` to (latent, dist).

        The annotation previously read `Union[(Tensor, Distribution)]`, but
        the method returns a 2-tuple; when `is_vae` is False, `dist` is the
        encoder output itself and the latent is that output unsqueezed.
        """
        dist = self.encoder(features, lengths)
        if self.is_vae:
            latent = sample_from_distribution(dist)
        else:
            latent = dist.unsqueeze(0)
        return (latent, dist)

    def decode(self, z: Tensor, lengths: List[int]):
        """Decode latent `z` back to a feature sequence of the given lengths."""
        feats = self.decoder(z, lengths)
        return feats
class DEnKF(nn.Module):
    """Differentiable Ensemble Kalman Filter.

    Combines a learned process model, observation model, observation-noise
    model and an image sensor model into a single predict/update step.
    State tensors are laid out as (batch, ensemble, dim) and manipulated
    with einops `rearrange`/`repeat`.
    """

    def __init__(self, num_ensemble, dim_x, dim_z):
        super(DEnKF, self).__init__()
        self.num_ensemble = num_ensemble
        self.dim_x = dim_x
        self.dim_z = dim_z
        # Initial diagonal of the observation-noise covariance (0.1 per dim).
        self.r_diag = (np.ones(self.dim_z).astype(np.float32) * 0.1)
        self.r_diag = self.r_diag.astype(np.float32)
        self.process_model = ProcessModel(self.num_ensemble, self.dim_x)
        self.observation_model = ObservationModel(self.num_ensemble, self.dim_x, self.dim_z)
        self.observation_noise = ObservationNoise(self.dim_z, self.r_diag)
        self.sensor_model = imgSensorModel(self.num_ensemble, self.dim_z)

    def forward(self, inputs, states):
        """Run one EnKF predict/update step.

        Args:
            inputs: raw sensor observations forwarded to the sensor model.
            states: tuple (state_old, m_state); only state_old is used here.

        Returns:
            Tuple (updated ensemble, updated mean, predicted mean, latent
            observation z, ensemble observations, mean predicted
            observation), all cast to float32.
        """
        batch_size = inputs[0].shape[0]  # NOTE(review): computed but unused
        raw_obs = inputs
        (state_old, m_state) = states
        # Predict: propagate the ensemble through the process model.
        state_pred = self.process_model(state_old)
        m_A = torch.mean(state_pred, axis=1)
        mean_A = repeat(m_A, 'bs dim -> bs k dim', k=self.num_ensemble)
        # A: ensemble anomalies of the predicted state, transposed to (bs, dim, k).
        A = (state_pred - mean_A)
        A = rearrange(A, 'bs k dim -> bs dim k')
        # Map the predicted ensemble into observation space and center it.
        H_X = self.observation_model(state_pred)
        mean = torch.mean(H_X, axis=1)
        H_X_mean = rearrange(mean, 'bs (k dim) -> bs k dim', k=1)
        m = repeat(mean, 'bs dim -> bs k dim', k=self.num_ensemble)
        H_A = (H_X - m)
        H_XT = rearrange(H_X, 'bs k dim -> bs dim k')
        H_AT = rearrange(H_A, 'bs k dim -> bs dim k')
        # Learned sensor: ensemble of observations plus an encoding that
        # parameterizes the observation noise.
        (ensemble_z, z, encoding) = self.sensor_model(raw_obs)
        y = rearrange(ensemble_z, 'bs k dim -> bs dim k')
        R = self.observation_noise(encoding)
        # Innovation covariance S = (H A)^T (H A)/(k-1) + R, then Kalman gain.
        innovation = (((1 / (self.num_ensemble - 1)) * torch.matmul(H_AT, H_A)) + R)
        inv_innovation = torch.linalg.inv(innovation)
        K = ((1 / (self.num_ensemble - 1)) * torch.matmul(torch.matmul(A, H_A), inv_innovation))
        # Update every ensemble member with its own observation sample.
        gain = rearrange(torch.matmul(K, (y - H_XT)), 'bs dim k -> bs k dim')
        state_new = (state_pred + gain)
        m_state_new = torch.mean(state_new, axis=1)
        m_state_new = rearrange(m_state_new, 'bs (k dim) -> bs k dim', k=1)
        m_state_pred = rearrange(m_A, 'bs (k dim) -> bs k dim', k=1)
        output = (state_new.to(dtype=torch.float32), m_state_new.to(dtype=torch.float32), m_state_pred.to(dtype=torch.float32), z.to(dtype=torch.float32), ensemble_z.to(dtype=torch.float32), H_X_mean.to(dtype=torch.float32))
        return output
class ResidualBlock(nn.Module):
    """Conv-InstanceNorm-ReLU-Conv-InstanceNorm block with an identity skip.

    The skip connection requires dim_in == dim_out for the addition to work.
    """

    def __init__(self, dim_in, dim_out):
        super(ResidualBlock, self).__init__()
        layers = [
            nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1),
            nn.InstanceNorm2d(dim_out, affine=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1),
            nn.InstanceNorm2d(dim_out, affine=False),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        """Return x plus the transformed features."""
        residual = self.main(x)
        return x + residual
def simxSetVisionSensorImage(clientID, sensorHandle, image, options, operationMode):
    """Push an image buffer to a vision sensor through the remote-API C binding.

    Packs `image` (an iterable of byte-sized ints) into a ctypes byte array
    before delegating to the underlying C call.
    """
    byte_count = len(image)
    packed = (ct.c_byte * byte_count)(*image)
    return c_SetVisionSensorImage(clientID, sensorHandle, packed, byte_count, options, operationMode)
def eval_one_epoch(model, eval_loader, epoch, tb_log, log_f, loss_func, class_func):
    """Run one evaluation epoch over `eval_loader` and report loss/accuracy/IoU.

    Args:
        model: network called as model(xyz_seq, features) returning class probabilities.
        eval_loader: yields 11-tuples; only the last four entries are used.
        epoch: epoch number (logging only).
        tb_log: tensorboard logger; currently unused in this function.
        log_f: file handle passed to log_print.
        loss_func: NLL-style criterion applied to clamped log-probabilities.
        class_func: maps a class index to a printable class name.

    Returns:
        (mean accuracy, mean IoU, per-class IoU list, mean loss).

    NOTE(review): relies on a module-level `evaluator` metrics accumulator
    that is created/reset elsewhere -- confirm it is re-initialized per epoch.
    """
    model.eval()
    log_print(('EVAL EPOCH %d' % epoch), log_f=log_f)
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()
    iou = AverageMeter()
    with torch.no_grad():
        end = time.time()
        for (it, (_, _, _, _, _, _, _, xyz_seq, range_seq, remission_seq, labels_seq)) in enumerate(eval_loader):
            xyz_seq = xyz_seq.cuda(non_blocking=True).float()
            labels_seq = labels_seq.cuda(non_blocking=True).long()
            remission_seq = remission_seq.cuda(non_blocking=True).float()
            range_seq = range_seq.cuda(non_blocking=True).float()
            # Per-point features: remission and range stacked on a new axis.
            features = torch.cat([remission_seq.unsqueeze((- 1)), range_seq.unsqueeze((- 1))], dim=(- 1)).transpose(2, 3)
            pred_cls = model(xyz_seq, features)
            pred_cls = pred_cls.transpose(1, 2)
            # Clamp before log to avoid -inf on zero probabilities.
            loss = loss_func(torch.log(pred_cls.clamp(min=1e-08)), labels_seq)
            loss = loss.mean()
            argmax = pred_cls.argmax(dim=1)
            evaluator.addBatch(argmax, labels_seq)
            losses.update(loss.item(), xyz_seq.size(0))
            batch_time.update((time.time() - end))
            end = time.time()
        # Aggregate metrics once, after the whole loader has been consumed.
        accuracy = evaluator.getacc()
        (jaccard, class_jaccard) = evaluator.getIoU()
        acc.update(accuracy.item(), xyz_seq.size(0))
        iou.update(jaccard.item(), xyz_seq.size(0))
        print('Validation finished.')
        print('Validation set:\nTime avg per batch {batch_time.avg:.3f}\nLoss avg {loss.avg:.4f}\nAcc avg {acc.avg:.3f}\nIoU avg {iou.avg:.3f}'.format(batch_time=batch_time, loss=losses, acc=acc, iou=iou))
        for (i, jacc) in enumerate(class_jaccard):
            print('IoU class {i:} [{class_str:}] = {jacc:.3f}'.format(i=i, class_str=class_func(i), jacc=jacc))
    return (acc.avg, iou.avg, class_jaccard, losses.avg)
def get_memory_info(meminfo_path='/proc/meminfo'):
    """Parse a /proc/meminfo-style file and summarize memory usage.

    Generalized: the path is now a parameter (default keeps the original
    `/proc/meminfo` behavior), which also makes the function testable.

    Args:
        meminfo_path: path to a file with `Key: value kB` lines.

    Returns:
        Dict with 'total', 'free' (MemFree + Buffers + Cached) and
        'used' (total - free), all in kB.

    Raises:
        KeyError: if the file has no MemTotal line.
    """
    ret = {}
    free_kb = 0
    with open(meminfo_path, 'r') as mem:
        for line in mem:
            fields = line.split()
            if not fields:
                continue
            if fields[0] == 'MemTotal:':
                ret['total'] = int(fields[1])
            elif fields[0] in ('MemFree:', 'Buffers:', 'Cached:'):
                # "Free" counts reclaimable buffers/cache as available.
                free_kb += int(fields[1])
    ret['free'] = free_kb
    ret['used'] = ret['total'] - ret['free']
    return ret
def _get_metadata(vc):
    """Query an open cv2.VideoCapture for its fps, frame count and frame size.

    Returns a VideoMetadata(fps, num_frames, width, height) record.
    """
    frame_rate = vc.get(cv2.CAP_PROP_FPS)
    frame_w = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_h = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_total = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))
    return VideoMetadata(frame_rate, frame_total, frame_w, frame_h)
def zero_grad(model):
    """Drop gradients by setting .grad to None on every trainable parameter.

    Setting grads to None (instead of zeroing in place) lets the allocator
    reclaim the gradient tensors.
    """
    for param in model.parameters():
        if not param.requires_grad:
            continue
        if param.grad is not None:
            param.grad = None
def require_safetensors(test_case):
    """Decorator: skip `test_case` unless the safetensors package is installed."""
    skip_decorator = unittest.skipUnless(is_safetensors_available(), 'test requires safetensors')
    return skip_decorator(test_case)
def has_pool_type(m):
    """Return True if `m` or any of its descendant modules is a pooling layer."""
    return is_pool_type(m) or any(has_pool_type(child) for child in m.children())
def ensure_file_exists(filename):
    """Create an empty file (and its parent directory) if it does not exist yet."""
    if os.path.exists(filename):
        return
    parent, _name = os.path.split(filename)
    ensure_dir_exists(parent)
    # Touch the file: opening for write creates it empty.
    with open(filename, 'w'):
        pass
def inference(model, data_loader, dataset_name, mem_active=False, output_folder=None):
    """Run distributed inference over `data_loader` and evaluate the predictions.

    Every rank computes its share of predictions; results are gathered and
    only the main process saves and evaluates them.

    Args:
        model: the network to evaluate.
        data_loader: loader whose `.dataset` provides the videos.
        dataset_name: label used only for logging.
        mem_active: forwarded to compute_on_dataset.
        output_folder: if set, gathered predictions are saved there as
            predictions.pth.

    Returns:
        Evaluation results on the main process; None on other ranks.
    """
    device = torch.device('cuda')
    num_devices = get_world_size()
    logger = logging.getLogger('hit.inference')
    dataset = data_loader.dataset
    logger.info('Start evaluation on {} dataset({} videos).'.format(dataset_name, len(dataset)))
    start_time = time.time()
    predictions = compute_on_dataset(model, data_loader, device, logger, mem_active)
    # Barrier: wait for every rank before timing and gathering results.
    synchronize()
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=total_time))
    logger.info('Total inference time: {} ({} s / video per device, on {} devices)'.format(total_time_str, ((total_time * num_devices) / len(dataset)), num_devices))
    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if (not is_main_process()):
        return
    if output_folder:
        torch.save(predictions, os.path.join(output_folder, 'predictions.pth'))
    return evaluate(dataset=dataset, predictions=predictions, output_folder=output_folder)
def GetHome():
    """Build and return a `home()` closure that sends the robot to its home pose.

    The ROS service proxies are created once here and captured by the closure,
    so repeated `home()` calls reuse the same connections.
    """
    go_to_js = GetPlanToJointStateService()
    req = GetHomeRequest()
    print(('req home: ' + str(req)))
    open_gripper = GetOpenGripperService()
    move = GetPlanToPoseService()  # NOTE(review): unused inside home(); confirm whether needed
    servo_mode = GetServoModeService()

    def home():
        """Switch to servo mode, open the gripper, then move to the home joint state.

        Raises:
            RuntimeError: when the joint-state service fails or reports 'failure'.
        """
        rospy.loginfo('HOME: set servo mode')
        servo_mode('servo')
        rospy.loginfo('HOME: open gripper to drop anything')
        open_gripper()
        rospy.loginfo('HOME: move to config home')
        res1 = go_to_js(req)
        # Treat a missing response or a 'failure' ack as fatal.
        if ((res1 is None) or ('failure' in res1.ack.lower())):
            rospy.logerr(res1.ack)
            raise RuntimeError('HOME(): error moving to home1')
        rospy.loginfo('HOME: done')
    return home
def vis_gt(src_dir, out_dir, anno_list):
    """Visualize ground-truth annotations: draw reading-order-sorted polygons on
    each image and dump per-image text files with index, text and entity labels.

    Args:
        src_dir: directory containing the jsonl annotation files and images.
        out_dir: output directory; recreated from scratch on every call.
        anno_list: jsonl file names, one JSON record per line with keys
            'file_name' and 'annotations' (each having 'polygon', 'text' and
            optionally 'entity').
    """
    # Start from a clean output directory.
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.mkdir(out_dir)
    for anno_file in anno_list:
        annos = []
        with open(os.path.join(src_dir, anno_file), 'r', encoding='utf-8') as f:
            for line_ in f.readlines():
                if line_.strip() == '':
                    continue
                annos.append(json.loads(line_.strip()))
        for anno_ in tqdm(annos):
            img = cv2.imread(os.path.join(src_dir, anno_['file_name']))
            out_file = anno_['file_name'].split('/')[(- 1)].replace('jpg', 'txt')
            to_sort_polys = [np.array(x['polygon']) for x in anno_['annotations']]
            sorted_idx = get_poly_sort_idx(to_sort_polys)
            # Context manager replaces the manual open/close pair.
            with open(os.path.join(out_dir, out_file), 'w', encoding='utf-8') as saver:
                for (i, idx_) in enumerate(sorted_idx):
                    # Fix: np.int was removed in NumPy 1.24; the builtin int is
                    # the documented drop-in replacement for the old alias.
                    poly_ = np.array(anno_['annotations'][idx_]['polygon'], int)
                    cv2.polylines(img, [poly_.reshape((- 1), 2)], isClosed=True, thickness=1, color=(0, 0, 255))
                    # NOTE(review): poly_[0], poly_[1] index the polygon array
                    # directly -- assumed to resolve to the first vertex's
                    # (x, y); confirm the polygon layout.
                    cv2.putText(img, str(i), (poly_[0], poly_[1]), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
                    saver.write(f'''
idx:{idx_}
''')
                    saver.write(f'''TEXT:{anno_['annotations'][idx_]['text']}
''')
                    if 'entity' not in anno_['annotations'][idx_]:
                        continue
                    saver.write(f'''_KIE:{anno_['annotations'][idx_]['entity']}
''')
            cv2.imwrite(os.path.join(out_dir, out_file.replace('txt', 'jpg')), img)
class HierarchicalHealpixMap(DustMap3D):
    def __init__(self, filter=None, sf10=True):
        """Initialize the multi-resolution HEALPix dust map.

        Args:
            filter: photometric filter name forwarded to the DustMap3D base;
                when set, E(B-V) values are scaled by aebv(filter).
            sf10: use the Schlafly & Finkbeiner (2010) recalibration in aebv().
        """
        DustMap3D.__init__(self, filter=filter)
        self._sf10 = sf10
        return None
    def _evaluate(self, ls, bs, ds):
        """Evaluate the map at galactic (l, b) [deg] and distance(s) `ds`.

        NOTE(review): distance units are inferred from the distance-modulus
        formula 5*log10(d)+10 (i.e. d in kpc) -- confirm against the data.

        Returns:
            Array of extinction values; positions not covered by the map
            (index -1) are NaN.
        """
        ls = numpy.atleast_1d(ls)
        bs = numpy.atleast_1d(bs)
        ds = numpy.atleast_1d(ds)
        distmod = ((5.0 * numpy.log10(ds)) + 10.0)
        lbIndx = self._lbIndx(ls, bs)
        # Broadcast a single sky position over many distances.
        if ((len(ls) == 1) and (len(ds) > 1)):
            lbIndx = numpy.tile(lbIndx, len(ds))
        result = numpy.zeros_like(ds)
        for (counter, i, d) in zip(numpy.arange(len(result)), lbIndx, distmod):
            # Lazily build and cache a per-pixel spline over distance modulus
            # (0 in self._intps marks "not built yet").
            if (self._intps[i] != 0):
                out = self._intps[i](d)
            else:
                interpData = interpolate.InterpolatedUnivariateSpline(self._distmods, self._best_fit[i], k=self._interpk)
                out = interpData(d)
                self._intps[i] = interpData
            result[counter] = out
        if (self._filter is not None):
            result = (result * aebv(self._filter, sf10=self._sf10))
        # Pixels not found at any resolution were flagged with -1.
        result[(lbIndx == (- 1))] = numpy.nan
        return result
    def dust_vals_disk(self, lcen, bcen, dist, radius):
        """Return extinction for all HEALPix pixels inside a disc on the sky.

        Args:
            lcen, bcen: galactic longitude/latitude of the disc center [deg].
            dist: distance at which to evaluate the extinction.
            radius: angular radius of the disc [deg].

        Returns:
            (pixarea, extinction): per-pixel solid angles and extinction values.

        Raises:
            ModuleNotFoundError: if healpy is not installed.
        """
        try:
            import healpy
        except ImportError:
            raise ModuleNotFoundError('This function requires healpy to be installed')
        vec = healpy.pixelfunc.ang2vec(((90.0 - bcen) * _DEGTORAD), (lcen * _DEGTORAD))
        distmod = ((5.0 * numpy.log10(dist)) + 10.0)
        pixarea = []
        extinction = []
        # The map is multi-resolution: query the disc at every stored nside.
        for nside in self._nsides:
            ipixs = healpy.query_disc(nside, vec, (radius * _DEGTORAD), inclusive=False, nest=True)
            nsideindx = (self._pix_info['nside'] == nside)
            potenIndxs = self._indexArray[nsideindx]
            nsidepix = self._pix_info['healpix_index'][nsideindx]
            tout = []
            for (ii, ipix) in enumerate(ipixs):
                lbIndx = potenIndxs[(ipix == nsidepix)]
                # Skip disc pixels that are not present at this resolution.
                if (numpy.sum(lbIndx) == 0):
                    continue
                # Reuse the cached spline when available (0 marks "not built").
                if (self._intps[lbIndx] != 0):
                    tout.append(self._intps[lbIndx][0](distmod))
                else:
                    interpData = interpolate.InterpolatedUnivariateSpline(self._distmods, self._best_fit[lbIndx], k=self._interpk)
                    tout.append(interpData(distmod))
                    self._intps[lbIndx] = interpData
            # One (constant) pixel area per returned pixel at this nside.
            tarea = healpy.pixelfunc.nside2pixarea(nside)
            tarea = [tarea for ii in range(len(tout))]
            pixarea.extend(tarea)
            extinction.extend(tout)
        pixarea = numpy.array(pixarea)
        extinction = numpy.array(extinction)
        if (not (self._filter is None)):
            extinction = (extinction * aebv(self._filter, sf10=self._sf10))
        return (pixarea, extinction)
def _lbIndx(self, ls, bs):
    """Return, for each (l, b) in degrees, the row index into the pixel
    table (self._pix_info) of the HEALPix pixel containing it, or -1 where
    no pixel matches at any resolution.

    The map's nside levels are scanned in order; once a position has been
    matched at one resolution it is never overwritten by a later one.
    """
    # Positions already resolved at a previously scanned nside.
    stop_mask = numpy.zeros(len(ls), dtype=bool)
    # Default -1 marks "not found in the map".
    indx_result = (numpy.ones(len(ls), dtype=int) * (- 1))
    for nside in self._nsides:
        # NESTED-scheme pixel of each position at this resolution;
        # ang2pix takes (colatitude, longitude) in radians.
        tpix = ang2pix(nside, ((90.0 - bs) * _DEGTORAD), (ls * _DEGTORAD), nest=True)
        nside_idx = numpy.where((self._pix_info['nside'] == nside))[0]
        healpix_index_nside = self._pix_info['healpix_index'][nside_idx]
        # The pixel-index column is not sorted, so binary-search through
        # its argsort order.
        sorted_order = numpy.argsort(healpix_index_nside)
        result = numpy.searchsorted(healpix_index_nside, tpix, sorter=sorted_order)
        # searchsorted returns len(...) for values beyond the last entry;
        # clamp those to 0 so the fancy indexing below stays in bounds.
        known_bad_idx = (result == len(nside_idx))
        result[known_bad_idx] = 0
        result = sorted_order[result]
        result = nside_idx[result]
        # A hit is genuine only if the found row really carries this pixel
        # index at this nside and was not a clamped out-of-range lookup.
        good_result_idx = (((self._pix_info['healpix_index'][result] == tpix) & (self._pix_info['nside'][result] == nside)) & (~ known_bad_idx))
        # Record new hits only for positions not already resolved.
        indx_result = numpy.where(((~ stop_mask) & good_result_idx), result, indx_result)
        indx_result = numpy.where((known_bad_idx & (~ stop_mask)), (- 1), indx_result)
        stop_mask = (stop_mask | good_result_idx)
    return indx_result
def plot_mollweide(self, d, **kwargs):
    """Plot the map at distance d (kpc) as a Mollweide projection.

    Extra keyword arguments are forwarded to healpy.visufunc.mollview.
    The special keyword 'nside_plot' (default 2048) degrades the map to
    that resolution before plotting.  Requires healpy.  Returns None.
    """
    try:
        import healpy
    except ImportError:
        raise ModuleNotFoundError('This function requires healpy to be installed')
    # Distance modulus of the requested distance.
    dm = ((5.0 * numpy.log10(d)) + 10.0)
    # Convert E(B-V) to extinction in the configured filter, if any.
    filter_fac = (aebv(self._filter, sf10=self._sf10) if (not (self._filter is None)) else 1.0)
    # Nearest tabulated distance-modulus bin.
    tpix = numpy.argmin(numpy.fabs((dm - self._distmods)))
    nside_max = numpy.max(self._pix_info['nside'])
    npix = healpy.pixelfunc.nside2npix(nside_max)
    pix_val = numpy.empty(npix, dtype='f8')
    pix_val[:] = healpy.UNSEEN
    # Upsample every coarser-nside pixel onto the finest grid: in the
    # NESTED scheme one pixel at nside covers (nside_max//nside)**2
    # consecutive pixels at nside_max.
    for nside in numpy.unique(self._pix_info['nside']):
        indx = (self._pix_info['nside'] == nside)
        pix_val_n = (filter_fac * self._best_fit[(indx, tpix)])
        mult_factor = ((nside_max // nside) ** 2)
        pix_idx_n = (self._pix_info['healpix_index'][indx] * mult_factor)
        for offset in range(mult_factor):
            pix_val[(pix_idx_n + offset)] = pix_val_n[:]
    # BUG FIX: use pop, not get -- 'nside_plot' is not a mollview keyword,
    # so leaving it in kwargs made the **kwargs forwarding below raise a
    # TypeError whenever a caller actually supplied it.
    nside_plot = kwargs.pop('nside_plot', 2048)
    if ((not (nside_plot is None)) and (nside_plot < nside_max)):
        # Degrade to a coarser resolution for faster plotting.
        pix_val = healpy.pixelfunc.ud_grade(pix_val, nside_plot, pess=False, order_in='NEST', order_out='NEST')
    # Mark unseen pixels negative so they render below vmin=0.
    pix_val[(pix_val == healpy.UNSEEN)] = (- 1.0)
    if (not (self._filter is None)):
        kwargs['unit'] = ('$A_{%s}\\,(\\mathrm{mag})$' % self._filter.split(' ')[(- 1)])
    else:
        kwargs['unit'] = '$E(B-V)\\,(\\mathrm{mag})$'
    kwargs['title'] = kwargs.get('title', '')
    healpy.visufunc.mollview(pix_val, nest=True, xsize=4000, min=0.0, max=numpy.quantile(pix_val, 0.99), format='$%g$', cmap='gist_yarg', **kwargs)
    return None
class BlockDataset(Dataset):
    """Sliding-window language-model dataset over a 1-D integer array.

    Item ``i`` is the pair ``(data[i:i+block_size], data[i+1:i+1+block_size])``
    as int64 tensors, i.e. inputs and next-token targets.
    """

    def __init__(self, data, block_size):
        self.data = data  # 1-D numpy array of token ids
        self.block_size = block_size  # context window length

    def __len__(self):
        # Last valid start index must leave room for the shifted target window.
        return len(self.data) - self.block_size

    def __getitem__(self, idx):
        start, stop = idx, idx + self.block_size
        x = torch.from_numpy(self.data[start:stop].astype(np.int64))
        y = torch.from_numpy(self.data[start + 1:stop + 1].astype(np.int64))
        return x, y
class EnvBatch():
    """A batch of Matterport3D simulators plus optional precomputed image
    features (and optional augmented feature sets).

    feature_store may be: a dict (original features only), a 1-element list
    (augmented features only), or a 2-element list [original, augmented].
    """

    def __init__(self, feature_store=None, batch_size=100):
        # Camera settings shared by every simulator in the batch.
        self.image_w = 640
        self.image_h = 480
        self.vfov = 60
        self.features_aug = None
        if feature_store:
            if type(feature_store) is dict:
                self.features = feature_store
            elif len(feature_store) == 1:
                print('Using augment features only')
                self.features = feature_store[0]
            elif type(feature_store) is list:
                print('Using Both features')
                self.features = feature_store[0]
                self.features_aug = feature_store[1]
            self.feature_size = next(iter(self.features.values())).shape[-1]
            print('The feature size is %d' % self.feature_size)
        else:
            print('Image features not provided')
            self.features = None
        # BUG FIX: the original unconditionally read self.features.keys()
        # here and crashed when no features were provided.
        if self.features is not None:
            self.featurized_scans = set(key.split('_')[0] for key in self.features)
        else:
            self.featurized_scans = set()
        self.sims = []
        for i in range(batch_size):
            sim = MatterSim.Simulator()
            sim.setRenderingEnabled(False)
            sim.setDiscretizedViewingAngles(True)
            sim.setCameraResolution(self.image_w, self.image_h)
            sim.setCameraVFOV(math.radians(self.vfov))
            sim.init()
            self.sims.append(sim)
        # 'original' or anything else: selects which store getStates uses
        # under the 'specify' augmentation method.
        self.feature_specify = 'original'

    def _make_id(self, scanId, viewpointId):
        # Features are keyed by '<scanId>_<viewpointId>'.
        return scanId + '_' + viewpointId

    def newEpisodes(self, scanIds, viewpointIds, headings):
        """Start a new episode in each simulator (elevation fixed at 0)."""
        for i, (scanId, viewpointId, heading) in enumerate(zip(scanIds, viewpointIds, headings)):
            self.sims[i].newEpisode(scanId, viewpointId, heading, 0)

    def getStates(self):
        """Return a list of (feature, state) pairs, one per simulator.

        feature is None when no feature store was provided; with augmented
        features, args.aug_method selects between round-robin
        ('alternative') and explicit ('specify') store selection.
        """
        feature_states = []
        for i, sim in enumerate(self.sims):
            state = sim.getState()
            long_id = self._make_id(state.scanId, state.location.viewpointId)
            if self.features_aug:
                n_aug = len(self.features_aug)
                if args.aug_method == 'alternative':
                    # Round-robin: slot 0 -> original, slots 1..n -> augmented sets.
                    slot = i % (n_aug + 1)
                    if slot == 0:
                        feature = self.features[long_id]
                    else:
                        feature = self.features_aug[slot - 1][long_id]
                elif args.aug_method == 'specify':
                    if self.feature_specify == 'original':
                        feature = self.features[long_id]
                    else:
                        feature = self.features_aug[i % n_aug][long_id]
                else:
                    # BUG FIX: the original printed 'Aug Method Error' and then
                    # hit a NameError on the unbound 'feature'; fail loudly.
                    raise ValueError('Aug Method Error: unknown aug_method %r' % (args.aug_method,))
                feature_states.append((feature, state))
            elif self.features:
                feature_states.append((self.features[long_id], state))
            else:
                feature_states.append((None, state))
        return feature_states

    def makeActions(self, actions):
        """Apply one (viewpoint index, heading delta, elevation delta) per sim."""
        for i, (index, heading, elevation) in enumerate(actions):
            self.sims[i].makeAction(index, heading, elevation)
def set_grad(params, params_with_grad, scale=1.0):
    """Copy (optionally down-scaled) gradients from one parameter list onto
    another (e.g. fp16 model params -> fp32 master params).

    Returns True as soon as any gradient contains NaN/Inf (overflow -- the
    caller should skip the optimizer step), otherwise False.  Note that the
    division by ``scale`` mutates the source gradients in place.
    """
    for target, source in zip(params, params_with_grad):
        if target.grad is None:
            # Allocate an uninitialised buffer of the right shape to copy into.
            target.grad = torch.nn.Parameter(
                target.data.new().resize_(*target.data.size()))
        grad = source.grad.data
        if scale is not None:
            grad /= scale
        # Loss-scaling overflow check: bail out before copying.
        if torch.isnan(grad).any() or torch.isinf(grad).any():
            return True
        target.grad.data.copy_(grad)
    return False
class EmbeddingNormalization():
    """Callable that normalizes embedding tensors by a fixed norm.

    The norm may be a scalar or a tensor; a 2-D norm tensor is promoted to
    3-D so it broadcasts over the batch dimension.
    """

    def __init__(self, norm: Union[float, torch.Tensor] = 1):
        self.norm = norm
        # (d1, d2) -> (1, d1, d2) so the norm broadcasts across the batch.
        if isinstance(self.norm, torch.Tensor) and self.norm.ndim == 2:
            self.norm = self.norm.unsqueeze(0)

    def __call__(self, embeddings: torch.Tensor) -> torch.Tensor:
        # inference_mode: no autograd tracking during normalization.
        with torch.inference_mode():
            return F.normalize_embeddings(embeddings, self.norm)
class PAWS(AbstractTask):
    """PAWS paraphrase-identification task ('labeled_final' config).

    Binary classification: does sentence2 paraphrase sentence1?
    Targets are the strings 'No'/'Yes' (labels 0/1).
    """

    name = 'paws'
    labels_list = ['No', 'Yes']
    metric = [metrics.accuracy]
    metric_names = ['accuracy']
    split_to_data_split = {'train': 'train', 'validation': 'validation', 'test': 'test'}

    def load_dataset(self, split: str):
        # FIX: 'split' is a split-name string, not an int, so annotate it as str.
        # NOTE: 'script_version' is the legacy name of 'revision' in older
        # versions of the datasets library; kept for compatibility.
        return datasets.load_dataset('paws', 'labeled_final', split=split, script_version='master')

    def preprocessor(self, example, add_prefix=True):
        """Turn one PAWS example into a {source, target, task, extra_fields}
        record; the source phrases the pair as a yes/no question.
        (The original built an unused 'sentence1:/sentence2:' prefix list,
        now removed; add_prefix is accepted for interface compatibility.)
        """
        target = 'No' if str(example['label']) == '0' else 'Yes'
        source = (example['sentence1']
                  + '\nIs that a paraphrase of the following sentence?\n'
                  + example['sentence2'] + '?\n')
        return {'source': source, 'target': target, 'task': self.name, 'extra_fields': {}}
def test_list_space():
    """A ListSpace of Discrete(2) with length bounds [5, 10] should accept
    its own samples and reject wrong types, lengths, and element values."""
    space = ListSpace(gym.spaces.Discrete(2), 5, 10)
    sample = space.sample()
    assert space.contains(sample)
    # Not a list; too short; out-of-range elements; too long.
    for invalid in (0, [0] * 4, [2] * 5, [1] * 11):
        assert not space.contains(invalid)
def run(args):
    """Merge per-protein distance (dcalpha), angle (psi/phi) and optional
    coordinate pickles into one output pickle keyed by protein id.

    args must provide: dcalphas_path, angles_path, output_path, and
    coords_path (may be None to skip coordinates).
    """
    output_dir = os.path.split(args.output_path)[0]
    # BUG FIX: os.makedirs('') raises when output_path has no directory
    # component; exist_ok also removes the check-then-create race.
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    # NOTE: pickle.load executes arbitrary code -- only load trusted files.
    with open(args.dcalphas_path, 'rb') as fin:
        dcalphas = pickle.load(fin)
    with open(args.angles_path, 'rb') as fin:
        angles = pickle.load(fin)
    coords = None
    if args.coords_path is not None:
        with open(args.coords_path, 'rb') as fin:
            coords = pickle.load(fin)
    output = {}
    for pid in dcalphas:
        output[pid] = {
            'dcalphas': dcalphas[pid]['dcalpha'],
            'psi': angles[pid]['psi'],
            'phi': angles[pid]['phi'],
        }
        if coords is not None:
            output[pid]['coords'] = coords[pid]['coords']
    with open(args.output_path, 'wb') as fout:
        pickle.dump(output, fout)
# NOTE(review): this bare string is a no-op expression statement; it was
# almost certainly a Flask route decorator, e.g.
# @app.route('/timeseries/<state>/<metric>'), whose '@app.route' prefix was
# lost during extraction -- confirm against the original file.
('/timeseries/<state>/<metric>')
def get_timeseries(state, metric):
    """Return the cached time series for one state/metric as a JSON response
    with a permissive CORS header; aborts with 400 on unknown names."""
    # Whitelists keep URL segments from selecting arbitrary columns.
    if (state not in STATE_WHITELIST):
        abort(400, f"Bad state name. Must be one of: {', '.join(STATE_WHITELIST)}")
    if (metric not in METRIC_WHITELIST):
        abort(400, f"Bad metric name. Must be one of: {', '.join(METRIC_WHITELIST)}")
    df = CACHE_TIMESERIES.get()
    # Column naming convention: '<state><metric suffix>'.
    column_name = (state + METRIC_SUFFIX_MAP[metric])
    # Each row becomes a single-entry {timestamp: value} object.
    output_dict = {'data': [{time: value} for (time, value) in df[column_name].to_dict().items()], 'meta': TIMESERIES_JSON_OUTPUT_META_DICT}
    resp = jsonify(output_dict)
    # Allow cross-origin access from any frontend.
    resp.headers.add('Access-Control-Allow-Origin', '*')
    return resp
def _has_arg(fn, arg_name):
while isinstance(fn, functools.partial):
fn = fn.func
while hasattr(fn, '__wrapped__'):
fn = fn.__wrapped__
arg_spec = inspect.getfullargspec(fn)
if arg_spec.varkw:
return True
return ((arg_name in arg_spec.args) or (arg_name in arg_spec.kwonlyargs)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.