text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
import numpy as np
def normalize(mat):
    """
    Normalize a matrix along axis 1 (rows sum to 1); 1-D input is
    normalized to sum to 1. Rows (or a vector) summing to zero are
    returned as all zeros.

    :param np.ndarray mat: Matrix (or vector) to normalize; not modified.
    :return np.ndarray: Normalized matrix.
    """
    # BUG FIX: the original added 1e-20 into the caller's zero rows as a
    # side effect; work on a float view/copy and never write back.
    mat = np.asarray(mat, dtype=float)
    if mat.ndim == 1:
        total = mat.sum()
        if total == 0:
            return np.zeros_like(mat)
        return mat / total
    row_sums = mat.sum(axis=1, keepdims=True)
    zero_rows = (row_sums == 0).ravel()
    # Placeholder divisor for empty rows; those rows are zeroed below.
    row_sums[zero_rows] = 1.0
    result = mat / row_sums
    result[zero_rows, :] = 0
    return result
def safe_log(x):
    """
    Element-wise natural logarithm that maps non-positive entries to -inf
    instead of emitting warnings or NaNs.

    :param np.ndarray x: Tensor to be logged; not modified.
    :rtype: np.ndarray
    """
    x = np.asarray(x, dtype=float)
    # BUG FIX: the original wrote 1e-300 / -inf back into the caller's
    # array; build a fresh output array instead.
    out = np.full_like(x, -float('inf'))
    positive = x > 0
    out[positive] = np.log(x[positive])
    return out
def log_sum_exp(x):
    """
    Compute log(sum(exp(x))) along the first axis in a numerically
    stable way (shift by the column max before exponentiating).

    :param np.ndarray x: The tensor to be log_sum_exp'ed.
    :rtype: np.ndarray
    """
    base = x.max(axis=0)
    # Columns that are entirely -inf would make the shift undefined;
    # substitute a tiny finite base (those columns come out -inf anyway).
    base[base == -float('inf')] = 1e-300
    summed = np.exp(x - base).sum(axis=0)
    if not isinstance(summed, np.ndarray):
        # Scalar path.
        if summed <= 0:
            return -float('inf')
        return np.log(summed) + base
    degenerate = (summed <= 0)
    summed[degenerate] = 1e-300
    result = base + np.log(summed)
    result[degenerate] = -float('inf')
    return result
def Gaussian(mu, sigma, x):
    """
    The PDF of the Gaussian distribution.

    If sigma is zero the Gaussian degrades to a delta distribution:
    density inf at x == mu, 0 elsewhere.

    :param mu: Mean(s), scalar or np.ndarray.
    :param sigma: Scale(s), float or np.ndarray; not modified.
        NOTE(review): the original doc called this "variance", but the code
        divides by |sigma| and squares it in the exponent, i.e. it is used
        as a standard deviation — confirm with callers.
    :param x: Variable(s).
    :rtype: float or np.ndarray
    """
    if isinstance(sigma, float):
        if sigma == 0:
            # Delta distribution at mu.
            return float('inf') if x == mu else 0
        return np.exp(-(x - mu)**2 / 2 / sigma**2) / np.sqrt(2 * np.pi) / np.abs(sigma)
    # BUG FIX: the original wrote 1e-10 into the caller's sigma array;
    # take a float copy before patching the zero entries.
    sigma = np.array(sigma, dtype=float)
    zeros = (sigma == 0)
    sigma[zeros] = 1e-10  # placeholder so the vectorized math stays finite
    equals = (x == mu)
    ret = np.exp(-(x - mu)**2 / 2 / sigma**2) / np.sqrt(2 * np.pi) / np.abs(sigma)
    # Delta-distribution entries: 0 off the mean, inf on it.
    ret[zeros] = 0
    ret[zeros & equals] = float('inf')
    return ret
def Zeller(s):
    """
    Zeller's congruence: convert a date string into the day of the week.

    :param str s: Date formatted as month_day_year, e.g. '08_08_1997'.
        NOTE(review): month and day must be zero-padded to two digits —
        the day is read from fixed positions s[3:5] — and only the last
        two digits of the year are used; the constant 6 below presumably
        encodes the 19xx century term. Verify before using on dates
        outside the 1900s.
    :rtype: int
    """
    year = int(s[-2:])
    month = int(s[:s.index('_')])
    day = int(s[3:5])
    # In Zeller's congruence, January and February are treated as months
    # 13 and 14 of the previous year.
    if month < 3:
        month += 12
        year -= 1
    return (year+year//4+6+(26*(month+1))//10+day) % 7
def weighted_mean_std(values):
    """
    Calculate the weighted mean and standard deviation of a series of
    weighted values.

    :param dict values: Maps each value to its weight. Weights may be plain
        numbers or np.ndarrays of a common shape; the dict is not modified.
    :return: (mean, std). Entries with zero total weight get mean=inf and
        std=1. (The original docstring said "variance"; the second return
        value is the standard deviation.)
    :rtype: (float, float) or (np.ndarray, np.ndarray)
    """
    dim = (1,)
    single_number = False
    # BUG FIX: the original promoted scalar weights by writing arrays back
    # into the caller's dict; build a local copy instead.
    weights = {}
    for value, weight in values.items():
        if not isinstance(weight, np.ndarray):
            weight = np.array([weight])
            single_number = True
        weights[value] = weight
        dim = weight.shape
    cnt = np.zeros(shape=dim, dtype=np.float64)
    x_sum = np.zeros(shape=dim, dtype=np.float64)
    x2_sum = np.zeros(shape=dim, dtype=np.float64)
    for value, weight in weights.items():
        cnt += weight
        x_sum += value * weight
        x2_sum += value * value * weight
    # Guard bins that received no weight; they are patched to inf/1 below.
    zero_lines = (cnt == 0)
    cnt[zero_lines] = 1e-100
    mean = x_sum / cnt
    variance = x2_sum / cnt - mean * mean
    mean[zero_lines] = float('inf')
    variance[zero_lines] = 1.0
    # Clip tiny negative variances caused by floating-point cancellation.
    variance[variance < 0] = 0.0
    if single_number:
        mean = mean[0]
        variance = variance[0]
    std = np.sqrt(variance)
    return mean, std
def split_list(l, n_part):
    """
    Partition `l` into `n_part` round-robin sublists.

    :param list l: Sequence to split.
    :param int n_part: Number of parts.
    :rtype: list[list]
    """
    return [l[offset::n_part] for offset in range(n_part)]
if __name__ == '__main__':
    # Placeholder entry point; this module is intended to be imported as a
    # library of math helpers, not run directly.
    _test = 1
|
{"hexsha": "85533e03c6995d269dccbb6e833ea499d83a4bd9", "size": 3922, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/math_util.py", "max_stars_repo_name": "hiaoxui/D2T_Grounding", "max_stars_repo_head_hexsha": "4c46f8a8d2867712399ac7c0e7f7f34ef911a69a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2018-11-16T08:59:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-06T10:57:16.000Z", "max_issues_repo_path": "utils/math_util.py", "max_issues_repo_name": "hiaoxui/D2T_Grounding", "max_issues_repo_head_hexsha": "4c46f8a8d2867712399ac7c0e7f7f34ef911a69a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-01-18T09:36:53.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-01T15:06:03.000Z", "max_forks_repo_path": "utils/math_util.py", "max_forks_repo_name": "hiaoxui/D2T-Grounding", "max_forks_repo_head_hexsha": "4c46f8a8d2867712399ac7c0e7f7f34ef911a69a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9808917197, "max_line_length": 87, "alphanum_fraction": 0.5637429883, "include": true, "reason": "import numpy", "num_tokens": 1146}
|
from preprocess import *
from pyspark.ml.classification import RandomForestClassifier
from pyspark.sql.functions import col, when, concat_ws
from pyspark.ml.feature import StringIndexer, VectorAssembler
from pyspark.ml.regression import LinearRegression
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
from sklearn import datasets, linear_model
from sklearn.ensemble import RandomForestRegressor
from pyspark.mllib.evaluation import MulticlassMetrics, RegressionMetrics
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, RegressionEvaluator
from sklearn import metrics
import numpy as np
from plot import *
from metrics import *
# Input/output locations for the preprocessing + model-prep pipeline.
# parler_data000000000037.ndjson
parlerDataDirectory = './parler_data000000000037.ndjson/'
outputFileDirectory = './preprocessed/'
outputJson = './parler-data/'
#SPARK MODELS
def prep_data_spark(parlerDataDirectory):
    """
    Preprocess the raw Parler dump into a Spark DataFrame ready for MLlib.

    Runs the project's Preprocess pipeline, drops rows with nulls, casts the
    model columns to float, string-indexes 'upvotes' into a 'label' column,
    and assembles the predictors into a single 'features' vector column.

    :param parlerDataDirectory: path to the raw ndjson data directory
    :return: Spark DataFrame with 'label' and 'features' columns added

    NOTE(review): FloatType is not among this file's visible imports —
    presumably it arrives via `from preprocess import *`; confirm, otherwise
    this function raises NameError.
    """
    preprocessor = Preprocess(parlerDataDirectory, outputFileDirectory, outputJson)
    preprocessor.preprocessJson(parlerDataDirectory)
    df = preprocessor.getProcessedData()
    df.show()
    df = df.dropna()
    # Cast every model input (and the target 'upvotes') to float.
    df = df.withColumn("comments", col("comments").cast(FloatType())).\
        withColumn("followers", col("followers").cast(FloatType())).\
        withColumn("following", col("following").cast(FloatType())).\
        withColumn("impressions", col("impressions").cast(FloatType())).\
        withColumn("reposts", col("reposts").cast(FloatType())).\
        withColumn("verified", col("verified").cast(FloatType())). \
        withColumn("categoryIndexMimeType", col("categoryIndexMimeType").cast(FloatType())). \
        withColumn("categoryIndexDomains", col("categoryIndexDomains").cast(FloatType())). \
        withColumn("sentiment_score", col("sentiment_score").cast(FloatType())). \
        withColumn("hashtag significance", col("hashtag significance").cast(FloatType())). \
        withColumn("body significance", col("body significance").cast(FloatType())). \
        withColumn("upvotes", col("upvotes").cast(FloatType()))
    # Encode the target as a categorical label column for the classifiers.
    indexer = StringIndexer(inputCol='upvotes', outputCol='label')
    indexed = indexer.fit(df).transform(df)
    # Pack all predictors into one vector column, as MLlib estimators expect.
    assembler = VectorAssembler(
        inputCols=['comments', 'followers', 'following', 'impressions', 'reposts', 'verified', 'categoryIndexMimeType',
                   'categoryIndexDomains', 'sentiment_score', 'hashtag significance', 'body significance'],
        outputCol="features")
    output = assembler.transform(indexed)
    output.show()
    return output
def prep_data_scikit(parlerDataDirectory):
    """
    Preprocess the Parler dump and return a scikit-learn style train/test
    split as pandas objects.

    :param parlerDataDirectory: path to the raw ndjson data directory
    :return: (train_x, train_y, test_x, test_y) where y is the 'upvotes'
        column and x holds the eleven predictor columns.
    """
    feature_columns = [
        'comments', 'followers', 'following', 'impressions', 'reposts',
        'verified', 'categoryIndexMimeType', 'categoryIndexDomains',
        'sentiment_score', 'hashtag significance', 'body significance',
    ]
    preprocessor = Preprocess(parlerDataDirectory, outputFileDirectory, outputJson)
    preprocessor.preprocessJson(parlerDataDirectory)
    frame = preprocessor.getProcessedData().toPandas()
    frame = frame.dropna()  # may be removing more than we want
    train, test = train_test_split(frame, test_size=0.3)
    return (train[feature_columns], train['upvotes'],
            test[feature_columns], test['upvotes'])
def convert_array_to_string(my_list):
    """Render an iterable as a bracketed, comma-separated string."""
    joined = ','.join(str(elem) for elem in my_list)
    return f'[{joined}]'
def format_for_csv(predictions):
    """
    Stringify the vector-typed 'features' column and dump predictions to CSV.

    :param predictions: Spark DataFrame containing a 'features' column
    :return: the DataFrame with 'features' replaced by its string rendering

    NOTE(review): writes to the module-level `outputJson` path as a side
    effect; Spark will fail if that path already exists — confirm intended.
    """
    convert_array_to_string_udf = udf(convert_array_to_string, StringType())
    predictions = predictions.withColumn("features", convert_array_to_string_udf(predictions["features"]))
    # coalesce(1) forces a single CSV part file instead of one per partition.
    predictions.coalesce(1).write.format("csv").save(outputJson, header = True)
    return predictions
|
{"hexsha": "4c323fe45d3f0e2a3a3c961ef775feba1540f38d", "size": 4124, "ext": "py", "lang": "Python", "max_stars_repo_path": "mlPrep.py", "max_stars_repo_name": "JonathanSolvesProblems/Soen-471-Project", "max_stars_repo_head_hexsha": "f9039df455a3a150a0211bd0241f1ae611ac3a28", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mlPrep.py", "max_issues_repo_name": "JonathanSolvesProblems/Soen-471-Project", "max_issues_repo_head_hexsha": "f9039df455a3a150a0211bd0241f1ae611ac3a28", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mlPrep.py", "max_forks_repo_name": "JonathanSolvesProblems/Soen-471-Project", "max_forks_repo_head_hexsha": "f9039df455a3a150a0211bd0241f1ae611ac3a28", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.3370786517, "max_line_length": 119, "alphanum_fraction": 0.7344810863, "include": true, "reason": "import numpy", "num_tokens": 913}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) BaseDetection, Inc. and its affiliates. All Rights Reserved
from typing import Dict
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from cvpods.layers import Conv2d, ConvTranspose2d, ShapeSpec
class FCNHead(nn.Module):
    """
    The head used in FCN for Semantic Segmentation.
    See: https://arxiv.org/abs/1605.06211 for more details.

    One 1x1 "score" conv per input feature plus a bilinearly-initialized
    transposed conv per stage; coarser score maps are upsampled, cropped,
    and summed with finer ones (FCN skip connections).
    """
    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        super().__init__()
        self.in_features = cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
        # Per-feature output stride and channel count, keyed by feature name.
        feature_strides = {k: v.stride for k, v in input_shape.items()}
        feature_channels = {k: v.channels for k, v in input_shape.items()}
        self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE
        num_classes = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
        self.loss_weight = cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT
        # Upsampling factor per stage: first entry is the finest feature's
        # stride (final upsample back toward image resolution); the rest are
        # stride ratios between consecutive features, coarsest first.
        upsampling_strides = []
        feature_strides_list = list(feature_strides.values())
        upsampling_strides.append(feature_strides_list[0])
        feature_strides_list = feature_strides_list[::-1]
        for s1, s2 in zip(feature_strides_list[:], feature_strides_list[1:]):
            upsampling_strides.append(s1 // s2)
        assert len(upsampling_strides) == len(self.in_features)
        score_convs = []
        upsampling_convs = []
        for idx, in_feature in enumerate(self.in_features):
            ch = feature_channels[in_feature]
            # 1x1 conv mapping backbone channels to per-class scores.
            score_convs.append(
                Conv2d(ch, num_classes, kernel_size=1)
            )
            stride = upsampling_strides[idx]
            upsampling_convs.append(
                ConvTranspose2d(
                    num_classes,
                    num_classes,
                    kernel_size=stride * 2,
                    stride=stride,
                    padding=1,
                    bias=False,
                )
            )
        self.score_convs = nn.ModuleList(score_convs)
        self.upsampling_convs = nn.ModuleList(upsampling_convs)
        self._initialize_weights()

    def _initialize_weights(self):
        # Ref: https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/surgery.py
        def get_upsampling_weight(in_channels, out_channels, kernel_size):
            """
            Make a 2D bilinear kernel suitable for upsampling of the given (h, w) size.
            """
            factor = (kernel_size + 1) // 2
            if kernel_size % 2 == 1:
                center = factor - 1
            else:
                center = factor - 0.5
            og = np.ogrid[:kernel_size, :kernel_size]
            # Separable triangular (bilinear) profile in h and w.
            filt = (1 - abs(og[0] - center) / factor) * \
                (1 - abs(og[1] - center) / factor)
            weight = np.zeros(
                (in_channels, out_channels, kernel_size, kernel_size),
                dtype=np.float64
            )
            # Only the channel-diagonal entries get the bilinear kernel.
            weight[range(in_channels), range(out_channels), :, :] = filt
            return torch.from_numpy(weight).float()
        # Zero-init the score convs; bilinear-init the transposed convs.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.zero_()
                if m.bias is not None:
                    m.bias.data.zero_()
            if isinstance(m, nn.ConvTranspose2d):
                assert m.kernel_size[0] == m.kernel_size[1]
                initial_weight = get_upsampling_weight(
                    m.in_channels, m.out_channels, m.kernel_size[0])
                m.weight.data.copy_(initial_weight)

    def forward(self, features, targets=None):
        """
        Returns:
            In training, returns (None, dict of losses)
            In inference, returns (CxHxW logits, {})

        NOTE(review): the output is cropped to targets.shape[-2:], so
        `targets` is dereferenced even on the inference path — calling with
        targets=None raises AttributeError. Confirm callers always pass it.
        """
        x = self.layers(features, ori_shape=targets.shape[-2:])
        if self.training:
            return None, self.losses(x, targets)
        else:
            return x, {}

    def layers(self, features, ori_shape):
        # NOTE The compute order is from back to front
        # (coarsest feature first; each pass fuses with the next finer one).
        for i, f in zip(range(-1, -len(features) - 1, -1), self.in_features[::-1]):
            if i == -1:
                x = self.score_convs[i](features[f])
                pre = self.upsampling_convs[i](x)
            else:
                x = self.score_convs[i](features[f])
                # Crop the finer score map to the upsampled map's size.
                h, w = pre.shape[-2:]
                crop_offset_h = (x.size(-2) - pre.size(-2)) // 2
                crop_offset_w = (x.size(-1) - pre.size(-1)) // 2
                cur = x[:, :, crop_offset_h: crop_offset_h + h, crop_offset_w: crop_offset_w + w]
                # Fuse (FCN skip connection), then upsample for the next stage.
                x = pre + cur
                pre = self.upsampling_convs[i](x)
        # Final center-crop to the requested output shape.
        h, w = ori_shape[-2:]
        crop_offset_h = (pre.size(-2) - ori_shape[-2]) // 2
        crop_offset_w = (pre.size(-1) - ori_shape[-1]) // 2
        x = pre[:, :, crop_offset_h: crop_offset_h + h, crop_offset_w: crop_offset_w + w]
        return x

    def losses(self, predictions, targets):
        # Mean cross-entropy over labeled pixels, scaled by the config weight.
        loss = F.cross_entropy(
            predictions, targets, reduction="mean", ignore_index=self.ignore_value
        )
        losses = {"loss_sem_seg": loss * self.loss_weight}
        return losses
|
{"hexsha": "c44a46240388ba33d5ec1b76c5575e58de7054af", "size": 5180, "ext": "py", "lang": "Python", "max_stars_repo_path": "cvpods/modeling/meta_arch/fcn.py", "max_stars_repo_name": "reinforcementdriving/cvpods", "max_stars_repo_head_hexsha": "32d98b74745020be035a0e20337ad934201615c4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-24T17:01:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-24T17:01:29.000Z", "max_issues_repo_path": "cvpods/modeling/meta_arch/fcn.py", "max_issues_repo_name": "wondervictor/cvpods", "max_issues_repo_head_hexsha": "614a975e5425bbaeb66bbd1ffca552d633ba89ca", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cvpods/modeling/meta_arch/fcn.py", "max_forks_repo_name": "wondervictor/cvpods", "max_forks_repo_head_hexsha": "614a975e5425bbaeb66bbd1ffca552d633ba89ca", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0882352941, "max_line_length": 97, "alphanum_fraction": 0.5637065637, "include": true, "reason": "import numpy", "num_tokens": 1217}
|
#!/usr/bin/env python3
"""
Grid features extraction script.
"""
import argparse
import os
import torch
import tqdm
from fvcore.common.file_io import PathManager
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import default_setup
from detectron2.evaluation import inference_context
from detectron2.modeling import build_model
import numpy as np
from clip.clip import load
import torch.nn as nn
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from grid_feats import (
add_attribute_config,
build_detection_test_loader_with_attributes,
)
# from timm.models.vision_transformer import resize_pos_embed
# A simple mapper from object detection dataset names to the VQA image
# folder each one lives in (used when building the feature dump path).
dataset_to_folder_mapper = {}
dataset_to_folder_mapper['coco_2014_train'] = 'train2014'
dataset_to_folder_mapper['coco_2014_val'] = 'val2014'
#dataset_to_folder_mapper['coco_2014_val'] = 'trainval2014'
#dataset_to_folder_mapper['coco_2014_train'] = 'trainval2014'
# One may need to change the Detectron2 code to support coco_2015_test
# insert "coco_2015_test": ("coco/test2015", "coco/annotations/image_info_test2015.json"),
# at: https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/builtin.py#L36
dataset_to_folder_mapper['coco_2015_test'] = 'test2015'
dataset_to_folder_mapper['coco_2015_test-dev'] = 'test-dev2015'
def extract_grid_feature_argument_parser():
    """Build the command-line parser for grid-feature extraction."""
    parser = argparse.ArgumentParser(description="Grid feature extraction")
    parser.add_argument("--config-file", default="", metavar="FILE",
                        help="path to config file")
    parser.add_argument(
        "--dataset",
        default="coco_2014_train",
        choices=['coco_2014_train', 'coco_2014_val', 'coco_2015_test', 'coco_2015_test-dev'],
        help="name of the dataset",
    )
    parser.add_argument('--model_type', default='RN50', type=str,
                        help='RN50, RN101, RN50x4, ViT-B/32, vit_base_patch32_224_in21k')
    # Everything after the named options is forwarded verbatim to the config.
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    return parser
def extract_grid_feature_on_dataset(model, data_loader, dump_folder):
    """
    Run the detection model over `data_loader` and save each image's conv5
    feature map as `<image name>.npy` (NHWC layout) under `dump_folder`.
    """
    for idx, inputs in enumerate(tqdm.tqdm(data_loader)):
        with torch.no_grad():
            # compute features
            images = model.preprocess_image(inputs)
            features = model.backbone(images.tensor)
            outputs = model.roi_heads.get_conv5_features(features)
            # Name the dump after the source image, swapping the extension.
            # (An unused image_id-based '%d.pth' name was dead code; removed.)
            file_name = inputs[0]['file_name'].split("/")[-1].replace("jpg", "npy")
            # NCHW -> NHWC before saving.
            outputs = outputs.permute(0, 2, 3, 1)
            # BUG FIX: a leftover debug `exit()` here killed the process after
            # the first batch, before anything was written to disk.
            with PathManager.open(os.path.join(dump_folder, file_name), "wb") as f:
                np.save(f, outputs.cpu().numpy())
def do_feature_extraction(cfg, model, dataset_name, args):
    """
    Build the test loader for `dataset_name` and dump CLIP features for it.

    The dump folder is derived from cfg.OUTPUT_DIR and the VQA folder name,
    though extract_clip_feature_on_dataset rewrites the path again internally.
    """
    # inference_context puts the model in eval mode for the duration.
    with inference_context(model):
        dump_folder = os.path.join(cfg.OUTPUT_DIR, "features", dataset_to_folder_mapper[dataset_name])
        PathManager.mkdirs(dump_folder)
        data_loader = build_detection_test_loader_with_attributes(cfg, dataset_name, model_type='clip')
        extract_clip_feature_on_dataset(model, data_loader, dump_folder, args)
def setup(args):
    """
    Create configs and perform basic setups.

    :param args: parsed CLI args (uses args.config_file and args.opts)
    :return: a frozen detectron2 config
    """
    cfg = get_cfg()
    add_attribute_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # force the final residual block to have dilations 1
    cfg.MODEL.RESNETS.RES5_DILATION = 1
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
def extract_clip_feature_on_dataset(model, data_loader, dump_folder, args):
    """
    Encode every image in `data_loader` with a CLIP model and save each
    feature tensor as `<image_id>.pth` under a model-specific dump folder.

    :param model: loaded CLIP model (exposes .encode_image / .visual)
    :param data_loader: loader yielding [{'image_id', 'image'}] batches
    :param str dump_folder: base folder; only its last path component is kept
    :param args: parsed CLI args (uses args.model_type)
    """
    # BUG FIX: the original assigned to `save_args.model_type`, but
    # `save_args` was never defined, raising NameError on every call.
    model_base = args.model_type.split("-")[0]
    # CLIP's published normalization constants.
    mean = torch.Tensor([0.48145466, 0.4578275, 0.40821073]).to("cuda").reshape(3, 1, 1)
    std = torch.Tensor([0.26862954, 0.26130258, 0.27577711]).to("cuda").reshape(3, 1, 1)
    dump_folder = f"clip/{model_base}/" + dump_folder.split("/")[-1]
    if args.model_type == "ViT-B/32":
        num_patches = 558  # 600 * 1000 // 32 // 32
        print(num_patches)
        pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768, device='cuda'),)
        # NOTE(review): `resize_pos_embed` is not imported (its timm import
        # is commented out at the top of the file), so this branch raises
        # NameError as written — restore the import before using ViT-B/32.
        pos_embed.weight = resize_pos_embed(model.visual.positional_embedding.unsqueeze(0), pos_embed.unsqueeze(0))
        model.visual.positional_embedding = pos_embed
        print(model.visual.positional_embedding.device)
    # BUG FIX: str.replace returns a new string; the original discarded the
    # result, leaving any "rscratch" component un-rewritten.
    dump_folder = dump_folder.replace("rscratch", "dnn")
    dump_folder = "/dnn/sheng.s/clip_boi/grid-feats-vqa/" + dump_folder
    if not os.path.exists(dump_folder):
        os.makedirs(dump_folder)
    for idx, inputs in enumerate(tqdm.tqdm(data_loader)):
        with torch.no_grad():
            image_id = inputs[0]['image_id']
            file_name = '%d.pth' % image_id
            # Scale to [0, 1] then normalize to CLIP's input statistics.
            image = inputs[0]['image'].to("cuda").float() / 255.0
            image = (image - mean) / std
            image = image.unsqueeze(0)
            outputs = model.encode_image(image)
            if "RN" in args.model_type:
                # ResNet backbones return NCHW maps; store as NHWC.
                outputs = outputs.permute(0, 2, 3, 1)
            else:
                # ViT: lay the token sequence out as a 13x43 spatial grid.
                outputs = outputs[:, :, :].reshape(1, 13, 43, 768)
            with PathManager.open(os.path.join(dump_folder, file_name), "wb") as f:
                # save as CPU tensors
                torch.save(outputs.cpu(), f)
def main(args):
    """Load the CLIP model named by --model_type and extract features."""
    cfg = setup(args)
    # jit=False loads the regular (non-TorchScript) model.
    model, transform = load(args.model_type, jit=False)
    do_feature_extraction(cfg, model, args.dataset, args)


if __name__ == "__main__":
    args = extract_grid_feature_argument_parser().parse_args()
    print("Command Line Args:", args)
    main(args)
|
{"hexsha": "7c62e1ba59e97f238e09a86895f6c890c24d960e", "size": 5819, "ext": "py", "lang": "Python", "max_stars_repo_path": "CLIP-ViL-Direct/vqa/pythia_clip_grid_feature.py", "max_stars_repo_name": "HermannLiang/CLIP-ViL", "max_stars_repo_head_hexsha": "49c28bc5ece1aacfcbfd9c8810db70663ca0516a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "CLIP-ViL-Direct/vqa/pythia_clip_grid_feature.py", "max_issues_repo_name": "HermannLiang/CLIP-ViL", "max_issues_repo_head_hexsha": "49c28bc5ece1aacfcbfd9c8810db70663ca0516a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CLIP-ViL-Direct/vqa/pythia_clip_grid_feature.py", "max_forks_repo_name": "HermannLiang/CLIP-ViL", "max_forks_repo_head_hexsha": "49c28bc5ece1aacfcbfd9c8810db70663ca0516a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9788732394, "max_line_length": 131, "alphanum_fraction": 0.6872314831, "include": true, "reason": "import numpy", "num_tokens": 1447}
|
# Taylor problem 1.39
last revised: 07-Jan-2019 by Dick Furnstahl [furnstahl.1@osu.edu]
The goal of this notebook is to practice Python while considering some visualizations of problem 1.39 to see how they might help check the results, interpret the behavior, or suggest new ideas on how to extend the problem. Suggestions are very welcome! We'll use the notebook Matplotlib backend for Jupyter (`%matplotlib notebook`) to allow for reading points off the plot.
**Problem statement:** A ball is thrown with initial speed $v_0$ up an inclined plane. The plane is inclined at an angle $\phi$ above the horizontal, and the ball's initial velocity is at an angle $\theta$ above the plane. Choose axes with $x$ measured up the slope, $y$ normal to the slope, and $z$ across it. Write down Newton's second law using these axes and find the ball's position as a function of time. Show that the ball lands a distance
$
\begin{align}
R = \frac{2 v_0^2 \sin\theta \cos(\theta+\phi)}{g \cos^2\phi}
\end{align}
$
from its launch point. Show that for given $v_0$ and $\phi$, the maximum possible range up the inclined plane is
$
\begin{align}
R_{\textrm{max}} = \frac{v_0^2}{g(1+\sin\phi)} \;.
\end{align}
$
### Game plan:
* Accept the formulas for $R$ and $R_\textrm{max}$ as given and make plots. Interpret.
* Use a fixed value of phi first, then loop through phi.
* Add formatting to plots (and widgets!).
* Check the formulas using sympy.
* Solve the ODE numerically and make some spot comparisons to the formulas.
### Make plots of $R$ and $R_\textrm{max}$
We are given expressions for $R$ and $R_\textrm{max}$, so we'll first turn them into functions. Then make some plots.
```python
import numpy as np
from numpy import pi
```
```python
def Range_R(theta, phi, g=9.8, v0=1.0):
    """
    Distance R up the inclined plane for launch angle theta above a plane
    tilted phi above the horizontal (Taylor problem 1.39).
    """
    numerator = 2. * v0**2 * np.sin(theta) * np.cos(theta + phi)
    denominator = g * np.cos(phi)**2
    return numerator / denominator
```
```python
def Rmax(phi, g=9.8, v0=1.0):
    """
    Maximum distance up the plane for a fixed incline angle phi
    (Taylor problem 1.39).
    """
    denom = g * (1. + np.sin(phi))
    return v0**2 / denom
```
```python
# set up for plotting but now using the notebook backend
%matplotlib notebook
import matplotlib.pyplot as plt
import matplotlib as mpl
# The following can be uncommented if we want to adjust the font size.
plt.rcParams.update({'font.size': 10})
```
```python
# Make a plot of range R versus theta for several phi values
phi_vec = [0., pi/8., pi/4., 3.*pi/8.]
theta = np.arange(0,pi/2.,0.01)
# Label the figure and axis with _R to distinguish it
fig_R = plt.figure(figsize=(3,3))
ax_R = fig_R.add_subplot(1,1,1)
ax_R.set_title('Distance to landing')
ax_R.set_xlabel(r'$\theta$')
ax_R.set_ylabel(r'$R(\theta,\phi)$')
# start with phi=0 to see if it looks right
ax_R.plot(theta, Range_R(theta,phi=0.))
fig_R.tight_layout() # make the spacing of subplots nicer
```
<IPython.core.display.Javascript object>
Point to the curve to read off coordinates and try out the various controls (hover over a button to reveal a tooltip).
Remember that this is not a trajectory; each trajectory is just one point on this curve, specified by $\theta$ and $\phi=0$. Do the shape and features of the curve (e.g., symmetry about the peak) agree with your intuition? Can you prove (or verify) it should be that way from the analytic formula?
It's awkward to work in radians when our intuition (mine, at least) is better in degrees. Let's define some functions to convert (no doubt there are built-in functions, but these are easy and good practice for us).
```python
def rad_to_deg(theta_rad):
    """Convert an angle (or array of angles) from radians to degrees."""
    return theta_rad * (180. / np.pi)
def deg_to_rad(theta_deg):
    """Convert an angle (or array of angles) from degrees to radians."""
    return theta_deg * (np.pi / 180.)
```
Now make a plot with $\theta$ in degrees with a list of $\phi$ values, specified in radians but converted to degrees for the plot legend.
```python
# Now make a plot of range R versus theta for several phi values
phi_vec = [0., pi/8., pi/4., 3.*pi/8., 0.99*pi/2]
theta = np.arange(0,pi/2.,0.01)
theta_deg = rad_to_deg(theta)
fig_R = plt.figure(figsize=(4,4))
ax_R = fig_R.add_subplot(1,1,1)
ax_R.set_title('Distance to landing')
ax_R.set_xlabel(r'$\theta$ (degrees)')
ax_R.set_ylabel(r'$R(\theta,\phi)$')
#ax_R.plot(theta, Range_R(theta,phi=0.))
ax_R.set_ylim(0.,0.15)
for phi in phi_vec:
label_phi = fr'$\phi = {rad_to_deg(phi):.1f}\!^\circ$'
ax_R.plot(theta_deg, Range_R(theta,phi), label=label_phi)
ax_R.legend()
fig_R.tight_layout() # make the spacing of subplots nicer
```
<IPython.core.display.Javascript object>
Hmmm, it would be nice to have a picture of the incline next to this. How would we do that?
```python
# make a plot of range R versus theta for several phi values
phi_vec = [0., pi/8., pi/4., 3.*pi/8., 0.99*pi/2]
theta = np.arange(0,pi/2.,0.01)
theta_deg = rad_to_deg(theta)
fig_R = plt.figure(figsize=(6,3))
ax_R = fig_R.add_subplot(1,2,1)
ax_R.set_title('Distance to landing')
ax_R.set_xlabel(r'$\theta$')
ax_R.set_ylabel(r'$R(\theta,\phi)$')
ax_R.set_ylim(0.,0.15)
for phi in phi_vec:
label_phi = fr'$\phi = {rad_to_deg(phi):.1f}\!^\circ$'
ax_R.plot(theta_deg, Range_R(theta,phi), label=label_phi)
ax_R.legend()
# now add another subplot with the inclined plane.
ax_plane = fig_R.add_subplot(1,2,2)
ax_plane.set_title('Inclined plane')
ax_plane.set_xlim(0.,1.1)
ax_plane.set_ylim(0.,1.1)
for phi in phi_vec:
label_phi = fr'$\phi = {rad_to_deg(phi):.1f}\!^\circ$'
x_pts = [0., 1.]
y_pts = [0., np.tan(phi)]
ax_plane.plot(x_pts, y_pts, label=label_phi, lw=2)
ax_plane.axis('off')
ax_plane.legend()
fig_R.tight_layout() # make the spacing of subplots nicer
```
<IPython.core.display.Javascript object>
Ok, now we can look at the $R(\theta,\phi)$ plot and make observations, which we can then try to back-up from the formula. **Try to answer these questions.**
* Can you generalize the argument about symmetry to any $\phi$?
* What are the constraints on $R$ from the figure? Do they match the formula?
* Do the changes in the curves with increasing $\phi$ make sense? Can they be extracted from the formula?
Now plot the $R_{max}$ formula. **Evaluate whether it is consistent with the plots of R.**
```python
# make a plot of R_max versus phi
fig_Rmax = plt.figure(figsize=(3,3), num='Taylor 1.39')
ax_Rmax = fig_Rmax.add_subplot(1,1,1)
ax_Rmax.set_title(r'Maximum Distance given $\phi$')
ax_Rmax.set_xlabel(r'$\phi$ (degrees)')
ax_Rmax.set_ylabel(r'$R_{max}$')
phi = np.arange(0,pi/2.,.1)
ax_Rmax.plot(rad_to_deg(phi), Rmax(phi))
fig_Rmax.tight_layout() # make the spacing of subplots nicer
```
<IPython.core.display.Javascript object>
**Spotcheck some points on this curve against the previous graph of $R$. What should you compare?**
### Widgetizing with ipywidgets and interact
The widgets don't seem to play well with the notebook back end, so we'll switch to `%matplotlib inline`. You may need to restart the kernel.
```python
%matplotlib inline
from ipywidgets import interact, fixed
import ipywidgets as widgets
# A simple function evaluation (all in radians at this point).
interact(Range_R, theta=np.pi/4., phi=(0,np.pi/2), g=fixed(9.8));
```
interactive(children=(FloatSlider(value=0.7853981633974483, description='theta', max=2.356194490192345, min=-0…
```python
def plot_range_R_versus_theta(phi_deg=0):
    """Make a plot of range R versus theta for a given phi value in degrees
    (left panel), alongside a sketch of the inclined plane (right panel).
    Relies on the notebook's Range_R and deg_to_rad definitions."""
    phi_rad = deg_to_rad(phi_deg)
    theta_deg = np.arange(0,90.,0.1)
    theta_rad = deg_to_rad(theta_deg)
    range_R = Range_R(theta_rad,phi_rad)
    fig_R = plt.figure(figsize=(6,3), num='Taylor 1.39')
    # Left panel: R(theta) for the chosen incline angle.
    ax_R = fig_R.add_subplot(1,2,1)
    ax_R.set_title('Distance to landing')
    ax_R.set_xlabel(r'$\theta$')
    ax_R.set_ylabel(r'$R(\theta,\phi)$')
    label_phi = fr'$\phi = {phi_deg:.1f}\!^\circ$'
    ax_R.plot(theta_deg, range_R, label=label_phi)
    ax_R.set_ylim(bottom=0., top=1.1*range_R.max())  # set limit *after* plotting to get auto-scaling
    ax_R.legend()
    # Right panel: the incline drawn at the same angle, axes hidden.
    ax_plane = fig_R.add_subplot(1,2,2)
    ax_plane.set_title('Inclined plane')
    ax_plane.set_xlim(0.,1.1)
    ax_plane.set_ylim(0.,1.1)
    label_phi = fr'$\phi = {phi_deg:.1f}\!^\circ$'
    x_pts = [0., 1.]
    y_pts = [0., np.tan(phi_rad)]
    ax_plane.plot(x_pts, y_pts, label=label_phi, lw=2)
    ax_plane.axis('off')
    ax_plane.legend()
    fig_R.tight_layout()  # make the spacing of subplots nicer
interact(plot_range_R_versus_theta, phi_deg=(0.,90.));
```
interactive(children=(FloatSlider(value=0.0, description='phi_deg', max=90.0), Output()), _dom_classes=('widge…
```python
# to avoid the jiggling and do some formatting
phi_deg_widget = widgets.FloatSlider(min=0., max=90.0, step=0.1, value=0.,
description=r'$\phi$ in degrees',
readout_format='.0f',
continuous_update=False
)
interact(plot_range_R_versus_theta, phi_deg=phi_deg_widget);
```
interactive(children=(FloatSlider(value=0.0, continuous_update=False, description='$\\phi$ in degrees', max=90…
### Ok, how about using sympy?
See if we can reproduce the algebra of solving for final t, x, and y.
```python
import sympy as sy
theta, phi, v0, m, g = sy.symbols('theta phi v0 m g')
x, y, t = sy.symbols('x y t')
half = sy.Rational(1,2)
```
```python
tf = sy.solve(v0*sy.sin(theta)*t - half*g*sy.cos(phi)*t**2, t)
tf
```
[0, 2*v0*sin(theta)/(g*cos(phi))]
```python
x = v0*sy.cos(theta)*t - half*g*sy.sin(phi)*t**2
xf = x.subs(t,tf[1])
R = sy.trigsimp(xf)
R
```
2*v0**2*sin(theta)*cos(phi + theta)/(g*cos(phi)**2)
```python
thetamax = sy.solve(sy.trigsimp(sy.diff(R,theta)),theta)
thetamax
```
[-phi/2 + pi/4, -phi/2 + 3*pi/4]
```python
Rmax = R.subs(theta,thetamax[0])
sy.simplify(Rmax)
```
v0**2*(-sin(phi) + 1)/(g*cos(phi)**2)
### Ok, now as a differential equation
We'll use odeint for simplicity. Treat it as coupled 2nd-order differential equations for $x(t)$, $y(t)$, $v_x(t)$, and $v_y(t)$:
$
\begin{align}
\frac{d}{dt}\left(\begin{array}{c}
\mathbf{x} \\
\mathbf{v}
\end{array}\right)
= \left(\begin{array}{c}
\mathbf{v} \\
\mathbf{F}/m
\end{array}\right)
\qquad \Longrightarrow \qquad
\frac{d}{dt}\left(\begin{array}{c}
x \\
y \\
v_x \\
v_y
\end{array}\right)
= \left(\begin{array}{c}
v_x \\
v_y \\
F_x/m \\
F_y/m
\end{array}\right)
\end{align}
$
```python
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
```
```python
def ode_rhs(u_vec, t, *params):
    """Return d/dt of the state [x, y, v_x, v_y] for the inclined-plane projectile.

    `u_vec` holds [x, y, v_x, v_y]; `params` holds (g, phi).  The positions
    are unpacked for clarity even though the uniform gravity force does not
    depend on them.
    """
    _x, _y, vel_x, vel_y = u_vec
    g, phi = params
    accel_x = -g * np.sin(phi)
    accel_y = -g * np.cos(phi)
    return [vel_x, vel_y, accel_x, accel_y]
```
```python
# Physical parameters: gravity, incline angle phi, launch angle theta
# (both in radians), and launch speed.
g = 9.8
phi = np.pi/8.
theta = np.pi/4.
v0 = 1.
# Range_R is defined in an earlier notebook cell — presumably the closed-form
# range on the incline; verify against that cell.
analytic_range = Range_R(theta,phi,g,v0)
print("Analytic range = {}".format(analytic_range))
# Initial state [x, y, v_x, v_y]: launch from the origin at angle theta.
u0_vec = [0, 0, v0*np.cos(theta), v0*np.sin(theta)]
t_max = 1.  # integration time
t_pts = np.arange(0, t_max, 0.01)
# absolute and relative tolerances for ode solver
abserr = 1.0e-8
relerr = 1.0e-6
# Integrate the differential equation; .T unpacks the (len(t), 4) solution
# array into four 1-D time series.
x, y, v_x, v_y = odeint(ode_rhs, u0_vec, t_pts, args=(g, phi),
                        atol=abserr, rtol=relerr).T
```
Analytic range = 0.06469904807392131
We'll just check one case here, but you can do more!
```python
# Plot the numerically integrated trajectory in incline coordinates and mark
# the analytic range (computed in an earlier cell) as a vertical line; the
# trajectory should cross y=0 at that line if the integration is correct.
fig = plt.figure(figsize=(4,4))
ax = fig.add_subplot(1,1,1)
ax.plot(x, y)
ax.set_ylim(0,.15)
ax.set_xlim(0,.15)
ax.set_xlabel('x: distance up incline')
ax.set_ylabel(r'y: distance $\perp$ to incline')
ax.axvline(analytic_range, color="red")
```
**Did it work?**
```python
```
|
{"hexsha": "323c92de84f17124f78bed782413edf9f93d87eb", "size": 605785, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "2020_week_1/Taylor_problem_1.39.ipynb", "max_stars_repo_name": "CLima86/Physics_5300_CDL", "max_stars_repo_head_hexsha": "d9e8ee0861d408a85b4be3adfc97e98afb4a1149", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2020_week_1/Taylor_problem_1.39.ipynb", "max_issues_repo_name": "CLima86/Physics_5300_CDL", "max_issues_repo_head_hexsha": "d9e8ee0861d408a85b4be3adfc97e98afb4a1149", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2020_week_1/Taylor_problem_1.39.ipynb", "max_forks_repo_name": "CLima86/Physics_5300_CDL", "max_forks_repo_head_hexsha": "d9e8ee0861d408a85b4be3adfc97e98afb4a1149", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 156.210675606, "max_line_length": 165823, "alphanum_fraction": 0.8394116725, "converted": true, "num_tokens": 3671}
|
# Demo: bootstrap the sampling distribution of the standard deviation of a
# standard-normal sample (bootstrap comes from the project-local
# `bootstrapping` package).
from bootstrapping import bootstrap
import numpy as np
import matplotlib.pyplot as plt
# generate 10,000 standard normal variables
sample = np.random.randn(10000)
# Run the bootstrap algorithm. Do 50,000 random resamplings and then calculate
# the standard deviation for each one.
bootstrap_values = bootstrap(sample, num_samples=50000, f=np.std)
# Plot the histogram of bootstrapped standard deviations.
# This should give an idea of the distribution of the sample standard deviation
# which should be around 1
plt.hist(bootstrap_values, bins=100)
plt.xlabel('Standard Deviation')
plt.ylabel('Number of bootstrap samples')
plt.show()
|
{"hexsha": "ad1f3fba828217e8cc54168a934612cf982265c8", "size": 640, "ext": "py", "lang": "Python", "max_stars_repo_path": "bootstrap_test.py", "max_stars_repo_name": "ananswam/bootstrapping", "max_stars_repo_head_hexsha": "3dd412917751b4ea9295311881fe79851a9552b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bootstrap_test.py", "max_issues_repo_name": "ananswam/bootstrapping", "max_issues_repo_head_hexsha": "3dd412917751b4ea9295311881fe79851a9552b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bootstrap_test.py", "max_forks_repo_name": "ananswam/bootstrapping", "max_forks_repo_head_hexsha": "3dd412917751b4ea9295311881fe79851a9552b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4761904762, "max_line_length": 79, "alphanum_fraction": 0.796875, "include": true, "reason": "import numpy", "num_tokens": 142}
|
#ifndef _INCLUDED_UBLAS_VECTOR_HPP_
#define _INCLUDED_UBLAS_VECTOR_HPP_
#include <boost/serialization/list.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/version.hpp>
#include <boost/serialization/split_free.hpp>
#include <boost/numeric/ublas/vector.hpp>
namespace boost {
namespace serialization {
// Serialize a uBLAS vector: write the element count first, then each
// element in order.
template<class Archive, class U>
inline void save (Archive &ar, const boost::numeric::ublas::vector<U> &v, const unsigned int) {
    const unsigned int count = v.size();
    ar << count;
    typedef typename boost::numeric::ublas::vector<U>::const_iterator citer;
    for (citer it = v.begin(); it != v.end(); ++it) {
        ar << *it;
    }
}
template<class Archive, class U>
inline void load (Archive &ar, boost::numeric::ublas::vector<U> &v, const unsigned int) {
unsigned int count;
ar >> count;
v.resize(count);
typename boost::numeric::ublas::vector<U>::iterator it = v.begin();
while (count-- > 0) {
ar >> *it++;
}
}
// Boost.Serialization entry point: route serialization through the separate
// save()/load() free functions defined above.
template<class Archive, class U>
void serialize(Archive & ar, boost::numeric::ublas::vector<U> &v, const unsigned int file_version)
{
    boost::serialization::split_free(ar, v, file_version);
}
} // namespace serialization
} // namespace boost
#endif
|
{"hexsha": "3d083c36a47c071d5d8fbb7e3ff831f89616f1b4", "size": 1282, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost_ext/ublas_vector.hpp", "max_stars_repo_name": "ahmadia/hypermesh", "max_stars_repo_head_hexsha": "c694d634a8493c94be39488b85aacc2d1b8884e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost_ext/ublas_vector.hpp", "max_issues_repo_name": "ahmadia/hypermesh", "max_issues_repo_head_hexsha": "c694d634a8493c94be39488b85aacc2d1b8884e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost_ext/ublas_vector.hpp", "max_forks_repo_name": "ahmadia/hypermesh", "max_forks_repo_head_hexsha": "c694d634a8493c94be39488b85aacc2d1b8884e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1363636364, "max_line_length": 102, "alphanum_fraction": 0.6544461778, "num_tokens": 314}
|
c------------------------------------------------------------------
c Computes double covariance or correlation matrix
c------------------------------------------------------------------
C NCLFORTSTART
      subroutine dcovarxy(x,y,xmsg,ymsg,cxy,n,m,lag,ncrit,iopt)
      implicit none
c INPUT
c   n,m   : number of observations (rows) and variables (columns)
c   lag   : y is lagged by "lag" observations relative to x
c   ncrit : minimum number of valid (non-missing) pairs required
c   iopt  : 0 -> covariance, 1 -> correlation
      integer n, m, lag, ncrit, iopt
c   x,y   : data matrices; xmsg/ymsg are their missing-value codes
      double precision x(n,m), y(n,m), xmsg, ymsg
c OUTPUT
c   cxy   : m x m covariance (or correlation) matrix; entries with too
c           few valid pairs are left set to xmsg
      double precision cxy(m,m)
C NCLEND
c LOCAL
      integer i, j, k
      double precision sumxx, sumyy, sumxy, sumx, sumy, nxy, xvar, yvar
c Initialize the whole output to the missing-value code.
      do i=1,m
         do j=1,m
            cxy(i,j) = xmsg
         end do
      end do
c For every pair of columns accumulate sums over the observations where
c both series have valid (non-missing) values.
      do i=1,m
         do j=1,m
            nxy = 0.0d0
            sumx = 0.0d0
            sumy = 0.0d0
            sumxy= 0.0d0
            sumxx= 0.0d0
            sumyy= 0.0d0
            do k=lag+1, n
               if (x(k,i).ne.xmsg .and. y(k-lag,j).ne.ymsg) then
                  nxy = nxy + 1
                  sumx = sumx + x(k,i)
                  sumy = sumy + y(k-lag,j)
                  sumxy = sumxy + x(k,i)*y(k-lag,j)
                  sumxx = sumxx + x(k,i)*x(k,i)
                  sumyy = sumyy + y(k-lag,j)*y(k-lag,j)
               end if
            end do
c Require at least 2 pairs (sample divisor nxy-1) and the user minimum.
            if (nxy.gt.1d0 .and. nxy.ge.ncrit) then
               cxy(i,j) = (sumxy-(sumx*sumy)/nxy)/(nxy-1d0)
c For iopt=1 normalize by the sample standard deviations (correlation).
               if (iopt.eq.1 .and.
     &             sumxx.gt.0.0d0 .and. sumyy.gt.0.0d0) then
                  xvar = (sumxx-((sumx*sumx)/(nxy)) )/(nxy-1.)
                  yvar = (sumyy-((sumy*sumy)/(nxy)) )/(nxy-1.)
                  cxy(i,j) = cxy(i,j)/(sqrt(xvar)*sqrt(yvar))
               end if
            end if
         end do
      end do
      return
      end
|
{"hexsha": "0b367a2b93c245f089035ed2108ee24931ff3a79", "size": 1841, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ni/src/lib/nfpfort/covcorm_xy_matrix_ncl.f", "max_stars_repo_name": "tenomoto/ncl", "max_stars_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 210, "max_stars_repo_stars_event_min_datetime": "2016-11-24T09:05:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T19:15:32.000Z", "max_issues_repo_path": "ni/src/lib/nfpfort/covcorm_xy_matrix_ncl.f", "max_issues_repo_name": "tenomoto/ncl", "max_issues_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 156, "max_issues_repo_issues_event_min_datetime": "2017-09-22T09:56:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T07:02:21.000Z", "max_forks_repo_path": "ni/src/lib/nfpfort/covcorm_xy_matrix_ncl.f", "max_forks_repo_name": "tenomoto/ncl", "max_forks_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 58, "max_forks_repo_forks_event_min_datetime": "2016-12-14T00:15:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T09:13:00.000Z", "avg_line_length": 31.7413793103, "max_line_length": 71, "alphanum_fraction": 0.3775122216, "num_tokens": 571}
|
"""
pretty(doc)
Pretty print the parsed HTML `doc`.
"""
function pretty(doc)
io = IOBuffer()
print(io, doc; pretty=true)
return String(take!(io))
end
"""
    map!(f::Function, doc::HTMLDocument)

Apply `f` to every direct child of every `HTMLElement` in `doc` (visited in
pre-order), replacing each child with whatever `f` returns.  Mutates `doc`
in place and returns it.
"""
function map!(f::Function, doc::HTMLDocument)
    for elem in PreOrderDFS(doc.root)
        if elem isa HTMLElement
            # Changing elem directly doesn't work, so we loop direct children.
            children = elem.children
            for i in 1:length(children)
                elem.children[i] = f(elem.children[i])
            end
        end
        # else (isa HTMLText) is handled by the fact that we loop direct children.
    end
    return doc
end
# Default cleaning rule: keep the element's children and parent but drop
# every attribute.
function clean(elem::HTMLElement{T}) where T
    stripped_attrs = Dict{AbstractString,AbstractString}()
    return HTMLElement{T}(elem.children, elem.parent, stripped_attrs)
end
# Trim query strings and fragments from a `name => url` pair so that volatile
# URL suffixes do not register as spurious page changes.
function remove_extra_href_info(entry::Pair)
    url = last(entry)
    # One regex search is enough; the original scanned the URL twice.
    symbol_loc = findfirst(r"\?|#", url)
    if isnothing(symbol_loc)
        return entry
    end
    # Keep everything up to and including the '?' or '#' and mark the
    # removed remainder.
    stripped = url[1:first(symbol_loc)] * "[...]"
    return first(entry) => stripped
end
# Apply `remove_extra_href_info` to every `name => url` pair in `dic`.
function remove_extra_href_info(dic::Dict)::Dict
    return Dict(remove_extra_href_info(entry) for entry in dic)
end
# Cleaning rule for anchors: keep only the `href` attribute, with query
# strings and fragments stripped.
function clean(elem::HTMLElement{:a})
    href_only = filter(entry -> first(entry) == "href", elem.attributes)
    return HTMLElement{:a}(elem.children, elem.parent,
                           remove_extra_href_info(href_only))
end
# Cleaning rule for <style> and <script>: drop both content and attributes,
# since neither is visible in the rendered page.
function clean(elem::T) where T<:Union{HTMLElement{:style},HTMLElement{:script}}
    empty_attrs = Dict{AbstractString,AbstractString}()
    return T([], elem.parent, empty_attrs)
end
# True when the stringified `entry` mentions "title" or "description".
function contains_title_description(entry)
    return occursin(r"title|description", string(entry)::String)
end
# Cleaning rule for <meta>: keep the attributes only when they look like page
# title/description metadata; otherwise drop them all.
function clean(elem::HTMLElement{:meta})
    children = elem.children
    parent = elem.parent
    A = elem.attributes
    # Test each attribute pair with the `any(pred, itr)` form instead of
    # calling `any` on a single Bool (which only worked because numbers
    # iterate as one-element collections in Julia).  The original also
    # computed an unused `keys(A)` local, removed here.
    if !any(contains_title_description, A)
        A = Dict{AbstractString,AbstractString}()
    end
    return HTMLElement{:meta}(children, parent, A)
end
# Dispatch helper for `map!`: elements are rewritten via the `clean` rules.
function clean_tree(elem::HTMLElement)
    return clean(elem)
end
# Text nodes are already visible content and pass through unchanged.
function clean_tree(elem::HTMLText)
    return elem
end
"""
body(content::String)
Return only the body from the HTML `content`.
"""
function body(content::String)
doc = parsehtml(content)
for elem in PreOrderDFS(doc.root)
if elem isa HTMLElement && tag(elem) == :body
return string(elem)::String
end
end
@warn "Couldn't find body in content:\n$content"
return content
end
"""
clean(content::String)
Return only the parts of the HTML which are visible in the rendered page.
This assumes that a page has changed when a reader can see a change, which seems like a reasonable assumption.
Note that this assumption may be violated when a elements are updated on the fly via Javascript.
"""
function clean(content::String)
doc = parsehtml(content)
map!(clean_tree, doc)
text = pretty(doc)
return """
<!-- This HTML document was cleaned up (simplified) by `Skans.clean`. -->
$text
"""
end
|
{"hexsha": "3beb1f72d3f705acbc0593824c089cc4e99d1d34", "size": 3347, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/select.jl", "max_stars_repo_name": "rikhuijzer/Skann.jl", "max_stars_repo_head_hexsha": "3e5cb898b5ab360fd7982aeffe794d62b09c575f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-12-12T22:50:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T05:26:59.000Z", "max_issues_repo_path": "src/select.jl", "max_issues_repo_name": "rikhuijzer/Skans.jl", "max_issues_repo_head_hexsha": "3e5cb898b5ab360fd7982aeffe794d62b09c575f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-12-12T12:24:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T16:28:48.000Z", "max_forks_repo_path": "src/select.jl", "max_forks_repo_name": "rikhuijzer/Skan.jl", "max_forks_repo_head_hexsha": "3e5cb898b5ab360fd7982aeffe794d62b09c575f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-01-05T18:15:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T19:52:52.000Z", "avg_line_length": 27.4344262295, "max_line_length": 110, "alphanum_fraction": 0.6731401255, "num_tokens": 764}
|
import os
import numpy as np
import cv2
from . import find_tools as ft
# noinspection PyArgumentList
hog = cv2.HOGDescriptor((32, 64), (16, 16), (8, 8), (8, 8), 9, 1, 4, 0, 0.2, 0, 64)
def find_right_lung_hog(image):
    """Detect the right lung on `image` with the pretrained HOG+SVM model."""
    detector_path = os.path.dirname(__file__) + os.sep + "right_lung_hog.np"
    hog.setSVMDetector(np.loadtxt(detector_path, dtype=np.float32))
    detections, _weights = hog.detectMultiScale(image)
    return ft.find_max_rectangle(detections)
def find_left_lung_hog(image):
    """Detect the left lung on `image` with the pretrained HOG+SVM model."""
    detector_path = os.path.dirname(__file__) + os.sep + "left_lung_hog.np"
    hog.setSVMDetector(np.loadtxt(detector_path, dtype=np.float32))
    detections, _weights = hog.detectMultiScale(image)
    return ft.find_max_rectangle(detections)
|
{"hexsha": "314742ef9a8fe02b44e26dfa8b3383f9be9f1af8", "size": 745, "ext": "py", "lang": "Python", "max_stars_repo_path": "lungs_finder/hog_finder.py", "max_stars_repo_name": "ggalal/lungs-finder", "max_stars_repo_head_hexsha": "d31e20a88f1de3c9b62025dc2875b01f7806f4d9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2017-08-04T20:07:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-25T19:45:02.000Z", "max_issues_repo_path": "lungs_finder/hog_finder.py", "max_issues_repo_name": "smn568/lungs-finder", "max_issues_repo_head_hexsha": "49c29373e3d576475b99580fbe0a547dfd369c31", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-12-21T02:41:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-02T03:15:49.000Z", "max_forks_repo_path": "lungs_finder/hog_finder.py", "max_forks_repo_name": "smn568/lungs-finder", "max_forks_repo_head_hexsha": "49c29373e3d576475b99580fbe0a547dfd369c31", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2017-10-13T15:44:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-07T23:08:44.000Z", "avg_line_length": 29.8, "max_line_length": 109, "alphanum_fraction": 0.722147651, "include": true, "reason": "import numpy", "num_tokens": 227}
|
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory methods to create simulation models."""
import collections
import collections.abc
import itertools
from multiprocessing.pool import Pool

import numpy as np

from ..log import PCG_ROOT_LOGGER
from ..simulation import SimulationModel
from ..utils import is_string
def _parse_factory_input_as_vector(var):
    """Parse a input argument for the factory methods and return
    its elements as a vector.

    > *Input arguments*

    * `var` (*type:* `float`, `int`, `str`, `unicode`,
    `list`, `numpy.ndarray`): Input variable

    > *Returns*

    List of variables as `list` or `numpy.array` if the inputs are numeric.
    `None` if the type of `var` is not supported.
    """
    # Scalars become one-element arrays.
    if isinstance(var, (float, int, np.int64, np.float64)):
        PCG_ROOT_LOGGER.info('Variable provided as scalar={}'.format(
            var))
        return np.array([var])
    # Sequences (but not strings) are converted directly.
    # NOTE: collections.Iterable was removed in Python 3.10; use
    # collections.abc.Iterable instead.
    if isinstance(var, collections.abc.Iterable) and not is_string(var):
        PCG_ROOT_LOGGER.info('Variable provided as vector={}'.format(
            var))
        return np.array(var)
    elif is_string(var):
        PCG_ROOT_LOGGER.info('Variable provided as a inline command=' + var)
        try:
            # SECURITY: eval() executes arbitrary code; factory inputs must
            # come from trusted configuration sources only.
            vars = eval(var)
            PCG_ROOT_LOGGER.info(
                'Generated output, fcn={}, output={}'.format(
                    var, vars))
        except Exception as ex:
            PCG_ROOT_LOGGER.warning(
                'Could not evaluate variable lambda function, returning string'
                ', input={}, message={}'.format(var, str(ex)))
            return var
        if isinstance(vars, (float, int)):
            PCG_ROOT_LOGGER.info('Variable provided as scalar={}'.format(
                var))
            return np.array([vars])
        if isinstance(vars, collections.abc.Iterable):
            return vars
        if callable(vars):
            # BUG FIX: the original evaluated the callable but fell through
            # without returning its result (implicitly returning None).
            PCG_ROOT_LOGGER.info(
                'Variable provided as lambda function={}'.format(var))
            return vars()
        PCG_ROOT_LOGGER.error(
            'No value returned after evaluating'
            ' lambda function, fcn={}'.format(var))
        return None
    elif callable(var):
        return var()
    else:
        return None
def box(size, mass=0, name='box', pose=None, color=None,
        visual_parameters=None, collision_parameters=None):
    """Factory method that returns a box-shaped model with one cuboid link.

    > *Input arguments*

    * `size` (*type:* `list` or `numpy.ndarray`): 3D vector with the
    size of the box for width, length and height, respectively, in meters.
    * `mass` (*type:* `float`, *default:* `0`): Mass of the model. If the
    mass is not greater than zero, the model is set as static.
    * `name` (*type:* `str`, *default:* `'box'`): Name of the model.
    * `pose` (*type:* `list` or `numpy.array`, *default:*
    `[0, 0, 0, 0, 0, 0]`): Origin of the model.
    * `color` (*type:* `str` or `list`, *default:* `None`):
    Color of the model. It can be provided as a
    RGBA vector, `xkcd` for a random [XKCD color](https://xkcd.com/color/rgb/)
    or a specific `xkcd` color name, and/or `random` for a random RGBA color.

    > *Returns*

    A box-shaped `pcg_gazebo.simulation.SimulationModel` instance.
    """
    # None sentinels replace the original mutable default arguments
    # ([...] / dict()), which are shared across calls in Python.  The
    # effective defaults are unchanged.
    if pose is None:
        pose = [0, 0, 0, 0, 0, 0]
    if visual_parameters is None:
        visual_parameters = dict()
    if collision_parameters is None:
        collision_parameters = dict()
    input_mass = float(_parse_factory_input_as_vector(mass)[0])
    input_size = _parse_factory_input_as_vector(size).tolist()
    input_pose = _parse_factory_input_as_vector(pose).tolist()
    input_col_params = collision_parameters.copy()
    for tag in collision_parameters:
        if tag == 'fdir1':
            # Friction direction is a 3-vector; every other entry is scalar.
            input_col_params[tag] = _parse_factory_input_as_vector(
                collision_parameters[tag]).tolist()
        else:
            input_col_params[tag] = float(
                _parse_factory_input_as_vector(
                    collision_parameters[tag])[0])
    model = SimulationModel(name=name)
    model.add_cuboid_link(
        link_name='link',
        mass=input_mass,
        size=input_size,
        color=color,
        visual_parameters=visual_parameters,
        collision_parameters=input_col_params)
    # Massless models are flagged static so the physics engine ignores them.
    if input_mass <= 0:
        model.static = True
    model.pose = input_pose
    return model
def mesh(visual_mesh, collision_mesh=None,
         use_approximated_collision=False, approximated_collision_model='box',
         visual_mesh_scale=[1, 1, 1], collision_mesh_scale=[1, 1, 1],
         name='mesh', pose=[0, 0, 0, 0, 0, 0], color=None, mass=0,
         inertia=None, use_approximated_inertia=True,
         approximated_inertia_model='box', visual_parameters=dict(),
         collision_parameters=dict()):
    """Create a model based on a mesh input. The options for visual and
    collision meshes are:

    * `visual_mesh` is provided and no `collision_mesh`.
    The collision mesh is then set to be the same as the visual mesh.
    * Both `visual_mesh` and `collision_mesh` are provided
    separately.
    * `visual_mesh` is provided and no `collision_mesh`, but
    `use_approximated_collision` is `True`. In this case the collision geometry
    can be an approximated geometry fitted to the visual mesh. Options for
    the approximation methods are `box`, `sphere` or `cylinder`.

    The same is valid for the moments of inertia. For static models, `mass`
    can be set as 0. Otherwise, the following options are possible:

    * `inertia` provided as `inertia=dict(ixx=0, iyy=0, izz=0, ixy=0, ixz=0,
    iyz=0)`
    * Set `use_approximated_inertia` to `True` and the inertia model will be
    computed for the model using the `approximated_inertia_model` input
    (options are `box`, `sphere` or `cylinder`). In this case the
    approximated geometry will be computed from the visual mesh and
    its dimensions combined with the provided `mass` will be used
    to generate the model's moments of inertia.

    > *Input arguments*

    * `visual_mesh` (*type:* `str` or `trimesh.Trimesh`): Name of the
    visual mesh file or mesh structure
    * `collision_mesh` (*type:* `str` or `trimesh.Trimesh`, *default:* `None`):
    Name of the collision mesh file. If `None` is provided, then
    the visual mesh file will be used as collision geometry
    * `use_approximated_collision` (*type:* `bool`, *default:* `False`): Enable
    computing an approximated collision geometry from the visual mesh.
    * `approximated_collision_model` (*type:* `str`, *default:* `'box'`): Type
    of approximated collision geometry to be derived from the visual
    mesh. Options are `box`, `cylinder` or `sphere`.
    * `visual_mesh_scale` (*type:* `list`, *default:* `[1, 1, 1]`):
    Scaling vector to be applied to the visual mesh
    * `collision_mesh_scale` (*type:* `list`, *default:*
    `[1, 1, 1]`): Scaling vector to be applied to the collision mesh
    * `name` (*type:* `str`, *default:* `'box'`): Name of the model.
    * `pose` (*type:* `list` or `numpy.array`, *default:*
    `[0, 0, 0, 0, 0, 0]`): Origin of the model.
    * `color` (*type:* `str` or `list`, *default:* `None`): Color of
    the model. It can be provided as a RGBA vector, `xkcd` for a random
    [XKCD color](https://xkcd.com/color/rgb/)
    or a specific `xkcd` color name, and/or `random` for a random RGBA color.
    * `mass` (*type:* `float`, *default:* `0`): Mass of the model. If the
    mass is not greater than zero, the model is set as static.
    * `inertia` (*type:* `dict`, *default:* `None`): Optional moments
    of inertia setting to the model in the form of
    `inertia=dict(ixx=0, iyy=0, izz=0, ixy=0, ixz=0, iyz=0)`
    * `use_approximated_inertia` (*type:* `bool`, *default:* `True`): Enable
    computation of moments of inertia based on the `mass` input and
    the approximated inertia model setting based on the dimensions
    of the mesh.
    * `approximated_inertia_model` (*type:* `str`, *default:* `box`): Type of
    geometrical approximation to be computed from the visual
    mesh. The dimensions of the approximated geometry will be
    then used to compute the moments of inertia of the model.
    Options are `box`, `cylinder` or `sphere`.

    > *Returns*

    A `pcg_gazebo.simulation.SimulationModel` instance.
    """
    input_mass = float(_parse_factory_input_as_vector(mass)[0])
    input_visual_mesh_scale = _parse_factory_input_as_vector(
        visual_mesh_scale).tolist()
    input_collision_mesh_scale = _parse_factory_input_as_vector(
        collision_mesh_scale).tolist()
    input_pose = _parse_factory_input_as_vector(pose).tolist()
    # NOTE(review): input_col_params is computed but never forwarded to
    # add_link below — confirm whether add_link should receive it.
    input_col_params = collision_parameters.copy()
    for tag in collision_parameters:
        if tag == 'fdir1':
            input_col_params[tag] = _parse_factory_input_as_vector(
                collision_parameters[tag]).tolist()
        else:
            input_col_params[tag] = float(
                _parse_factory_input_as_vector(
                    collision_parameters[tag])[0])
    model = SimulationModel(name=name)
    model.add_link(
        visual_mesh=visual_mesh,
        collision_mesh=collision_mesh,
        use_approximated_collision=use_approximated_collision,
        approximated_collision_model=approximated_collision_model,
        visual_mesh_scale=input_visual_mesh_scale,
        collision_mesh_scale=input_collision_mesh_scale,
        name=name,
        color=color,
        mass=input_mass,
        inertia=inertia,
        use_approximated_inertia=use_approximated_inertia,
        approximated_inertia_model=approximated_inertia_model)
    model.pose = input_pose
    # BUG FIX: compare the parsed numeric mass, not the raw argument — `mass`
    # may be a string expression (see _parse_factory_input_as_vector), which
    # would make `mass <= 0` raise a TypeError.  This also matches the
    # behavior of box(), sphere() and cylinder().
    if input_mass <= 0:
        model.static = True
    return model
def sphere(radius, mass=0, name='sphere', pose=[0, 0, 0, 0, 0, 0],
           color=None, visual_parameters=dict(), collision_parameters=dict()):
    """Build a sphere-shaped `pcg_gazebo.simulation.SimulationModel`.

    > *Input arguments*

    * `radius` (*type:* `float`): Radius of the sphere.
    * `mass` (*type:* `float`, *default:* `0`): Model mass; a non-positive
    mass makes the model static.
    * `name` (*type:* `str`, *default:* `'sphere'`): Name of the model.
    * `pose` (*type:* `list` or `numpy.array`, *default:*
    `[0, 0, 0, 0, 0, 0]`): Origin of the model.
    * `color` (*type:* `str` or `list`, *default:* `None`): RGBA vector,
    `xkcd`/`xkcd` color name, or `random`.

    > *Returns*

    A sphere-shaped `pcg_gazebo.simulation.SimulationModel` instance.
    """
    parsed_mass = float(_parse_factory_input_as_vector(mass)[0])
    parsed_radius = float(_parse_factory_input_as_vector(radius)[0])
    parsed_pose = _parse_factory_input_as_vector(pose).tolist()
    # 'fdir1' is the only vector-valued collision parameter; all others
    # are parsed down to scalars.
    parsed_col_params = {
        tag: (_parse_factory_input_as_vector(value).tolist()
              if tag == 'fdir1'
              else float(_parse_factory_input_as_vector(value)[0]))
        for tag, value in collision_parameters.items()
    }
    model = SimulationModel(name=name)
    model.add_spherical_link(
        link_name='link',
        mass=parsed_mass,
        radius=parsed_radius,
        color=color,
        visual_parameters=visual_parameters,
        collision_parameters=parsed_col_params)
    if parsed_mass <= 0:
        model.static = True
    model.pose = parsed_pose
    return model
def cylinder(
        length,
        radius,
        mass=0,
        name='cylinder',
        pose=[0, 0, 0, 0, 0, 0],
        color=None,
        visual_parameters=dict(),
        collision_parameters=dict()):
    """Build a cylinder-shaped `pcg_gazebo.simulation.SimulationModel`
    with the rotation axis set per default as `[0, 0, 1]`.

    > *Input arguments*

    * `length` (*type:* `float`): Length of the cylinder.
    * `radius` (*type:* `float`): Radius of the cylinder.
    * `mass` (*type:* `float`, *default:* `0`): Model mass; a non-positive
    mass makes the model static.
    * `name` (*type:* `str`, *default:* `'cylinder'`): Name of the model.
    * `pose` (*type:* `list` or `numpy.array`, *default:*
    `[0, 0, 0, 0, 0, 0]`): Origin of the model.
    * `color` (*type:* `str` or `list`, *default:* `None`): RGBA vector,
    `xkcd`/`xkcd` color name, or `random`.

    > *Returns*

    A cylinder-shaped `pcg_gazebo.simulation.SimulationModel` instance.
    """
    def _scalar(value):
        # Collapse any accepted factory input down to a single float.
        return float(_parse_factory_input_as_vector(value)[0])

    parsed_mass = _scalar(mass)
    parsed_length = _scalar(length)
    parsed_radius = _scalar(radius)
    parsed_pose = _parse_factory_input_as_vector(pose).tolist()
    parsed_col_params = collision_parameters.copy()
    for key, value in collision_parameters.items():
        if key == 'fdir1':
            # Friction direction stays a vector.
            parsed_col_params[key] = \
                _parse_factory_input_as_vector(value).tolist()
        else:
            parsed_col_params[key] = _scalar(value)
    model = SimulationModel(name=name)
    model.add_cylindrical_link(
        link_name='link',
        mass=parsed_mass,
        radius=parsed_radius,
        length=parsed_length,
        color=color,
        visual_parameters=visual_parameters,
        collision_parameters=parsed_col_params)
    if parsed_mass <= 0:
        model.static = True
    model.pose = parsed_pose
    return model
def box_factory(size, mass=None, name='box', pose=[0, 0, 0, 0, 0, 0],
                use_permutation=True, color=None):
    """Factory function for box-shaped models. It parses the vector `size`
    to generate the boxes. The `mass` can be either a scalar or a vector.
    If `mass` is a scalar, all boxes will have the same mass. If the size
    of the vectors `size` and `mass` are the same, the boxes can be generated
    by associating a `size` vector with a mass by position in the array or they
    can be permutated. If the vectors `size` and `mass` have different lengths,
    only permutation can be performed.

    The `size` and `mass` inputs can also be provided as lambda functions
    as `str`, such as:

    ```python
    size="__import__('numpy').random.random((4, 3))"
    mass="__import__('numpy').arange(1, 10, 4)"
    ```

    > *Input arguments*

    * `size` (*type:* `list` or lambda function as `str`): List of
    3D size vectors
    * `mass` (*type:* `float`, list of `float` or lambda function as `str`,
    *default:* `None`): Mass of the boxes. If `mass` is `None`, all boxes
    will be static models
    * `pose` (*type:* `list` or `numpy.array`, *default:*
    `[0, 0, 0, 0, 0, 0]`): Origin of the model.
    * `use_permutation` (*type:* `bool`, *default:* `True`): Enable use of
    permutation to associate the `size` elements with the `mass` inputs. If the
    sizes of the `size` and `mass` have different sizes, permutation
    will be used per default.
    * `color` (*type:* `str` or `list`, *default:* `None`): Color of the model.
    It can be provided as a RGBA vector, `xkcd` for a random
    [XKCD color](https://xkcd.com/color/rgb/) or a specific `xkcd`
    color name, and/or `random` for a random RGBA color.

    > *Returns*

    List of `pcg_gazebo.simulation.SimulationModel` instances.
    """
    box_size = _parse_factory_input_as_vector(size)
    if mass is not None:
        box_mass = _parse_factory_input_as_vector(mass)
    else:
        box_mass = None
    # A single 3-vector means one box; normalize to an (n, 3) array.
    if box_size.shape[0] == 3 and len(box_size.shape) == 1:
        box_size = box_size.reshape((1, 3))
    if len(box_size.shape) != 2:
        PCG_ROOT_LOGGER.error(
            'Size of box shapes is invalid, provided={}'.format(
                box_size.shape))
        return list()
    if box_size.shape[1] != 3:
        PCG_ROOT_LOGGER.error(
            'Size of box shapes is invalid, provided={}'.format(
                box_size.shape))
        return list()
    models = list()
    if mass is None:
        # BUG FIX: this log line was inside the loop and fired once per model.
        PCG_ROOT_LOGGER.info(
            '[box_factory] Not using permutation, '
            'generating {} static models'.format(box_size.shape[0]))
        for i in range(box_size.shape[0]):
            box_name = '{}_{}'.format(name, i)
            models.append(box(
                size=box_size[i, :],
                mass=0,
                name=box_name,
                pose=pose,
                color=color))
    elif not use_permutation:
        if box_size.shape[0] == box_mass.shape[0]:
            PCG_ROOT_LOGGER.info(
                '[box_factory] Not using permutation, '
                'generating {} dynamic models'.format(box_size.shape[0]))
            for i in range(box_size.shape[0]):
                box_name = '{}_{}'.format(name, i)
                models.append(box(
                    size=box_size[i, :],
                    mass=box_mass[i],
                    name=box_name,
                    pose=pose,
                    color=color))
        else:
            PCG_ROOT_LOGGER.info(
                '[box_factory] Since the number of masses and sizes'
                ' provided are different, using permutation and '
                'generating {} models'.format(
                    box_size.shape[0] * box_mass.shape[0]))
    # BUG FIX: guard on box_mass — with mass=None and an empty size list the
    # original reached itertools.product(box_size, None) and raised TypeError.
    if box_mass is not None and len(models) == 0:
        PCG_ROOT_LOGGER.info(
            '[box_factory] Using permutation, '
            'generating {} dynamic models'.format(
                box_size.shape[0] * box_mass.shape[0]))
        model_counter = 0
        for box_param in itertools.product(box_size, box_mass):
            box_name = '{}_{}'.format(name, model_counter)
            models.append(box(
                size=box_param[0],
                mass=box_param[1],
                name=box_name,
                pose=pose,
                color=color))
            model_counter += 1
    return models
def sphere_factory(radius, mass=None, name='sphere', pose=[0, 0, 0, 0, 0, 0],
                   use_permutation=True, color=None):
    """Factory function for sphere-shaped models. It
    parses the vector `radius` to generate the spheres.
    The `mass` can be either a scalar or a vector.
    If `mass` is a scalar, all spheres will have the
    same mass. If the size of the vectors `radius` and
    `mass` are the same, the spheres can be generated
    by associating a `radius` value with a mass by position
    in the array or they can be permutated. If the vectors
    `radius` and `mass` have different lengths,
    only permutation can be performed.
    The `radius` and `mass` inputs can also be provided
    as lambda functions as `str`, such as:
    ```python
    radius="__import__('numpy').random.random(2)"
    mass="__import__('numpy').arange(1, 4, 1)"
    ```
    > *Input arguments*
    * `radius` (*type:* `list` or lambda function as `str`):
    List of radius values
    * `mass` (*type:* `float`, list of `float` or lambda
    function as `str`, *default:* `None`): Mass of the spheres.
    If `mass` is `None`, all spheres will be static models
    * `pose` (*type:* `list` or `numpy.array`, *default:*
    `[0, 0, 0, 0, 0, 0]`): Origin of the model.
    * `use_permutation` (*type:* `bool`, *default:* `True`):
    Enable use of permutation to associate the `radius`
    elements with the `mass` inputs. If the sizes of the
    `radius` and `mass` have different sizes, permutation
    will be used per default.
    * `color` (*type:* `str` or `list`, *default:* `None`):
    Color of the model. It can be provided as a RGBA vector,
    `xkcd` for a random [XKCD color](https://xkcd.com/color/rgb/)
    or a specific `xkcd` color name, and/or `random` for a
    random RGBA color.
    > *Returns*
    List of `pcg_gazebo.simulation.SimulationModel` instances.
    """
    # Resolve inputs that may be scalars, lists or lambda strings
    sphere_radius = _parse_factory_input_as_vector(radius)
    if mass is not None:
        sphere_mass = _parse_factory_input_as_vector(mass)
    else:
        sphere_mass = None
    PCG_ROOT_LOGGER.info(
        'Generating spheres, radius={},'
        ' mass={}, use_permutation={}, name={}, pose={}'.format(
            sphere_radius,
            sphere_mass,
            use_permutation,
            name,
            pose))
    models = list()
    if mass is None:
        # Without masses all spheres are static models; one per radius entry
        for i in range(sphere_radius.size):
            sphere_name = '{}_{}'.format(name, i)
            models.append(sphere(
                radius=sphere_radius[i],
                mass=0,
                name=sphere_name,
                pose=pose,
                color=color))
    elif not use_permutation:
        if sphere_radius.shape == sphere_mass.shape:
            # Pairwise association: radius[i] goes with mass[i]
            PCG_ROOT_LOGGER.info(
                '[sphere_factory] Not using permutation, '
                'generating {} dynamic models'.format(sphere_radius.size))
            for i in range(sphere_radius.size):
                sphere_name = '{}_{}'.format(name, i)
                models.append(sphere(
                    radius=sphere_radius[i],
                    mass=sphere_mass[i],
                    name=sphere_name,
                    pose=pose,
                    color=color))
        else:
            # Consistent with box_factory: mismatched input sizes force
            # the permutation fallback below
            PCG_ROOT_LOGGER.info(
                '[sphere_factory] Since the number of masses and radii'
                ' provided are different, using permutation and '
                'generating {} models'.format(
                    sphere_radius.size * sphere_mass.size))
    # Permutation fallback; only valid when masses were provided.
    # Bug fix: the original called itertools.product(sphere_radius, None)
    # when `radius` was an empty vector and `mass` was None, raising a
    # TypeError instead of returning an empty list.
    if len(models) == 0 and sphere_mass is not None:
        PCG_ROOT_LOGGER.info(
            '[sphere_factory] Using permutation, '
            'generating {} dynamic models'.format(
                sphere_radius.size * sphere_mass.size))
        model_counter = 0
        for sphere_param in itertools.product(sphere_radius, sphere_mass):
            sphere_name = '{}_{}'.format(name, model_counter)
            models.append(sphere(
                radius=sphere_param[0],
                mass=sphere_param[1],
                name=sphere_name,
                pose=pose,
                color=color))
            model_counter += 1
    return models
def cylinder_factory(
        length,
        radius,
        mass=None,
        name='cylinder',
        pose=[0, 0, 0, 0, 0, 0],
        use_permutation=True,
        color=None):
    """Factory function for cylinder-shaped models. It parses the
    vectors `radius` and `length` to generate the cylinders. The
    `mass` can be either a scalar or a vector. If `mass` is a scalar,
    all cylinders will have the same mass. If the size of the vectors
    `length`, `radius` and `mass` are the same, the cylinders can be
    generated by associating a `radius` and a `length` value with a
    mass by position in the array or they can be permutated. If the
    vectors `radius` and `length` have different lengths, only
    permutation can be performed.
    The `length`, `radius` and `mass` inputs can also be provided as lambda
    functions as `str`, such as:
    ```python
    length="__import__('numpy').random.random(2)"
    radius="__import__('numpy').random.random(2)"
    mass="__import__('numpy').arange(1, 4, 1)"
    ```
    > *Input arguments*
    * `radius` (*type:* `float`, list of `float` or lambda function as `str`):
    List of radius values
    * `length` (*type:* `float`, list of `float` or lambda function as `str`):
    List of length values
    * `mass` (*type:* `float`, list of `float` or lambda function as `str`,
    *default:* `None`): Mass of the cylinders. If `mass` is `None`, all
    cylinders will be static models
    * `pose` (*type:* `list` or `numpy.array`, *default:*
    `[0, 0, 0, 0, 0, 0]`): Origin of the model.
    * `use_permutation` (*type:* `bool`, *default:* `True`): Enable use of
    permutation to associate the `size` elements with the `mass` inputs. If the
    sizes of the `length` and `radius` have different sizes, permutation will
    be used per default.
    * `color` (*type:* `str` or `list`, *default:* `None`): Color of the model.
    It can be provided as a RGBA vector, `xkcd` for a random
    [XKCD color](https://xkcd.com/color/rgb/) or a specific `xkcd`
    color name, and/or `random` for a random RGBA color.
    > *Returns*
    List of `pcg_gazebo.simulation.SimulationModel` instances.
    """
    # Resolve inputs that may be scalars, lists or lambda strings
    cyl_lengths = _parse_factory_input_as_vector(length)
    cyl_radius = _parse_factory_input_as_vector(radius)
    if mass is not None:
        cyl_mass = _parse_factory_input_as_vector(mass)
    else:
        cyl_mass = None
    PCG_ROOT_LOGGER.info(
        'Generating cylinders, length={}, radius={},'
        ' mass={}, use_permutation={}, name={}, pose={}'.format(
            cyl_lengths, cyl_radius, cyl_mass, use_permutation,
            name, pose))
    models = list()
    if not use_permutation:
        # Pairwise association requires matching length/radius vectors;
        # if the shapes differ, nothing is generated here and the
        # permutation fallback below takes over
        if cyl_lengths.shape == cyl_radius.shape:
            for i in range(cyl_lengths.size):
                cyl_name = '{}_{}'.format(name, i)
                if mass is None:
                    # Static cylinder (no mass provided)
                    models.append(cylinder(
                        cyl_lengths[i],
                        cyl_radius[i],
                        name=cyl_name,
                        pose=pose,
                        color=color))
                elif cyl_mass.size == 1:
                    # Single scalar mass shared by all cylinders
                    models.append(cylinder(
                        cyl_lengths[i],
                        cyl_radius[i],
                        cyl_mass[0],
                        name=cyl_name,
                        pose=pose,
                        color=color))
                elif cyl_mass.shape == cyl_lengths.shape:
                    # One mass per (length, radius) pair
                    models.append(cylinder(
                        cyl_lengths[i],
                        cyl_radius[i],
                        cyl_mass[i],
                        name=cyl_name,
                        pose=pose,
                        color=color))
    # Permutation path: used when requested explicitly or when the
    # non-permutation branch above produced no models
    if len(models) == 0:
        model_counter = 0
        if mass is not None:
            # Dynamic models: all (length, radius, mass) combinations
            for cyl_params in itertools.product(
                    cyl_lengths, cyl_radius, cyl_mass):
                cyl_name = '{}_{}'.format(name, model_counter)
                models.append(
                    cylinder(
                        length=cyl_params[0],
                        radius=cyl_params[1],
                        mass=cyl_params[2],
                        name=cyl_name,
                        pose=pose,
                        color=color))
                model_counter += 1
        else:
            # Static models: all (length, radius) combinations
            for cyl_params in itertools.product(cyl_lengths, cyl_radius):
                cyl_name = '{}_{}'.format(name, model_counter)
                models.append(
                    cylinder(
                        length=cyl_params[0],
                        radius=cyl_params[1],
                        name=cyl_name,
                        pose=pose,
                        color=color))
                model_counter += 1
    return models
def extrude(
        polygon,
        height,
        thickness=0,
        cap_style='round',
        join_style='round',
        extrude_boundaries=False,
        name='mesh',
        pose=[0, 0, 0, 0, 0, 0],
        color=None,
        mass=0,
        inertia=None,
        use_approximated_inertia=True,
        approximated_inertia_model='box',
        visual_parameters=dict(),
        collision_parameters=dict()):
    """Extrude the input `shapely` polygon into a 3D mesh and wrap it
    in a single-link mesh model.

    The geometric arguments (`polygon`, `height`, `thickness`,
    `cap_style`, `join_style`, `extrude_boundaries`) are forwarded to
    the mesh extrusion helper; the remaining arguments are forwarded
    to the `mesh` model factory.

    > *Returns*
    A `pcg_gazebo.simulation.SimulationModel` instance.
    """
    # Alias the helper to avoid shadowing this function's own name
    from .mesh import extrude as extrude_polygon
    solid_mesh = extrude_polygon(
        polygon=polygon,
        height=height,
        thickness=thickness,
        cap_style=cap_style,
        join_style=join_style,
        extrude_boundaries=extrude_boundaries)
    return mesh(
        visual_mesh=solid_mesh,
        name=name,
        pose=pose,
        color=color,
        mass=mass,
        inertia=inertia,
        use_approximated_inertia=use_approximated_inertia,
        approximated_inertia_model=approximated_inertia_model,
        visual_parameters=visual_parameters,
        collision_parameters=collision_parameters)
def room(
        polygon,
        wall_height=2,
        wall_thickness=0.1,
        cap_style='square',
        join_style='mitre',
        add_floor=False,
        floor_thickness=0.01,
        name='room',
        pose=[0, 0, 0, 0, 0, 0],
        color=None,
        mass=0,
        separate_models=False,
        visual_parameters=dict(),
        collision_parameters=dict()):
    """Generate the models of a room (walls plus optional floor) by
    extruding the input `shapely` geometry.

    NOTE(review): the `cap_style`, `join_style`, `mass`,
    `visual_parameters` and `collision_parameters` arguments are
    accepted but never used in the body below -- confirm intended
    behavior.

    > *Returns*
    List of `SimulationModel` instances: the walls model, plus the
    floor either merged into the walls model or appended as a separate
    model when `separate_models` is `True`.
    """
    from .mesh import extrude
    from shapely.geometry import Point, MultiPoint, \
        LineString, MultiLineString
    assert not isinstance(polygon, Point), \
        'A room cannot be created from a point'
    models = list()
    if isinstance(polygon, MultiPoint):
        PCG_ROOT_LOGGER.warning(
            'The input for room construction is a MultiPoint'
            ' object, using the convex hull')
        input_poly = polygon.convex_hull
    else:
        input_poly = polygon
    if isinstance(input_poly, LineString) or \
            isinstance(input_poly, MultiLineString):
        # Line inputs are extruded directly with the wall thickness
        wall_mesh = extrude(
            input_poly,
            height=wall_height,
            thickness=wall_thickness,
            cap_style='flat',
            join_style='mitre',
            extrude_boundaries=False)
    else:
        # For area inputs the wall outline is the ring obtained by
        # subtracting the polygon eroded by half the wall thickness
        # from the polygon dilated by half the wall thickness
        outer_wall = input_poly.buffer(
            wall_thickness / 2.,
            cap_style=3,
            join_style=2)
        inner_wall = input_poly.buffer(
            -wall_thickness / 2.,
            cap_style=3,
            join_style=2)
        wall_poly = outer_wall.difference(inner_wall)
        wall_mesh = extrude(
            wall_poly,
            height=wall_height,
            extrude_boundaries=False)
    model = SimulationModel(
        name=name if not separate_models else name +
        '_walls')
    # Walls are shifted up by half their height so the model origin
    # sits at ground level
    model.add_link(
        visual_mesh=wall_mesh,
        collision_mesh=wall_mesh,
        use_approximated_collision=False,
        approximated_collision_model=False,
        visual_mesh_scale=[1, 1, 1],
        collision_mesh_scale=[1, 1, 1],
        name='walls',
        color=color,
        mass=0,
        inertia=None,
        use_approximated_inertia=False,
        pose=[0, 0, wall_height / 2., 0, 0, 0])
    model.pose = pose
    model.static = True
    models.append(model)
    if add_floor:
        if isinstance(input_poly, LineString) or \
                isinstance(input_poly, MultiLineString):
            floor_poly = input_poly.convex_hull
        else:
            floor_poly = input_poly
        # Grow the floor so it reaches the outer face of the walls
        floor_poly = floor_poly.buffer(
            wall_thickness / 2.,
            cap_style=3,
            join_style=2)
        floor_mesh = extrude(
            floor_poly,
            height=floor_thickness)
        # Floor is shifted down so its top face is at z = 0
        floor_link_parameters = dict(
            visual_mesh=floor_mesh,
            collision_mesh=floor_mesh,
            use_approximated_collision=False,
            approximated_collision_model=False,
            visual_mesh_scale=[1, 1, 1],
            collision_mesh_scale=[1, 1, 1],
            name='floor',
            color=color,
            mass=0,
            inertia=None,
            use_approximated_inertia=False,
            pose=[0, 0, -floor_thickness / 2., 0, 0, 0])
        if separate_models:
            floor_model = SimulationModel(name=name + '_ground_plane')
            floor_model.add_link(**floor_link_parameters)
            floor_model.static = True
            models.append(floor_model)
        else:
            models[0].add_link(**floor_link_parameters)
    return models
def hinged_door(door_mesh_filename=None, width=0.6,
                thickness=0.04, height=2.0, mass=10,
                set_origin_to_ground=True, fix_to_world=True,
                hand_convention='LH', max_opening_angle=np.pi / 2,
                name='door', frame_mesh_filename=None,
                with_frame=True, frame_width=0.05,
                frame_height=0.05, frame_depth=0.05):
    """Create a hinged-door component model.

    All arguments are forwarded unchanged to
    `pcg_gazebo.simulation.components.HingedDoor`.

    > *Returns*
    A `HingedDoor` instance.
    """
    from ..simulation.components import HingedDoor
    door_args = dict(
        door_mesh_filename=door_mesh_filename,
        width=width,
        thickness=thickness,
        height=height,
        mass=mass,
        set_origin_to_ground=set_origin_to_ground,
        fix_to_world=fix_to_world,
        hand_convention=hand_convention,
        max_opening_angle=max_opening_angle,
        name=name,
        frame_mesh_filename=frame_mesh_filename,
        with_frame=with_frame,
        frame_width=frame_width,
        frame_height=frame_height,
        frame_depth=frame_depth)
    return HingedDoor(**door_args)
def config2models(config):
    """Parse the input `dict` configuration and call the respective
    model factory.
    > *Input arguments*
    * `config` (*type:* `dict`): Dictionary with the model generation
    rules
    > *Returns*
    List of SDF representations of the generated models (the result of
    calling `to_sdf()` on each `pcg_gazebo.simulation.SimulationModel`).
    """
    # Creators return a single model, factories return a list of models
    single_model_creators = dict(
        box=box,
        sphere=sphere,
        cylinder=cylinder,
        mesh=mesh,
        extrude=extrude,
        hinged_door=hinged_door)
    multi_model_factories = dict(
        box_factory=box_factory,
        sphere_factory=sphere_factory,
        cylinder_factory=cylinder_factory)
    models = list()
    model_type = config['type']
    if model_type in single_model_creators:
        models.append(single_model_creators[model_type](**config['args']))
    elif model_type in multi_model_factories:
        models += multi_model_factories[model_type](**config['args'])
    else:
        # Previously unknown types were silently ignored and produced an
        # empty output; at least report the problem
        PCG_ROOT_LOGGER.error(
            'Invalid model configuration type, provided={}'.format(
                model_type))
    return [model.to_sdf() for model in models]
def create_models_from_config(config, n_processes=None):
    """Creation of models from a `dict` configuration input using
    multi-processing.
    > *Input arguments*
    * `config` (*type:* `dict`): Dictionary with the model generation
    rules
    * `n_processes` (*type:* `int`, *default:* `None`): Maximum number of
    processes. If `None`, then use the number of CPUs available.
    > *Returns*
    List of `pcg_gazebo.simulation.SimulationModel` instances.
    """
    if not isinstance(config, list):
        PCG_ROOT_LOGGER.info('Input is not a list of configurations')
        return None
    results = list()
    if n_processes is not None:
        # Bug fix: the pool was previously created and never closed,
        # leaking worker processes; the context manager guarantees
        # termination once the map is done
        with Pool(n_processes) as pool:
            output = pool.map(config2models, config)
        for item in output:
            results = results + item
    else:
        # Sequential fallback when no process count is requested
        for c in config:
            results = results + config2models(c)
    generated_models = list()
    for sdf in results:
        generated_models.append(SimulationModel.from_sdf(sdf))
    return generated_models
|
{"hexsha": "139f048cc9a43f5f55994932c3564dbf92494c5e", "size": 35077, "ext": "py", "lang": "Python", "max_stars_repo_path": "pcg_gazebo/generators/creators.py", "max_stars_repo_name": "TForce1/pcg_gazebo", "max_stars_repo_head_hexsha": "9ff88016b7b6903236484958ca7c6ed9f8ffb346", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 40, "max_stars_repo_stars_event_min_datetime": "2020-02-04T18:16:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T11:36:34.000Z", "max_issues_repo_path": "pcg_gazebo/generators/creators.py", "max_issues_repo_name": "awesomebytes/pcg_gazebo", "max_issues_repo_head_hexsha": "4f335dd460ef7c771f1df78b46a92fad4a62cedc", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": 75, "max_issues_repo_issues_event_min_datetime": "2020-01-23T13:40:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T07:26:01.000Z", "max_forks_repo_path": "pcg_gazebo/generators/creators.py", "max_forks_repo_name": "GimpelZhang/gazebo_world_generator", "max_forks_repo_head_hexsha": "eb7215499d0ddc972d804c988fadab1969579b1b", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2020-09-10T06:35:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-20T19:08:17.000Z", "avg_line_length": 36.5385416667, "max_line_length": 79, "alphanum_fraction": 0.604926305, "include": true, "reason": "import numpy", "num_tokens": 8393}
|
import itertools
import numpy as np
from game import Grid, Game
from config import *
config = Base()
def get_grid(tiles, directions):
    """Clone *tiles*, apply every move in *directions* (spawning a
    random tile after each move) and return the resulting board."""
    board = Grid(config.SIZE)
    board.tiles = tiles.copy()
    for move in directions:
        board.run(move)
        board.add_random_tile()
    return board.tiles
def printf(tiles):
    """Pretty-print the board: each tile centered in a 6-char cell."""
    for line in tiles:
        print(''.join('{:^6}'.format(cell) for cell in line))
def my_log2(z):
    """Return the tile value unchanged (0 stays 0).

    NOTE(review): despite the name, the log2 conversion is disabled --
    it is commented out below -- so this is effectively an identity
    function for numeric tile values.
    """
    return 0 if z == 0 else z
    # return np.math.log2(z)
class Ai:
    """Move-search AI for 2048.

    Strategy: enumerate 3-move sequences, score each by the minimum
    (worst-case) heuristic value over several random rollouts, and play
    the first move of the best sequence that actually changes the board.
    The heuristic combines a monotonicity penalty (`get_bj2__*`) with a
    positional weight favoring a corner (`get_bj__*`).
    """
    def __init__(self):
        # Scratch grid used to replay candidate moves
        self.g = Grid(config.SIZE)
    def get_next(self, tiles):
        """Choose the next move for board *tiles*.

        Returns a tuple `(direction, score)` where `direction` is one of
        "ULRD" and `score` is the chosen sequence's worst-case rollout
        score normalized by the number of rollouts.
        """
        score_list = []
        tn = self.get_tile_num(tiles)
        # With many empty tiles, just move right/down at random --
        # searching is not worth the cost yet
        if tn >= self.g.size ** 2 / 3:
            return "RD"[np.random.randint(0, 2)], 0
        # Rollouts per candidate sequence, clamped to [20, 40]
        kn = min(max(tn ** 2, 20), 40)
        for directions in itertools.product("ULRD", repeat=3):
            fen = []
            for i in range(kn):
                t_g = get_grid(tiles, directions)
                fen.append(self.get_score(t_g))
            print(directions, min(fen))
            # Rank each sequence by its worst-case rollout score
            score_list.append([directions, min(fen)])
        score_list = sorted(score_list, key=(lambda x: [x[1]]))
        # print(score_list)
        # Walk sequences from best to worst; pick the first whose first
        # move actually changes the board
        for d in score_list[::-1]:
            self.g.tiles = tiles.copy()
            if self.g.run(d[0][0], is_fake=False) != 0:
                return d[0][0], d[1] / kn
        self.g.tiles = tiles.copy()
        # print('===',score_list[-1][0][0])
        return score_list[-1][0][0], score_list[-1][1] / kn
    def get_score(self, tiles):
        """Heuristic evaluation of *tiles* (higher is better)."""
        # Tile count (fewer is better); "golden corners, silver edges"
        # corner-weighting heuristic. The commented-out variant scored
        # all four orientations and took the best one:
        # bjs = [self.get_bj2(tiles)[i] * 2.8 + self.get_bj(tiles)[i] for i in range(4)]
        # return max(bjs)
        a = self.get_bj2__4(tiles)
        b = self.get_bj__4(tiles)
        print(a, b)
        return a * 2.8 + b
    def debug(self, tiles):
        """Verbose variant of `get_next`: 2-move sequences, a single
        rollout per sequence, printing the boards considered.

        Also actually applies the chosen move to the scratch grid.
        """
        print('\n=======开始判断========')
        print('移动前棋盘:')
        printf(tiles)
        score_list = []
        for directions in itertools.product("ULRD", repeat=2):
            t_g = get_grid(tiles, directions)
            fen = self.get_score(t_g)
            score_list.append([directions, fen])
            print('==={}=={}=='.format(directions, fen))
            printf(t_g)
        score_list = sorted(score_list, key=(lambda x: [x[1]]))
        # print(score_list)
        for d in score_list[::-1]:
            # print('-->',d)
            self.g.tiles = tiles.copy()
            # print(self.g.run(d[0][0],is_fake=True))
            if self.g.run(d[0][0], is_fake=True) != 0:
                # print('---异动前:')
                # print(self.g.tiles)
                # print('---异动后:')
                self.g.run(d[0][0])
                # print(self.g.tiles)
                return d[0][0]
        # print('===',score_list[-1][0][0])
        return score_list[-1][0][0]
    # Number of empty tiles
    def get_tile_num(self, tiles):
        """Count the empty (zero-valued) tiles on the board."""
        # l = len(tiles)
        n = 0
        for row in tiles:
            for i in row:
                if i == 0:
                    n += 1
        return n
        # return np.bincount(tiles)[0]
    def get_bj(self, tiles):
        """Positional scores for all four corner orientations."""
        gjs = [
            self.get_bj__1(tiles),
            self.get_bj__2(tiles),
            self.get_bj__3(tiles),
            self.get_bj__4(tiles)
        ]
        return gjs
    def get_bj__4(self, tiles):
        """Positional score favoring large tiles near the bottom-right
        corner; empty tiles near the top-left earn a bonus."""
        bj = 0
        l = len(tiles)
        size = self.g.size - 1
        for y in range(l):
            for x in range(l):
                z = tiles[y][x]
                if z != 0:
                    z_log = z - 2
                    bj += z_log * (x + y - (size * 2 - 1))
                else:
                    bj += (100 - 20 * (x + y - (size * 2 - 1)))
                # print(z, "-- ", bj)
        return bj
    def get_bj__3(self, tiles):
        """Positional score favoring the bottom-left corner."""
        bj = 0
        l = len(tiles)
        size = self.g.size - 1
        for y in range(l):
            for x in range(l):
                z = tiles[y][x]
                if z != 0:
                    z_log = z - 2
                    bj += z_log * ((size - x) + y - (size * 2 - 1))
                else:
                    bj += (100 - 20 * ((size - x) + y - (size * 2 - 1)))
        return bj
    def get_bj__2(self, tiles):
        """Positional score favoring the top-left corner."""
        bj = 0
        l = len(tiles)
        size = self.g.size - 1
        for y in range(l):
            for x in range(l):
                z = tiles[y][x]
                if z != 0:
                    z_log = z - 2
                    bj += z_log * ((size - x) + (size - y) - (size * 2 - 1))
                else:
                    bj += (100 - 20 * ((size - x) + (size - y) - (size * 2 - 1)))
        return bj
    def get_bj__1(self, tiles):
        """Positional score favoring the top-right corner."""
        bj = 0
        l = len(tiles)
        size = self.g.size - 1
        for y in range(l):
            for x in range(l):
                z = tiles[y][x]
                if z != 0:
                    z_log = z - 2
                    bj += z_log * (x + (size - y) - (size * 2 - 1))
                else:
                    bj += (100 - 20 * (x + (size - y) - (size * 2 - 1)))
        return bj
    def get_bj2(self, tiles):
        """Monotonicity penalties for all four corner orientations."""
        gjs = [
            self.get_bj2__1(tiles),
            self.get_bj2__2(tiles),
            self.get_bj2__3(tiles),
            self.get_bj2__4(tiles)
        ]
        return gjs
    def get_bj2__1(self, tiles):
        """Monotonicity penalty toward the bottom-right neighborhood
        (penalizes tiles smaller than their left/lower neighbors)."""
        bj = 0
        l = len(tiles)
        for y in range(0, l - 1, 1):
            for x in range(l - 1, 0, -1):
                z = tiles[y][x]
                if tiles[y][x] < tiles[y][x - 1]:
                    bj -= abs(my_log2(tiles[y][x - 1]) - z)
                if tiles[y][x] < tiles[y + 1][x]:
                    bj -= abs(my_log2(tiles[y + 1][x]) - z)
                if tiles[y][x] < tiles[y + 1][x - 1]:
                    bj -= abs(my_log2(tiles[y + 1][x - 1]) - z)
        return bj
    def get_bj2__2(self, tiles):
        """Monotonicity penalty toward the bottom-left neighborhood."""
        bj = 0
        l = len(tiles)
        for y in range(0, l - 1):
            for x in range(0, l - 1):
                z = tiles[y][x]
                if tiles[y][x] < tiles[y][x + 1]:
                    bj -= abs(my_log2(tiles[y][x + 1]) - z)
                if tiles[y][x] < tiles[y + 1][x]:
                    bj -= abs(my_log2(tiles[y + 1][x]) - z)
                if tiles[y][x] < tiles[y + 1][x + 1]:
                    bj -= abs(my_log2(tiles[y + 1][x + 1]) - z)
        return bj
    def get_bj2__3(self, tiles):
        """Monotonicity penalty toward the top-left neighborhood."""
        bj = 0
        l = len(tiles)
        for y in range(l - 1, 0, -1):
            for x in range(0, l - 1):
                z = tiles[y][x]
                if tiles[y][x] < tiles[y][x + 1]:
                    bj -= abs(my_log2(tiles[y][x + 1]) - z)
                if tiles[y][x] < tiles[y - 1][x]:
                    bj -= abs(my_log2(tiles[y - 1][x]) - z)
                if tiles[y][x] < tiles[y - 1][x + 1]:
                    bj -= abs(my_log2(tiles[y - 1][x + 1]) - z)
        return bj
    def get_bj2__4(self, tiles):
        """Monotonicity penalty toward the top-right neighborhood."""
        bj = 0
        l = len(tiles)
        for y in range(l - 1, 0, -1):
            for x in range(l - 1, 0, -1):
                z = tiles[y][x]
                if z < tiles[y][x - 1]:
                    bj -= abs(my_log2(tiles[y][x - 1]) - z)
                if z < tiles[y - 1][x]:
                    bj -= abs(my_log2(tiles[y - 1][x]) - z)
                if z < tiles[y - 1][x - 1]:
                    bj -= abs(my_log2(tiles[y - 1][x - 1]) - z)
        return bj
if __name__ == '__main__':
    # Quick manual smoke test on a nearly-full board.
    game = Game(4)
    game.grid.tiles = np.array([
        [0, 0, 0, 0],
        [0, 32, 64, 128],
        [256, 512, 1024, 1024],
        [1024, 1024, 1024, 1024]
    ])
    ai = Ai()
    print(game.grid)
    move = ai.get_next(game.grid.tiles)
    print(move)
    game.run(move[0])
    print(game.grid)
|
{"hexsha": "7b85e5dd64f8a2f31f942c379355d398b6597db4", "size": 7745, "ext": "py", "lang": "Python", "max_stars_repo_path": "01 VacantHusky-2048GameAutoMovePython/2048python/ai.py", "max_stars_repo_name": "Guleixibian2009/Game-Collection", "max_stars_repo_head_hexsha": "1d8997b3ab0ea38958ed67dab7132bc89d467644", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-19T05:37:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-19T05:37:19.000Z", "max_issues_repo_path": "01 VacantHusky-2048GameAutoMovePython/2048python/ai.py", "max_issues_repo_name": "Guleixibian2009/Game-Collection", "max_issues_repo_head_hexsha": "1d8997b3ab0ea38958ed67dab7132bc89d467644", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "01 VacantHusky-2048GameAutoMovePython/2048python/ai.py", "max_forks_repo_name": "Guleixibian2009/Game-Collection", "max_forks_repo_head_hexsha": "1d8997b3ab0ea38958ed67dab7132bc89d467644", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4921259843, "max_line_length": 88, "alphanum_fraction": 0.4156229826, "include": true, "reason": "import numpy", "num_tokens": 2334}
|
import numpy as np
import time
from collections import Counter
class Vocabulary(object):
    """Bidirectional word <-> index vocabulary.

    Words are accumulated with `add_word`/`add_words` (which only count
    frequencies); `finish` must be called afterwards to freeze the index
    mapping. Special words (e.g. the UNK token) are always kept in the
    vocabulary regardless of frequency/size thresholds.
    """
    UNK = '<unk>'

    def __init__(self, offset=0, unk=True):
        # NOTE(review): `offset` is stored but never applied to the
        # generated indices -- confirm whether callers rely on it.
        self.word_to_ind = {}
        self.ind_to_word = {}
        self.word_count = Counter()
        self.size = 0
        self.offset = offset
        self.special_words = set()
        if unk:
            self.add_word(self.UNK, special=True)
        self.finished = False

    def __len__(self):
        return self.size

    def add_words(self, words, special=False):
        """Count every word in the iterable `words`."""
        for w in words:
            self.add_word(w, special)

    def has(self, word):
        """Return True if `word` received an index (valid after `finish`)."""
        return word in self.word_to_ind

    def add_word(self, word, special=False):
        """Increment the count of `word`; optionally mark it as special."""
        self.word_count[word] += 1
        if special:
            self.special_words.add(word)

    def finish(self, freq_threshold=0, size_threshold=None):
        """Freeze the vocabulary.

        Words with count < `freq_threshold` are dropped, the survivors
        are indexed by decreasing frequency (truncated to
        `size_threshold` entries), and special words are re-appended if
        the filtering removed them.
        """
        if freq_threshold > 0:
            # Bug fix: the original deleted entries from `word_count`
            # while iterating over it, which raises "RuntimeError:
            # dictionary changed size during iteration" on Python 3.
            # Collect the rare words first, then delete them.
            rare_words = [w for w, c in self.word_count.items()
                          if c < freq_threshold]
            for word in rare_words:
                del self.word_count[word]
        self.ind_to_word = [w for w, c in self.word_count.most_common(size_threshold)]
        self.word_to_ind = {w: i for i, w in enumerate(self.ind_to_word)}
        # Make sure special words are included
        n = len(self.ind_to_word)
        for w in self.special_words:
            if w not in self.word_to_ind:
                self.ind_to_word.append(w)
                self.word_to_ind[w] = n
                n += 1
        self.size = len(self.ind_to_word)
        self.finished = True

    def to_ind(self, word):
        """Map `word` to its index, falling back to UNK when enabled."""
        if word in self.word_to_ind:
            return self.word_to_ind[word]
        else:
            # NOTE: if UNK is not enabled, it will throw an exception
            if self.UNK in self.word_to_ind:
                return self.word_to_ind[self.UNK]
            else:
                raise KeyError(str(word))

    def to_word(self, ind):
        """Map an index back to its word."""
        return self.ind_to_word[ind]

    def dump(self):
        """Print the first ~100 (index, word) pairs for inspection."""
        for i, w in enumerate(self.ind_to_word):
            print('{:<8}{:<}'.format(i, w))
            if i > 100:
                break

    def load_embeddings(self, wordvec_file, dim):
        """Load pretrained word vectors into a `[size, dim]` matrix.

        Rows for words missing from the file keep their random init in
        [-1, 1). Expected file format is whitespace-separated lines:
        `word v1 v2 ... vdim`.
        """
        print('Loading pretrained word vectors:', wordvec_file)
        start_time = time.time()
        embeddings = np.random.uniform(-1., 1., [self.size, dim])
        num_exist = 0
        with open(wordvec_file, 'r') as f:
            for line in f:
                ss = line.split()
                word = ss[0]
                if word in self.word_to_ind:
                    num_exist += 1
                    vec = np.array([float(x) for x in ss[1:]])
                    embeddings[self.word_to_ind[word]] = vec
        print('[%d s]' % (time.time() - start_time))
        print('%d pretrained' % num_exist)
        return embeddings
|
{"hexsha": "b11d7392633e1034d8a976c1b27f00a171ae588d", "size": 2811, "ext": "py", "lang": "Python", "max_stars_repo_path": "cocoa_folder/cocoa/model/vocab.py", "max_stars_repo_name": "s-akanksha/DialoGraph_ICLR21", "max_stars_repo_head_hexsha": "d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-03-17T05:15:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-19T06:09:21.000Z", "max_issues_repo_path": "cocoa_folder/cocoa/model/vocab.py", "max_issues_repo_name": "s-akanksha/DialoGraph_ICLR21", "max_issues_repo_head_hexsha": "d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-05-25T07:28:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-11T01:54:43.000Z", "max_forks_repo_path": "cocoa_folder/cocoa/model/vocab.py", "max_forks_repo_name": "s-akanksha/DialoGraph_ICLR21", "max_forks_repo_head_hexsha": "d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-10-11T03:39:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-01T23:58:50.000Z", "avg_line_length": 30.8901098901, "max_line_length": 86, "alphanum_fraction": 0.552828175, "include": true, "reason": "import numpy", "num_tokens": 647}
|
/**
* Copyright (C) 2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link the
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
* must comply with the GNU Affero General Public License in all respects for
* all of the code used other than as permitted herein. If you modify file(s)
* with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also delete
* it in the license file.
*/
#include "mongo/pch.h"
#include <boost/thread.hpp>
#include <string>
#include <vector>
#include "mongo/bson/oid.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/commands.h"
#include "mongo/db/pubsub.h"
#include "mongo/db/pubsub_sendsock.h"
namespace mongo {
namespace {
    // constants for field names used in the pubsub command documents
    // (publish/subscribe/poll/unsubscribe) defined below
    const std::string kSubscriptionId = "subscriptionId";
    const std::string kPublishField = "publish";
    const std::string kMessageField = "message";
    const std::string kSubscribeField = "subscribe";
    const std::string kFilterField = "filter";
    const std::string kProjectionField = "projection";
    const std::string kPollField = "poll";
    const std::string kTimeoutField = "timeout";
    const std::string kMillisPolledField = "millisPolled";
    const std::string kPollAgainField = "pollAgain";
    const std::string kMessagesField = "messages";
    const std::string kErrorField = "errors";
    const std::string kUnsubscribeField = "unsubscribe";
    // Helper method to validate single or array of SubscriptionId arguments.
    // On success every validated ObjectID is inserted into 'oids'; raises a
    // uassert (18543 for a bad top-level type, 18544 for a bad array member)
    // if 'element' is not an ObjectID or an array of ObjectIDs.
    void validate(BSONElement& element, std::set<OID>& oids) {
        // ensure that the subscriptionId argument is a SubscriptionId or array
        uassert(18543,
                mongoutils::str::stream() << "The subscriptionId argument must be "
                                          << "an ObjectID or Array but was a "
                                          << typeName(element.type()),
                element.type() == jstOID || element.type() == mongo::Array);
        if (element.type() == jstOID) {
            oids.insert(element.OID());
        }
        else {
            std::vector<BSONElement> elements = element.Array();
            for (std::vector<BSONElement>::iterator it = elements.begin();
                 it != elements.end();
                 it++) {
                // ensure that each member of the array is a SubscriptionId
                uassert(18544,
                        mongoutils::str::stream() << "Each subscriptionId in the "
                                                  << "subscriptionId array must be an "
                                                  << "ObjectID but found a "
                                                  << typeName(it->type()),
                        it->type() == jstOID);
                oids.insert(it->OID());
            }
        }
    }
}
/**
* Command for publishing to self and other nodes in your replica set or cluster.
*
* Format:
* {
* publish: <string>, // name of channel to publish to.
* message: <Object> // the body of the message to publish. Can have any format desired.
* }
*/
class PublishCommand : public Command {
public:
PublishCommand() : Command("publish") {}
virtual bool slaveOk() const { return true; }
virtual bool slaveOverrideOk() const { return true; }
virtual bool isWriteCommandForConfigServer() const { return false; }
virtual LockType locktype() const { return NONE; }
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {
ActionSet actions;
// TODO: get a real action type
actions.addAction(ActionType::find);
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
virtual void help(stringstream &help) const {
help << "{ publish : <channel>, message : {} }";
}
bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg,
BSONObjBuilder& result, bool fromRepl) {
uassert(18556, "PubSub is not enabled.", pubsubEnabled);
BSONElement channelElem = cmdObj[kPublishField];
// ensure that the channel is a string
uassert(18527,
mongoutils::str::stream() << "The channel passed to the publish "
<< "command must be a string but was a "
<< typeName(channelElem.type()),
channelElem.type() == mongo::String);
string channel = channelElem.String();
// $events channel is reserved for DB events
uassert(18555,
mongoutils::str::stream() << "The \"$events\" channel is reserved for"
<< "database event notifications.",
!StringData(channel).startsWith("$events"));
// ensure that message argument exists
uassert(18552,
mongoutils::str::stream() << "The publish command requires a message argument.",
cmdObj.hasField(kMessageField));
BSONElement messageElem = cmdObj[kMessageField];
// ensure that the message is a document
uassert(18528,
mongoutils::str::stream() << "The message for the publish command must be a "
<< "document but was a "
<< typeName(messageElem.type()),
messageElem.type() == mongo::Object);
BSONObj message = messageElem.Obj();
bool success = PubSubSendSocket::publish(channel, message);
uassert(18538, "Failed to publish message.", success);
return true;
}
} publishCmd;
/**
* Command for subscribing to messages on a given channel.
*
* Format:
* {
* subscribe: <string> // name of channel to subscribe to.
* }
*
* Return value:
* {
* subscriptionId: <ObjectId> // ID of subscription created
* }
*
*/
    class SubscribeCommand : public Command {
    public:
        SubscribeCommand() : Command("subscribe") {}
        // Subscribing is allowed on secondaries as well as primaries.
        virtual bool slaveOk() const { return true; }
        virtual bool slaveOverrideOk() const { return true; }
        virtual bool isWriteCommandForConfigServer() const { return false; }
        // No database lock needed; subscription state is managed by PubSub.
        virtual LockType locktype() const { return NONE; }
        virtual void addRequiredPrivileges(const std::string& dbname,
                                           const BSONObj& cmdObj,
                                           std::vector<Privilege>* out) {
            ActionSet actions;
            // TODO: get a real action type
            actions.addAction(ActionType::find);
            out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
        }
        virtual void help(stringstream &help) const {
            help << "{ subscribe : <channel>, filter : <BSONObj>, projection : <BSONObj> }";
        }
        bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg,
                 BSONObjBuilder& result, bool fromRepl) {
            uassert(18557, "PubSub is not enabled.", pubsubEnabled);
            BSONElement channelElem = cmdObj[kSubscribeField];
            // ensure that the channel is a string
            uassert(18531, mongoutils::str::stream() << "The channel passed to the subscribe "
                                                     << "command must be a string but was a "
                                                     << typeName(channelElem.type()),
                    channelElem.type() == mongo::String);
            string channel = channelElem.String();
            // TODO: validate filter format (look at find command?)
            // Optional filter: empty BSONObj when absent.
            BSONObj filter;
            if (cmdObj.hasField(kFilterField)) {
                BSONElement filterElem = cmdObj[kFilterField];
                // ensure that the filter is a BSON object
                uassert(18553, mongoutils::str::stream() << "The filter passed to the subscribe "
                                                         << "command must be an object but was a "
                                                         << typeName(filterElem.type()),
                        filterElem.type() == mongo::Object);
                filter = filterElem.Obj();
            }
            // TODO: validate projection format
            // Optional projection: empty BSONObj when absent.
            BSONObj projection;
            if (cmdObj.hasField(kProjectionField)) {
                BSONElement projectionElem = cmdObj[kProjectionField];
                // ensure that the projection is a BSON object
                uassert(18554, mongoutils::str::stream() << "The projection passed to the "
                                                         << "subscribe command must be an object "
                                                         << "but was a "
                                                         << typeName(projectionElem.type()),
                        projectionElem.type() == mongo::Object);
                projection = projectionElem.Obj();
            }
            // TODO: add secure access to this channel?
            // perhaps return an <oid, key> pair?
            OID oid = PubSub::subscribe(channel, filter, projection);
            result.append(kSubscriptionId, oid);
            return true;
        }
    } subscribeCmd;
/**
* Command for polling on a single or multiple subscriptions.
*
* Format:
* {
* subscriptionId: <ObjectId | Array>, // ID or IDs of subscriptions to poll on
* [timeout]: <Number> // number of milliseconds to wait if there are no new messages.
* }
*
* Return value:
* {
* messages: <Object>, // messages found. Always returned, even if empty. Has format:
* {
* subscriptionId: <Array>, // key is ID, value is array of message objects
* subscriptionId2: <Array>,
* ...
* }
* errors: <Object>, // returned if and only if any errors occurred. Has format:
* {
* subscriptionId: <string>, // key is ID of channel, value is error string
* subscriptionId2: <string>,
* ...
* }
* millisPolled: <Integer>, // number of milliseconds command waited before finding messages.
* [pollAgain]: <Bool> // returned as true only if poll gets no messages and times out.
* }
*/
// Implements the "poll" command: fetches pending messages for one or
// more subscriptions, optionally blocking up to a caller-given timeout.
class PollCommand : public Command {
public:
    PollCommand() : Command("poll") {}
    // Read-only command: allowed on secondaries.
    virtual bool slaveOk() const { return true; }
    virtual bool slaveOverrideOk() const { return true; }
    virtual bool isWriteCommandForConfigServer() const { return false; }
    // No database lock required.
    virtual LockType locktype() const { return NONE; }
    // Placeholder privilege: requires 'find' until a pub/sub action exists.
    virtual void addRequiredPrivileges(const std::string& dbname,
                                       const BSONObj& cmdObj,
                                       std::vector<Privilege>* out) {
        ActionSet actions;
        // TODO: get a real action type
        actions.addAction(ActionType::find);
        out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
    }
    virtual void help(stringstream &help) const {
        help << "{ poll : <subscriptionId(s)>, timeout : <integer milliseconds> }";
    }
    bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg,
             BSONObjBuilder& result, bool fromRepl) {
        uassert(18558, "PubSub is not enabled.", pubsubEnabled);
        // Parse the subscription id(s); validate() fills `oids`.
        BSONElement oidElement = cmdObj[kPollField];
        std::set<OID> oids;
        validate(oidElement, oids);
        // if no timeout is specified, default is to return from the poll without waiting
        long timeout = 0L;
        BSONElement timeoutElem = cmdObj[kTimeoutField];
        if (!timeoutElem.eoo()) {
            uassert(18535,
                    mongoutils::str::stream() << "The timeout argument must be an integer "
                                              << "but was a "
                                              << typeName(timeoutElem.type()),
                    timeoutElem.type() == NumberDouble ||
                    timeoutElem.type() == NumberLong ||
                    timeoutElem.type() == NumberInt);
            // Accept any numeric type; doubles are floored to whole milliseconds.
            if (timeoutElem.type() == NumberDouble) {
                timeout = static_cast<long>(std::floor(timeoutElem.numberDouble()));
            }
            else if (timeoutElem.type() == NumberLong) {
                timeout = timeoutElem.numberLong();
            }
            else {
                timeout = timeoutElem.numberInt();
            }
        }
        // millisPolled, pollAgain and errors are out-parameters of PubSub::poll.
        long long millisPolled = 0;
        bool pollAgain = false;
        std::map<SubscriptionId, std::string> errors;
        std::priority_queue<SubscriptionMessage> messages = PubSub::poll(oids,
                                                                         timeout,
                                                                         millisPolled,
                                                                         pollAgain,
                                                                         errors);
        // serialize messages into BSON
        // The priority queue orders messages so that equal subscription ids
        // (and, within them, equal channels) come out adjacently; the nested
        // loops below emit one sub-object per subscription mapping
        // channel name -> array of messages.
        BSONObjBuilder messagesBuilder;
        while (!messages.empty()) {
            SubscriptionMessage sm = messages.top();
            SubscriptionId currId = sm.subscriptionId;
            BSONObjBuilder channelBuilder;
            while (!messages.empty() && sm.subscriptionId == currId) {
                std::string currChannel = sm.channel;
                BSONArrayBuilder arrayBuilder;
                while (sm.subscriptionId == currId && sm.channel == currChannel) {
                    arrayBuilder.append(sm.message);
                    messages.pop();
                    if (messages.empty())
                        break;
                    sm = messages.top();
                }
                channelBuilder.append(currChannel, arrayBuilder.arr());
            }
            messagesBuilder.append(currId.toString(), channelBuilder.obj());
        }
        // `messages` and `millisPolled` are always returned, even when empty.
        result.append(kMessagesField, messagesBuilder.obj());
        result.append(kMillisPolledField, millisPolled);
        if (pollAgain)
            result.append(kPollAgainField, true);
        // Per-subscription errors are reported only when some occurred.
        if (errors.size() > 0) {
            BSONObjBuilder errorBuilder;
            for (std::map<SubscriptionId, std::string>::iterator it = errors.begin();
                 it != errors.end();
                 it++) {
                errorBuilder.append(it->first.toString(), it->second);
            }
            result.append(kErrorField, errorBuilder.obj());
        }
        return true;
    }
} pollCmd;
/**
* Command for unsubscribing from a previously registered subscription.
*
* Format:
* {
* unsubscribe: <ObjectId | Array>, // ID(s) of channel(s) to unsubscribe from.
* }
*
* Return value:
* {
* [errors]: <Object> // if any subscriptions can't be found, returns Object with format:
* {
* subscriptionId: <string>, // key is subscription ID, value is error message
* subscriptionId2: <string>,
* ...
* }
* }
*/
// Implements the "unsubscribe" command: removes one or more previously
// registered subscriptions, reporting per-id errors for unknown ids.
class UnsubscribeCommand : public Command {
public:
    UnsubscribeCommand() : Command("unsubscribe") {}
    // Read-only with respect to storage: allowed on secondaries.
    virtual bool slaveOk() const { return true; }
    virtual bool slaveOverrideOk() const { return true; }
    virtual bool isWriteCommandForConfigServer() const { return false; }
    // No database lock required.
    virtual LockType locktype() const { return NONE; }
    // Placeholder privilege: requires 'find' until a pub/sub action exists.
    virtual void addRequiredPrivileges(const std::string& dbname,
                                       const BSONObj& cmdObj,
                                       std::vector<Privilege>* out) {
        ActionSet actions;
        // TODO: get a real action type
        actions.addAction(ActionType::find);
        out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
    }
    virtual void help(stringstream &help) const {
        help << "{ unsubscribe : <subscriptionId(s)> }";
    }
    bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg,
             BSONObjBuilder& result, bool fromRepl) {
        uassert(18559, "PubSub is not enabled.", pubsubEnabled);
        // Parse the subscription id(s); validate() fills `oids`.
        BSONElement oidElement = cmdObj[kUnsubscribeField];
        std::set<OID> oids;
        validate(oidElement, oids);
        // Unsubscribe each id; failures accumulate in `errors` rather than
        // aborting, so one bad id does not block the rest.
        std::map<SubscriptionId, std::string> errors;
        for (std::set<OID>::iterator it = oids.begin(); it != oids.end(); it++) {
            OID oid = *it;
            PubSub::unsubscribe(oid, errors);
        }
        // Per-subscription errors are reported only when some occurred.
        if (errors.size() > 0) {
            BSONObjBuilder errorBuilder;
            for (std::map<SubscriptionId, std::string>::iterator it = errors.begin();
                 it != errors.end();
                 it++) {
                errorBuilder.append(it->first.toString(), it->second);
            }
            result.append(kErrorField, errorBuilder.obj());
        }
        return true;
    }
} unsubscribeCmd;
} // namespace mongo
|
{"hexsha": "5a5f9798299f445e66894af43d4228e9689e84ac", "size": 19322, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/mongo/db/commands/pubsub_commands.cpp", "max_stars_repo_name": "EshaMaharishi/pubsub-1", "max_stars_repo_head_hexsha": "13cb194078ed39b00ea623db0d87df8e153e7981", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mongo/db/commands/pubsub_commands.cpp", "max_issues_repo_name": "EshaMaharishi/pubsub-1", "max_issues_repo_head_hexsha": "13cb194078ed39b00ea623db0d87df8e153e7981", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mongo/db/commands/pubsub_commands.cpp", "max_forks_repo_name": "EshaMaharishi/pubsub-1", "max_forks_repo_head_hexsha": "13cb194078ed39b00ea623db0d87df8e153e7981", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.1982942431, "max_line_length": 100, "alphanum_fraction": 0.5241175862, "num_tokens": 3732}
|
import numpy as np
from numpy.linalg import inv
from basics.base_agent import BaseAgent
class LinUCBAgent(BaseAgent):
    """Contextual bandit agent implementing the LinUCB algorithm.

    Maintains per-arm ridge-regression statistics ``A`` (design matrix)
    and ``b`` (reward-weighted contexts). Supports batched updates:
    rewards are accumulated in "oracle" copies (``A_oracle``/``b_oracle``)
    and folded into the matrices used by the policy only every
    ``batch_size`` rounds.
    """

    def __init__(self):
        super().__init__()
        self.name = "LinUCB"

    def agent_init(self, agent_info=None):
        """Configure the agent from an ``agent_info`` dict.

        Recognized keys (all optional): ``num_actions`` (default 3),
        ``alpha`` exploration weight (default 1), ``batch_size``
        (default 1), ``seed`` for the tie-breaking RNG (default None).
        """
        if agent_info is None:
            agent_info = {}
        self.num_actions = agent_info.get('num_actions', 3)
        self.alpha = agent_info.get('alpha', 1)
        self.batch_size = agent_info.get('batch_size', 1)
        # Set random seed for policy for each run
        self.policy_rand_generator = np.random.RandomState(agent_info.get("seed", None))
        self.last_action = None
        self.last_state = None
        self.num_round = None

    def agent_policy(self, observation):
        """Return the arm index with the highest UCB score for ``observation``."""
        p_t = np.zeros(self.num_actions)
        for i in range(self.num_actions):
            # Hoist the matrix inverse: the original computed inv(self.A[i])
            # twice per arm; computing it once halves the dominant cost.
            A_inv = inv(self.A[i])
            # Ridge-regression estimate of the arm's coefficient vector.
            self.theta = A_inv.dot(self.b[i])
            # get context of each arm from flattened vector of length 100
            cntx = observation
            # Mean reward estimate plus alpha-scaled confidence width; the
            # quadratic form is clamped at 0 to guard against tiny negative
            # values caused by floating-point round-off.
            p_t[i] = self.theta.T.dot(cntx) + self.alpha * np.sqrt(
                np.maximum(cntx.dot(A_inv.dot(cntx)), 0))
        # Break ties between maximal arms uniformly with the seeded generator.
        action = self.policy_rand_generator.choice(np.where(p_t == max(p_t))[0])
        return action

    def agent_start(self, observation):
        """Initialize per-arm statistics from the first observation and act."""
        # Specify feature dimension
        self.ndims = len(observation)
        self.A = np.zeros((self.num_actions, self.ndims, self.ndims))
        # Instantiate b as a 0 vector of length ndims.
        self.b = np.zeros((self.num_actions, self.ndims, 1))
        # set each A per arm as identity matrix of size ndims
        for arm in range(self.num_actions):
            self.A[arm] = np.eye(self.ndims)
        # Oracle copies accumulate updates between batch synchronizations.
        self.A_oracle = self.A.copy()
        self.b_oracle = self.b.copy()
        self.last_state = observation
        self.last_action = self.agent_policy(self.last_state)
        self.num_round = 0
        return self.last_action

    def agent_update(self, reward):
        """Fold the observed reward for the last (state, action) pair
        into the oracle statistics."""
        self.A_oracle[self.last_action] = self.A_oracle[self.last_action] + np.outer(self.last_state, self.last_state)
        self.b_oracle[self.last_action] = np.add(self.b_oracle[self.last_action].T, self.last_state * reward).reshape(self.ndims, 1)

    def agent_step(self, reward, observation):
        """Consume a reward (if any), maybe sync the batch, and choose
        the next action for ``observation``."""
        if reward is not None:
            self.agent_update(reward)
            # it is a good question whether I should increment num_round outside
            # condition or not (since theoretical result doesn't clarify this
            self.num_round += 1
            if self.num_round % self.batch_size == 0:
                self.A = self.A_oracle.copy()
                self.b = self.b_oracle.copy()
        self.last_state = observation
        self.last_action = self.agent_policy(self.last_state)
        return self.last_action

    def agent_end(self, reward):
        """Consume the terminal reward and sync the batch if due."""
        if reward is not None:
            self.agent_update(reward)
            self.num_round += 1
            if self.num_round % self.batch_size == 0:
                self.A = self.A_oracle.copy()
                self.b = self.b_oracle.copy()

    def agent_message(self, message):
        # No messages supported.
        pass

    def agent_cleanup(self):
        # No resources to release.
        pass
if __name__ == '__main__':
    # Smoke tests for LinUCBAgent. Expected actions depend on seed=1.
    agent_info = {'alpha': 2,
                  'num_actions': 4,
                  'seed': 1}
    # check initialization
    linucb = LinUCBAgent()
    linucb.agent_init(agent_info)
    print(linucb.num_actions, linucb.alpha)
    assert linucb.num_actions == 4
    assert linucb.alpha == 2
    # check policy: hand-build identity A and zero b so the policy is
    # driven purely by the exploration term.
    observation = np.array([1, 2, 5, 0])
    linucb.A = np.zeros((linucb.num_actions, len(observation), len(observation)))
    # Instantiate b as a 0 vector of length ndims.
    linucb.b = np.zeros((linucb.num_actions, len(observation), 1))
    # set each A per arm as identity matrix of size ndims
    for arm in range(linucb.num_actions):
        linucb.A[arm] = np.eye(len(observation))
    action = linucb.agent_policy(observation)
    print(action)
    assert action == 1
    # check start: agent_start must rebuild A/b and pick an action
    observation = np.array([1, 2, 5, 0])
    linucb.agent_start(observation)
    print(linucb.ndims)
    print(linucb.last_state, linucb.last_action)
    assert linucb.ndims == len(observation)
    assert np.allclose(linucb.last_state, observation)
    assert np.allclose(linucb.b, np.zeros((linucb.num_actions, len(observation), 1)))
    assert np.allclose(linucb.A, np.array([np.eye(len(observation)), np.eye(len(observation)),
                                           np.eye(len(observation)), np.eye(len(observation))]))
    assert linucb.last_action == 3
    # check step: with batch_size=1 the update is applied immediately,
    # so only the previously chosen arm (3) accumulates statistics.
    observation = np.array([5, 3, 1, 2])
    reward = 1
    action = linucb.agent_step(reward, observation)
    print(linucb.A)
    print(linucb.b)
    print(action)
    true_A = np.array([[2., 2., 5., 0.],
                       [2., 5., 10., 0.],
                       [5., 10., 26., 0.],
                       [0., 0., 0., 1.]])
    true_b = np.array([[1.],
                       [2.],
                       [5.],
                       [0.]])
    for i in range(3):
        assert np.allclose(linucb.A[i], np.eye(4))
        assert np.allclose(linucb.b[i], np.zeros((linucb.num_actions, 4, 1)))
    assert np.allclose(linucb.A[3], true_A)
    assert np.allclose(linucb.b[3], true_b)
    assert linucb.last_action == 0
    # A None reward must leave all statistics untouched.
    observation = np.array([3, 1, 3, 5])
    reward = None
    action = linucb.agent_step(reward, observation)
    print(linucb.A)
    print(linucb.b)
    print(action)
    assert np.allclose(linucb.A[3], true_A)
    assert np.allclose(linucb.b[3], true_b)
    assert action == 0
    # check batch size: with batch_size=2 the first reward only reaches
    # the oracle copies; A/b stay at their initial values until round 2.
    agent_info = {'alpha': 2,
                  'num_actions': 4,
                  'seed': 1,
                  'batch_size': 2}
    linucb = LinUCBAgent()
    linucb.agent_init(agent_info)
    observation = np.array([1, 2, 5, 0])
    linucb.agent_start(observation)
    assert linucb.num_round == 0
    assert linucb.last_action == 1
    observation = np.array([5, 3, 1, 2])
    reward = 1
    action = linucb.agent_step(reward, observation)
    assert linucb.num_round == 1
    assert np.allclose(linucb.b, np.zeros((linucb.num_actions, len(observation), 1)))
    assert np.allclose(linucb.A, np.array([np.eye(len(observation)), np.eye(len(observation)),
                                           np.eye(len(observation)), np.eye(len(observation))]))
    for i in [0, 2, 3]:
        assert np.allclose(linucb.A_oracle[i], np.eye(4))
        assert np.allclose(linucb.b_oracle[i], np.zeros((linucb.num_actions, 4, 1)))
    assert np.allclose(linucb.A_oracle[1], true_A)
    assert np.allclose(linucb.b_oracle[1], true_b)
    observation = np.array([3, 1, 3, 5])
    reward = None
    action = linucb.agent_step(reward, observation)
    # since reward is None, nothing should happen
    assert linucb.num_round == 1
    assert np.allclose(linucb.b, np.zeros((linucb.num_actions, len(observation), 1)))
    assert np.allclose(linucb.A, np.array([np.eye(len(observation)), np.eye(len(observation)),
                                           np.eye(len(observation)), np.eye(len(observation))]))
    for i in [0, 2, 3]:
        assert np.allclose(linucb.A_oracle[i], np.eye(4))
        assert np.allclose(linucb.b_oracle[i], np.zeros((linucb.num_actions, 4, 1)))
    assert np.allclose(linucb.A_oracle[1], true_A)
    assert np.allclose(linucb.b_oracle[1], true_b)
    # Second reward completes the batch: A/b must now equal the oracles.
    observation = np.array([3, 0, 2, 5])
    reward = 0
    action = linucb.agent_step(reward, observation)
    assert linucb.num_round == 2
    assert np.allclose(linucb.b, linucb.b_oracle)
    assert np.allclose(linucb.A, linucb.A_oracle)
|
{"hexsha": "39ce44ecbbdd78c4fedb80695d28731a901711de", "size": 7750, "ext": "py", "lang": "Python", "max_stars_repo_path": "CMAB/LinUCB.py", "max_stars_repo_name": "RecoHut-Stanzas/S873634", "max_stars_repo_head_hexsha": "ae67db296ada7ab31d77c51d048254c7c028620e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-01-10T10:19:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T09:35:25.000Z", "max_issues_repo_path": "CMAB/LinUCB.py", "max_issues_repo_name": "danilprov/batch-bandits", "max_issues_repo_head_hexsha": "42f0988dcc310600dd5b0131278cfe2b8fcb30f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CMAB/LinUCB.py", "max_forks_repo_name": "danilprov/batch-bandits", "max_forks_repo_head_hexsha": "42f0988dcc310600dd5b0131278cfe2b8fcb30f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2920353982, "max_line_length": 132, "alphanum_fraction": 0.6101935484, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2110}
|
[STATEMENT]
lemma is_min2_Empty[simp]: "\<not>is_min2 x {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> is_min2 x {}
[PROOF STEP]
by (auto simp: is_min2_def)
|
{"llama_tokens": 79, "file": "Priority_Search_Trees_PST_General", "length": 1}
|
#include "plugins/lasso3d/lasso3d.h"
#include <QDebug>
#include <QEvent>
#include <QKeyEvent>
#include <QAction>
#include <QGLShaderProgram>
#include <QGLBuffer>
#include <QTabWidget>
#include <QApplication>
#include <QToolBar>
#include <QVBoxLayout>
#include <QDoubleSpinBox>
#include <QLabel>
#include <QSpacerItem>
#include <QStackedWidget>
#include <QSlider>
#include <QDockWidget>
#include <boost/make_shared.hpp>
#include "model/layerlist.h"
#include "model/cloudlist.h"
#include "gui/glwidget.h"
#include "gui/flatview.h"
#include "gui/mainwindow.h"
#include "utilities/pointpicker.h"
#include "commands/select.h"
#include "pluginsystem/core.h"
// Human-readable plugin name shown in the UI.
QString Lasso3D::getName(){
    return "Lasso Tool";
}
// Plugin entry point: caches core/UI pointers, registers the toolbar
// action and menu entry, creates the (currently empty) settings panel
// and the Lasso helper object. Paired with cleanup().
void Lasso3D::initialize(Core *core){
    core_= core;
    cl_ = core_->cl_;
    ll_ = core_->ll_;
    glwidget_ = core_->mw_->glwidget_;
    flatview_ = core_->mw_->flatview_;
    mw_ = core_->mw_;
    // Fix: user-visible tooltip read "Ppolygon lasso tool".
    enable_ = new QAction(QIcon(":/images/lasso.png"), "Polygon lasso tool", 0);
    enable_->setCheckable(true);
    enable_->setChecked(false);
    is_enabled_ = false;
    // Toggling the action enables/disables the tool; enabling() tells the
    // core to end any other edit session first.
    connect(enable_, SIGNAL(triggered()), this, SLOT(enable()));
    connect(this, SIGNAL(enabling()), core_, SIGNAL(endEdit()));
    mw_->addMenu(enable_, "Edit");
    mw_->toolbar_->addAction(enable_);
    // Placeholder settings widget registered with the tool-options stack.
    settings_ = new QWidget();
    QVBoxLayout * layout = new QVBoxLayout(settings_);
    settings_->setLayout(layout);
    mw_->tooloptions_->addWidget(settings_);
    lasso_ = new Lasso();
}
// Plugin teardown: undoes initialize() — removes UI entries, breaks the
// signal connections, and frees the lasso helper.
void Lasso3D::cleanup(){
    mw_->removeMenu(enable_, "Edit");
    mw_->toolbar_->removeAction(enable_);
    disconnect(this, SIGNAL(enabling()), core_, SIGNAL(endEdit()));
    disconnect(enable_, SIGNAL(triggered()), this, SLOT(enable()));
    delete lasso_;
}
// Overlay painter for the 2D flat view: draws the in-progress lasso with
// the current mouse position as the floating endpoint.
void Lasso3D::paint2d(){
    lasso_->drawLasso(last_mouse_pos_.x(), last_mouse_pos_.y(), flatview_);
}
// Overlay painter for the 3D view; the projection/modelview arguments are
// unused because the lasso is drawn in screen space.
void Lasso3D::paint(const Eigen::Affine3f& proj, const Eigen::Affine3f& mv){
    lasso_->drawLasso(last_mouse_pos_.x(), last_mouse_pos_.y(), glwidget_);
}
// True when the 3D GL view is the currently selected tab (as opposed to
// the 2D flat view). Relies on both views living in the same QTabWidget.
bool Lasso3D::is3d(){
    QTabWidget * tabs = qobject_cast<QTabWidget *>(glwidget_->parent()->parent());
    return tabs->currentIndex() == tabs->indexOf(glwidget_);
}
// Adds one lasso vertex at the click position (in normalized screen
// coordinates of the GL widget).
bool Lasso3D::mouseClickEvent(QMouseEvent * event){
    lasso_->addScreenPoint(event->x(), event->y(), core_->mw_->glwidget_->width(), core_->mw_->glwidget_->height());
    // Test code
    // Start timing the interaction once the polygon has its second vertex.
    if(lasso_->getPolygon().size() == 2){
        timer_.restart();
        timer_.start();
    }
    action_count_++;
    return true;
}
// Double-click closes the lasso polygon: collects the indices of points
// inside it (3D projection or 2D grid lookup, depending on active view)
// and pushes a Select command onto the undo stack.
bool Lasso3D::mouseDblClickEvent(QMouseEvent *){
    if(cl_->clouds_.size() == 0){
        disable();
        return false;
    }
    // Test code
    // Record one completed invocation (a polygon needs > 2 clicks; the
    // double-click generated an extra click event that is subtracted).
    if(action_count_ > 2){
        invocations_++;
        action_count_--; // Extra click removed
        seconds_ += float(timer_.elapsed())/1000;
    }
    auto cloud = cl_->active_;
    boost::shared_ptr<std::vector<int>> selected_indices = boost::make_shared<std::vector<int>>();
    if(is3d()){
        // Project cloud points through camera * cloud transform and test
        // them against the screen-space polygon.
        auto & cam = core_->mw_->glwidget_->camera_;
        Eigen::Affine3f mv = cam.modelviewMatrix() * cloud->modelview();
        Eigen::Affine3f proj = cam.projectionMatrix();
        lasso_->getIndices(proj, mv, cloud.get(), selected_indices);
    } else {
        // 2D view: test against the flat scan-grid layout instead.
        lasso_->getIndices2D(cloud->scan_height(), flatview_->getCamera(),
                             cloud->cloudToGridMap(), selected_indices);
    }
    core_->us_->beginMacro("Lasso tool");
    // Ctrl inverts the selection mode (deselect instead of select).
    bool negative_select = QApplication::keyboardModifiers() == Qt::ControlModifier;
    core_->us_->push(new Select(cl_->active_, selected_indices, core_->mw_->deselect_ || negative_select, core_->mw_->select_mask_, true, ll_->getHiddenLabels()));
    core_->us_->endMacro();
    lasso_->clear();
    //disable();
    return true;
}
// Tracks the cursor so the open lasso edge follows the mouse; repaints
// whichever view is active.
bool Lasso3D::mouseMoveEvent(QMouseEvent * event) {
    last_mouse_pos_ << event->x(), event->y();
    if(cl_->clouds_.size() == 0) {
        disable();
        return false;
    }
    if(is3d()){
        lasso_->moveLastScreenPoint(event->x(), event->y(), core_->mw_->glwidget_);
        glwidget_->update();
    } else {
        lasso_->moveLastScreenPoint(event->x(), event->y(), core_->mw_->flatview_);
        flatview_->update();
    }
    if(event->buttons() != Qt::LeftButton)
        return false;
    // NOTE(review): this extra glwidget_->update() fires even in 2D mode
    // and duplicates the update above in 3D mode — looks redundant; confirm.
    glwidget_->update();
    return true;
}
// Records the press position so release can distinguish a click from a drag.
bool Lasso3D::mousePressEvent(QMouseEvent * event) {
    last_mouse_pos_ << event->x(), event->y();
    mouse_down_pos_ = last_mouse_pos_;
    if(event->buttons() != Qt::LeftButton)
        return false;
    if(cl_->clouds_.size() == 0){
        disable();
        return false;
    }
    return true;
}
// Treats a release within 2 px of the press position as a click (adds a
// lasso vertex); larger movements are drags and are ignored here.
bool Lasso3D::mouseReleaseEvent(QMouseEvent * event){
    last_mouse_pos_ << event->x(), event->y();
    float dist = (last_mouse_pos_ - mouse_down_pos_).norm();
    if(dist < 2){
        return mouseClickEvent(event);
    }
    return true;
}
// Toggles the tool on: resets usage counters, shows the options panel,
// hooks the paint overlays of both views and installs this object as an
// event filter on both views. Calling it while enabled disables instead.
void Lasso3D::enable() {
    if(is_enabled_){
        disable();
        return;
    }
    // Test code
    invocations_ = 0;
    action_count_ = 0;
    seconds_ = 0;
    // QTabWidget * tabs = qobject_cast<QTabWidget *>(glwidget_->parent()->parent());
    // tabs->setCurrentWidget(glwidget_);
    enable_->setChecked(true);
    mw_->options_dock_->show();
    mw_->tooloptions_->setCurrentWidget(settings_);
    lasso_->clear();
    // Asks the core to end any other edit session before we take over.
    emit enabling();
    // DirectConnection: the paint slots must run synchronously inside the
    // views' paint events.
    connect(glwidget_, SIGNAL(pluginPaint(Eigen::Affine3f, Eigen::Affine3f)),
            this, SLOT(paint(Eigen::Affine3f, Eigen::Affine3f)),
            Qt::DirectConnection);
    connect(flatview_, SIGNAL(pluginPaint()),
            this, SLOT(paint2d()),
            Qt::DirectConnection);
    glwidget_->installEventFilter(this);
    flatview_->installEventFilter(this);
    connect(core_, SIGNAL(endEdit()), this, SLOT(disable()));
    is_enabled_ = true;
}
// Toggles the tool off: appends usage statistics to lasso.txt, unchecks
// the action, and undoes every connection/filter made in enable().
void Lasso3D::disable() {
    // Test code: append "invocations, clicks, seconds, seconds-per-click".
    QFile file("lasso.txt");
    if ( file.open(QIODevice::Append) ) {
        QTextStream stream( &file );
        // Guard the average: disabling without any click would otherwise
        // divide by zero and log inf/nan.
        float avg = action_count_ > 0 ? seconds_/action_count_ : 0.0f;
        stream << invocations_ << ", " << action_count_ << ", " << seconds_ << ", " << avg << "\n";
    }
    file.flush();
    file.close();
    enable_->setChecked(false);
    disconnect(core_, SIGNAL(endEdit()), this, SLOT(disable()));
    disconnect(glwidget_, SIGNAL(pluginPaint(Eigen::Affine3f, Eigen::Affine3f)),
               this, SLOT(paint(Eigen::Affine3f, Eigen::Affine3f)));
    disconnect(flatview_, SIGNAL(pluginPaint()),
               this, SLOT(paint2d()));
    glwidget_->removeEventFilter(this);
    // Fix: the original removed the filter from glwidget_ twice and never
    // from flatview_, leaving the 2D view filtered after disable.
    flatview_->removeEventFilter(this);
    is_enabled_ = false;
}
// Routes mouse events from the filtered views to the tool's handlers.
// Returning true consumes the event; false lets the view handle it.
bool Lasso3D::eventFilter(QObject *object, QEvent *event){
    // Bypass plugin via shift
    if(QApplication::keyboardModifiers() == Qt::SHIFT || !core_->mw_->edit_mode_)
        return false;
    switch(event->type()){
    case QEvent::MouseButtonPress:
        return mousePressEvent(static_cast<QMouseEvent*>(event));
    case QEvent::MouseButtonRelease:
        return mouseReleaseEvent(static_cast<QMouseEvent*>(event));
    case QEvent::MouseMove:
        return mouseMoveEvent(static_cast<QMouseEvent*>(event));
    case QEvent::MouseButtonDblClick:
        return mouseDblClickEvent(static_cast<QMouseEvent*>(event));
    default:
        // All other event types pass through untouched.
        return false;
    }
}
Q_PLUGIN_METADATA(IID "za.co.circlingthesun.cloudclean.iplugin")
|
{"hexsha": "c6e3dc9154f6aba9b0ab679f2a557678287199f3", "size": 7280, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/plugins/lasso3d/lasso3d.cpp", "max_stars_repo_name": "circlingthesun/cloudclean", "max_stars_repo_head_hexsha": "4b9496bc3b52143c35f0ad83ee68bbc5e8aa32d5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2018-10-18T16:10:21.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-28T01:52:24.000Z", "max_issues_repo_path": "src/plugins/lasso3d/lasso3d.cpp", "max_issues_repo_name": "circlingthesun/cloudclean", "max_issues_repo_head_hexsha": "4b9496bc3b52143c35f0ad83ee68bbc5e8aa32d5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/plugins/lasso3d/lasso3d.cpp", "max_forks_repo_name": "circlingthesun/cloudclean", "max_forks_repo_head_hexsha": "4b9496bc3b52143c35f0ad83ee68bbc5e8aa32d5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2017-12-13T07:39:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-29T13:13:48.000Z", "avg_line_length": 27.680608365, "max_line_length": 163, "alphanum_fraction": 0.6450549451, "num_tokens": 1854}
|
# -*- coding:utf8 -*-
"""
This module contains visualization tools for uesgraphs
"""
import networkx as nx
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.pylab import mpl
from matplotlib.collections import LineCollection
from matplotlib import gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import random
import shapely.geometry as sg
import sys
import warnings
class Visuals(object):
"""
Visualizes a uesgraph by networkX graph drawing
Parameters
----------
uesgraph : uesgraphs.uesgraph.UESGraph object
The visualization output will be following the graph layout
specified in the input uesgraph
"""
    def __init__(self, uesgraph):
        """
        Constructor for `Visuals`

        Stores the graph to be drawn; all plot methods read node
        positions and type attributes from it.
        """
        self.uesgraph = uesgraph
def create_plot_simple(self,
ax,
scaling_factor=0.5):
"""Creates a very simple plot setup for fast performance
Parameters
----------
ax : maplotlib ax object
scaling_factor : float
Factor that scales the sized of node dots in the plot relative to
the edge widths
Returns
-------
ax : maplotlib ax object
"""
counter = 0
for street in self.uesgraph.nodelist_street:
ax.scatter(self.uesgraph.node[street]['position'].x,
self.uesgraph.node[street]['position'].y,
s=scaling_factor,
color='grey',
alpha=0.7)
for nodelist_heating in list(self.uesgraph.nodelists_heating.values()):
for heating_node in nodelist_heating:
y_offset = random.choice([0.0001, 0.0002, 0.00015])
x_offset = random.choice([0.0005, 0.00055, 0.0006, 0.00065,
0.0007])
ax.scatter(self.uesgraph.node[heating_node]['position'].x,
self.uesgraph.node[heating_node]['position'].y,
s=scaling_factor*15,
color='red',
alpha=0.7)
for edge in self.uesgraph.edges():
for node in edge:
color = 'black'
style = 'solid'
alpha = 1
linewidth=0.2
if 'street' in self.uesgraph.node[node]['node_type']:
color = 'grey'
style = 'solid'
alpha = 0.7
linewidth=1.5
break
elif 'heat' in self.uesgraph.node[node]['node_type']:
color = 'red'
style = 'solid'
linewidth=1
alpha = 1
break
elif 'cool' in self.uesgraph.node[node]['node_type']:
color = 'blue'
style = 'solid'
linewidth=1
alpha = 1
break
ax.plot([self.uesgraph.node[edge[0]]['position'].x,
self.uesgraph.node[edge[1]]['position'].x],
[self.uesgraph.node[edge[0]]['position'].y,
self.uesgraph.node[edge[1]]['position'].y],
color=color,
linewidth=linewidth,
alpha=alpha)
for building in self.uesgraph.nodelist_building:
if self.uesgraph.node[building]['position'] is not None:
if self.uesgraph.node[building][
'is_supply_heating'] is False:
ax.scatter(self.uesgraph.node[building]['position'].x,
self.uesgraph.node[building]['position'].y,
s=scaling_factor * 3,
color='green',
alpha=0.7)
else:
ax.scatter(self.uesgraph.node[building]['position'].x,
self.uesgraph.node[building]['position'].y,
s=scaling_factor * 25,
color='red',
alpha=0.7)
counter += 1
if 'proximity' in self.uesgraph.graph:
try:
poly = self.uesgraph.graph['proximity']
x, y = poly.exterior.xy
ax.plot(x, y, color='red', alpha=0.7,
linewidth=1, solid_capstyle='round', zorder=2)
except:
None
plt.tick_params(axis='both',
which='both',
bottom=False,
top=False,
labelbottom=False,
right=False,
left=False,
labelleft=False)
plt.axis('equal')
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.axis('off')
return ax
    def _place_text(self, element):
        """
        Returns a point object where to place text in a plot

        For an edge (tuple), the text is placed next to the edge's
        midpoint on a parallel offset line. For a node, the placement
        depends on its number of neighbors: with two or more, the text
        goes to the centroid between the two nearest neighbors (and a
        dashed guide line is drawn to the node); with one, it is pushed
        away from the neighbor onto a ring around the node; with none,
        it sits on the node itself.

        Parameters
        ----------
        element : int or tuple
            Node or edge identifier for the node which should be labeled with
            text

        Returns
        -------
        text_pos : shapely.geometry.Point object
            Position of the text
        """
        if sys.version_info < (3, 6):
            # min() over a dict below depends on stable iteration; older
            # interpreters may order ties differently.
            warnings.warn('The placement of elements in versions older than'
                          'Python 3.6 may differ from the 3.6 placement')
        # Offset distance scaled to 4 % of the graph's bounding diagonal
        diagonal = self.uesgraph.max_position.distance(
            self.uesgraph.min_position)
        curr_scaling = diagonal * 0.04
        if isinstance(element, tuple):
            edge = element
            pos_0 = self.uesgraph.node[edge[0]]['position']
            pos_1 = self.uesgraph.node[edge[1]]['position']
            parallel_line = sg.LineString([pos_0, pos_1]).parallel_offset(
                curr_scaling/2)
            text_pos = sg.Point(parallel_line.centroid.x,
                                parallel_line.centroid.y-curr_scaling/4.)
        else:
            node = element
            node_pos = self.uesgraph.node[node]['position']
            neighbors = list(self.uesgraph.neighbors(node))
            if len(neighbors) > 1:
                # Find 2 nearest neighbors `neighbor_0` and `neighbor_1`
                distances = {}
                for neighbor in neighbors:
                    neighbor_pos = self.uesgraph.node[neighbor]['position']
                    distances[neighbor] = neighbor_pos.distance(node_pos)
                neighbor_0 = min(distances, key=distances.get)
                del distances[neighbor_0]
                neighbor_1 = min(distances, key=distances.get)
                neighbor_0_pos = self.uesgraph.node[neighbor_0]['position']
                neighbor_1_pos = self.uesgraph.node[neighbor_1]['position']
                # Find `ref_point` between both nearest neighbors
                ref_point = sg.LineString([neighbor_0_pos,
                                           neighbor_1_pos]).centroid
                # Place text on line between `ref_point` and `node`
                # text_pos = sg.LineString([node_pos, ref_point]).interpolate(
                #     curr_scaling)
                # text_pos = sg.Point(ref_point.x - 3, ref_point.y - 3)
                text_pos = ref_point
                # Dashed guide line from label position to the node
                plt.plot([text_pos.x, node_pos.x],
                         [text_pos.y, node_pos.y],
                         '--',
                         color='black',
                         alpha=0.7)
            elif len(neighbors) == 0:
                # Isolated node: place the text on the node itself
                text_pos = self.uesgraph.node[node]['position']
            else:
                # Exactly one neighbor: mirror the neighbor through the node
                # and place the text on a ring of radius `curr_scaling`
                # around the node, away from the neighbor.
                neighbor_pos = self.uesgraph.node[neighbors[0]]['position']
                dx = node_pos.x - neighbor_pos.x
                dy = node_pos.y - neighbor_pos.y
                opposite = sg.Point(node_pos.x + dx,
                                    node_pos.y + dy)
                ring_distance = curr_scaling
                text_pos = node_pos.buffer(ring_distance).exterior.intersection(
                    sg.LineString([node_pos, opposite]))
        return text_pos
def create_plot(self,
ax,
labels=None,
show_diameters=False,
show_mass_flows=False,
label_size=7,
edge_markers=[],
node_markers=[],
add_edge_temperatures=False,
add_edge_flows=False,
directions=False,
scaling_factor=1.5,
scaling_factor_diameter=25,
):
"""Creates the plot setup, that can be shown or saved to file
Parameters
----------
ax : maplotlib ax object
labels : str
If set to `'street'`, node numbers of street nodes are shown in
plot
show_diameters : boolean
True if edges of heating networks should show the relative diameter
of the pipe, False if not
show_mass_flows : boolean
True if edges of heating networks should show the mass flow rate
through the pipe, False if not
label_size : int
Fontsize for optional labels
edge_markers : list
A list of edges that should be marked in the plot
node_markers : list
A list of nodes that should be marked in the plot
add_edge_temperatures : boolean
Plots edge temperatures on top of plot if True
add_edge_flows : boolean
Plots edge width according to flow rates in the networks if True
directions : boolean
Plots arrows for flow directions if True; If add_edge_flows is
False, these arrows will show the initial assumed flow direction.
If add_edge_flows is True, the arrows show the calculated flow
direction.
scaling_factor : float
Factor that scales the sized of node dots in the plot relative to
the edge widths
scaling_factor_diameter : float
Factor that scales the width of lines for show_diameters = True
Returns
-------
ax : maplotlib ax object
"""
assert show_diameters is False or show_mass_flows is False
if show_mass_flows is True:
mass_flow_max = 0
volume_flows = [0]
for edge in self.uesgraph.edges():
if 'mass_flow' in self.uesgraph.edges[edge[0], edge[1]]:
curr_m = abs(self.uesgraph.edges[
edge[0], edge[1]]['mass_flow'])
if curr_m > mass_flow_max:
mass_flow_max = curr_m
if 'volume_flow' in self.uesgraph.edges[edge[0], edge[1]]:
volume_flows.append(abs(self.uesgraph.edges[
edge[0], edge[1]]['volume_flow']))
volume_flow_max = max(volume_flows)
for street in self.uesgraph.nodelist_street:
draw = nx.draw_networkx_nodes(self.uesgraph,
pos=self.uesgraph.positions,
nodelist=[street],
node_size=2 * scaling_factor,
node_color='black',
linewidths=None,
alpha=0.2
)
if labels == 'street':
plt.text(self.uesgraph.node[street]['position'].x,
self.uesgraph.node[street]['position'].y,
s=str(street),
horizontalalignment='center',
fontsize=label_size)
if draw is not None:
draw.set_edgecolor('black')
for heat_network in self.uesgraph.nodelists_heating.keys():
for node in self.uesgraph.nodelists_heating[heat_network]:
draw = nx.draw_networkx_nodes(self.uesgraph,
pos=self.uesgraph.positions,
nodelist=[node],
node_color='red',
node_size=3 * scaling_factor,
linewidths=None,
alpha=0.7)
if labels == 'heat':
plt.text(self.uesgraph.node[node]['position'].x,
self.uesgraph.node[node]['position'].y,
s=str(node),
horizontalalignment='center',
fontsize=label_size)
if labels == 'name':
if 'name' in self.uesgraph.node[node]:
text_pos = self._place_text(node)
plt.text(text_pos.x,
text_pos.y,
s=str(self.uesgraph.node[node]['name']),
horizontalalignment='center',
fontsize=label_size)
if draw is not None:
draw.set_edgecolor('red')
for cool_network in self.uesgraph.nodelists_cooling.keys():
for node in self.uesgraph.nodelists_cooling[cool_network]:
draw = nx.draw_networkx_nodes(self.uesgraph,
pos=self.uesgraph.positions,
nodelist=[node],
node_color='blue',
node_size=1,
linewidths=None,
alpha=0.7)
if draw is not None:
draw.set_edgecolor('blue')
for elec_network in self.uesgraph.nodelists_electricity.keys():
for node in self.uesgraph.nodelists_electricity[elec_network]:
draw = nx.draw_networkx_nodes(self.uesgraph,
pos=self.uesgraph.positions,
nodelist=[node],
node_color='orange',
node_size=3 * scaling_factor,
linewidths=None)
if draw is not None:
draw.set_edgecolor('orange')
for gas_network in self.uesgraph.nodelists_gas.keys():
for node in self.uesgraph.nodelists_gas[gas_network]:
draw = nx.draw_networkx_nodes(self.uesgraph,
pos=self.uesgraph.positions,
nodelist=[node],
node_color='gray',
node_size=3 * scaling_factor,
linewidths=None)
if draw is not None:
draw.set_edgecolor('gray')
for other_network in self.uesgraph.nodelists_others.keys():
for node in self.uesgraph.nodelists_others[other_network]:
draw = nx.draw_networkx_nodes(self.uesgraph,
pos=self.uesgraph.positions,
nodelist=[node],
node_color='purple',
node_size=3 * scaling_factor,
linewidths=None)
if draw is not None:
draw.set_edgecolor('purple')
for building in self.uesgraph.nodelist_building:
if self.uesgraph.node[building]['position'] is not None:
if self.uesgraph.node[building]['is_supply_heating'] is True:
draw = nx.draw_networkx_nodes(self.uesgraph,
pos=self.uesgraph.positions,
nodelist=[building],
node_color='red',
node_size=90 *
scaling_factor,
linewidths=None)
if draw is not None:
draw.set_edgecolor('red')
if self.uesgraph.node[building]['is_supply_cooling'] is True:
draw = nx.draw_networkx_nodes(self.uesgraph,
pos=self.uesgraph.positions,
nodelist=[building],
node_color='blue',
node_size=60 *
scaling_factor,
linewidths=None)
if draw is not None:
draw.set_edgecolor('blue')
if self.uesgraph.node[building]['is_supply_gas'] is True:
draw = nx.draw_networkx_nodes(self.uesgraph,
pos=self.uesgraph.positions,
nodelist=[building],
node_color='gray',
node_size=40 *
scaling_factor,
linewidths=None)
if draw is not None:
draw.set_edgecolor('gray')
draw = nx.draw_networkx_nodes(self.uesgraph,
pos=self.uesgraph.positions,
nodelist=[building],
node_size=25 * scaling_factor,
node_color='green',
linewidths=None,
alpha=0.7
)
if labels == 'building':
plt.text(self.uesgraph.node[building]['position'].x,
self.uesgraph.node[building]['position'].y,
s=str(building),
horizontalalignment='center',
fontsize=label_size)
elif labels == 'name':
if 'name' in self.uesgraph.node[building]:
text_pos = self._place_text(building)
plt.text(text_pos.x,
text_pos.y,
s=self.uesgraph.node[building]['name'],
horizontalalignment='center',
fontsize=label_size)
if draw is not None:
draw.set_edgecolor('green')
if self.uesgraph.node[building][
'is_supply_electricity'] is True:
draw = nx.draw_networkx_nodes(self.uesgraph,
pos=self.uesgraph.positions,
nodelist=[building],
node_color='orange',
node_size=12 *
scaling_factor,
linewidths=None,
alpha=0.8)
if draw is not None:
draw.set_edgecolor('orange')
if self.uesgraph.node[building]['is_supply_other'] is True:
draw = nx.draw_networkx_nodes(self.uesgraph,
pos=self.uesgraph.positions,
nodelist=[building],
node_color='purple',
node_size=5 *
scaling_factor,
linewidths=None,
alpha=0.5)
if draw is not None:
draw.set_edgecolor('purple')
for edge in self.uesgraph.edges():
for node in edge:
color = 'black'
style = 'solid'
alpha = 1
if show_diameters is True:
if 'diameter' in self.uesgraph.edges[
edge[0], edge[1]]:
weight = self.uesgraph.edges[edge[0], edge[1]][
'diameter'] * scaling_factor_diameter
else:
weight = 0.01
elif show_mass_flows is True:
if 'mass_flow' in self.uesgraph.edges[
edge[0], edge[1]]:
weight = abs(self.uesgraph.edges[edge[0], edge[1]][
'mass_flow']) / mass_flow_max * 10
elif 'volume_flow' in self.uesgraph.edge[
edge[0]][edge[1]]:
weight = abs(self.uesgraph.edges[edge[0], edge[1]][
'volume_flow']) / volume_flow_max * 10
if weight < 0.5 and self.uesgraph.edges[edge[0],
edge[1]][
'volume_flow'] > 1e-9:
weight = 10.5
else:
weight = 0.01
if 'street' in self.uesgraph.node[node]['node_type']:
color = 'black'
style = 'solid'
alpha = 0.2
break
elif 'heat' in self.uesgraph.node[node]['node_type']:
color = 'red'
style = 'solid'
alpha = 0.8
break
elif 'cool' in self.uesgraph.node[node]['node_type']:
color = 'blue'
style = 'solid'
alpha = 0.8
break
elif 'elec' in self.uesgraph.node[node]['node_type']:
color = 'orange'
style = 'dotted'
alpha = 0.8
break
elif 'gas' in self.uesgraph.node[node]['node_type']:
color = 'gray'
style = 'dashdot'
alpha = 0.8
break
elif 'others' in self.uesgraph.node[node]['node_type']:
color = 'purple'
style = 'dashdot'
alpha = 0.8
break
if show_diameters is True or show_mass_flows is True:
nx.draw_networkx_edges(self.uesgraph,
pos=self.uesgraph.positions,
edgelist=[edge],
style=style,
width=weight,
edge_color=[color],
alpha=alpha)
else:
nx.draw_networkx_edges(self.uesgraph,
pos=self.uesgraph.positions,
edgelist=[edge],
style=style,
edge_color=[color],
alpha=alpha)
if labels == 'name':
if 'name' in self.uesgraph.edges[edge[0], edge[1]]:
text_pos = self._place_text(edge)
plt.text(text_pos.x,
text_pos.y,
s=self.uesgraph.edges[edge[0], edge[1]]['name'],
horizontalalignment='center',
fontsize=label_size)
if labels == 'all_nodes':
for node in self.uesgraph.nodes():
plt.text(self.uesgraph.node[node]['position'].x,
self.uesgraph.node[node]['position'].y,
s=str(node),
horizontalalignment='center',
fontsize=label_size)
if add_edge_temperatures is True or add_edge_flows is True:
self._add_edge_data(ax,
add_temperatures=add_edge_temperatures,
add_flows=add_edge_flows,
directions=directions)
for edge in edge_markers:
self._add_edge_marker(ax, edge)
if node_markers != []:
self._add_node_marker(
ax,
node_markers,
node_size=50*scaling_factor,
)
if directions is True and add_edge_flows is False:
# Plot arrows for assumed flow direction
for edge in self.uesgraph.edges():
pos_0 = self.uesgraph.node[edge[0]]['position']
pos_1 = self.uesgraph.node[edge[1]]['position']
# center = (pos_0 + pos_1) / 2
center = sg.LineString([pos_0, pos_1]).centroid
arrow_head = center.distance(pos_0) / 10
x = float(center.x)
y = float(center.y)
dx = (float(pos_1.x - pos_0.x)) / 4
dy = (float(pos_1.y - pos_0.y)) / 4
ax.arrow(x, y, dx, dy,
head_width=arrow_head, head_length=arrow_head,
linewidth=1,
fc='k', ec='k')
plt.tick_params(axis='both',
which='both',
bottom=False,
top=False,
labelbottom=False,
right=False,
left=False,
labelleft=False)
plt.axis('equal')
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.axis('off')
return ax
def create_plot_3d(self,
ax,
z_attrib='pressure',
show_flow=False,
angle=110,
label_size=20,
):
"""Creates the plot setup for a 3d view
Parameters
----------
ax : maplotlib ax object
z_attrib : str
Keyword to control which attribute of nodes will be used for
the z-coordinate
show_flow : boolean
Varies linewidth of the edges if True
angle : float
View angle for 3d plot
label_size : int
Fontsize for labels
Returns
-------
ax : maplotlib ax object
"""
if show_flow is True:
flows = []
for edge in self.uesgraph.edges():
flows.append(self.uesgraph.edge[
edge[0]][edge[1]]['volume_flow'])
min_flow = min(flows)
max_flow = max(flows)
delta_flow = max_flow - min_flow
for edge in self.uesgraph.edges():
flow = self.uesgraph.edge[edge[0]][edge[1]]['volume_flow']
weight = ((flow - min_flow) / delta_flow) * 3
print('weight', weight)
self.uesgraph.edge[edge[0]][edge[1]]['weight'] = weight + 0.1
for node in self.uesgraph.nodes():
if z_attrib in self.uesgraph.node[node]:
x = self.uesgraph.node[node]['position'].x
y = self.uesgraph.node[node]['position'].y
z = self.uesgraph.node[node][z_attrib] * 1e-5
ax.scatter(x, y, zs=z, zdir='z', c='0.5', alpha=0.5)
for edge in self.uesgraph.edges():
if (z_attrib in self.uesgraph.node[edge[0]] and
z_attrib in self.uesgraph.node[edge[1]]):
x = [self.uesgraph.node[edge[0]]['position'].x,
self.uesgraph.node[edge[1]]['position'].x]
y = [self.uesgraph.node[edge[0]]['position'].y,
self.uesgraph.node[edge[1]]['position'].y]
z = [self.uesgraph.node[edge[0]][z_attrib] * 1e-5,
self.uesgraph.node[edge[1]][z_attrib] * 1e-5]
if show_flow is False:
ax.plot(x, y, zs=z, zdir='z', ls='-', color='grey',
alpha=0.5)
else:
linewidth = self.uesgraph.edge[edge[0]][edge[1]]['weight']
ax.plot(x, y, zs=z, zdir='z', ls='-', color='grey',
alpha=0.5, linewidth=linewidth)
for node in self.uesgraph.nodes():
if 'is_supply_heating' in self.uesgraph.node[node]:
if self.uesgraph.node[node]['is_supply_heating']:
x = self.uesgraph.node[node]['position'].x
y = self.uesgraph.node[node]['position'].y
z = self.uesgraph.node[node][z_attrib] * 1e-5
ax.scatter(x, y, zs=z, zdir='z', c='red')
ax.view_init(20, angle)
ax.set_zlabel('Pressure in bar', fontsize=label_size,
labelpad=label_size*2)
ax.tick_params(labelsize=label_size, pad=label_size)
ax.set_xticklabels([])
ax.set_yticklabels([])
return ax
    def show_network(self,
                     save_as=None,
                     show_plot=True,
                     labels=None,
                     show_diameters=False,
                     show_mass_flows=False,
                     label_size=7,
                     edge_markers=[],
                     node_markers=[],
                     add_edge_temperatures=False,
                     add_edge_flows=False,
                     directions=False,
                     scaling_factor=1.5,
                     scaling_factor_diameter=25,
                     simple=False,):
        """Shows a plot of the network

        Parameters
        ----------
        save_as : str
            optional parameter, string denoting full path and file name +
            extension for saving the plot
        show_plot : boolean
            True if the plot should be shown in the current Python instance,
            False if not
        labels : str
            If set to `'street'`, node numbers of street nodes are shown in
            plot
        show_diameters : boolean
            True if edges of heating networks should show the relative diameter
            of the pipe, False if not
        show_mass_flows : boolean
            True if edges of heating networks should show the mass flow rate
            through the pipe, False if not
        label_size : int
            Fontsize for optional labels
        edge_markers : list
            A list of edges that should be marked in the plot
        node_markers : list
            A list of nodes that should be marked in the plot
        add_edge_temperatures : boolean
            Plots edge temperatures on top of plot if True
        add_edge_flows : boolean
            Plots edge width according to flow rates in the networks if True
        directions : boolean
            Plots arrows for flow directions if True; If add_edge_flows is
            False, these arrows will show the initial assumed flow direction.
            If add_edge_flows is True, the arrows show the calculated flow
            direction.
        scaling_factor : float
            Factor that scales the sized of node dots in the plot relative to
            the edge widths
        scaling_factor_diameter : float
            Factor that scales the width of lines for show_diameters = True
        simple : boolean
            For very large uesgraphs, the standard plotting may take too long
            (hours...). In these cases, `simple=True` gives faster results

        Returns
        -------
        fig : matplotlib figure object
        """
        # Scale the figure to the aspect ratio of the graph's bounding box,
        # with the longer side fixed at 20 inches
        dx = float(self.uesgraph.max_position.x - self.uesgraph.min_position.x)
        dy = float(self.uesgraph.max_position.y - self.uesgraph.min_position.y)
        # Avoid zero extents for degenerate (single-point) graphs
        if self.uesgraph.max_position.x == self.uesgraph.min_position.x:
            dx = 1
        if self.uesgraph.max_position.y == self.uesgraph.min_position.y:
            dy = 1
        if dx >= dy:
            x_size = 20
            y_size = x_size * dy/dx
        else:
            y_size = 20
            x_size = y_size * dx/dy
        plt.rcParams['figure.figsize'] = x_size, y_size
        fig = plt.figure()
        if add_edge_temperatures is True:
            # Reserve a narrow second column for the temperature colorbar
            gs = gridspec.GridSpec(1, 2,
                                   width_ratios=[20, 1])
            ax = plt.subplot(gs[0])
        else:
            ax = plt.subplot(1, 1, 1)
        if simple is False:
            ax = self.create_plot(
                ax,
                labels=labels,
                show_diameters=show_diameters,
                show_mass_flows=show_mass_flows,
                label_size=label_size,
                edge_markers=edge_markers,
                node_markers=node_markers,
                add_edge_temperatures=add_edge_temperatures,
                add_edge_flows=add_edge_flows,
                directions=directions,
                scaling_factor=scaling_factor,
                scaling_factor_diameter=scaling_factor_diameter,
            )
        else:
            ax = self.create_plot_simple(
                ax,
                scaling_factor=scaling_factor,
            )
        # Add a 5 % margin around the graph's bounding box
        margin_x = dx/20
        margin_y = dy/20
        ax.set_xlim([float(self.uesgraph.min_position.x) - margin_x,
                     float(self.uesgraph.max_position.x) + margin_x])
        ax.set_ylim([float(self.uesgraph.min_position.y) - margin_y,
                     float(self.uesgraph.max_position.y) + margin_y])
        if add_edge_temperatures is True:
            temperatures = []
            for node in self.uesgraph.nodes():
                if 'temperature_supply' in self.uesgraph.node[node]:
                    temperatures.append(self.uesgraph.node[node][
                        'temperature_supply'])
                    print(node, self.uesgraph.node[node][
                        'temperature_supply'])
            mean_temperature = np.mean(temperatures)
            std_temperatures = np.std(temperatures)
            # NOTE(review): colorbar limits are hard-coded here (and the
            # computed limits below are commented out), so the colorbar range
            # is fixed regardless of the actual network temperatures — looks
            # like a leftover from a specific case study; confirm intent.
            # Also note that `_add_edge_data` colors the edges with its own,
            # dynamically computed limits, so colorbar and edge colors may
            # disagree.
            temperature_min = 56.21334421417651
            temperature_max = 78.30040843644306
            # temperature_min = max(min(temperatures),
            #                       mean_temperature - 2 * std_temperatures)
            # temperature_max = min(max(temperatures),
            #                       mean_temperature + 2 * std_temperatures)
            print('mean_temperature', mean_temperature)
            print('std_temperatures', std_temperatures)
            print('temperature_min for colormap', temperature_min)
            print('temperature_max for colormap', temperature_max)
            # Draw the colorbar into the narrow second gridspec column
            ax1 = plt.subplot(gs[1])
            norm = mpl.colors.Normalize(vmin=temperature_min,
                                        vmax=temperature_max)
            cb1 = mpl.colorbar.ColorbarBase(ax1,
                                            cmap=plt.get_cmap('viridis'),
                                            norm=norm,
                                            orientation='vertical'
                                            )
            cb1.ax.set_ylabel(u'Temperature in °C', labelpad=15)
            text = cb1.ax.yaxis.label
            font = matplotlib.font_manager.FontProperties(size=label_size)
            text.set_font_properties(font)
            cb1.ax.tick_params(labelsize=label_size)
            # The following work-around tries to make sure that the
            # ticklabels are not obscured by some strange offset behaviour
            # ticklabels = [float(item) for item in
            #               cb1.get_ticks()]
            # # Calculate new ticklabels
            # dT = temperature_max - temperature_min
            # step = dT / (len(ticklabels) + 1)
            # new_ticklabels = []
            # for i in range(len(ticklabels)):
            #     base_temperature = temperature_min
            #     if temperature_min - 273.15 > 0:
            #         base_temperature -= 273.15
            #     if step > 1:
            #         decimals = 0
            #     elif step > 0.1:
            #         decimals = 1
            #     else:
            #         decimals = 2
            #     new_ticklabels.append(round(base_temperature+step*(i+1),
            #                                 decimals))
            # cb1.ax.set_yticklabels(new_ticklabels)
        if save_as is not None:
            plt.savefig(save_as, bbox_inches='tight', dpi=150)
            plt.close()
        if show_plot is True:
            plt.show()
        return fig
def show_3d(self,
save_as=None,
show_plot=True,
show_flow=False,
z_attrib='pressure',
angle=110,
label_size=20,
):
"""Shows an explosion plot of stacked networks in 3d view
Parameters
----------
save_as : str
optional parameter, string denoting full path and file name +
extension for saving the plot
show_plot : boolean
True if the plot should be shown in the current Python instance,
False if not
show_flow : boolean
Varies linewidth of the edges if True
angle : float
View angle for 3d plot
label_size : int
Fontsize for optional labels
"""
plt.rcParams['figure.figsize'] = 10, 10
fig = plt.figure()
ax = plt.subplot(1, 1, 1, projection='3d')
ax = self.create_plot_3d(ax, z_attrib=z_attrib, show_flow=show_flow,
angle=angle, label_size=label_size)
plt.tight_layout()
if save_as is not None:
# plt.savefig(save_as, bbox_inches='tight')
plt.savefig(save_as)
plt.close()
if show_plot is True:
plt.show()
return fig
def network_explosion(self,
save_as=None,
show_plot=True,
angle=250,
networks=['all'],
scaling_factor=1.5,
dotted_lines=True):
"""Shows a plot of the network in 3d view
Parameters
----------
save_as : str
optional parameter, string denoting full path and file name +
extension for saving the plot
show_plot : boolean
True if the plot should be shown in the current Python instance,
False if not
angle : float
View angle for 3d plot
networks : list
Instead of ['all'], the networks list can specify which networks
should be plotted. Accepted items are {'heating', 'cooling',
'electricity', 'gas', 'others'}
scaling_factor : float
Factor that scales the sized of node dots in the plot relative to
the edge widths
dotted_lines : boolean
Optional dotted lines between different levels of network
explosion if set to True
"""
plt.rcParams['figure.figsize'] = 15, 15
level_counter = 0
z_step = 1
fig = plt.figure()
ax = plt.subplot(1, 1, 1, projection='3d')
# Extract all necessary subgraphs
building_graph = self.uesgraph.create_subgraphs(None,
all_buildings=False,
streets=True)[
'default']
heating_graphs = self.uesgraph.create_subgraphs('heating',
all_buildings=False)
cooling_graphs = self.uesgraph.create_subgraphs('cooling',
all_buildings=False)
electricity_graphs = self.uesgraph.create_subgraphs('electricity',
all_buildings=False)
gas_graphs = self.uesgraph.create_subgraphs('gas',
all_buildings=False)
other_graphs = self.uesgraph.create_subgraphs('others',
all_buildings=False)
# Add first layer for whole uesgraph
for node in self.uesgraph.nodelist_building:
x = self.uesgraph.node[node]['position'].x
y = self.uesgraph.node[node]['position'].y
z = level_counter
if self.uesgraph.node[node]['is_supply_heating'] is True:
ax.scatter(x, y, zs=z, zdir='z',
c='red', edgecolors='red',
s=scaling_factor*2.5,
alpha=0.8,
depthshade=False)
ax.scatter(x, y, zs=z, zdir='z',
c='green', edgecolors='green',
s=scaling_factor*0.7,
alpha=0.7,
depthshade=False)
elif self.uesgraph.node[node]['is_supply_cooling'] is True:
ax.scatter(x, y, zs=z, zdir='z',
c='blue', edgecolors='blue',
s=scaling_factor*2.5,
alpha=0.8,
depthshade=False)
ax.scatter(x, y, zs=z, zdir='z',
c='green', edgecolors='green',
s=scaling_factor*0.7,
alpha=0.7,
depthshade=False)
elif self.uesgraph.node[node]['is_supply_electricity'] is True:
ax.scatter(x, y, zs=z, zdir='z',
c='orange', edgecolors='orange',
s=scaling_factor*2.5,
alpha=0.8,
depthshade=False)
ax.scatter(x, y, zs=z, zdir='z',
c='green', edgecolors='green',
s=scaling_factor*0.7,
alpha=0.7,
depthshade=False)
elif self.uesgraph.node[node]['is_supply_gas'] is True:
ax.scatter(x, y, zs=z, zdir='z',
c='grey', edgecolors='grey',
s=scaling_factor*2.5,
alpha=0.8,
depthshade=False)
ax.scatter(x, y, zs=z, zdir='z',
c='green', edgecolors='green',
s=scaling_factor*0.7,
alpha=0.7,
depthshade=False)
elif self.uesgraph.node[node]['is_supply_other'] is True:
ax.scatter(x, y, zs=z, zdir='z',
c='purple', edgecolors='purple',
s=scaling_factor*2.5,
alpha=0.8,
depthshade=False)
ax.scatter(x, y, zs=z, zdir='z',
c='green', edgecolors='green',
s=scaling_factor*0.7,
alpha=0.7,
depthshade=False)
else:
ax.scatter(x, y, zs=z, zdir='z',
c='green', edgecolors='green',
s=scaling_factor,
alpha=0.8,
depthshade=False)
for edge in building_graph.edges():
x = [self.uesgraph.node[edge[0]]['position'].x,
self.uesgraph.node[edge[1]]['position'].x]
y = [self.uesgraph.node[edge[0]]['position'].y,
self.uesgraph.node[edge[1]]['position'].y]
z = [level_counter, level_counter]
ax.plot(x, y, zs=z, zdir='z', ls='-', color='grey',
alpha=0.2, linewidth=2)
for heating_graph in heating_graphs.values():
for edge in heating_graph.edges():
x = [self.uesgraph.node[edge[0]]['position'].x,
self.uesgraph.node[edge[1]]['position'].x]
y = [self.uesgraph.node[edge[0]]['position'].y,
self.uesgraph.node[edge[1]]['position'].y]
z = [level_counter, level_counter]
ax.plot(x, y, zs=z, zdir='z', ls='-', color='red',
alpha=0.5, linewidth=2)
for cooling_graph in cooling_graphs.values():
for edge in cooling_graph.edges():
x = [self.uesgraph.node[edge[0]]['position'].x,
self.uesgraph.node[edge[1]]['position'].x]
y = [self.uesgraph.node[edge[0]]['position'].y,
self.uesgraph.node[edge[1]]['position'].y]
z = [level_counter, level_counter]
ax.plot(x, y, zs=z, zdir='z', ls='-', color='blue',
alpha=0.5, linewidth=2)
for electricity_graph in electricity_graphs.values():
for edge in electricity_graph.edges():
x = [self.uesgraph.node[edge[0]]['position'].x,
self.uesgraph.node[edge[1]]['position'].x]
y = [self.uesgraph.node[edge[0]]['position'].y,
self.uesgraph.node[edge[1]]['position'].y]
z = [level_counter, level_counter]
ax.plot(x, y, zs=z, zdir='z', ls='-', color='orange',
alpha=0.5, linewidth=2)
for gas_graph in gas_graphs.values():
for edge in gas_graph.edges():
x = [self.uesgraph.node[edge[0]]['position'].x,
self.uesgraph.node[edge[1]]['position'].x]
y = [self.uesgraph.node[edge[0]]['position'].y,
self.uesgraph.node[edge[1]]['position'].y]
z = [level_counter, level_counter]
ax.plot(x, y, zs=z, zdir='z', ls='-', color='grey',
alpha=0.5, linewidth=2)
for other_graph in other_graphs.values():
for edge in other_graph.edges():
x = [self.uesgraph.node[edge[0]]['position'].x,
self.uesgraph.node[edge[1]]['position'].x]
y = [self.uesgraph.node[edge[0]]['position'].y,
self.uesgraph.node[edge[1]]['position'].y]
z = [level_counter, level_counter]
ax.plot(x, y, zs=z, zdir='z', ls='-', color='purple',
alpha=0.5, linewidth=2)
level_counter += z_step
# Add layer for heating networks
if 'all' in networks or 'heating' in networks:
if len(heating_graphs[list(heating_graphs.keys())[0]].nodes()) > 0:
ax = self._add_network_layer_3d(ax, 'heating',
level_counter,
scaling_factor,
dotted_lines=dotted_lines)
level_counter += z_step
# Add layer for cooling networks
if 'all' in networks or 'cooling' in networks:
if len(cooling_graphs[list(cooling_graphs.keys())[0]].nodes()) > 0:
ax = self._add_network_layer_3d(ax, 'cooling',
level_counter,
scaling_factor,
dotted_lines=dotted_lines)
level_counter += z_step
# Add layer for electricity networks
if 'all' in networks or 'electricity' in networks:
if electricity_graphs != {}:
ax = self._add_network_layer_3d(ax, 'electricity',
level_counter,
scaling_factor,
dotted_lines=dotted_lines)
level_counter += z_step
# Add layer for gas networks
if 'all' in networks or 'gas' in networks:
if gas_graphs != {}:
ax = self._add_network_layer_3d(ax, 'gas',
level_counter,
scaling_factor,
dotted_lines=dotted_lines)
level_counter += z_step
# Add layer for other networks
if 'all' in networks or 'others' in networks:
if other_graphs != {}:
ax = self._add_network_layer_3d(ax, 'others',
level_counter,
scaling_factor,
dotted_lines=dotted_lines)
level_counter += z_step
ax.view_init(20, angle)
if level_counter > 1:
ax.set_zlim([0.5, level_counter-0.5])
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
ax.set_axis_off()
if save_as is not None:
plt.tight_layout()
plt.savefig(save_as, bbox_inches='tight')
plt.close()
if show_plot is True:
plt.tight_layout()
plt.show()
return fig
def _add_network_layer_3d(self, ax, network_type, z_level,
scaling_factor, dotted_lines, streets=False):
"""Adds network of `network_type` to `z_level` of the plot in `ax`
Parameters
----------
ax : maplotlib ax object
network_type : str
Specifies the type of the destination network as {'heating',
'cooling', 'electricity', 'gas', 'others'}
z_level : float
z-coordinate of new network layer
scaling_factor : float
Factor that scales the sized of node dots in the plot relative to
the edge widths
dotted_lines : boolean
Optional dotted lines between different levels of network
explosion if set to True
streets : boolean
Adds street edges to network layer representation if True
Returns
-------
ax : maplotlib ax object
"""
building_graph = self.uesgraph.create_subgraphs(None,
all_buildings=False,
streets=True)[
'default']
graph_dict = self.uesgraph.create_subgraphs(network_type,
all_buildings=False)
if network_type == 'heating':
network_color = 'red'
elif network_type == 'cooling':
network_color = 'blue'
elif network_type == 'electricity':
network_color = 'orange'
elif network_type == 'gas':
network_color = 'grey'
elif network_type == 'others':
network_color = 'purple'
network_type = 'other'
for subgraph in graph_dict.values():
if streets is True:
for edge in building_graph.edges():
x = [self.uesgraph.node[edge[0]]['position'].x,
self.uesgraph.node[edge[1]]['position'].x]
y = [self.uesgraph.node[edge[0]]['position'].y,
self.uesgraph.node[edge[1]]['position'].y]
z = [z_level, z_level]
ax.plot(x, y, zs=z, zdir='z', ls='-', color='grey',
alpha=0.2, linewidth=2)
for edge in subgraph.edges():
x = [self.uesgraph.node[edge[0]]['position'].x,
self.uesgraph.node[edge[1]]['position'].x]
y = [self.uesgraph.node[edge[0]]['position'].y,
self.uesgraph.node[edge[1]]['position'].y]
z = [z_level, z_level]
ax.plot(x, y, zs=z, zdir='z', ls='-', color=network_color,
alpha=0.5, linewidth=2)
for node in subgraph.nodes():
x = self.uesgraph.node[node]['position'].x
y = self.uesgraph.node[node]['position'].y
z = z_level
if 'is_supply_other' in self.uesgraph.node[node]:
if self.uesgraph.node[node]['is_supply_' + network_type]:
ax.scatter(x, y, zs=z, zdir='z',
c=network_color, edgecolors=network_color,
s=scaling_factor*2.5,
alpha=0.8,
depthshade=False)
ax.scatter(x, y, zs=z, zdir='z',
c='green', edgecolors='green',
s=scaling_factor*0.7,
alpha=0.7,
depthshade=False)
if dotted_lines is True:
x = [x, x]
y = [y, y]
z = [0, z_level]
ax.plot(x, y, zs=z, zdir='z', ls='dotted',
color=network_color,
alpha=0.7, linewidth=2)
else:
ax.scatter(x, y, zs=z, zdir='z',
c='green', edgecolors='green',
s=scaling_factor,
alpha=0.7,
depthshade=False)
if dotted_lines is True:
x = [x, x]
y = [y, y]
z = [0, z_level]
ax.plot(x, y, zs=z, zdir='z', ls='dotted',
color='green',
alpha=0.4, linewidth=2)
else:
ax.scatter(x, y, zs=z, zdir='z',
c=network_color, edgecolors=network_color,
s=scaling_factor*0.5,
alpha=0.7,
depthshade=False)
return ax
def _add_node_marker(self, ax, nodelist, node_size=5, color='orange'):
"""Adds a special node marker to the building at `node_number`
Parameters
----------
ax : matplotlib ax object
Marker will be added to this ax object.
`uesgraphVis.create_plot(ax)` should be run on this ax beforehand.
nodelist : list
A list of node numbers
node_size : float
Size of the node marker
color : str
Color of the node marker
Returns
-------
ax : maplotlib ax object
"""
for building in nodelist:
if self.uesgraph.node[building]['position'] is not None:
ax.scatter(self.uesgraph.node[building]['position'].x,
self.uesgraph.node[building]['position'].y,
s=node_size,
color=color,
alpha=0.7)
return ax
def _add_edge_marker(self, ax, edge, color='orange'):
"""Adds a special edge marker
Parameters
----------
ax : matplotlib ax object
Marker will be added to this ax object.
`uesgraphVis.create_plot(ax)` should be run on this ax beforehand.
edge : list
A list of format [edge_0, edge_1]
Returns
-------
ax : maplotlib ax object
"""
nx.draw_networkx_edges(self.uesgraph,
pos=self.uesgraph.positions,
edgelist=[edge],
edge_color=color,
linewidths=None,
)
return ax
def _add_edge_data(self, ax, add_temperatures, add_flows, directions):
"""Plots temperatures and/ or mass flows on top of a network plot
Parameters
----------
ax : matplotlib ax handle
Plot additions will be made to ax
add_temperatures : boolean
If True, adds temperature data by colormapping edge colors
add_flows : boolean
If True, varies line thickness according to edge flows
directions : boolean
Plots arrows for flow directions if True;
If add_edge_flows is True, the arrows show the calculated flow
direction.
"""
scaling = 3
if add_temperatures is True:
temperatures = []
for node in self.uesgraph.nodes():
if 'temperature_supply' in self.uesgraph.node[node]:
temperatures.append(self.uesgraph.node[node][
'temperature_supply'])
mean_temperature = np.mean(temperatures)
std_temperatures = np.std(temperatures)
temperature_min = max(min(temperatures),
mean_temperature - 2 * std_temperatures)
temperature_max = min(max(temperatures),
mean_temperature + 2 * std_temperatures)
print('temperature_min', temperature_min)
print('temperature_max', temperature_max)
if add_flows is True:
mass_flows = []
for edge in self.uesgraph.edges():
mass_flows.append(self.uesgraph.edges[edge[0], edge[1]][
'mass_flow'])
mass_flow_max = max(mass_flows)
for edge in self.uesgraph.edges():
start = self.uesgraph.node[edge[0]]['position']
end = self.uesgraph.node[edge[1]]['position']
delta = start.distance(end)
line = sg.LineString([start, end])
T_added = False
if add_temperatures is True:
if 'temperature_supply' in self.uesgraph.node[
edge[0]] and 'temperature_supply' in \
self.uesgraph.node[edge[1]]:
if len(self.uesgraph.edges()) < 25:
discretization = 100
else:
discretization = 20
T1 = self.uesgraph.node[edge[0]]['temperature_supply']
T2 = self.uesgraph.node[edge[1]]['temperature_supply']
T_added = True
if T_added is False:
discretization = 2
T1 = 367
T2 = 367
flow_added = False
if add_flows is True:
if 'mass_flow' in self.uesgraph.edges[edge[0], edge[1]]:
mass_flow = self.uesgraph.edges[edge[0], edge[1]][
'mass_flow']
linewidth = 1 + 4 * abs(mass_flow)/mass_flow_max
flow_added = True
if flow_added is False:
linewidth = 1
t = np.linspace(0, 1, discretization)
x = []
y = []
for i in t:
here = line.interpolate(delta*i)
x.append(float(here.x))
y.append(float(here.y))
t = np.linspace(T1, T2, discretization)
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
if add_temperatures is True:
lc = LineCollection(segments, cmap=plt.get_cmap('viridis'),
norm=plt.Normalize(temperature_min,
temperature_max))
lc.set_array(t)
else:
colors = [matplotlib.colors.colorConverter.to_rgba('r')]
print('colors', colors)
lc = LineCollection(segments, colors=colors)
lc.set_linewidth(linewidth*scaling)
ax.add_collection(lc)
if directions is True and add_flows is True:
# Plot arrows for assumed flow direction
for edge in self.uesgraph.edges():
mass_flow = self.uesgraph.edge[edge[0]][edge[1]][
'mass_flow']
if mass_flow > 0:
pos_0 = self.uesgraph.node[edge[0]]['position']
pos_1 = self.uesgraph.node[edge[1]]['position']
else:
pos_0 = self.uesgraph.node[edge[1]]['position']
pos_1 = self.uesgraph.node[edge[0]]['position']
# center = (pos_0 + pos_1) / 2
center = sg.LineString([pos_0, pos_1]).centroid
x = float(center.x)
y = float(center.y)
dx = (float(pos_1.x - pos_0.x)) / 4
dy = (float(pos_1.y - pos_0.y)) / 4
ax.arrow(x, y, dx, dy,
head_width=5, head_length=5, fc='k', ec='k')
if 'problems' in self.uesgraph.graph:
for node in self.uesgraph.graph['problems']:
ax.scatter(self.uesgraph.node[node]['position'].x,
self.uesgraph.node[node]['position'].y,
s=40,
color='blue',
alpha=0.7)
ax.text(self.uesgraph.node[node]['position'].x,
self.uesgraph.node[node]['position'].y,
s=str(node),
fontsize=4)
|
{"hexsha": "9735fd9e83b2812107e4a8aa0406c98c02dcf095", "size": 63342, "ext": "py", "lang": "Python", "max_stars_repo_path": "uesgraphs/visuals.py", "max_stars_repo_name": "RWTH-EBC/uesgraphs", "max_stars_repo_head_hexsha": "498fc57cd251dc93f51279275a965d77775e68b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2018-05-23T19:42:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T10:55:55.000Z", "max_issues_repo_path": "uesgraphs/visuals.py", "max_issues_repo_name": "RWTH-EBC/uesgraphs", "max_issues_repo_head_hexsha": "498fc57cd251dc93f51279275a965d77775e68b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2018-05-27T15:32:39.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-03T18:51:21.000Z", "max_forks_repo_path": "uesgraphs/visuals.py", "max_forks_repo_name": "RWTH-EBC/uesgraphs", "max_forks_repo_head_hexsha": "498fc57cd251dc93f51279275a965d77775e68b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-10-23T14:00:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-25T23:03:14.000Z", "avg_line_length": 43.4146675805, "max_line_length": 80, "alphanum_fraction": 0.4570427205, "include": true, "reason": "import numpy,import networkx", "num_tokens": 12232}
|
#!/usr/bin/python3
# https://matplotlib.org/examples/pylab_examples/contourf_demo.html
import numpy as np
from matplotlib import gridspec
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.colors import LogNorm
from skimage.measure import block_reduce
from astropy.io import fits
# Eventually I want to get propert C* WCS keywords into headers
from ReduceCorObs import plate_scale
from read_ap import ADU2R_adjust
# Number of images to plot
ims = 1
# Draw the square photometric-aperture outlines on top of the image(s)
apertures = True
#apertures = False
origin = 'lower'
# Stretch limits for the logarithmic color scale used in plot_ims()
vmin = 10
vmax = 3000
if ims == 1:
    # Single-panel figure: light binning, thicker aperture outlines
    block_size = 2
    binning = 2
    linewidth = 2
else:
    # Increased from 4 to 5 to make smaller for arXiv e-print
    block_size = 4
    binning = 4
    linewidth = 1
    # Pamela wants hi-res
    # NOTE(review): these assignments override the arXiv values just above,
    # so the two-panel case always ends up with block_size=2, binning=2
    block_size = 2
    binning = 2
    linewidth = 1
# https://scipy-cookbook.readthedocs.io/items/Rebinning.html
def rebin(a, newshape):
    '''Rebin an array to a new shape.

    Each output cell takes the value of the source cell at the floored
    fractional coordinate, so newshape need not divide a.shape evenly.
    '''
    assert len(a.shape) == len(newshape)
    # One fractional-step slice per axis; mgrid expands them to coordinates.
    axes = [slice(0, old, float(old) / new) for old, new in zip(a.shape, newshape)]
    grid = np.mgrid[axes]
    # Truncating to int picks the biggest smaller integer index per axis.
    return a[tuple(grid.astype('i'))]
def rebin_factor( a, newshape ):
    '''Rebin an array to a new shape by pure striding.

    newshape must be a factor of a.shape along every axis.
    '''
    assert len(a.shape) == len(newshape)
    # np.any replaces np.sometrue, which was removed in NumPy 2.0.
    assert not np.any(np.mod(a.shape, newshape))
    # Integer floor division: Python 3 '/' yields float slice steps,
    # which are rejected when used as indices.
    slices = [slice(None, None, old // new) for old, new in zip(a.shape, newshape)]
    # Index with a tuple: a bare list of slices is no longer valid indexing.
    return a[tuple(slices)]
def plot_ims(ims):
    """Plot one or two panels of reduced IoIO sodium images in Rj coordinates.

    ims == 1: single panel with a colorbar; the figure is shown and the
    function returns early.  ims == 2: two side-by-side panels; the figure
    is saved to 'Na_Ims_transparent.png' and shown.

    Uses the module-level settings (block_size, binning, linewidth, vmin,
    vmax, apertures) and plate_scale / ADU2R_adjust from project imports.
    """
    if ims == 2:
        fig = plt.figure(figsize=(6, 2.6))
        gs = gridspec.GridSpec(1, 2, width_ratios=[1,1.24])
    else:
        fig = plt.figure()
        gs = gridspec.GridSpec(1, 1)
    ax = plt.subplot(gs[0])
    # Square apertures, 15/30/40/50 Rj on a side, centered on the origin
    rect15 = patches.Rectangle((-7.5,-7.5),15,15,linewidth=linewidth,edgecolor='C0',facecolor='none')
    rect30 = patches.Rectangle((-15,-15),30,30,linewidth=linewidth,edgecolor='C1',facecolor='none')
    rect40 = patches.Rectangle((-20,-20),40,40,linewidth=linewidth,edgecolor='C2',facecolor='none')
    rect50 = patches.Rectangle((-25,-25),50,50,linewidth=linewidth,edgecolor='C2',facecolor='none')
    # These are less bright in background than 2018-03-02
    #fname = '/data/io/IoIO/reduced/2018-02-27/IPT_Na_R_043r.fits'
    fname = '/data/io/IoIO/reduced/2018-02-27/IPT_Na_R_003r.fits'
    # Like this but maybe too much uniform background
    #fname = '/data/io/IoIO/reduced/2018-03-02/IPT_Na_R_040r.fits'
    #fname = '/data/io/IoIO/noCrashPlan/reduced.previous_versions/sent_to_coauthors/2018-02-27/IPT_Na_R_060r.fits'
    # Go through more images on this date
    # Way too bright
    #fname = '/data/io/IoIO/reduced/2018-03-02/IPT_Na_R_003r.fits'
    # Wow! Beautiful extended structure, but bright background
    #fname = '/data/io/IoIO/reduced/2018-03-02/IPT_Na_R_020r.fits'
    #fname = '/data/io/IoIO/reduced/2018-03-02/IPT_Na_R_023r.fits'
    #fname = '/data/io/IoIO/reduced/2018-03-02/IPT_Na_R_020r.fits'
    #fname = '/data/io/IoIO/reduced/2018-03-02/IPT_Na_R_040r.fits'
    #fname = '/data/io/IoIO/reduced/2018-03-02/IPT_Na_R_056r.fits'
    # Background getting brighter
    #fname = '/data/io/IoIO/reduced/2018-03-02/IPT_Na_R_072r.fits'
    #fname = '/data/io/IoIO/reduced/2018-03-02/IPT_Na_R_089r.fits'
    # --> Find a way to grep this out of the header to avoid errors
    with fits.open(fname) as HDUList:
        header = HDUList[0].header
        plt.title(header['DATE-OBS'].split('T')[0])
        im = HDUList[0].data
    # Scale to surface brightness (R, per the colorbar label) using the
    # adjustment factor imported from read_ap
    im = im * ADU2R_adjust
    # Downsample: median block-reduce, then nearest-neighbor rebin
    im = block_reduce(im, block_size=(block_size, block_size), func=np.median)
    im = rebin(im, np.asarray(im.shape)/binning)
    # Replace negative pixels with 1 so the log color scale can render them
    badc = np.where(im < 0)
    im[badc] = 1
    # Jupiter radii per binned pixel, from the angular diameter in the header
    Rjpix = header['ANGDIAM']/2/plate_scale / (block_size*binning) # arcsec / (arcsec/pix) / (pix/bin)
    nr, nc = im.shape
    # Axis coordinates in Rj, centered on the image
    x = (np.arange(nc) - nc/2) / Rjpix
    y = (np.arange(nr) - nr/2) / Rjpix
    X, Y = np.meshgrid(x, y)
    #plt.pcolormesh(X, Y, im, norm=LogNorm(vmin=vmin, vmax=vmax), cmap='YlOrRd')
    plt.pcolormesh(X, Y, im, norm=LogNorm(vmin=vmin, vmax=vmax), cmap='gist_heat')
    if apertures:
        ax.add_patch(rect15)
        ax.add_patch(rect30)
        ax.add_patch(rect40)
        ax.add_patch(rect50)
    plt.ylabel('Rj')
    plt.xlabel('Rj')
    # https://stackoverflow.com/questions/2934878/matplotlib-pyplot-preserve-aspect-ratio-of-the-plot
    plt.axis('scaled')
    #cbar = plt.colorbar()
    #cbar.ax.set_ylabel('Surface brightness (approx. R)')
    if ims == 1:
        # Single-panel mode: add the colorbar, display, and stop here
        cbar = plt.colorbar()
        cbar.ax.set_ylabel('Surface brightness (R)')
        plt.show()
        # NOTE(review): returns an empty tuple rather than None; callers
        # ignore the return value
        return ()
    # Second panel (ims == 2): same processing for a 2018-06-12 image
    ax = plt.subplot(gs[1])
    rect15 = patches.Rectangle((-7.5,-7.5),15,15,linewidth=linewidth,edgecolor='C0',facecolor='none')
    rect30 = patches.Rectangle((-15,-15),30,30,linewidth=linewidth,edgecolor='C1',facecolor='none')
    rect40 = patches.Rectangle((-20,-20),40,40,linewidth=linewidth,edgecolor='C2',facecolor='none')
    rect50 = patches.Rectangle((-25,-25),50,50,linewidth=linewidth,edgecolor='C2',facecolor='none')
    # Was using this one
    fname = '/data/io/IoIO/reduced/2018-06-12/Na_on-band_005r.fits'
    # Too bright
    #fname = '/data/io/IoIO/reduced/2018-06-12/Na_on-band_001r.fits'
    # Better
    #fname = '/data/io/IoIO/reduced/2018-06-12/Na_on-band_002r.fits'
    # This looks pretty good
    fname = '/data/io/IoIO/reduced/2018-06-12/Na_on-band_003r.fits'
    #fname = '/data/io/IoIO/reduced/2018-06-12/Na_on-band_004r.fits'
    #fname = '/data/io/IoIO/reduced/2018-06-12/Na_on-band_005r.fits'
    # Oversubtracted
    #fname = '/data/io/IoIO/reduced/2018-06-12/Na_on-band_006r.fits'
    #fname = '/data/io/IoIO/reduced/2018-06-12/Na_on-band_007r.fits'
    # OK
    #fname = '/data/io/IoIO/reduced/2018-06-12/Na_on-band_008r.fits'
    # Ugly
    #fname = '/data/io/IoIO/reduced/2018-06-12/Na_on-band_010r.fits'
    #fname = '/data/io/IoIO/noCrashPlan/reduced.previous_versions/sent_to_coauthors/2018-06-12/Na_on-band_005r.fits'
    with fits.open(fname) as HDUList:
        header = HDUList[0].header
        plt.title(header['DATE-OBS'].split('T')[0])
        im = HDUList[0].data
    im = im * ADU2R_adjust
    im = block_reduce(im, block_size=(block_size, block_size), func=np.median)
    im = rebin(im, np.asarray(im.shape)/binning)
    badc = np.where(im < 0)
    im[badc] = 1
    Rjpix = header['ANGDIAM']/2/plate_scale / (block_size*binning) # arcsec / (arcsec/pix) / (pix/bin)
    nr, nc = im.shape
    x = (np.arange(nc) - nc/2) / Rjpix
    y = (np.arange(nr) - nr/2) / Rjpix
    X, Y = np.meshgrid(x, y)
    #plt.pcolormesh(X, Y, im, norm=LogNorm(vmin=vmin, vmax=vmax), cmap='YlOrRd')
    plt.pcolormesh(X, Y, im, norm=LogNorm(vmin=vmin, vmax=vmax), cmap='gist_heat')
    if apertures:
        ax.add_patch(rect15)
        ax.add_patch(rect30)
        ax.add_patch(rect40)
        ax.add_patch(rect50)
    plt.xlabel('Rj')
    plt.axis('scaled')
    cbar = plt.colorbar()
    cbar.ax.set_ylabel('Surface brightness (R)')
    #badc = np.where(np.logical_or(im < 10, im > chop))
    #im[badc] = 0
    # https://matplotlib.org/examples/pylab_examples/pcolor_log.html
    #plt.pcolor(X, Y, im, norm=LogNorm(vmin=im.min(), vmax=im.max()), cmap='PuBu_r')
    #plt.subplot(2, 1, 1)
    #plt.pcolormesh(X, Y, im, norm=LogNorm(vmin=1, vmax=5000), cmap='hsv')
    #plt.pcolormesh(X, Y, im, norm=LogNorm(vmin=1, vmax=5000), cmap='autumn')
    #plt.subplot(2, 1, 2)
    #plt.pcolor(X, Y, im, norm=LogNorm(vmin=0, vmax=8000), cmap='PuBu_r')
    #plt.colorbar()
    # Transparent background so the figure can be embedded on slides/posters
    plt.savefig('Na_Ims_transparent.png', transparent=True)
    plt.show()
plot_ims(ims)
# # https://matplotlib.org/gallery/images_contours_and_fields/contourf_log.html#sphx-glr-gallery-images-contours-and-fields-contourf-log-py
#
# # Automatic selection of levels works; setting the
# # log locator tells contourf to use a log scale:
# fig, ax = plt.subplots()
#
# CS = ax.contourf(X, Y, im,
# locator=ticker.LogLocator(),
# cmap=plt.cm.viridis,
# origin=origin)
#
# #CS = plt.contourf(X, Y, im, 10,
# # #[-1, -0.1, 0, 0.1],
# # #alpha=0.5,
# # cmap=plt.cm.viridis,
# # origin=origin)
#
# cbar = plt.colorbar(CS)
# cbar.ax.set_ylabel('Surface brightness (approx. R)')
#plt.figure()
#plt.show()
#import numpy as np
#import matplotlib.pyplot as plt
#
#
#delta = 0.025
#
#x = y = np.arange(-3.0, 3.01, delta)
#X, Y = np.meshgrid(x, y)
#Z1 = plt.mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
#Z2 = plt.mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
#Z = 10 * (Z1 - Z2)
#
#nr, nc = Z.shape
#
## put NaNs in one corner:
#Z[-nr//6:, -nc//6:] = np.nan
## contourf will convert these to masked
#
#
#Z = np.ma.array(Z)
## mask another corner:
#Z[:nr//6, :nc//6] = np.ma.masked
#
## mask a circle in the middle:
#interior = np.sqrt((X**2) + (Y**2)) < 0.5
#Z[interior] = np.ma.masked
#
## We are using automatic selection of contour levels;
## this is usually not such a good idea, because they don't
## occur on nice boundaries, but we do it here for purposes
## of illustration.
#CS = plt.contourf(X, Y, Z, 10,
# #[-1, -0.1, 0, 0.1],
# #alpha=0.5,
# cmap=plt.cm.bone,
# origin=origin)
#
## Note that in the following, we explicitly pass in a subset of
## the contour levels used for the filled contours. Alternatively,
## We could pass in additional levels to provide extra resolution,
## or leave out the levels kwarg to use all of the original levels.
#
#CS2 = plt.contour(CS, levels=CS.levels[::2],
# colors='r',
# origin=origin)
#
#plt.title('Nonsense (3 masked regions)')
#plt.xlabel('word length anomaly')
#plt.ylabel('sentence length anomaly')
#
## Make a colorbar for the ContourSet returned by the contourf call.
#cbar = plt.colorbar(CS)
#cbar.ax.set_ylabel('verbosity coefficient')
## Add the contour line levels to the colorbar
#cbar.add_lines(CS2)
#
#plt.figure()
#
## Now make a contour plot with the levels specified,
## and with the colormap generated automatically from a list
## of colors.
#levels = [-1.5, -1, -0.5, 0, 0.5, 1]
#CS3 = plt.contourf(X, Y, Z, levels,
# colors=('r', 'g', 'b'),
# origin=origin,
# extend='both')
## Our data range extends outside the range of levels; make
## data below the lowest contour level yellow, and above the
## highest level cyan:
#CS3.cmap.set_under('yellow')
#CS3.cmap.set_over('cyan')
#
#CS4 = plt.contour(X, Y, Z, levels,
# colors=('k',),
# linewidths=(3,),
# origin=origin)
#plt.title('Listed colors (3 masked regions)')
#plt.clabel(CS4, fmt='%2.1f', colors='w', fontsize=14)
#
## Notice that the colorbar command gets all the information it
## needs from the ContourSet object, CS3.
#plt.colorbar(CS3)
#
## Illustrate all 4 possible "extend" settings:
#extends = ["neither", "both", "min", "max"]
#cmap = plt.cm.get_cmap("winter")
#cmap.set_under("magenta")
#cmap.set_over("yellow")
## Note: contouring simply excludes masked or nan regions, so
## instead of using the "bad" colormap value for them, it draws
## nothing at all in them. Therefore the following would have
## no effect:
## cmap.set_bad("red")
#
#fig, axs = plt.subplots(2, 2)
#fig.subplots_adjust(hspace=0.3)
#
#for ax, extend in zip(axs.ravel(), extends):
# cs = ax.contourf(X, Y, Z, levels, cmap=cmap, extend=extend, origin=origin)
# fig.colorbar(cs, ax=ax, shrink=0.9)
# ax.set_title("extend = %s" % extend)
# ax.locator_params(nbins=4)
#
#plt.show()
|
{"hexsha": "c2a303cf1965fe7cc009f60143d217c0cd799314", "size": 12030, "ext": "py", "lang": "Python", "max_stars_repo_path": "Na_im.py", "max_stars_repo_name": "jpmorgen/IoIO", "max_stars_repo_head_hexsha": "c22523d77bc38162cabf3ddb6d7446e4005864e8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-12T01:45:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-12T01:45:40.000Z", "max_issues_repo_path": "Na_im.py", "max_issues_repo_name": "jpmorgen/IoIO", "max_issues_repo_head_hexsha": "c22523d77bc38162cabf3ddb6d7446e4005864e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Na_im.py", "max_forks_repo_name": "jpmorgen/IoIO", "max_forks_repo_head_hexsha": "c22523d77bc38162cabf3ddb6d7446e4005864e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-12T01:47:03.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-12T01:47:03.000Z", "avg_line_length": 36.4545454545, "max_line_length": 139, "alphanum_fraction": 0.6287614298, "include": true, "reason": "import numpy,from astropy", "num_tokens": 3758}
|
"""
File: pfb_peek.py
USAGE:
python pfb_peek.py <pfb_file>
DESCRIPTION:
Peeks into a parflow .pfb file and display a summary of the file.
This prints to stdout a summary of the .pfb file. It prints the file header from the first 64 bytes.
Then prints the subgrid headers of the first 2 subgrids and the last subgrid.
The purpose of this utility is to assist with debugging so you can view a summary of a PFB file.
COMPONENT:
This class can also be used as a component, called by a python program or test case.
For example,
from pfb_peek import PFBPeek
summary = PFBPeek()
file_header = summary.open_pfb("myfile.pfb")
number_of_subgrids = int(file_header.get('n_subgrids', 0))
for i in range(0, number_of_subgrids):
        subgrid_header = summary.read_subgrid_header()
        data = summary.read_subgrid_data(subgrid_header)
print(subgrid_header)
print(data)
This is not as efficient as using the pf_xarray methods, but it allows access to the
details of the file headers and subgrid headers and subgrid data for testing and debugging.
"""
from ctypes import cdll
import sys
import os
import struct
import numpy as np
class PFBPeek:
    """Reader that summarizes a ParFlow .pfb file.

    Parses the 64-byte big-endian file header and the per-subgrid headers,
    and exposes each subgrid's data as a numpy array memory-mapped onto the
    file.  Usable as a context manager; see the module docstring for an
    example of component use.
    """
    def __init__(self):
        self.fp = None          # open binary file handle, or None when closed
        self.file_name = None   # path of the currently opened .pfb file
        self.header = None      # file-header dict filled in by read_header()
    def close(self):
        """Close the file handle if open; errors during close are ignored."""
        if self.fp is not None:
            try:
                self.fp.close()
            except Exception:
                # Best effort: a failing close must not mask the caller's work.
                pass
            self.fp = None
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Delegate to close() instead of duplicating its cleanup logic.
        self.close()
    def run(self):
        """Read the command line arguments and print summary of the PFB File."""
        if len(sys.argv) < 2:
            print("Error: PFB File name is required")
            sys.exit(-1)
        file_name = sys.argv[1]
        if not os.path.exists(file_name):
            print(f"File '{file_name}' does not exist.")
            sys.exit(-1)
        self.file_name = file_name
        self.open_pfb(file_name)
        print(self.header)
        checksum = 0
        first_cell = 0
        printed_dots = False
        number_of_subgrids = int(self.header.get('n_subgrids', 0))
        for i in range(number_of_subgrids):
            subgrid_header = self.read_subgrid_header()
            data = self.read_subgrid_data(subgrid_header)
            checksum = checksum + float(np.sum(data))
            if i == 0:
                first_cell = data[0][0][0]
            # Show only the first two and the last subgrid; elide the rest.
            if i < 2 or i >= number_of_subgrids - 1:
                print()
                print(f"Subgrid #{i}")
                print(subgrid_header)
                print(data)
            elif not printed_dots:
                print()
                print("... more subgrids ...")
                printed_dots = True
        print()
        print("Checksum of all subgrid values")
        print(checksum)
        print()
        print("Value of first cell")
        print(first_cell)
    def open_pfb(self, file_name):
        """Open *file_name*, parse its file header, and return the header dict."""
        self.fp = open(file_name, "rb")
        self.file_name = file_name
        self.read_header()
        return self.header
    def _read(self, fmt):
        """Read and unpack one big-endian value with struct format *fmt*."""
        return struct.unpack(fmt, self.fp.read(struct.calcsize(fmt)))[0]
    def read_header(self):
        """Read the pfb file header (first 64 bytes) into self.header."""
        self.fp.seek(0)
        # Dict displays evaluate left to right, so the reads happen in
        # exactly the on-disk field order.
        self.header = {
            'x': self._read('>d'),
            'y': self._read('>d'),
            'z': self._read('>d'),
            'nx': self._read('>i'),
            'ny': self._read('>i'),
            'nz': self._read('>i'),
            'dx': self._read('>d'),
            'dy': self._read('>d'),
            'dz': self._read('>d'),
            'n_subgrids': self._read('>i'),
        }
    def read_subgrid_header(self):
        """Read the next subgrid header from the file and return it as a dict."""
        return {
            'ix': self._read('>i'),
            'iy': self._read('>i'),
            'iz': self._read('>i'),
            'nx': self._read('>i'),
            'ny': self._read('>i'),
            'nz': self._read('>i'),
            'rx': self._read('>i'),
            'ry': self._read('>i'),
            'rz': self._read('>i'),
        }
    def read_subgrid_data(self, subgrid_header):
        """Read the data of the subgrid. Returns a numpy array mapped to the subgrid data.

        The data are stored as big-endian float64 in Fortran order with shape
        (nz, ny, nx); byteswap() converts them to native byte order.  The
        file position is advanced past the data so the next subgrid header
        can be read directly afterwards.
        """
        nx = subgrid_header['nx']
        ny = subgrid_header['ny']
        nz = subgrid_header['nz']
        offset = self.fp.tell()
        data = np.memmap(
            self.file_name,
            dtype=np.float64,
            mode='r',
            offset=offset,
            shape=(nz, ny, nx),
            order='F'
        ).byteswap()
        # Skip past the mapped data block.
        self.fp.seek(offset + nx * ny * nz * 8)
        return data
if __name__ == '__main__':
    # Script entry point: summarize the .pfb file named on the command line.
    main = PFBPeek()
    main.run()
|
{"hexsha": "c9a032f13373fdf2a1956d31ec997bbd5aefda12", "size": 5901, "ext": "py", "lang": "Python", "max_stars_repo_path": "pf_xarray/tests/pfb_peek.py", "max_stars_repo_name": "wh3248/pf-xarray", "max_stars_repo_head_hexsha": "f971e0c3e9962958fcf807e45d2623a0784cba8c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pf_xarray/tests/pfb_peek.py", "max_issues_repo_name": "wh3248/pf-xarray", "max_issues_repo_head_hexsha": "f971e0c3e9962958fcf807e45d2623a0784cba8c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2021-09-24T01:04:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-19T16:41:21.000Z", "max_forks_repo_path": "pf_xarray/tests/pfb_peek.py", "max_forks_repo_name": "wh3248/pf-xarray", "max_forks_repo_head_hexsha": "f971e0c3e9962958fcf807e45d2623a0784cba8c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-19T01:08:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T01:08:44.000Z", "avg_line_length": 34.7117647059, "max_line_length": 108, "alphanum_fraction": 0.5624470429, "include": true, "reason": "import numpy", "num_tokens": 1446}
|
import sys
# Make the repository root importable so qwopt resolves when this test is
# run from its own directory (tests live two levels below the root).
sys.path.append('../../')
import unittest
import numpy as np
from qwopt.compiler.parser import GraphParser
class GraphParserTest(unittest.TestCase):
    """Unit tests for qwopt.compiler.parser.GraphParser."""
    def test_dim(self):
        # dim() should report the (rows, cols) of the adjacency matrix.
        test_mat = [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
        gparser = GraphParser(test_mat)
        gdim = gparser.dim()
        self.assertEqual(gdim, (4, 4), 'Unexpected')
    def test_len(self):
        # len() should be the number of nodes (matrix order).
        test_mat = [[1, 1, 1, 1],
                    [1, 1, 1, 1],
                    [1, 1, 1, 1],
                    [1, 1, 1, 1]]
        gparser = GraphParser(test_mat)
        glen = len(gparser)
        self.assertEqual(glen, 4, 'Unexpected')
    def test_n_partition(self):
        test_mat = [[0, 0, 1, 1],
                    [1, 0, 1, 1],
                    [1, 1, 0, 1],
                    [1, 1, 1, 1]]
        gparser = GraphParser(test_mat)
        n_partition = gparser.n_partitions()
        self.assertEqual(n_partition, 3, 'number of partitions')
    def test_n_connections(self):
        test_mat = [[0, 0, 1, 1],
                    [1, 0, 1, 1],
                    [1, 1, 0, 1],
                    [1, 1, 1, 1]]
        gparser = GraphParser(test_mat)
        # np.array_equal replaces the manual element-wise comparison plus
        # all() reduction, and assertTrue the assertEqual(..., True) idiom.
        self.assertTrue(np.array_equal(gparser.n_connections(), [3, 2, 3, 4]),
                        'number of connections')
    def test_matrix_opt(self):
        test_mat = [[0, 1, 1, 1],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1],
                    [0, 1, 1, 0]]
        gparser = GraphParser(test_mat)
        cmatrix = np.array([[0, 1, 1, 1],
                            [0, 0, 0, 1],
                            [0, 1, 0, 1],
                            [0, 0, 1, 0]])
        self.assertTrue(np.array_equal(gparser.graph_opt(), cmatrix),
                        'matrix optimization')
if __name__ == '__main__':
    # Discover and run all GraphParserTest cases when executed as a script.
    unittest.main()
|
{"hexsha": "345402cd4eaf1f6b4774927650a2338be5593007", "size": 2260, "ext": "py", "lang": "Python", "max_stars_repo_path": "qwopt/test/parser_test.py", "max_stars_repo_name": "Chibikuri/qwopt", "max_stars_repo_head_hexsha": "e65549db83142af4c6b63cce9f55050ee87fb27a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-05-25T15:07:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-17T07:38:06.000Z", "max_issues_repo_path": "qwopt/test/parser_test.py", "max_issues_repo_name": "Chibikuri/qwopt", "max_issues_repo_head_hexsha": "e65549db83142af4c6b63cce9f55050ee87fb27a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-01-05T07:31:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-11T03:47:49.000Z", "max_forks_repo_path": "qwopt/test/parser_test.py", "max_forks_repo_name": "Chibikuri/qwopt", "max_forks_repo_head_hexsha": "e65549db83142af4c6b63cce9f55050ee87fb27a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-21T14:22:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-21T14:22:36.000Z", "avg_line_length": 33.2352941176, "max_line_length": 75, "alphanum_fraction": 0.4234513274, "include": true, "reason": "import numpy", "num_tokens": 741}
|
import matplotlib.pyplot as plt
import numpy as np
# add noise to y axis to avoid overlapping
def rand_jitter(arr):
    """Return *arr* with Gaussian jitter scaled to 1% of its value range.

    :param arr: non-empty sequence of numeric values.
    :return: numpy array of the same length with jitter added.  When all
        values are equal the scale is 0 and the data come back unchanged.
    """
    # "Range" = (max(arr) - min(arr)); jitter amplitude is 1% of it so the
    # perturbation stays small relative to the data spread.
    noise = .01 * (max(arr) - min(arr))
    # Bug fix: the original computed this scale (as the misspelled 'nosie')
    # but never applied it, adding unit-variance noise regardless of range.
    return arr + noise * np.random.randn(len(arr))
# https://stackoverflow.com/questions/4383571/importing-files-from-different-folder
# # some_file.py
# import sys
# sys.path.insert(0, '/path/to/application/app/folder')
# import some_file
# ...
# sys.path.append('/path/to/application/app/folder') is cleaner imo
# ...
# Yep, it is, but inserting it at the beginning has the benefit of guaranteeing that the path is searched before others (even built-in ones) in the case of naming conflicts.
# https://docs.python.org/2/tutorial/modules.html#packages
# https://stackoverflow.com/questions/17547699/what-does-the-jitter-function-do-in-r
# According to the documentation, the explanation for the jitter function is "Add a small amount of noise to a numeric vector."
# ...
# Jittering indeed means just adding random noise to a vector of numeric values, by default this is done in jitter-function by drawing samples from the uniform distribution. The range of values in the jittering is chosen according to the data, if amount-parameter is not provided.
# I think term 'jittering' covers other distributions than uniform, and it is typically used to better visualize overlapping values, such as integer covariates.
# This helps grasp where the density of observations is high.
# It is good practice to mention in the figure legend if some of the values have been jittered, even if it is obvious.
# Here is an example visualization with the jitter-function as well as a normal distribution jittering where I arbitrarily threw in value sd=0.1
# ...
# A really good explanation of the Jitter effect and why it is necessary can be found in the Swirl course on Regression Models in R.
# The course says that if you do not have jitter, many people will have the same height, so points falls on top of each other which is why some of the circles in the first plot look darker than others. However, by using R's function "jitter" on the children's heights, we can spread out the data to simulate the measurement errors and make high frequency heights more visible.
# http://swirlstats.com/scn/regmod.html
# https://github.com/swirldev/swirl_courses/tree/master/Regression_Models
# https://thomasleeper.com/Rcourse/Tutorials/jitter.html
# http://stat.ethz.ch/R-manual/R-devel/library/base/html/jitter.html
# Description
# Add a small amount of noise to a numeric vector.
# Usage
# jitter(x, factor = 1, amount = NULL)
# Arguments
# x
# numeric vector to which jitter should be added.
# factor
# numeric.
# amount
# numeric; if positive, used as amount (see below), otherwise, if = 0 the default is factor * z/50.
# Default (NULL): factor * d/5 where d is about the smallest difference between x values.
# Details
# The result, say r, is r <- x + runif(n, -a, a) where n <- length(x) and a is the amount argument (if specified).
# Let z <- max(x) - min(x) (assuming the usual case). The amount a to be added is either provided as positive argument amount or otherwise computed from z, as follows:
# If amount == 0, we set a <- factor * z/50 (same as S).
# If amount is NULL (default), we set a <- factor * d/5 where d is the smallest difference between adjacent unique (apart from fuzz) x values.
# Value
# jitter(x, ...) returns a numeric of the same length as x, but with an amount of noise added in order to break ties.
# See Also: http://stat.ethz.ch/R-manual/R-devel/library/graphics/html/rug.html
#> jitter
#function (x, factor = 1, amount = NULL)
#{
# if (length(x) == 0L)
# return(x)
# if (!is.numeric(x))
# stop("'x' must be numeric")
# z <- diff(r <- range(x[is.finite(x)]))
# if (z == 0)
# z <- abs(r[1L])
# if (z == 0)
# z <- 1
# if (is.null(amount)) {
# d <- diff(xx <- unique(sort.int(round(x, 3 - floor(log10(z))))))
# d <- if (length(d))
# min(d)
# else if (xx != 0)
# xx/10
# else z/10
# amount <- factor/5 * abs(d)
# }
# else if (amount == 0)
# amount <- factor * (z/50)
# x + stats::runif(length(x), -amount, amount)
#}
#<bytecode: 0x000000001d198300>
#<environment: namespace:base>
|
{"hexsha": "c9371075add54ebd67285a385122202297f68547", "size": 4360, "ext": "py", "lang": "Python", "max_stars_repo_path": "jb/rand_jitter.py", "max_stars_repo_name": "james-w-balcomb/kaggle--home-credit-default-risk", "max_stars_repo_head_hexsha": "b8346ba1640c42daf8457588f7fadd81ded74b8f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-06-13T14:47:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T09:46:11.000Z", "max_issues_repo_path": "jb/rand_jitter.py", "max_issues_repo_name": "james-w-balcomb/kaggle--home-credit-default-risk", "max_issues_repo_head_hexsha": "b8346ba1640c42daf8457588f7fadd81ded74b8f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "jb/rand_jitter.py", "max_forks_repo_name": "james-w-balcomb/kaggle--home-credit-default-risk", "max_forks_repo_head_hexsha": "b8346ba1640c42daf8457588f7fadd81ded74b8f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-06-04T00:43:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-05T14:58:44.000Z", "avg_line_length": 45.8947368421, "max_line_length": 376, "alphanum_fraction": 0.6912844037, "include": true, "reason": "import numpy", "num_tokens": 1140}
|
# vs_circuit_solver.py
# версия 0.1
# язык Python
#
# программа подбора значений R,C для вариантов электронной схемы
# исходя из моделирования подобной схемы в ngspice
# поставляется без всякой оптимизации, ибо имеет целью установление методики
# расчета таких вещей и определения границ применимости этой методики
#
# автор В.Симонов, 22-июль-2020
# vasily_simonov@mail.ru, github.com/vasily84
#
# license : это модуль в любом виде можно использовать в любых целях.
# Ссылка на автора приветствуется, но не является обязательной
#
import scipy.optimize as spo
import scipy.fft as scf
import math
import numpy as np
import matplotlib.pyplot as plt
from ctypes import c_double
import json
# внешние модули
import MySpice.MySpice as spice
import ivcmp.ivcmp as ivcmp
import gc
### SETTINGS ################################################################
# Method used to compare the current and voltage curves.
# May be: 'ivcmp', 'type_ps'
MISFIT_METHOD = 'ivcmp'
#MISFIT_METHOD = 'type_ps'
# Probe frequency, Hz
INIT_F = 1e4
# Peak voltage, volts; may change when an external data file is loaded
INIT_V = 5
# Current-limiting resistor, ohms
INIT_Rcs = 1e2
# SIGNAL/NOISE ratio
INIT_SNR = 120.0
#INIT_SNR = 35.0
# Number of voltage oscillation cycles in the record
INIT_CYCLE = 10
# Diode voltage drop.
# The diode is treated as fully conducting at voltages above DIODE_VOLTAGE
# and as fully closed below it. (An approximation.)
DIODE_VOLTAGE = 0.7
#
SMALL_VOLTAGE = 0.1
# "huge resistance"
HUGE_R = 1e10 # 10 GOhm
# "large resistance"
BIG_R = 1e8 # 100 MOhm
# "negligible resistance"
NULL_R = 1e-6 # 1 uOhm
# "negligible capacitance", "huge capacitance"
NONE_C = 1e-15 # 0.001 pF
HUGE_C = 1e-3 # 1000 uF
# Curve-fit tolerance — the stopping criterion. Fitting continues until the
# comparison function returns CompareIvc() <= IVCMP_TOLERANCE
#IVCMP_TOLERANCE = 5e-3
IVCMP_TOLERANCE = 6e-2
# Tolerance of the fitted component values, in percent. Capacitor values are
# fitted through their reactance! See scipy.minimize(method='Powell')
VALUES_TOLERANCE = 1e-2
# Number of function evaluations during optimization; for small values it is
# the minimum possible number
MAXFEV = 100
# Number of points in the current and voltage arrays; may change when an
# external data file is loaded
MAX_NUM_POINTS = 100
min_ivc = 1  # NOTE(review): not used in the code visible here — presumably a best-misfit tracker; confirm against the rest of the module
#############################################################################
# Result of the most recent PySpice simulation
analysis = None
# Target current curve — the one we are fitting
target_VCurrent = None
# Voltage measured by the instrument at the node after resistor Rcs
target_input_dummy = None
target_fileName = ''
# Target current curve for comparison with the ivcmp library
target_IVCurve = None
# Name of the temporary circuit file used to run PySpice
circuit_SessionFileName = 'var1.cir'
# List of values for the circuit template file. The number of elements must
# be at least the number of {} placeholders in the template file
#Xi_long = [0.,0.,0.,0., 0.,0.,0., 0.,0.,0.,0.]
Xi_long = np.array([0.,0.,0.,0., 0.,0.,0., 0.,0.,0.,0.])
# Mask of optimized parameters — a list of booleans, for example:
# Xi_long = [a, b, c, d]
# Xi_mask = [False,True,False,True] -> X_short = [b,d]
Xi_mask = [False,False,False,False, False,False,False, False,False,False,False]
#### TEMPLATE, TARGET MODEL AND PARAMETER MASK FUNCTIONS #####################
def Xi_unroll(x_short):
    """Scatter the short optimizer vector back into a full-length copy.

    Each entry of *x_short* is added, in order, to the slot of a copy of
    Xi_long whose Xi_mask flag is True; the expanded copy is returned.
    """
    full = Xi_long.copy()
    pos = 0
    for idx, active in enumerate(Xi_mask):
        if active:
            full[idx] += x_short[pos]
            pos += 1
    return full
def Xi_pack(Xi_):
    """Collect the entries of *Xi_* selected by Xi_mask into a flat list."""
    return [Xi_[idx] for idx, selected in enumerate(Xi_mask) if selected]
# set all known component nominal values
def set_circuit_nominals(nominals):
    """Replace the module-level nominal vector Xi_long with a copy of *nominals*."""
    global Xi_long
    Xi_long = nominals.copy()
def reset_Xi_variable():
    """Clear every optimization flag so all parameters are treated as fixed."""
    # In-place slice assignment keeps the same list object callers may hold.
    Xi_mask[:] = [False] * len(Xi_mask)
# Maps a template component name to its slot in Xi_long / Xi_mask.
_XI_NAME_TO_INDEX = {
    'R1': 0, 'C1': 1, '_R_C1': 2, '_R_D1': 3,
    'R2': 4, 'C2': 5, '_R_C2': 6,
    'R3': 7, 'C3': 8, '_R_C3': 9, '_R_D3': 10,
}
def set_Xi_variable(vlist):
    """Mark the components named in *vlist* as free optimization variables.

    Unknown names are silently ignored, matching the original if-chain.
    """
    for name in vlist:
        idx = _XI_NAME_TO_INDEX.get(name)
        if idx is not None:
            Xi_mask[idx] = True
def sign(value):
    """Return -1 for negative *value* and 1 otherwise (zero counts as positive)."""
    return -1 if value < 0 else 1
# initialize the target model by simulating a circuit file
def init_target_by_circuitFile(fileName = circuit_SessionFileName):
    """Simulate *fileName* with PySpice and adopt the result as the target.

    Fills the module globals target_VCurrent / target_input_dummy from the
    simulation result, builds the ivcmp.IvCurve used for curve comparison,
    and tunes the ivcmp noise filters to 1% of the curve maxima.
    """
    global target_VCurrent, target_input_dummy, target_IVCurve
    global circuit_SessionFileName
    global Z123_sch
    Z123_sch = None
    # Temporarily swap in the requested file name, run the simulation, and
    # restore the previous session file afterwards.
    var1 = circuit_SessionFileName
    circuit_SessionFileName = fileName
    process_circuitFile()
    circuit_SessionFileName = var1
    target_VCurrent = analysis.VCurrent
    target_input_dummy = analysis.input_dummy
    iv_curve = ivcmp.IvCurve()
    iv_curve.length = MAX_NUM_POINTS-1
    for i in range(MAX_NUM_POINTS-1):
        iv_curve.voltages[i] = c_double(analysis.input_dummy[i]) # current and voltage were swapped
        iv_curve.currents[i] = c_double(analysis.VCurrent[i])
    min_var_c = 0.01 * np.max(iv_curve.currents[:MAX_NUM_POINTS-1]) # value of noise for current
    min_var_v = 0.01 * np.max(iv_curve.voltages[:MAX_NUM_POINTS-1]) # value of noise for voltage
    #if (abs(min_var_c) < INIT_V/INIT_Rcs*0.03):
    #    min_var_c = sign(min_var_c)*INIT_V/INIT_Rcs*0.03
    ivcmp.SetMinVC(min_var_v, min_var_c) # proper filter values so the comparison works correctly
    target_IVCurve = iv_curve
# Initialize the target model with data from a json file; the number of
# points on the curve (MAX_NUM_POINTS) is determined from the file.
def init_target_from_jsnFile(fileName, N):
    """Load measurement record *N* of the first element from board json *fileName*.

    :param fileName: path of the board json file, read via open_board().
    :param N: pin (record) index inside the file.
    :return: (initF, initV, initRcs, target_voltages, target_currents), or
        None when open_board() fails — callers must check before unpacking.
    """
    global target_fileName
    target_fileName = fileName
    ivc_real = open_board(fileName)
    if ivc_real is None:  # identity test instead of the '== None' anti-pattern
        print('open_board() failed')
        return None
    print('record number = '+str(N))
    # Hoist the deep lookup chain that was repeated for every field.
    curve = ivc_real["elements"][0]["pins"][N]["iv_curves"][0]
    target_voltages = curve["voltages"]
    target_currents = curve["currents"]
    settings = curve["measurement_settings"]
    # probe frequency, Hz
    initF = settings["probe_signal_frequency"]
    print('INIT_F = '+str(initF))
    # peak voltage, volts; may change when an external data file is loaded
    initV = settings["max_voltage"]
    print('INIT_V = '+str(initV))
    # current-limiting resistor, ohms
    initRcs = settings["internal_resistance"]
    print('INIT_Rcs = '+str(initRcs))
    return initF, initV, initRcs, target_voltages, target_currents
def init_target_Data(target_voltages, target_currents, initF=1e4, initV=5, initRcs=1e2, initSNR=120, cycle=10, ivcmpTolerance = 6e-2):
    """Install raw voltage/current samples as the fitting target and set
    the measurement globals (frequency, amplitude, Rcs, SNR, cycles).

    FIX: ``INIT_SNR`` and ``INIT_CYCLE`` were assigned without being
    declared ``global``, so the module-level values used later by
    process_circuitFile() were silently never updated.
    (``ivcmpTolerance`` is accepted for interface compatibility but unused.)
    """
    global MAX_NUM_POINTS,INIT_V,INIT_F,INIT_Rcs,INIT_SNR,INIT_CYCLE
    global target_VCurrent,target_input_dummy,target_IVCurve
    target_input_dummy = target_voltages
    target_VCurrent = target_currents
    INIT_F = initF
    INIT_V = initV
    INIT_Rcs = initRcs
    INIT_SNR = initSNR
    INIT_CYCLE = cycle
    MAX_NUM_POINTS = len(target_input_dummy)
    print('MAX_NUM_POINTS = '+str(MAX_NUM_POINTS))
    iv_curve1 = ivcmp.IvCurve()
    iv_curve1.length = MAX_NUM_POINTS-1
    for i in range(MAX_NUM_POINTS-1):
        iv_curve1.voltages[i] = c_double(target_input_dummy[i]) # current and voltage were swapped here
        iv_curve1.currents[i] = c_double(target_VCurrent[i])
    min_var_c = 0.01 * np.max(iv_curve1.currents[:MAX_NUM_POINTS-1]) # value of noise for current
    min_var_v = 0.01 * np.max(iv_curve1.voltages[:MAX_NUM_POINTS-1]) # value of noise for voltage
    ivcmp.SetMinVC(min_var_v, min_var_c) # proper filter values so the comparison works correctly
    target_IVCurve = iv_curve1
    return
# Slots of the Xi vector that encode capacitances as equivalent resistance.
_C_SLOTS = (1, 5, 8)  # C1, C2, C3
def Xi_to_RC(Xi):
    """Convert the optimizer vector *Xi* into physical nominals: every
    entry becomes its absolute value, and the capacitor slots are decoded
    from equivalent resistance via R_to_C().

    :param Xi: 11-element vector (mixed R / C-as-R encoding).
    :return: vector of the same type with physical R and C values.
    """
    RC = Xi.copy()
    # One loop replaces eleven hand-written assignment lines.
    for i in range(11):
        if i in _C_SLOTS:
            RC[i] = np.abs(R_to_C(Xi[i]))
        else:
            RC[i] = np.abs(Xi[i])
    return RC
# Substitute the variable values Xi_values into the circuit template and
# save the result under the session file name.
def generate_circuitFile_by_values( Xi_values):
    """Write a ngspice .cir netlist for the equivalent circuit described
    by the optimizer vector *Xi_values* into ``circuit_SessionFileName``.

    Branch layout: chain 1 = R1 (+C1, +D1 or jumper), chain 2 = R2 (+C2),
    chain 3 = R3 (+C3, +D3 or jumper).  Elements whose companion value is
    at/above BIG_R are treated as present/absent per the checks below.
    """
    rc_values = Xi_to_RC(Xi_values)
    with open(circuit_SessionFileName, 'w') as newF:
        newF.write('* cir file corresponding to the equivalent circuit.\n')
        # * Chain 1
        if rc_values[0]<BIG_R: # chain R1 is present
            if rc_values[2]>= BIG_R: # C1 is present
                newF.write('R1 _net1 input {:e}\n'.format(rc_values[0]))
                newF.write('C1 _net0 _net1 {:e}\n'.format(rc_values[1]))
            else: # no C1
                newF.write('R1 _net0 input {:e}\n'.format(rc_values[0]))
            if rc_values[3]>= BIG_R: # D1 is present
                newF.write('D1 _net0 0 DMOD_D1 AREA=1.0 Temp=26.85\n')
            else: # a jumper instead of D1
                newF.write('R_D1 0 _net0 {:e}\n'.format(rc_values[3]))
        # * Chain 2
        if rc_values[4]<BIG_R:
            if rc_values[6]>= BIG_R: # C2 is present
                newF.write('R2 _net4 input {:e}\n'.format(rc_values[4]))
                newF.write('C2 0 _net4 {:e}\n'.format(rc_values[5]))
            else: # a jumper instead of C2, R2 goes straight to ground
                newF.write('R2 0 input {:e}\n'.format(rc_values[4]))
        # * Chain 3
        if rc_values[7]<BIG_R:
            if rc_values[9]>=BIG_R: # C3 is present
                newF.write('R3 _net3 input {:e}\n'.format(rc_values[7]))
                newF.write('C3 _net2 _net3 {:e}\n'.format(rc_values[8]))
            else: # no C3
                newF.write('R3 _net2 input {:e}\n'.format(rc_values[7]))
            if rc_values[10]>=BIG_R: # D3 is present
                newF.write('D3 0 _net2 DMOD_D1 AREA=1.0 Temp=26.85\n')
            else: # a jumper instead of D3
                newF.write('R_D3 0 _net2 {:e}\n'.format(rc_values[10]))
        # diodes exist, append the diode model card
        if (rc_values[10]>=BIG_R)or(rc_values[3]>= BIG_R):
            newF.write('.MODEL DMOD_D1 D (Is=2.22e-10 N=1.65 Cj0=4e-12 M=0.333 Vj=0.7 Fc=0.5 Rs=0.0686 Tt=5.76e-09 Ikf=0 Kf=0 Af=1 Bv=75 Ibv=1e-06 Xti=3 Eg=1.11 Tcv=0 Trs=0 Ttt1=0 Ttt2=0 Tm1=0 Tm2=0 Tnom=26.85 )\n')
        newF.write('.END')
    ## end of with
# spice input data; created lazily on the first simulation run.
input_data = None
# Simulate the current circuit file.
def process_circuitFile():
    """Run the spice backend on ``circuit_SessionFileName`` and store the
    result in the global ``analysis``.  Failures are printed, not raised.

    NOTE(review): if LoadFile fails, ``circuit`` is undefined, CreateCVC1
    then fails too, and ``analysis`` keeps its previous value — confirm
    this best-effort behaviour is intended.
    """
    global analysis,input_data
    if input_data is None:
        input_data = spice.Init_Data(INIT_F, INIT_V, INIT_Rcs,INIT_SNR )
    try:
        circuit = spice.LoadFile(circuit_SessionFileName)
    except:
        print('spice.LoadFile() failed')
    try:
        analysis = spice.CreateCVC1(circuit, input_data, MAX_NUM_POINTS, "input", INIT_CYCLE)
    except:
        print('spice.CreateCVC1() failed')
# Convert the last analysis into a form suitable for comparison in ivcmp.
iv_curve = None  # cached ivcmp.IvCurve buffer, allocated once and reused
def analysis_to_IVCurve():
    """Copy the last spice analysis into the shared ``iv_curve`` buffer
    and return it.  The buffer is allocated lazily and refilled on every
    call, so the returned object is invalidated by the next call."""
    global iv_curve
    if iv_curve is None:
        iv_curve = ivcmp.IvCurve()
    iv_curve.length = MAX_NUM_POINTS-1
    for i in range(MAX_NUM_POINTS-1):
        iv_curve.voltages[i] = c_double(analysis.input_dummy[i])
        iv_curve.currents[i] = c_double(analysis.VCurrent[i])
    return iv_curve
def V_div_I(v, i):
    """Voltage over current; falls back to HUGE_R when the division
    fails (e.g. zero current)."""
    try:
        return v / i
    except ArithmeticError:
        return HUGE_R
# Plot the simulation result against the target curve.
def analysis_plot(title='',pngName=''):
    """Plot the target IV curve (red) against the fitted one (blue).

    :param title: plot title; falls back to target_fileName when empty.
    :param pngName: when non-empty, the figure is also saved to this path.
    """
    plt.figure(1, (20, 10))
    plt.grid()
    # target IV curve
    plt.plot(target_input_dummy, target_VCurrent,color='red')
    # IV curve produced by the fit
    plt.plot(analysis.input_dummy, analysis.VCurrent,color='blue')
    s=''
    if (not title==''):
        s = title
    elif not target_fileName=='':
        s = target_fileName
    s = s+', misfit='+ format(misfit_result,'0.5E')+', ivcmp='+format(ivcmp_result,'0.5E')
    plt.title(s)
    plt.xlabel('Напряжение [В]')
    plt.ylabel('Сила тока [А]')
    if(not pngName==''):
        plt.savefig(pngName)
    # NOTE(review): axis limits are applied after savefig, so the saved
    # png keeps matplotlib's auto-scaling — confirm that is intended.
    plt.xlim([-INIT_V, INIT_V])
    plt.ylim([-INIT_V/INIT_Rcs, INIT_V/INIT_Rcs])
    plt.show()
#### ФУНКЦИИ СРАВНЕНИЯ ВАХ ###################################################
def C_to_R(c):
    """Equivalent resistance (impedance magnitude) of capacitance *c* at
    the working frequency INIT_F: 1 / (2*pi*f*C)."""
    return 1 / (2. * np.pi * INIT_F * c)
def R_to_C(r):
    """Inverse of C_to_R(): capacitance whose impedance magnitude at
    INIT_F equals *r*; an infinite result is clamped to 1e20."""
    capacitance = 1 / (2. * np.pi * INIT_F * r)
    return 1e20 if math.isinf(capacitance) else capacitance
def analysis_misfit_ivcmp():
    """Compare the last analysis against the target via ivcmp.CompareIvc,
    tracking the best (lowest) score seen so far in the global ``min_ivc``.

    :return: the comparison score for the current analysis.
    """
    global min_ivc
    step_IVCurve = analysis_to_IVCurve()
    res = ivcmp.CompareIvc(target_IVCurve, step_IVCurve)
    if min_ivc > res:
        min_ivc = res
    #print(res, min_ivc)
    return res
# Compute the mismatch between the last analysis and the target curve.
def analysis_misfit():
    """Mismatch between the last simulation and the target, computed by
    the strategy selected in the global ``MISFIT_METHOD``:

    - 'type_ps': squared sample-wise current difference after aligning the
      phases by the maximum of the full excitation voltage;
    - 'power_fft': sum over the rfft of the power-curve difference;
    - 'sko': sum of absolute current differences;
    - 'ivcmp': ivcmp.CompareIvc score.

    :raises RuntimeError: on an unknown MISFIT_METHOD.
    """
    curr_t = target_VCurrent
    curr_a = analysis.VCurrent
    volt_t = target_input_dummy
    volt_a = analysis.input_dummy
    # curve comparison via mismatch of the power curves;
    # accounts for a possible phase mismatch between the signals
    if MISFIT_METHOD == 'type_ps':
        fullV_target = np.zeros_like(target_input_dummy)
        fullV_A = np.zeros_like(target_input_dummy)
        signal_target = np.zeros_like(target_input_dummy)
        signal_A = np.zeros_like(target_input_dummy)
        signal_cmp = np.zeros_like(target_input_dummy)
        for i in range(len(fullV_target)):
            # full excitation voltages (before the Rcs resistor)
            fullV_target[i] = target_input_dummy[i]+INIT_Rcs*target_VCurrent[i]
            fullV_A[i] = analysis.input_dummy[i]+INIT_Rcs*analysis.VCurrent[i]
            # power delivered to the load
            #signal_target[i] = fullV_target[i]*target_VCurrent[i]
            #signal_A[i] = fullV_A[i]*analysis.VCurrent[i]
            signal_target[i] = target_VCurrent[i]
            signal_A[i] = analysis.VCurrent[i]
        # phase alignment by the signal maximum
        index_target = np.argmax(fullV_target)
        index_A = np.argmax(fullV_A)
        # phase shift, in samples
        phase_shift =index_A-index_target+len(signal_target)
        for i in range(len(signal_target)):
            i_A = (i+phase_shift)%len(signal_target)
            # difference of the instantaneous power
            #signal_cmp[i] = np.abs(signal_target[i]-signal_A[i_A])
            signal_cmp[i] = (signal_target[i]-signal_A[i_A])**2
        return math.fsum(signal_cmp)
    if MISFIT_METHOD == 'power_fft':
        r = scf.rfft(curr_t*volt_t-curr_a*volt_a)
        return math.fsum(r)
    if MISFIT_METHOD == 'sko':
        r = (curr_t-curr_a)
        r2 = np.abs(r)
        return math.fsum(r2)
    if MISFIT_METHOD == 'ivcmp':
        step_IVCurve = analysis_to_IVCurve()
        res = ivcmp.CompareIvc(target_IVCurve, step_IVCurve)
        return res
    ###
    s = "unknown MISFIT_METHOD = '"+str(MISFIT_METHOD)+"'"
    raise RuntimeError(s)
#### SOLVER FUNCTIONS ########################################################
# vector that delivers the minimum of the optimised function
Xi_result = np.array([0.,0.,0.,0., 0.,0.,0., 0.,0.,0.,0.])
# current best (lowest) value of the optimised function found so far
misfit_result = 0.
# CompareIvc() score of the current best point
ivcmp_result = 0.
# number of times the optimiser has called the objective
FitterCount = 0
# number of improvements (new best misfits) seen during the current run
BestMisfitCount = 0
# set True once ivcmp_result reaches IVCMP_TOLERANCE
FITTER_SUCCESS = False
def calculate_misfit(Xi):
    """Write the circuit file for nominals *Xi*, simulate it, and return
    the misfit of the simulation against the target."""
    generate_circuitFile_by_values(Xi)
    process_circuitFile()
    return analysis_misfit()
# Objective function called by the optimiser.
def fitter_subroutine(Xargs):
    """Objective for scipy.optimize: unpack the varied parameters into a
    full nominal vector, evaluate the misfit, and keep the best point in
    the module-level result globals.

    :param Xargs: packed vector of the varied parameters only.
    :return: the misfit for this point.
    """
    global Xi_result,misfit_result,FitterCount,BestMisfitCount,ivcmp_result,FITTER_SUCCESS
    FitterCount += 1
    xi = Xi_unroll(Xargs)
    misfit = calculate_misfit(xi)
    if MISFIT_METHOD == 'ivcmp':
        ivcmp_result = misfit
    #print("fCount="+str(FitterCount)+', misfit='+str(Mscalar)+', Xargs='+str(Xargs))
    # first call: unconditionally record it as the current best
    if FitterCount<=1:
        Xi_result = xi.copy()
        misfit_result = misfit
        ivcmp_result = analysis_misfit_ivcmp()
        BestMisfitCount = 0
        #print("fCount="+str(FitterCount)+', mCount='+str(BestMisfitCount)+', misfit='+str(misfit)+', Xargs='+str(Xargs))
    # new best point
    if misfit<misfit_result:
        Xi_result = xi.copy()
        misfit_result = misfit
        ivcmp_result = analysis_misfit_ivcmp()
        BestMisfitCount += 1
        #print("fCount="+str(FitterCount)+', mCount='+str(BestMisfitCount)+', misfit='+str(misfit)+', Xargs='+str(Xargs))
    # extra stop condition check
    if ivcmp_result<=IVCMP_TOLERANCE: # required accuracy reached
        FITTER_SUCCESS = True
    return misfit
#
def fitter_callback(Xk):
    """scipy.optimize callback: request termination (return True) once
    the ivcmp score has reached the required tolerance."""
    global FITTER_SUCCESS
    if ivcmp_result > IVCMP_TOLERANCE:
        return False
    # required accuracy reached
    FITTER_SUCCESS = True
    return True
# Launch the auto-fit (comparison by the sum of point deviations).
def run_fitter(result_cir_file_name='',result_csv_file_name=''):
    """Run the Powell optimiser over the currently varied parameters.

    :param result_cir_file_name: when non-empty, write the fitted circuit
        file from the optimiser's final point.
    :param result_csv_file_name: when non-empty, save the last analysis.
    :return: True (kept for backward compatibility with callers).
    """
    # Start from the origin of the packed (varied-only) parameter space;
    # building the zero list directly replaces the zeroing loop.
    Xargs = [0.] * len(Xi_pack(Xi_long))
    resX = spo.minimize(fitter_subroutine,Xargs,method='Powell',callback=fitter_callback, options={'maxfev':MAXFEV,'xtol':VALUES_TOLERANCE})
    if(not result_csv_file_name==''):
        spice.SaveFile(analysis, result_csv_file_name)
    if(not result_cir_file_name==''):
        generate_circuitFile_by_values(resX.x)
    return True
### элементарная схема ###
##############################################################################
def Sch_init():
    """Return a fresh equivalent-circuit description: all branch resistors
    at HUGE_R (absent), all capacitors at NONE_C, capacitor jumpers at
    NULL_R and diode jumpers at HUGE_R."""
    # Key order matters: Sch_get_Xi / Sch_load_from_Xi iterate it.
    return {
        'R1': HUGE_R,
        'C1': NONE_C,     # [1]
        '_R_C1': NULL_R,
        '_R_D1': HUGE_R,
        'R2': HUGE_R,
        'C2': NONE_C,     # [5]
        '_R_C2': NULL_R,
        'R3': HUGE_R,
        'C3': NONE_C,     # [8]
        '_R_C3': NULL_R,
        '_R_D3': HUGE_R,
    }
def Sch_get_Xi(sch):
    """Flatten schema dict *sch* into the optimizer vector, in key order;
    the capacitances are encoded as equivalent resistance via C_to_R().

    :param sch: schema dict as produced by Sch_init().
    :return: list of nominals, one per schema key.
    """
    # Membership test replaces the three chained equality comparisons.
    return [C_to_R(value) if key in ('C1', 'C2', 'C3') else value
            for key, value in sch.items()]
def Sch_load_from_Xi(sch,Xi):
    """Inverse of Sch_get_Xi(): write vector *Xi* back into *sch* in key
    order, decoding the capacitor slots with R_to_C().  Mutates *sch*."""
    # enumerate() replaces the manually maintained index counter.
    for j, k in enumerate(sch):
        sch[k] = R_to_C(Xi[j]) if k in ('C1', 'C2', 'C3') else Xi[j]
# number of extra initialisation variants per switch code
CODE2_COUNT = 4
# ses - the fitting session to initialise
# swcode - numeric code, 0..255 inclusive, sets the switch positions
# code2 - auxiliary code; for each swcode pass code2=0,1,2,... until the
#         function returns False
def Session_init_by_approximation(ses,swcode,code2,title=''):
    """Initialise session *ses* from the analytic first-shot approximation,
    apply the switch code, and evaluate the schema once.

    :return: falsy when the function should not be called again for this
        swcode (see Z123_approximation).
    """
    sch = ses['start_sch']
    res = Z123_approximation(sch,swcode,code2,title)
    Session_set_switchers(ses,swcode)
    Session_run1(ses)
    return res # falsy result: the function is not called again
#############################################################################
## ФУНКЦИИ НУЛЕВОГО ПОДБОРА (ПРИСТРЕЛКА) ####################################
#############################################################################
# full circuit voltage - before the Rcs resistor.
target_fullVoltage = None
# circuit current, with zero-offset correction applied
corrected_VCurrent = None
# current through our simplified circuit
def I_from_VR1R2R3(V,R1,R2,R3):
    """Analytic current of the simplified model (R1+ideal VD1, plain R2,
    R3+ideal VD3, all behind the Rcs limiting resistor) for full
    excitation voltage *V*.

    NOTE(review): the Ohm's-law current I uses R2 *before* the abs() is
    applied — confirm negative R2 inputs are impossible or intended here.
    """
    I = V/(INIT_Rcs+R2)
    R1 = np.abs(R1)
    R2 = np.abs(R2)
    R3 = np.abs(R3)
    V2 = R2*I
    # diode VD1 is conducting
    if V2>=DIODE_VOLTAGE:
        up_part = V*(R1+R2)-R2*DIODE_VOLTAGE
        down_part = R1*R2+R1*INIT_Rcs+R2*INIT_Rcs
        Id = up_part/down_part
        return Id
    # diode VD3 is conducting
    if V2 <=-DIODE_VOLTAGE:
        up_part = V*(R3+R2)+R2*DIODE_VOLTAGE
        down_part = R3*R2+R3*INIT_Rcs+R2*INIT_Rcs
        Id = up_part/down_part
        return Id
    # both diodes VD1 and VD3 closed - plain Ohm's law
    return I
# resistance recovered from known values
def R1_from_R2VI(R2,V,I):
    """Recover R1 from a measured point (*V*, *I*) given R2, assuming
    diode VD1 is conducting at that point.

    :raises RuntimeError: when the operating point implies VD1 is closed,
        so R1 cannot be recovered.
    """
    I2 = V/(INIT_Rcs+R2)
    # the diode must be open at this operating point
    if(I2*R2)<(DIODE_VOLTAGE-SMALL_VOLTAGE):
        print('R1_from_R2VI() Error: DIODE VD1 CLOSED!!')
        raise RuntimeError("R1_from_R2VI() Error: DIODE VD1 CLOSED!!") from None
    up_part = R2*(V-I*INIT_Rcs-DIODE_VOLTAGE)
    #print('up_part='+str(up_part))
    down_part = I*(R2+INIT_Rcs)-V
    #print('down_part='+str(down_part))
    return up_part/down_part
# resistance recovered from known values
def R3_from_R2VI(R2,V,I):
    """Recover R3 from a measured point (*V*, *I*) given R2, assuming
    diode VD3 is conducting at that point.

    :raises RuntimeError: when the operating point implies VD3 is closed.
    """
    I2 = V/(INIT_Rcs+R2)
    # the diode must be open at this operating point
    if(I2*R2)>-(DIODE_VOLTAGE-SMALL_VOLTAGE):
        raise RuntimeError("R3_from_R2VI() Error: DIODE VD3 CLOSED!!") from None
    up_part = R2*(V-I*INIT_Rcs+DIODE_VOLTAGE)
    down_part = I*(R2+INIT_Rcs)-V
    return up_part/down_part
# measure r2 directly
def measure_r2():
    """Average |V/I| over the target samples whose voltage is above the
    noise floor yet too small to open either diode (so only R2 conducts).

    :return: the averaged resistance, or HUGE_R when no sample qualifies.
    """
    v_r2 = target_input_dummy
    r_summ = 0.
    r_count = 0
    for i in range(len(v_r2)):
        # keep only points where both diodes are guaranteed closed
        if(np.abs(v_r2[i])>SMALL_VOLTAGE) and (np.abs(v_r2[i])<DIODE_VOLTAGE):
            r_i = V_div_I(v_r2[i],corrected_VCurrent[i])
            if(r_i>=HUGE_R):
                continue
            r_summ += np.abs(r_i)
            r_count +=1
            #print('r2='+str(r_i))
    # FIX: only the empty-sample division can fail here; the original bare
    # except could also hide unrelated errors (and swallow KeyboardInterrupt).
    try:
        R = r_summ/r_count
    except ZeroDivisionError:
        R = HUGE_R
    return R
# measure r1 directly
def measure_r1_by_R2(R2):
    """Estimate R1 at the positive voltage peak of the target, given R2.

    :return: the estimate, or NULL_R on any failure (closed diode raises
        RuntimeError, degenerate denominator raises ZeroDivisionError).
    """
    i = np.argmax(target_fullVoltage)
    # FIX: narrowed the bare except to Exception so KeyboardInterrupt and
    # SystemExit are no longer swallowed.
    try:
        r = R1_from_R2VI(R2,target_fullVoltage[i],corrected_VCurrent[i])
    except Exception:
        r = NULL_R
    #print('r1='+str(r))
    return r
# measure r3 directly
def measure_r3_by_R2(R2):
    """Estimate R3 at the negative voltage peak of the target, given R2.

    :return: the estimate, or NULL_R on any failure (closed diode raises
        RuntimeError, degenerate denominator raises ZeroDivisionError).
    """
    i = np.argmin(target_fullVoltage)
    # FIX: narrowed the bare except to Exception so KeyboardInterrupt and
    # SystemExit are no longer swallowed.
    try:
        r = R3_from_R2VI(R2,target_fullVoltage[i],corrected_VCurrent[i])
    except Exception:
        r = NULL_R
    #print('r3='+str(r))
    return r
def get_r_high():
    """Resistance estimate at the positive voltage peak of the target."""
    peak = np.argmax(target_fullVoltage)
    return V_div_I(target_fullVoltage[peak], corrected_VCurrent[peak])
def get_r_low():
    """Resistance estimate at the negative voltage peak of the target."""
    trough = np.argmin(target_fullVoltage)
    return V_div_I(target_fullVoltage[trough], corrected_VCurrent[trough])
def get_r_hight_sub_diode():
    """Resistance estimate at the positive peak with one ideal diode
    drop (DIODE_VOLTAGE) subtracted from the voltage."""
    peak = np.argmax(target_fullVoltage)
    return V_div_I(target_fullVoltage[peak] - DIODE_VOLTAGE, corrected_VCurrent[peak])
def get_r_low_sub_diode():
    """Resistance estimate at the negative peak with one ideal diode
    drop (DIODE_VOLTAGE) added back to the voltage."""
    trough = np.argmin(target_fullVoltage)
    return V_div_I(target_fullVoltage[trough] + DIODE_VOLTAGE, corrected_VCurrent[trough])
# Remembers the best (lowest) curve-match result across calls.
min_r123_misfit = None   # best squared-error sum seen so far (None = none yet)
min_r123_x = None        # [r1, r2, r3] that produced min_r123_misfit
def min_r123_subroutine(x):
    """Squared-error misfit of the analytic R1/R2/R3 model against the
    corrected target current; updates the module-level best-so-far.

    :param x: candidate [r1, r2, r3].
    :return: the summed squared error for this candidate.
    """
    global min_r123_misfit,min_r123_x
    r1 = x[0]
    r2 = x[1]
    r3 = x[2]
    E_r123 = np.zeros_like(target_fullVoltage)
    for i in range(len(E_r123)):
        I = I_from_VR1R2R3(target_fullVoltage[i],r1,r2,r3)
        #E_r123[i] = np.abs(I-corrected_VCurrent[i])
        E_r123[i] = (I-corrected_VCurrent[i])**2
    Result = math.fsum(E_r123)
    if (min_r123_misfit is None)or(Result<min_r123_misfit):
        min_r123_x = [r1,r2,r3]
        min_r123_misfit = Result
    return Result
# Measure the zero offset over the voltage range where the diodes are closed.
def measure_zero_drift():
    """Average target current over samples whose voltage is below the
    diode threshold (both diodes closed), i.e. the zero-current offset.

    :return: the offset, or 0. when no sample qualifies.
    """
    z_value = 0.
    z_count = 0
    for i in range(len(target_VCurrent)):
        if(np.abs(target_input_dummy[i])<(DIODE_VOLTAGE)):
            z_value += target_VCurrent[i]
            z_count += 1
    # FIX: only the empty-sample division can fail here; the original bare
    # except could also hide unrelated errors.
    try:
        z_drift = z_value/z_count
    except ZeroDivisionError:
        z_drift = 0.
    print('z_drift='+str(z_drift))
    return z_drift
# Check a capacitance against its nominal bounds; values outside the
# range are clamped to the boundary.
def C_to_norm(C):
    """Return *C* clamped into the allowed range [NONE_C, HUGE_C]."""
    return min(max(C, NONE_C), HUGE_C)
def phase_to_norm(phase):
    """Placeholder: phase normalisation is not implemented; ignores
    *phase* and returns None."""
    pass
# Initialisation, assuming Rcs may be tens of kilo-ohms.
Z123_sch = None  # cached analytic first-shot schema; None until computed
def Z123_approximation(sch,swcode,code2,title=''):
    """Analytic "first shot": estimate R1..R3 and C1..C3 from the target
    curve, cache them in the global ``Z123_sch``, copy them into *sch*,
    and plot the approximation against the real data.

    NOTE(review): *swcode* and *code2* are accepted but unused here —
    confirm they are kept only for the Session_init_by_approximation
    calling convention.

    :return: False when cached results were copied (do not call again);
        None after a fresh computation.
    """
    global Z123_sch,target_fullVoltage,min_r123_misfit,corrected_VCurrent
    if Z123_sch is None:
        Z123_sch = Sch_init()
        zero_drift = measure_zero_drift()
        target_fullVoltage = np.copy(target_VCurrent)
        corrected_VCurrent = np.copy(target_VCurrent)
        for i in range(len(target_input_dummy)):
            target_fullVoltage[i] = target_input_dummy[i]+INIT_Rcs*target_VCurrent[i]
            #corrected_VCurrent[i] = target_VCurrent[i]-zero_drift
            corrected_VCurrent[i] = target_VCurrent[i]
    else:
        # cached: just copy the stored approximation
        sch['R1'] = Z123_sch['R1']
        sch['C1'] = Z123_sch['C1']
        sch['R2'] = Z123_sch['R2']
        sch['C2'] = Z123_sch['C2']
        sch['R3'] = Z123_sch['R3']
        sch['C3'] = Z123_sch['C3']
        return False # do not call again
    ########################################################
    r2 = measure_r2()
    r1 = measure_r1_by_R2(r2)
    r3 = measure_r3_by_R2(r2)
    # reset the first-shot tracker
    min_r123_misfit = None
    # candidate resistance sets for the schemas
    # main analytic approximation, works almost always
    min_r123_subroutine([r1,r2,r3])
    # assorted variants with a smaller absolute error
    # for the analytic approximation
    min_r123_subroutine([r1,HUGE_R,r3])
    min_r123_subroutine([r1,HUGE_R,measure_r3_by_R2(HUGE_R)])
    min_r123_subroutine([measure_r1_by_R2(HUGE_R),HUGE_R,r3])
    min_r123_subroutine([measure_r1_by_R2(HUGE_R),HUGE_R,measure_r3_by_R2(HUGE_R)])
    min_r123_subroutine([measure_r1_by_R2(HUGE_R),HUGE_R,r3])
    min_r123_subroutine([measure_r1_by_R2(HUGE_R),HUGE_R,HUGE_R])
    min_r123_subroutine([HUGE_R,HUGE_R,measure_r3_by_R2(HUGE_R)])
    # assorted variants with a smaller absolute error
    # for the ideal-diode approximation
    r1_0 = get_r_high()
    r1_d = get_r_hight_sub_diode()
    r3_0 = get_r_low()
    r3_d = get_r_low_sub_diode()
    min_r123_subroutine([r1_d,HUGE_R,r3_d])
    min_r123_subroutine([r1_d,r3_0,HUGE_R])
    min_r123_subroutine([HUGE_R,r1_0,r3_d])
    min_r123_subroutine([HUGE_R,r3_0,HUGE_R])
    min_r123_subroutine([HUGE_R,r1_0,HUGE_R])
    min_r123_subroutine([NULL_R,r2,NULL_R])
    min_r123_subroutine([NULL_R,r2,r3])
    min_r123_subroutine([r1,r2,NULL_R])
    # unlikely, but keep it just in case
    min_r123_subroutine([r1,NULL_R,r3])
    r1 = np.abs(min_r123_x[0])
    r2 = np.abs(min_r123_x[1])
    r3 = np.abs(min_r123_x[2])
    Rc1 = 1./(1./r1+1./r2)
    Rc2 = 1./(1./r1+1./r2+1./r3)
    Rc3 = 1./(1./r2+1./r3)
    # phase shift between voltage and current extrema, in degrees
    phase_1 = 360*(np.argmax(target_fullVoltage)-np.argmax(target_VCurrent))/MAX_NUM_POINTS
    phase_3 = 360*(np.argmin(target_fullVoltage)-np.argmin(target_VCurrent))/MAX_NUM_POINTS
    print('phase_1='+str(phase_1))
    print('phase_3='+str(phase_3))
    phase_1 = np.abs(phase_1)%90
    phase_3 = np.abs(phase_3)%90
    # clamp the phases away from the degenerate 0/90 degree limits
    if phase_1 < 5: phase_1 = 5
    if phase_3 < 5: phase_3 = 5
    if phase_1 > 85 : phase_1 = 85
    if phase_3 > 85: phase_3 = 85
    phase_2 = (phase_1+phase_3)/2.
    print('phase_1*='+str(phase_1))
    print('phase_2*='+str(phase_2))
    print('phase_3*='+str(phase_3))
    # NOTE: с1/с2/с3 below are Cyrillic identifiers kept from the original.
    с1 = R_to_C(Rc1*np.cos(phase_1*np.pi/180))
    с1 = C_to_norm(с1)
    с2 = R_to_C(Rc2*np.cos(phase_2*np.pi/180))
    с2 = C_to_norm(с2)
    с3 = R_to_C(Rc3*np.cos(phase_3*np.pi/180))
    с3 = C_to_norm(с3)
    Z123_sch['R1'] = r1
    Z123_sch['C1'] = с1
    Z123_sch['R2'] = r2
    Z123_sch['C2'] = с2
    Z123_sch['R3'] = r3
    Z123_sch['C3'] = с3
    str_0 ='\nr1_o={:2.1e}, r2_o={:2.1e}, r3_o={:2.1e}'.format(r1,r2,r3)
    plt.title('Пристрелка '+title+str_0)
    plt.plot(target_input_dummy,target_VCurrent,c='red')
    print('r1_o = '+str(r1))
    print('r2_o = '+str(r2))
    print('r3_o = '+str(r3))
    print('с1_o = '+str(с1))
    print('с2_o = '+str(с2))
    print('с3_o = '+str(с3))
    curr_r123 = np.zeros_like(target_fullVoltage)
    for i in range(len(curr_r123)):
        curr_r123[i] = I_from_VR1R2R3(target_fullVoltage[i],r1,r2,r3)
    plt.plot(target_input_dummy,curr_r123,c='blue')
    plt.legend(['реальные даные','Н.У. подбора'])
    plt.show()
    # plt.plot(target_input_dummy)
    # plt.show()
    # plt.plot(target_VCurrent)
    # plt.show()
    # field-by-field copy on purpose: the caller's dict reference must survive
    sch['R1'] = Z123_sch['R1']
    sch['C1'] = Z123_sch['C1']
    sch['R2'] = Z123_sch['R2']
    sch['C2'] = Z123_sch['C2']
    sch['R3'] = Z123_sch['R3']
    sch['C3'] = Z123_sch['C3']
    return
#############################################################################
#############################################################################
#############################################################################
def Sch_saveToFile(sch,fileName):
    """Persist *sch* under *fileName*: first try to render it as a circuit
    session (Session_run1 writes the .cir file via the swapped-in session
    file name); on any failure, fall back to dumping the raw dict as json.

    NOTE(review): Session_run1 expects a session dict; callers also pass
    plain schemas, which take the json fallback, and the fallback's
    ``sch['Xi_variable']`` print only exists on session dicts — confirm
    this mixed usage is intended.
    """
    global circuit_SessionFileName
    s = circuit_SessionFileName
    circuit_SessionFileName = fileName
    try:
        Session_run1(sch)
    except:
        with open(fileName, 'w') as newF:
            json.dump(sch,newF)
        print(sch['Xi_variable'])
    circuit_SessionFileName = s
    return
def init_target_by_Sch(sch):
    """Make schema *sch* the fitting target: drop the cached analytic
    approximation, write its circuit file and simulate it as the target."""
    global Z123_sch
    Z123_sch = None
    generate_circuitFile_by_values(Sch_get_Xi(sch))
    init_target_by_circuitFile()
#############################################################################
def Session_create(start_sch):
    """Wrap a starting schema in a fresh session dict."""
    return {'start_sch': start_sch}
# Evaluate the session's schema once.
def Session_run1(session):
    """Evaluate the session's current best schema (result_sch when it
    exists, otherwise start_sch): write its circuit file, simulate, and
    record the misfit both on the session and in the result globals."""
    global misfit_result, ivcmp_result
    try:
        sch = session['result_sch']
    except KeyError:
        sch = session['start_sch']
    nominals = Sch_get_Xi(sch)
    set_circuit_nominals(nominals)
    current_misfit = calculate_misfit(nominals)
    session['misfit'] = current_misfit
    misfit_result = current_misfit
    if MISFIT_METHOD == 'ivcmp':
        ivcmp_result = current_misfit
# Launch the auto-fit for a session.
def Session_run_fitter(session):
    """Run the optimiser for *session*, starting from result_sch when a
    previous result exists (it then also becomes the new start_sch),
    otherwise from start_sch.  Stores result_sch, misfit and the call
    counters back on the session."""
    global FitterCount
    FitterCount = 0
    try:
        sch = session['result_sch']
    except KeyError:
        sch = session['start_sch']
    else:
        # a previous result exists: resume from it
        session['start_sch']=sch
    var_list = session['Xi_variable']
    set_circuit_nominals(Sch_get_Xi(sch))
    set_Xi_variable(var_list)
    try:
        run_fitter()
    except:
        # best effort: the spice backend occasionally crashes mid-fit
        print('NGSPICE EXCEPTION')
    sch2 = Sch_init()
    Sch_load_from_Xi(sch2, Xi_result)
    session['result_sch'] = sch2
    session['misfit'] = misfit_result
    session['fCount'] = FitterCount
    session['mCount'] = BestMisfitCount
# Check whether this switch setting makes sense for the circuit.
def is_valid_switchers(swcode):
    """Return False for switch codes that cannot produce a useful fit.

    Bit layout (see Session_set_switchers): 1=branch1 off, 2=branch2 off,
    4=branch3 off, 8=C1, 16=D1, 32=C3, 64=D3, 128=C2.
    """
    # FIX: the original tested `swcode & (1+2+3)` i.e. `swcode & 6`, which
    # rejected every code with the branch-2 or branch-3 bit set.  "All
    # branches disabled" means bits 1+2+4 are all set simultaneously.
    if (swcode & (1+2+4)) == (1+2+4): # all branches are disabled
        return False
    # every variation is on the disabled first branch
    if (swcode==1)or(swcode==1+8)or(swcode==1+16)or(swcode==1+8+16):
        return False
    # every variation is on the disabled second branch
    if (swcode==2)or(swcode==2+128):
        return False
    # every variation is on the disabled third branch
    if (swcode==3)or(swcode==3+32)or(swcode==3+64)or(swcode==3+32+64):
        return False
    return True
# Apply the switch positions to the schema.
def Session_set_switchers(session, swcode):
    """Apply bitmask *swcode* to the session's start schema and build the
    list of nominals the optimiser may vary (session['Xi_variable']).

    Set bit = component "do not place" (dnp): 1=branch1, 2=branch2,
    4=branch3, 8=C1, 16=D1, 32=C3, 64=D3, 128=C2.
    """
    sch = session['start_sch']
    var_list = []
    if swcode & 1: # branch 1
        #print('dnp branch1')
        sch['R1'] = HUGE_R
    else:
        var_list +=['R1']
    if swcode & 2: # branch 2
        #print('dnp branch2')
        sch['R2'] = HUGE_R
    else:
        var_list += ['R2']
    if swcode & 4: # branch 3
        #print('dnp branch3')
        sch['R3'] = HUGE_R
    else:
        var_list += ['R3']
    if swcode & 8: # C1
        #print('dnp C1')
        sch['_R_C1'] = NULL_R
    else:
        sch['_R_C1'] = HUGE_R
        var_list += ['C1']
    if swcode & 16: # D1
        #print('dnp D1')
        sch['_R_D1'] = NULL_R
    else:
        sch['_R_D1'] = HUGE_R
    if swcode & 32: # C3
        #print('dnp C3')
        sch['_R_C3'] = NULL_R
    else:
        sch['_R_C3'] = HUGE_R
        var_list += ['C3']
    if swcode & 64: # D3
        #print('dnp D3')
        sch['_R_D3'] = NULL_R
    else:
        sch['_R_D3'] = HUGE_R
    if swcode & 128: # C2
        #print('dnp C2')
        sch['_R_C2'] = NULL_R
    else:
        sch['_R_C2'] = HUGE_R
        var_list += ['C2']
    session['Xi_variable'] = var_list
def Session_processAll(fileName='result.txt'):
    """Top-level fitting driver: enumerate all sensible switch codes,
    initialise a session for each via the analytic approximation, then run
    the optimiser over the sessions (best-first) until the ivcmp stop
    condition is satisfied; results are saved under *fileName*."""
    global FITTER_SUCCESS,VALUES_TOLERANCE,MAXFEV
    FITTER_SUCCESS = False
    ses_list = []
    best_ses = None
    best_misfit = 2 # deliberately larger than any real score
    ## build the list of starting sessions
    for swcode in range(255):
        if not is_valid_switchers(swcode): continue
        code2 = 0
        next_code2 = True
        while next_code2:
            sch0 = Sch_init()
            ses = Session_create(sch0)
            next_code2 = Session_init_by_approximation(ses,swcode,code2,fileName)
            code2 += 1
            ses_list += [ses]
            if ses['misfit']<IVCMP_TOLERANCE: # stop condition already met
                best_ses = ses
                best_misfit = best_ses['misfit']
                print(ses['start_sch'])
                analysis_plot('FITTER SUCCESS')
                print('FITTER_SUCCESS!!\nmisfit = '+str(best_misfit))
                #Sch_saveToFile(ses['start_sch'], fileName)
                Sch_saveToFile(best_ses, fileName)
                print('good case!!')
                return
    ## end_for
    print('pre init completed')
    ## sort the sessions so fitting starts with the most promising ones
    ses_list = sorted(ses_list,key=lambda s:s['misfit'])
    best_ses = ses_list[0]
    best_misfit = best_ses['misfit']
    ## run the auto-fit until the stop conditions are satisfied
    for ses in ses_list:
        Session_run_fitter(ses)
        if(ses['misfit']<best_misfit):
            best_misfit = ses['misfit']
            best_ses = ses
        print('misfit = '+str(best_misfit))
        #print(ses['result_sch'])
        #analysis_plot()
        if FITTER_SUCCESS:
            print(ses['result_sch'])
            analysis_plot('FITTER SUCCESS')
            print('FITTER_SUCCESS!!\nmisfit = '+str(best_misfit))
            #Sch_saveToFile(ses['result_sch'], fileName)
            Sch_saveToFile(best_ses, fileName)
            return
    ## end_for
    # the fit failed; report the best that was found
    print('FITTER routine unsuccessfull\nmisfit = '+str(best_ses['misfit']))
    #Sch_saveToFile(best_ses['result_sch'], fileName)
    Sch_saveToFile(best_ses, fileName)
    Session_run1(best_ses)
    analysis_plot('FITTER routine unsuccessfull')
def open_board(path):
    """Load a json board dump from *path*.

    FIX: the original had an unreachable ``return None`` after the in-with
    return, yet callers explicitly check for None; now any open/parse
    failure actually yields None instead of propagating.

    :return: the parsed dict, or None when the file cannot be read/parsed.
    """
    try:
        with open(path, "r") as dump_file:
            return json.load(dump_file)
    except (OSError, ValueError):
        # ValueError covers json.JSONDecodeError for malformed dumps.
        return None
#############################################################################
def test2():
    """Self-test: target is an R1-C1 branch plus a plain R3 branch."""
    sch = Sch_init()
    sch.update({'R1': 1e2, 'C1': 1e-5, '_R_C1': HUGE_R, 'R3': 1e3})
    init_target_by_Sch(sch)
    print('test2()')
    Session_processAll('test2.txt')
def test3():
    """Self-test: target is an R2-C2 branch."""
    sch = Sch_init()
    sch.update({'R2': 1e2, '_R_C2': HUGE_R, 'C2': 1e-7})
    init_target_by_Sch(sch)
    print('test3()')
    Session_processAll('test3.txt')
def test4():
    """Self-test: target is an R2-C2 branch plus a plain R3 branch."""
    sch = Sch_init()
    sch.update({'R2': 1e2, '_R_C2': HUGE_R, 'C2': 1e-7, 'R3': 1e3})
    init_target_by_Sch(sch)
    print('test4()')
    Session_processAll('test4.txt')
def test5():
    """Self-test: R1 with jumpered C1/D1, shorted R2 and an R2-C2 branch."""
    sch = Sch_init()
    sch.update({
        'R1': 1e2,
        'C1': NONE_C,
        '_R_C1': NULL_R,
        '_R_D1': NULL_R,
        'R2': NULL_R,
        '_R_C2': HUGE_R,
        'C2': 1e-7,
    })
    init_target_by_Sch(sch)
    print('test5()')
    Session_processAll('test5.txt')
def test_data_jsn(jsn_data,N,fileName='result.txt'):
    """Fit the equivalent circuit to record *N* of json dump *jsn_data*,
    writing results to *fileName*.

    FIX: dropped a no-op ``global Z123_sch`` declaration — the name was
    never assigned in this function, so the statement had no effect.
    """
    gc.collect()
    print('\n')
    print(jsn_data)
    InitF, InitV, InitRcs, target_voltages, target_currents = init_target_from_jsnFile(jsn_data, N)
    init_target_Data(target_voltages, target_currents, initF=InitF, initV=InitV, initRcs=InitRcs, cycle=100)
    Session_processAll(fileName)
def test_circuit(circuitFile,resultFile = 'result.txt'):
    """Fit the equivalent circuit against the IV curve simulated from an
    existing .cir file *circuitFile*; results go to *resultFile*."""
    gc.collect()
    print('\n')
    print(circuitFile)
    init_target_by_circuitFile(circuitFile)
    Session_processAll(resultFile)
##############################################################
def main():
    """Entry point: fit record k=4 of the bundled 100 kHz board dump.
    (Paths are Windows-style and hard-coded for the sample data.)"""
    #for k in range(10):
    #    test_data_jsn("data\\100hz.json",k,'data\\100hz_{}.txt'.format(k))
    k = 4
    test_data_jsn("vs_circuit_solver\\data\\100khz.json",k,'vs_circuit_solver\\data\\100khz_{}.txt'.format(k))
if __name__=='__main__':
    main()
##############################################################################
|
{"hexsha": "3dadbecacffb0a124c3eb501a4f9e43915d34fb7", "size": 37787, "ext": "py", "lang": "Python", "max_stars_repo_path": "vs_circuit_solver/vs_circuit_solver.py", "max_stars_repo_name": "EPC-MSU/VS-circuit-solver", "max_stars_repo_head_hexsha": "7d877dcc6b5aed5919c8f0833fc003a7fa651bc5", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vs_circuit_solver/vs_circuit_solver.py", "max_issues_repo_name": "EPC-MSU/VS-circuit-solver", "max_issues_repo_head_hexsha": "7d877dcc6b5aed5919c8f0833fc003a7fa651bc5", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vs_circuit_solver/vs_circuit_solver.py", "max_forks_repo_name": "EPC-MSU/VS-circuit-solver", "max_forks_repo_head_hexsha": "7d877dcc6b5aed5919c8f0833fc003a7fa651bc5", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.894778481, "max_line_length": 215, "alphanum_fraction": 0.605128748, "include": true, "reason": "import numpy,import scipy", "num_tokens": 12270}
|
import argparse
import os, sys
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision
from torchvision import transforms
from PIL import Image
import numpy as np
from tqdm import tqdm
from sklearn.cluster import KMeans
from scipy.stats import ortho_group
from utlis import set_args, set_optimizer
from utlis import save_model
from utlis import AverageMeter
from utlis import txt_logger
from utlis import adjust_learning_rate
from network.resnet_deconv import SupConResNet
from losses import SupConLoss
from data_utlis import SpatialDataset
from data_utlis import MyTransform
def costomize_args(args):
    """Hook for per-experiment argument tweaks; currently a no-op that
    returns *args* unchanged.  (Misspelled name kept — callers use it.)"""
    return args
def set_dataloader(args):
    """Use args.dataset to decide which dataset to use and build loaders.

    :param args: parsed arguments; reads dataset, loading_path/data_root,
        data_file_name, batch_size and num_workers.
    :returns: (train_dataloader, test_dataloader, train_dataset, test_dataset)
    :raises NotImplementedError: for an unsupported args.dataset.
    """
    mytransform = MyTransform(args)
    train_transform = mytransform.train_transform(ssl=True)
    eval_transform = mytransform.val_transform()
    if args.dataset == 'mnist':
        train_dataset = torchvision.datasets.MNIST(root=args.loading_path, train=True, download=True,
                                                   transform=train_transform)
        test_dataset = torchvision.datasets.MNIST(root=args.loading_path, train=False, download=True,
                                                  transform=eval_transform)
    elif args.dataset == 'spatial':
        train_dataset = SpatialDataset(args.data_root, args.data_file_name, return_idx=True, transform=train_transform)
        test_dataset = SpatialDataset(args.data_root, args.data_file_name, return_idx=True, transform=eval_transform)
    else:
        # FIX: `NotImplemented` is a constant, not an exception class;
        # `raise NotImplemented(...)` would itself fail with a TypeError.
        raise NotImplementedError("dataset {} is not implemented.".format(args.dataset))
    # train loader
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.num_workers, pin_memory=True)
    # test loader
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.num_workers, pin_memory=True)
    return train_dataloader, test_dataloader, train_dataset, test_dataset
def get_model(args, logger):
    """Build the SupCon model and loss, move them to GPU when available,
    and optionally resume weights from args.resume_model_path.

    :returns: (model, criterion)
    """
    model = SupConResNet(name=args.model)
    criterion = SupConLoss(temperature=args.temp)
    if torch.cuda.is_available():
        if torch.cuda.device_count() > 1:
            print("Used devices: {}".format(torch.cuda.device_count()))
            # only the encoder is wrapped; the projection head stays unwrapped
            model.encoder = torch.nn.DataParallel(model.encoder)
        model = model.cuda()
        criterion = criterion.cuda()
        cudnn.benchmark = True
    if args.resume_model_path:
        # get pre ssl epoch
        ckpt = torch.load(args.resume_model_path, map_location='cpu')
        state_dict = ckpt['model']
        # Rewrite checkpoint keys to match the current module layout.
        new_state_dict = {}
        for k, v in state_dict.items():
            if torch.cuda.device_count() > 1:
                print(k)
                # NOTE(review): in the multi-GPU branch the key is printed
                # but left unchanged (the remap below is commented out) —
                # confirm single-GPU checkpoints load correctly here.
                #if k.split(".")[0] != 'head':
                #    k = ".".join([k.split(".")[0], "module"] + k.split(".")[1:])
            else:
                # strip the DataParallel prefix for single-device loading
                k = k.replace("module.", "")
            new_state_dict[k] = v
        state_dict = new_state_dict
        model.load_state_dict(state_dict)
        logger.logger.info("Model loaded! Pretrained from epoch {}".format(args.pre_ssl_epoch))
    return model, criterion
def train(train_loader, model, optimizer, epoch, args, scheduler, criterion, label_store):
    """One epoch of supervised-contrastive training on K-means pseudo-labels.

    params:
    - label_store: [len(dataset),] array; entry i is sample i's current
      pseudo-class (cluster id) used as the contrastive label.
    :returns: average loss over the epoch.
    """
    model.train()
    batch_time = AverageMeter()  # NOTE(review): allocated but never updated
    data_time = AverageMeter()   # NOTE(review): allocated but never updated
    losses = AverageMeter()
    for _, (images, _, idx) in tqdm(enumerate(train_loader), total=len(train_loader)):
        """params:
        img: [bz, C, H, W]
        labels: [bz,]
        idx: [bz,]
        """
        # two augmented views per sample, stacked along the batch axis
        images = torch.cat([images[0], images[1]], dim=0)
        images = images.cuda()
        # labels = labels.cuda()
        bsz = idx.shape[0]
        # convert dataset indices to their current pseudo-labels
        labels = torch.LongTensor(label_store[idx]).cuda() # [bz, ]
        features = model(images)
        # re-pair the two views: [bz, 2, feat_dim] as SupConLoss expects
        f1, f2 = torch.split(features, [bsz, bsz], dim=0)
        features = torch.cat([f1.unsqueeze(1), f2.unsqueeze(1)], dim=1)
        # compute loss
        loss = criterion(features, labels)
        # update metric
        losses.update(loss.item(), bsz)
        # SGD
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # NOTE(review): plateau scheduler stepped per batch, not per epoch
        # — confirm that is intended.
        if args.lr_scheduling == 'reduce': # reduce on plateau
            scheduler.step(loss)
    return losses.avg
def kmean_cluster(args, model, k, train_loader, label_store):
    """Run K-means on the model's current feature space and refresh the
    pseudo-labels in *label_store* (written back by dataset index, so a
    shuffled loader is fine).

    :param k: number of clusters.
    :returns: (label_store, cluster centers, K-means inertia)
    """
    features_list = []
    idxs = []
    model.eval()
    for _, (images, _, idx) in tqdm(enumerate(train_loader), total=len(train_loader)):
        with torch.no_grad():
            images = images.cuda()
            features = model(images)
            features_list.append(features.cpu().numpy())
            idxs.append(idx)
    features_np = np.concatenate(features_list, axis=0)
    idxs = np.concatenate(idxs, axis=0)
    print("clustering on ", features_np.shape)
    k_means = KMeans(n_clusters=k, n_init=20)
    k_means.fit(features_np)
    new_labels = k_means.labels_
    kmeans_loss = k_means.inertia_
    centers = k_means.cluster_centers_
    # print("idx max{} min {}; label_store {}".format(idxs.max(), idxs.min(), label_store.shape))
    # scatter the new cluster ids back to their dataset positions
    label_store[idxs] = new_labels
    print(f"Done clustering (cluster loss) {kmeans_loss} ------------")
    return label_store, centers, kmeans_loss
def main():
    """Entry point: alternate contrastive training with periodic K-means re-labeling.

    Flow: build loaders/model/optimizer -> initial K-means to seed pseudo-labels ->
    per-epoch training, with checkpoints/label dumps every ``save_freq`` epochs and
    a fresh clustering pass every ``kmeans_freq`` epochs.
    """
    args = set_args()
    args = costomize_args(args)
    # log
    scalar_logger = txt_logger(args.saving_path, args, 'python ' + ' '.join(sys.argv))
    # data loader
    train_loader, eval_loader, train_dataset, eval_dataset = set_dataloader(args)
    model, criterion = get_model(args, scalar_logger)
    optimizer, scheduler = set_optimizer(args, model)
    # training routine
    # resume model path
    if args.resume_model_path:
        start = args.pre_ssl_epoch
    else:
        start = 0
    kmeans_losses = []
    # k-means clustering for initialization
    print("Initialization (K-means) ---------")
    # One pseudo-label slot per training sample, filled in by kmean_cluster below.
    label_store = np.zeros(len(train_dataset), dtype=np.int32)
    print("---label_store size", label_store.shape)
    k_clusters = args.k_clusters
    # NOTE(review): clustering runs over eval_loader while train() indexes
    # label_store with train_loader sample indices -- presumably both loaders
    # cover the same dataset with the same indexing; confirm in set_dataloader.
    label_store, centers, kmeans_loss = kmean_cluster(args, model, k_clusters, eval_loader, label_store)
    kmeans_losses.append(kmeans_loss) # initial kmeans loss
    # train
    losses = []
    print("Begin Training -------------------------")
    for epoch in range(start + 1, args.epochs + 1):
        # adj learning rate
        adjust_learning_rate(args, optimizer, epoch)
        # train for one epoch
        loss = train(train_loader, model, optimizer, epoch, args, scheduler, criterion, label_store)
        losses.append(loss)
        # file logger
        scalar_logger.log_value(epoch, ('loss', loss),
                                ('learning_rate', optimizer.param_groups[0]['lr']),
                                ('lc_len', len(np.unique(label_store))),
                                )
        if (epoch + 1) % args.save_freq == 0 or epoch == args.epochs:
            save_file = os.path.join(
                args.saving_path, 'ckpt_epoch_{}.pth'.format(epoch+1))
            save_model(model, optimizer, args, epoch, save_file)
            # save loss
            np.save(os.path.join(args.saving_path, "train_loss.npy"), losses)
            # save latent class assignment
            save_file_lat_class_assign = os.path.join(args.saving_path, 'latent_class_{}.npy'.format(epoch+1))
            np.save(save_file_lat_class_assign, label_store)
            # log latent class
            unique_latent_class = np.unique(label_store)
            latent_class_stats = {}
            for i in unique_latent_class:
                # Count of samples currently assigned to latent class i.
                latent_class_stats[i] = np.where(label_store == i)[0].shape[0]
            scalar_logger.log_value(epoch, ('lc_assign', latent_class_stats))
        if (epoch + 1) % args.kmeans_freq == 0:
            print("Perform (K-means) ---------")
            # Periodically re-cluster features so pseudo-labels track the encoder.
            label_store, centers, kmeans_loss = kmean_cluster(args, model, k_clusters, eval_loader, label_store)
            kmeans_losses.append(kmeans_loss)
            np.save(os.path.join(args.saving_path, "Kmeans_loss.npy"), kmeans_losses)
    return
# Script entry point: run the full train-with-pseudo-label pipeline.
if __name__ == '__main__':
    main()
|
{"hexsha": "43d1043317898b4642b6c414573dc3fda193df8f", "size": 8685, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/trainMyPCL.py", "max_stars_repo_name": "Crazy-Jack/SpatialExpGeneCluster", "max_stars_repo_head_hexsha": "9e57c308d1c577a936a2358d0641c65b8130034f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/trainMyPCL.py", "max_issues_repo_name": "Crazy-Jack/SpatialExpGeneCluster", "max_issues_repo_head_hexsha": "9e57c308d1c577a936a2358d0641c65b8130034f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/trainMyPCL.py", "max_forks_repo_name": "Crazy-Jack/SpatialExpGeneCluster", "max_forks_repo_head_hexsha": "9e57c308d1c577a936a2358d0641c65b8130034f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8884297521, "max_line_length": 119, "alphanum_fraction": 0.6324697755, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1929}
|
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Auto Mode TF Cross Layer Equalization """
from typing import Tuple, List, Union, Dict
import numpy as np
import tensorflow as tf
import libpymo
from aimet_tensorflow.common.connectedgraph import ConnectedGraph
from aimet_tensorflow.common.operation import Op
from aimet_tensorflow.batch_norm_fold import fold_all_batch_norms
from aimet_tensorflow.utils.graph_saver import save_and_load_graph
from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils
import aimet_tensorflow.utils.op.relu as ReluUtils
from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils
from aimet_common.utils import AimetLogger
# Module-wide logger scoped to the cross-layer-equalization log area.
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.CrosslayerEqualization)
# A scale factor is one array (plain conv pair) or a tuple of arrays
# (depthwise-separable triplet -- S_12 and S_23).
ScaleFactor = Union[np.ndarray, Tuple[np.ndarray]]
# A CLS set is a pair of consecutive conv ops, or a triplet when the middle
# layer is a depthwise convolution.
ClsSet = Union[Tuple[tf.Operation, tf.Operation],
               Tuple[tf.Operation, tf.Operation, tf.Operation]]
class GraphSearchUtils:
    """ Implements graph search utils required by CLE feature"""
    def __init__(self, model: tf.Graph, start_op_names: Union[str, List[str]], output_op_names: Union[str, List[str]]):
        """Build a ConnectedGraph over the given TF graph, normalizing single op
        names to one-element lists."""
        if isinstance(start_op_names, str):
            start_op_names = [start_op_names]
        if isinstance(output_op_names, str):
            output_op_names = [output_op_names]
        self._connected_graph = ConnectedGraph(model, start_op_names, output_op_names)
    def find_and_replace_relu6_with_relu(self, sess: tf.Session) -> tf.Session:
        """
        finds and replaces Relu6 ops with Relu
        :param sess: active TF session whose graph is modified in place
        :return: updated session (re-saved and re-loaded so the change sticks)
        """
        for op in self._connected_graph.get_all_ops().values():
            if op.type in ['Relu6']:
                # send the session here, so we make the update on sess.graph (active graph)
                ReluUtils.replace_relu6_with_relu(sess, op.get_module())
        # in the end update the session
        after_relu_replace_sess = save_and_load_graph('./replace_relu6_with_relu', sess)
        return after_relu_replace_sess
    @staticmethod
    def find_downstream_layer_groups_to_scale(op, layer_groups, visited_nodes, current_group=None):
        """
        Populates all the layer groups eligible for cross layer scaling.
        Depth-first recursion: conv ops extend the current group; any op type
        outside the pass-through set (Relu/Pad/Identity) terminates it.
        :param op: starting op
        :param layer_groups: layer_groups as empty list; appended to in place
        :param visited_nodes: all the ops that have been visited; appended to in place
        :param current_group: op groups accumulated so far on this path
        :return: None. Updates layer_groups[] if groups are found.
        """
        if not current_group:
            current_group = []
        if op in visited_nodes:
            return
        visited_nodes.append(op)
        logger.debug("Visiting node: {%s}", op.dotted_name)
        # If current node is Conv2D, add to the current group
        if op.type in ['Conv2D', 'DepthwiseConv2dNative']:
            current_group.append(op)
        # Terminating condition for current group: anything that is not a conv or a
        # pass-through op breaks the chain, so flush groups of 2+ layers.
        if not (op.type in ['Conv2D', 'DepthwiseConv2dNative', 'Relu', 'Pad', 'Identity']):
            if (len(current_group) > 1) and (current_group not in layer_groups):
                layer_groups.append(current_group)
                node_set = [op.dotted_name for op in current_group]
                logger.debug("Added new set of nodes: {%s}", node_set)
            current_group = []
        if op.output:
            for consumer in op.output.consumers:
                GraphSearchUtils.find_downstream_layer_groups_to_scale(consumer, layer_groups, visited_nodes,
                                                                       current_group)
        # Reached a leaf.. See if the current group has something to grab
        if (len(current_group) > 1) and (current_group not in layer_groups):
            layer_groups.append(current_group)
            node_set = [op.dotted_name for op in current_group]
            logger.debug("Added new set of nodes: {%s}", node_set)
    def find_layer_groups_to_scale_as_conn_ops(self) -> List[List[Op]]:
        """
        :return: List of groups of layers. Each group can be independently equalized
        """
        # Find the input node(s) in the graph
        input_nodes = []
        for op in self._connected_graph.get_all_ops().values():
            if op.inputs and op.inputs[0].is_model_input:
                input_nodes.append(op)
        layer_groups = []
        visited_nodes = []
        for op in input_nodes:
            self.find_downstream_layer_groups_to_scale(op=op, layer_groups=layer_groups,
                                                       visited_nodes=visited_nodes)
        return layer_groups
    def find_layer_groups_to_scale(self):
        """
        Find layer groups for scaling as tf ops
        :return: groups for scaling as tf ops
        """
        layer_groups_as_conn_graph_ops = self.find_layer_groups_to_scale_as_conn_ops()
        layer_groups_as_tf_ops = self.convert_conn_graph_ops_to_tf_op(layer_groups_as_conn_graph_ops)
        return layer_groups_as_tf_ops
    @staticmethod
    def convert_conn_graph_ops_to_tf_op(op_groups: List[List[Op]]) -> \
            List[List[tf.Operation]]:
        """
        Helper function to get op list as tf.Operation type to be usable for updating/scaling weights and biases
        using generic apis for tensor updates.
        :param op_groups: list of op groups as TfOperation type of used by Connected Graph
        :return: list of op groups as tf.Operation (standard TF op type)
        """
        layer_groups_as_tf_ops = []
        for ops in op_groups:
            curr_group = []
            for op in ops:
                curr_group.append(op.get_module())
            layer_groups_as_tf_ops.append(curr_group)
        return layer_groups_as_tf_ops
    @staticmethod
    def convert_layer_group_to_cls_sets(layer_group):
        """
        Helper function to convert a layer group to a list of cls sets.
        NOTE: consumes ``layer_group`` (pops it empty) -- callers must not rely
        on the list afterwards.
        :param layer_group: Given layer group to convert
        :return: List of cls sets (pairs, or triplets around a depthwise conv)
        """
        cls_sets = []
        prev_layer_to_scale = layer_group.pop(0)
        while layer_group:
            next_layer_to_scale = layer_group.pop(0)
            if next_layer_to_scale.type in ['DepthwiseConv2dNative']:
                # Depthwise conv is equalized together with the conv on each side.
                next_non_depthwise_conv_layer = layer_group.pop(0)
                cls_sets.append((prev_layer_to_scale, next_layer_to_scale, next_non_depthwise_conv_layer))
                prev_layer_to_scale = next_non_depthwise_conv_layer
            else:
                cls_sets.append((prev_layer_to_scale, next_layer_to_scale))
                prev_layer_to_scale = next_layer_to_scale
        return cls_sets
    @staticmethod
    def is_relu_activation_present_in_cls_sets(cls_sets: List[ClsSet]) -> List[bool]:
        """
        check if there is Relu activations between cls sets
        :param cls_sets: cls conv op pairs
        :return: list of relu activation present flags (True or False)
                 corresponding to input cls_sets list; a single bool per pair,
                 a tuple of bools per triplet
        """
        is_relu_activation_in_cls_sets = []
        for cls_set in cls_sets:
            # We need to check activation functions for all layers but the last one in the set
            # Because we are only interested in checking activation functions between the layers we will scale
            cls_set = cls_set[:-1]
            is_relu_activation_in_cls_set = ()
            for conv_op in cls_set:
                is_relu_activation_in_cls_set += (ReluUtils.does_conv_have_relu_activation(conv_op), )
            if len(is_relu_activation_in_cls_set) == 1:
                # Unwrap the 1-tuple so pair sets carry a plain bool.
                is_relu_activation_in_cls_set = is_relu_activation_in_cls_set[0]
            is_relu_activation_in_cls_sets.append(is_relu_activation_in_cls_set)
        return is_relu_activation_in_cls_sets
    @staticmethod
    def map_op_names_to_ops(sess: tf.Session) -> Dict[str, tf.Operation]:
        """
        After the fold and cls , the graph is updated, so are the ops
        So, we need a way to map ops we stored on graph we began with, to perform
        high bias fold operation on latest ops in the updated graph.
        :param sess: active TF session (tf.Session type)
        :return: a dictionary of op names mapped to ops in the given new session.
        Note : only stores info pertaining to bn and conv ops required by high bias fold.
        """
        tf_names_op_dict = {}
        with sess.graph.as_default():
            op_list = sess.graph.get_operations()
            for op in op_list:
                if op.type in ['Conv2D', 'DepthwiseConv2dNative', 'FusedBatchNormV3']:
                    tf_names_op_dict[op.name] = op
        return tf_names_op_dict
class ClsSetInfo:
    """
    Holds the layers of one CLS set together with the scaling factors applied to
    them and the relu flags needed later by high-bias fold.
    """
    class ClsSetLayerPairInfo:
        """
        One scaled layer pair and the metadata recorded for it.
        :param layer1: first layer of the pair, as tf.Operation
        :param layer2: second layer of the pair, as tf.Operation
        :param scale_factor: scale factors applied between the layers, as np.ndarray
        :param relu_activation_between_layers: flag(s) indicating whether a Relu
               activation sits between the two layers.
        """
        def __init__(self, layer1: tf.Operation, layer2: tf.Operation,
                     scale_factor: np.ndarray, relu_activation_between_layers):
            self.layer1 = layer1
            self.layer2 = layer2
            self.scale_factor = scale_factor
            self.relu_activation_between_layers = relu_activation_between_layers
    def __init__(self, cls_pair_1: ClsSetLayerPairInfo, cls_pair_2: ClsSetLayerPairInfo = None):
        # A depthwise-separable triplet contributes two pairs; a plain conv pair, one.
        pairs = [cls_pair_1]
        if cls_pair_2:
            pairs.append(cls_pair_2)
        self.cls_pair_info_list = pairs
    @staticmethod
    def map_cls_sets_to_new_session(tf_names_op_dict: Dict[str, tf.Operation], cls_set_info_list):
        """
        Re-point the layer references of every stored pair at the ops of a freshly
        loaded session, looked up by op name.
        :param tf_names_op_dict: map of tf op names to ops in the new session
        :param cls_set_info_list: list of ClsSetInfo type, updated in-place
        :return: None
        """
        for info in cls_set_info_list:
            for pair in info.cls_pair_info_list:
                pair.layer1 = tf_names_op_dict[pair.layer1.name]
                pair.layer2 = tf_names_op_dict[pair.layer2.name]
class CrossLayerScaling:
    """ implements auto mode cross-layer-scaling technique to a model """
    @staticmethod
    def scale_cls_sets(sess: tf.Session, cls_sets: List[ClsSet]) -> List[ScaleFactor]:
        """
        Scale multiple CLS sets
        :param sess: Current session
        :param cls_sets: List of CLS sets
        :return: Scaling factors calculated and applied for each CLS set in order
        """
        scale_factor_list = []
        for cls_set in cls_sets:
            scale_factor = CrossLayerScaling.scale_cls_set(sess, cls_set)
            scale_factor_list.append(scale_factor)
        return scale_factor_list
    @staticmethod
    def scale_cls_set(sess: tf.Session, cls_set: ClsSet) -> ScaleFactor:
        """
        Scale a CLS set
        :param sess: Current session
        :param cls_set: Either a pair or regular conv layers or a triplet of depthwise separable layers
        :return: Scaling factor calculated and applied (a tuple of two for triplets)
        """
        if len(cls_set) == 3:
            scale_factor = CrossLayerScaling.scale_cls_set_with_depthwise_layers(sess, cls_set)
        else:
            scale_factor = CrossLayerScaling.scale_cls_set_with_conv_layers(sess, cls_set)
        return scale_factor
    @staticmethod
    def scale_cls_set_with_conv_layers(model: tf.Session, cls_set: Tuple[tf.Operation, tf.Operation]) -> np.ndarray:
        """
        API to invoke equalize layer params (update for weights and bias is in place)
        :param model: active TF session
        :param cls_set: Consecutive Conv layers Tuple whose weights and biases need to be equalized
        :return: Scaling factor S_12 for each conv layer pair: numpy array
        :raises ValueError: if any op in the set is not a conv layer
        """
        with model.graph.as_default():
            for module in cls_set:
                if module.type not in ['Conv2D', 'DepthwiseConv2dNative']:
                    raise ValueError("Only conv layers are supported for cross layer equalization")
            # Create structs for holding layer weights and bias parameters
            prev_layer_params = libpymo.EqualizationParams()
            curr_layer_params = libpymo.EqualizationParams()
            # send as [Noc, Nic, kh, kw], TF format is [kh, kw, Nic, Noc]
            prev_layer_params.weight = WeightTensorUtils.get_tensor_as_numpy_data(model, cls_set[0]). \
                transpose((3, 2, 0, 1)).reshape(-1)
            weight_shape = WeightTensorUtils.get_tensor_shape(cls_set[0])
            prev_layer_params.weightShape = [weight_shape[3], weight_shape[2], weight_shape[0], weight_shape[1]]
            prev_layer_params.isBiasNone = BiasUtils.is_bias_none(cls_set[0])
            # send as [Noc, Nic, kh, kw], TF format is [kh, kw, Nic, Noc]
            curr_layer_params.weight = WeightTensorUtils.get_tensor_as_numpy_data(model, cls_set[1]). \
                transpose((3, 2, 0, 1)).reshape(-1)
            weight_shape = WeightTensorUtils.get_tensor_shape(cls_set[1])
            curr_layer_params.weightShape = [weight_shape[3], weight_shape[2], weight_shape[0], weight_shape[1]]
            if not BiasUtils.is_bias_none(cls_set[0]):
                prev_layer_params.bias = BiasUtils.get_bias_as_numpy_data(model, cls_set[0]).reshape(-1)
            else:
                prev_layer_params.isBiasNone = True
            # libpymo computes S_12 and rescales the two weight/bias buffers in place.
            scaling_factor = libpymo.scaleLayerParams(prev_layer_params, curr_layer_params)
            # convert received formats back to TF
            # TF format is [kh, kw, Nic, Noc]
            numpy_weight_reshaped = np.reshape(prev_layer_params.weight, prev_layer_params.weightShape). \
                transpose((2, 3, 1, 0))
            WeightTensorUtils.update_tensor_for_op(model, cls_set[0], numpy_weight_reshaped)
            numpy_weight_reshaped = np.reshape(curr_layer_params.weight, curr_layer_params.weightShape). \
                transpose((2, 3, 1, 0))
            WeightTensorUtils.update_tensor_for_op(model, cls_set[1], numpy_weight_reshaped)
            if not BiasUtils.is_bias_none(cls_set[0]):
                numpy_bias_reshaped = np.reshape(prev_layer_params.bias, BiasUtils.get_shape(cls_set[0]))
                BiasUtils.update_bias_for_op(model, cls_set[0], numpy_bias_reshaped)
        return scaling_factor
    @staticmethod
    def scale_cls_set_with_depthwise_layers(model: tf.Session, cls_set: Tuple[tf.Operation,
                                                                              tf.Operation,
                                                                              tf.Operation]) ->\
            [np.ndarray, np.ndarray]:
        """
        API to invoke equalize layer params for depth wise separable layers(update for weights and bias is in place)
        :param model: active tf session
        :param cls_set: Consecutive Conv layers whose weights and biases need to be equalized.
                        Second Conv layer is a depth-wise conv and third conv layer is point-wise conv
        :return: Scaling factors S_12 and S_23 : numpy arrays
        :raises ValueError: if any op in the set is not a conv layer
        """
        # make sure you define the session and graph scope before making any graph updates.
        with model.graph.as_default():
            for module in cls_set:
                if module.type not in ['Conv2D', 'DepthwiseConv2dNative']:
                    raise ValueError("Only conv layers are supported for cross layer equalization")
            # Create structs for holding layer weights and bias parameters
            prev_layer_params = libpymo.EqualizationParams()
            curr_layer_params = libpymo.EqualizationParams()
            next_layer_params = libpymo.EqualizationParams()
            # send as [Noc, Nic, kh, kw], TF format is [kh, kw, Nic, Noc]
            prev_layer_params.weight = WeightTensorUtils.get_tensor_as_numpy_data(model, cls_set[0]). \
                transpose((3, 2, 0, 1)).reshape(-1)
            weight_shape = WeightTensorUtils.get_tensor_shape(cls_set[0])
            prev_layer_params.weightShape = [weight_shape[3], weight_shape[2], weight_shape[0], weight_shape[1]]
            prev_layer_params.isBiasNone = BiasUtils.is_bias_none(cls_set[0])
            # depthwise layer outputs is set to 1 in TF
            # send as [Nic, Noc, kh, kw], TF format is [kh, kw, Nic, Noc]
            curr_layer_params.weight = WeightTensorUtils.get_tensor_as_numpy_data(model, cls_set[1]). \
                transpose((2, 3, 0, 1)).reshape(-1)
            weight_shape = WeightTensorUtils.get_tensor_shape(cls_set[1])
            # depthwise layer outputs is set to 1 in TF
            # send as [Nic, Noc, kh, kw], TF format is [kh, kw, Nic, Noc]
            curr_layer_params.weightShape = [weight_shape[2], weight_shape[3], weight_shape[0], weight_shape[1]]
            curr_layer_params.isBiasNone = BiasUtils.is_bias_none(cls_set[1])
            # send as [Noc, Nic, kh, kw] , TF format is [kh, kw, Nic, Noc]
            next_layer_params.weight = WeightTensorUtils.get_tensor_as_numpy_data(model, cls_set[2]). \
                transpose((3, 2, 0, 1)).reshape(-1)
            weight_shape = WeightTensorUtils.get_tensor_shape(cls_set[2])
            next_layer_params.weightShape = [weight_shape[3], weight_shape[2], weight_shape[0], weight_shape[1]]
            if not BiasUtils.is_bias_none(cls_set[0]):
                prev_layer_params.bias = BiasUtils.get_bias_as_numpy_data(model, cls_set[0]).reshape(-1)
            else:
                prev_layer_params.isBiasNone = True
            if not BiasUtils.is_bias_none(cls_set[1]):
                curr_layer_params.bias = BiasUtils.get_bias_as_numpy_data(model, cls_set[1]).reshape(-1)
            else:
                curr_layer_params.isBiasNone = True
            # libpymo computes S_12/S_23 and rescales the three buffers in place.
            scaling_params = libpymo.scaleDepthWiseSeparableLayer(prev_layer_params, curr_layer_params,
                                                                  next_layer_params)
            # convert received formats back to TF
            # TF format is [kh, kw, Nic, Noc]
            numpy_weight_reshaped_0 = np.reshape(prev_layer_params.weight, prev_layer_params.weightShape). \
                transpose((2, 3, 1, 0))
            WeightTensorUtils.update_tensor_for_op(model, cls_set[0], numpy_weight_reshaped_0)
            # depthwise layer
            numpy_weight_reshaped_1 = np.reshape(curr_layer_params.weight, curr_layer_params.weightShape). \
                transpose((2, 3, 0, 1))
            WeightTensorUtils.update_tensor_for_op(model, cls_set[1], numpy_weight_reshaped_1)
            numpy_weight_reshaped_2 = np.reshape(next_layer_params.weight, next_layer_params.weightShape). \
                transpose((2, 3, 1, 0))
            WeightTensorUtils.update_tensor_for_op(model, cls_set[2], numpy_weight_reshaped_2)
            if not BiasUtils.is_bias_none(cls_set[0]):
                numpy_bias_reshaped = np.reshape(prev_layer_params.bias, BiasUtils.get_shape(cls_set[0]))
                BiasUtils.update_bias_for_op(model, cls_set[0], numpy_bias_reshaped)
            if not BiasUtils.is_bias_none(cls_set[1]):
                numpy_bias_reshaped = np.reshape(curr_layer_params.bias, BiasUtils.get_shape(cls_set[1]))
                BiasUtils.update_bias_for_op(model, cls_set[1], numpy_bias_reshaped)
        return scaling_params.scalingMatrix12, scaling_params.scalingMatrix23
    @staticmethod
    def create_cls_set_info_list(cls_sets: List[ClsSet], scale_factors: List[ScaleFactor],
                                 is_relu_activation_in_cls_sets):
        """
        Binds information from three separate lists into one [ClsSetInfo] data-structure
        :param cls_sets: List of CLS sets
        :param scale_factors: Scale-factors for each cls-set
        :param is_relu_activation_in_cls_sets: Information if there is relu activation in each cls-set
        :return: List of ClsSetInfo
        """
        cls_set_info_list = []
        assert len(cls_sets) == len(scale_factors) == len(is_relu_activation_in_cls_sets)
        for index, cls_set in enumerate(cls_sets):
            if isinstance(scale_factors[index], tuple):
                # If we are dealing with a triplet of layers, then we should have 2 scale factors and 2 relu flags
                # Assert that this is true
                assert len(cls_set) == 3
                assert len(scale_factors[index]) == len(is_relu_activation_in_cls_sets[index]) == 2
                cls_pair_1 = ClsSetInfo.ClsSetLayerPairInfo(cls_set[0], cls_set[1], scale_factors[index][0],
                                                            is_relu_activation_in_cls_sets[index][0])
                cls_pair_2 = ClsSetInfo.ClsSetLayerPairInfo(cls_set[1], cls_set[2], scale_factors[index][1],
                                                            is_relu_activation_in_cls_sets[index][1])
                cls_set_info = ClsSetInfo(cls_pair_1, cls_pair_2)
            else:
                cls_pair = ClsSetInfo.ClsSetLayerPairInfo(cls_set[0], cls_set[1], scale_factors[index],
                                                          is_relu_activation_in_cls_sets[index])
                cls_set_info = ClsSetInfo(cls_pair)
            cls_set_info_list.append(cls_set_info)
        return cls_set_info_list
    @staticmethod
    def scale_model(sess: tf.Session, input_op_names: Union[str, List[str]], output_op_names: Union[str, List[str]])\
            -> (tf.Session, List[ClsSetInfo]):
        """
        Uses cross-layer scaling to scale all applicable layers in the given model
        :param sess: Session containing graph to scale
        :param input_op_names: Names of starting ops in the model
        :param output_op_names: List of output op names of the model, used to help ConnectedGraph determine valid ops
        (to ignore training ops for example). If None, all ops in the model are considered valid.
        :return: updated session, CLS information for each CLS set
        """
        if isinstance(input_op_names, str):
            input_op_names = [input_op_names]
        if isinstance(output_op_names, str):
            output_op_names = [output_op_names]
        # Find layer groups
        graph_search = GraphSearchUtils(sess.graph, input_op_names, output_op_names)
        layer_groups_as_tf_ops = graph_search.find_layer_groups_to_scale()
        # Find cls sets from the layer groups
        cls_sets = []
        for layer_group in layer_groups_as_tf_ops:
            # NOTE: convert_layer_group_to_cls_sets consumes layer_group (pops it empty).
            cls_set = graph_search.convert_layer_group_to_cls_sets(layer_group)
            cls_sets += cls_set
        # Scale the CLS sets
        scale_factors = CrossLayerScaling.scale_cls_sets(sess, cls_sets)
        # Find if there were relu activations between layers of each cls set
        is_relu_activation_in_cls_sets = graph_search.is_relu_activation_present_in_cls_sets(cls_sets)
        # Convert to a list of cls-set-info elements
        cls_set_info_list = CrossLayerScaling.create_cls_set_info_list(cls_sets, scale_factors,
                                                                      is_relu_activation_in_cls_sets)
        # save and load the updated graph after scaling
        after_cls_sess = save_and_load_graph('./temp_cls', sess)
        return after_cls_sess, cls_set_info_list
class HighBiasFold:
    """
    Class to apply the high-bias-fold technique to a given model
    """
    # Type aliases used by callers of this class.
    ActivationIsReluForFirstModule = bool
    ScaleForFirstModule = np.ndarray
    @staticmethod
    def get_bn_params_for_bias_fold(sess: tf.Session, bn_op: tf.Operation, scaling_parameter: np.ndarray):
        """
        :param sess: active TF session
        :param bn_op: tf Operation type fused batchnorm op.
        :param scaling_parameter: scaling param as np.ndarray
        :return: bn_params as BNParamsHighBiasFold type, with gamma/beta divided
                 element-wise by the CLS scaling factor.
        """
        bn_params = libpymo.BNParamsHighBiasFold()
        # Scaling gamma and beta parameter of batch norm layer
        gamma = BNUtils.get_gamma_as_numpy_data(sess, bn_op).reshape(-1)
        bn_params.gamma = np.divide(gamma, scaling_parameter)
        beta = BNUtils.get_beta_as_numpy_data(sess, bn_op).reshape(-1)
        bn_params.beta = np.divide(beta, scaling_parameter)
        return bn_params
    @staticmethod
    def _refresh_layer_set_info_before_hbf(sess: tf.Session,
                                           folded_pairs: List[Tuple[tf.Operation, tf.Operation]],
                                           cls_set_info_list: List[ClsSetInfo])\
            -> (List[ClsSetInfo], Dict[str, tf.Operation]):
        """
        As the tensorflow session gets updated, info on op references need to be refreshed.
        :param sess: active TF session
        :param folded_pairs: bn conv op pairs saved during batchnorm fold.
        :param cls_set_info_list: conv layer info saved during cross layer scaling
        :return: refreshed cls_set_info_list and a conv-name -> bn-op dict, both
                 reflecting the ops of the new TF session.
        """
        bn_dict = {}
        dict_names_to_tf_ops = GraphSearchUtils.map_op_names_to_ops(sess)
        # update info saved during batchnorm fold
        for conv_bn in folded_pairs:
            # get the new op ref from it's name
            bn_dict[conv_bn[0].name] = dict_names_to_tf_ops[conv_bn[1].name]
        # update info saved during cls
        ClsSetInfo.map_cls_sets_to_new_session(dict_names_to_tf_ops, cls_set_info_list)
        return cls_set_info_list, bn_dict
    @staticmethod
    def bias_fold(sess: tf.Session, folded_pairs: List[Tuple[tf.Operation, tf.Operation]],
                  cls_set_info_list: List[ClsSetInfo]) -> tf.Session:
        """
        Folds bias values greater than 3 * sigma to next layer's bias
        :param sess: Current session
        :param folded_pairs: Key: Conv/Linear layer Value: Corresponding folded BN layer
        :param cls_set_info_list: List of info elements for each cls set
        :return: updated session after graph updates from hbf
        """
        with sess.graph.as_default():
            # refresh the references saved during bn fold and cls.
            cls_set_info_list, bn_layers = HighBiasFold._refresh_layer_set_info_before_hbf(sess, folded_pairs,
                                                                                           cls_set_info_list)
            if not bn_layers:
                logger.error('High Bias folding is not supported for models without BatchNorm Layers')
                return sess
            for cls_set_info in cls_set_info_list:
                for cls_pair_info in cls_set_info.cls_pair_info_list:
                    # check if we have a corresponding bn layer
                    if cls_pair_info.layer1.name in bn_layers.keys():
                        # check if bias present in given conv2D(s); skip the pair if either is missing
                        if BiasUtils.is_bias_none(cls_pair_info.layer1) or BiasUtils.is_bias_none(cls_pair_info.layer2):
                            continue
                        prev_layer_params = libpymo.LayerParams()
                        curr_layer_params = libpymo.LayerParams()
                        scaling_parameter = cls_pair_info.scale_factor
                        prev_layer_bn_params =\
                            HighBiasFold.get_bn_params_for_bias_fold(sess,
                                                                     bn_layers[cls_pair_info.layer1.name],
                                                                     scaling_parameter)
                        prev_layer_params.activationIsRelu = cls_pair_info.relu_activation_between_layers
                        prev_layer_params.bias =\
                            BiasUtils.get_bias_as_numpy_data(sess, cls_pair_info.layer1).reshape(-1)
                        prev_bias_shape = BiasUtils.get_shape(cls_pair_info.layer1)
                        weight_shape = WeightTensorUtils.get_tensor_shape(cls_pair_info.layer1)
                        prev_layer_params.weightShape = [weight_shape[3], weight_shape[2], weight_shape[0],
                                                         weight_shape[1]]
                        curr_layer_params.bias =\
                            BiasUtils.get_bias_as_numpy_data(sess, cls_pair_info.layer2).reshape(-1)
                        curr_bias_shape = BiasUtils.get_shape(cls_pair_info.layer2)
                        weight_shape = WeightTensorUtils.get_tensor_shape(cls_pair_info.layer2)
                        # Handle depthwise layer case
                        # for a depthwise layer num outputs is set to 1 in TF
                        # send as [Nic, Noc, kh, kw], TF format is [kh, kw, Nic, Noc]
                        if cls_pair_info.layer2.type in ['DepthwiseConv2dNative']:
                            c_wt = WeightTensorUtils.get_tensor_as_numpy_data(
                                sess, cls_pair_info.layer2).transpose((2, 3, 0, 1))
                            curr_layer_params.weight = c_wt.reshape(-1)
                            curr_layer_params.weightShape = [weight_shape[2], weight_shape[3], weight_shape[0],
                                                             weight_shape[1]]
                        else:
                            # send as [Noc, Nic, kh, kw], TF format is [kh, kw, Nic, Noc]
                            c_wt = WeightTensorUtils.get_tensor_as_numpy_data(
                                sess, cls_pair_info.layer2).transpose((3, 2, 0, 1))
                            curr_layer_params.weight = c_wt.reshape(-1)
                            curr_layer_params.weightShape = [weight_shape[3], weight_shape[2], weight_shape[0],
                                                             weight_shape[1]]
                        # libpymo moves the high-bias portion into the next layer's bias in place.
                        libpymo.updateBias(prev_layer_params, curr_layer_params, prev_layer_bn_params)
                        BiasUtils.update_bias_for_op(sess, cls_pair_info.layer1, np.reshape(prev_layer_params.bias,
                                                                                            prev_bias_shape))
                        BiasUtils.update_bias_for_op(sess, cls_pair_info.layer2, np.reshape(curr_layer_params.bias,
                                                                                            curr_bias_shape))
                    else:
                        logger.info("skipping layer: {%s}", cls_pair_info.layer1.name)
        # save and load the updated graph after high bias fold update
        aftr_hbf_sess = save_and_load_graph('./temp_hbf', sess)
        return aftr_hbf_sess
def equalize_model(sess: tf.Session, start_op_names: Union[str, List[str]],
                   output_op_names: Union[str, List[str]]) -> tf.Session:
    """
    High-level API to perform Cross-Layer Equalization (CLE) on the given model. The model is equalized in place.
    :param sess: tf Session with model to equalize
    :param start_op_names: Names of starting ops in the given model (a single name or a list of names)
    :param output_op_names: List of output op names of the model, used to help ConnectedGraph determine valid ops
           (to ignore training ops for example). A single name is also accepted.
    :return: updated session after bn fold, cls and hbf.
    """
    # Validate the runtime type with the concrete `list` class rather than the
    # typing.List alias (isinstance against typing aliases is deprecated).
    if not isinstance(start_op_names, (str, list)):
        logger.error('start op names must be passed as a string or a List of strings')
    # Normalize both op-name arguments to lists, mirroring CrossLayerScaling.scale_model.
    if isinstance(start_op_names, str):
        start_op_names = [start_op_names]
    if isinstance(output_op_names, str):
        output_op_names = [output_op_names]
    # fold batchnorm layers
    after_bn_fold_sess, folded_pairs = fold_all_batch_norms(sess, start_op_names, output_op_names)
    # replace any ReLU6 layers with ReLU
    graph_util = GraphSearchUtils(after_bn_fold_sess.graph, start_op_names, output_op_names)
    after_relu_replace_sess = graph_util.find_and_replace_relu6_with_relu(after_bn_fold_sess)
    # perform cross-layer scaling on applicable layer sets
    after_cls_sess, cls_set_info_list = CrossLayerScaling.scale_model(after_relu_replace_sess, start_op_names,
                                                                      output_op_names)
    # high-bias fold
    after_hbf_sess = HighBiasFold.bias_fold(after_cls_sess, folded_pairs, cls_set_info_list)
    return after_hbf_sess
|
{"hexsha": "d16d855fd4f368cdc0850dd99146f277d3de54b0", "size": 34558, "ext": "py", "lang": "Python", "max_stars_repo_path": "TrainingExtensions/tensorflow/src/python/aimet_tensorflow/cross_layer_equalization.py", "max_stars_repo_name": "quic-bharathr/aimet", "max_stars_repo_head_hexsha": "363308217dca3fc52644bdda31e69e356397adaf", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-08-23T13:00:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-17T10:52:36.000Z", "max_issues_repo_path": "TrainingExtensions/tensorflow/src/python/aimet_tensorflow/cross_layer_equalization.py", "max_issues_repo_name": "4ant00ra/aimet", "max_issues_repo_head_hexsha": "c6ffd3c31c290fe0913b50831d58534f6df61d76", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TrainingExtensions/tensorflow/src/python/aimet_tensorflow/cross_layer_equalization.py", "max_forks_repo_name": "4ant00ra/aimet", "max_forks_repo_head_hexsha": "c6ffd3c31c290fe0913b50831d58534f6df61d76", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-06T18:40:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-06T18:40:33.000Z", "avg_line_length": 46.7, "max_line_length": 120, "alphanum_fraction": 0.6381156317, "include": true, "reason": "import numpy", "num_tokens": 7335}
|
{- Byzantine Fault Tolerant Consensus Verification in Agda, version 0.9.
Copyright (c) 2021, Oracle and/or its affiliates.
Licensed under the Universal Permissive License v 1.0 as shown at https://opensource.oracle.com/licenses/upl
-}
open import LibraBFT.Concrete.Records
open import LibraBFT.Concrete.System
open import LibraBFT.Concrete.System.Parameters
import LibraBFT.Concrete.Properties.Common as Common
import LibraBFT.Concrete.Properties.VotesOnce as VO
open import LibraBFT.Impl.Consensus.Network as Network
open import LibraBFT.Impl.Consensus.Network.Properties as NetworkProps
open import LibraBFT.Impl.Consensus.RoundManager
import LibraBFT.Impl.Handle as Handle
open import LibraBFT.Impl.Handle.Properties
open import LibraBFT.Impl.IO.OBM.InputOutputHandlers
open import LibraBFT.Impl.IO.OBM.Properties.InputOutputHandlers
open import LibraBFT.Impl.Properties.Common
open import LibraBFT.ImplShared.Consensus.Types
open import LibraBFT.ImplShared.Consensus.Types.EpochDep
open import LibraBFT.ImplShared.Interface.Output
open import LibraBFT.ImplShared.Util.Crypto
open import LibraBFT.ImplShared.Util.Dijkstra.All
open ReachableSystemStateProps
open import LibraBFT.Impl.Properties.Util
open import Optics.All
open import Util.Lemmas
open import Util.PKCS
open import Util.Prelude
open Invariants
open RoundManagerTransProps
open import LibraBFT.Abstract.Types.EpochConfig UID NodeId
open ParamsWithInitAndHandlers Handle.InitHandler.initAndHandlers
open import LibraBFT.ImplShared.Util.HashCollisions Handle.InitHandler.initAndHandlers
open import Yasm.Yasm ℓ-RoundManager ℓ-VSFP ConcSysParms
Handle.InitHandler.initAndHandlers
PeerCanSignForPK PeerCanSignForPK-stable
open Structural impl-sps-avp
open import LibraBFT.Impl.Handle.InitProperties
open initHandlerSpec
-- This module proves the two "VotesOnce" proof obligations for our handler.
module LibraBFT.Impl.Properties.VotesOnce (𝓔 : EpochConfig) where
------------------------------------------------------------------------------
-- If an honest step by `pid` sends a message containing a vote `v` whose
-- signature is not already represented in the msgPool (and is not a bootstrap
-- signature), then `pid`'s post-state `s'` records `v` as its last vote.
newVote⇒lv≡
  : ∀ {pre : SystemState}{pid s' acts v m pk}
  → ReachableSystemState pre
  → StepPeerState pid (msgPool pre) (initialised pre) (peerStates pre pid) (s' , acts)
  → v ⊂Msg m
  → send m ∈ acts
  → (sig : WithVerSig pk v)
  → Meta-Honest-PK pk
  → ¬ (∈BootstrapInfo-impl fakeBootstrapInfo (ver-signature sig))
  → ¬ MsgWithSig∈ pk (ver-signature sig) (msgPool pre)
  → LastVoteIs s' v
-- Initialisation case: by the init handler's contract, the only message it can
-- send is an initial ProposalMsg containing no signed votes, a contradiction.
newVote⇒lv≡ {pid = pid} preach (step-init rm×acts uni) v⊂m send∈acts sig hpk ¬bootstrap ¬mws∈pool
  with initHandlerSpec.contract pid fakeBootstrapInfo rm×acts
...| init-contract
  with initHandlerSpec.ContractOk.isInitPM init-contract send∈acts
...| (_ , refl , noSigs)
  with v⊂m
...| vote∈qc vs∈qc v≈rbld qc∈pm = ⊥-elim (noSigs vs∈qc qc∈pm)
-- Vote carried inside a QC: its signature must already have been sent before
-- (qcVoteSigsSentB4-handle), contradicting ¬msb4.
newVote⇒lv≡{pre}{pid}{s'}{v = v}{m}{pk} preach sps@(step-msg{sndr , nm} m∈pool ini) (vote∈qc{vs}{qc} vs∈qc v≈rbld qc∈m) m∈acts sig hpk ¬bootstrap ¬msb4
  with cong _vSignature v≈rbld
...| refl = ⊥-elim ∘′ ¬msb4 $ qcVoteSigsSentB4-handle pid preach sps m∈acts qc∈m sig vs∈qc v≈rbld ¬bootstrap
-- Vote sent directly in a VoteMsg while handling a proposal: use the
-- handleProposal contract to show the sent vote is the recorded last vote.
newVote⇒lv≡{pre}{pid}{v = v} preach (step-msg{sndr , P pm} m∈pool ini) vote∈vm m∈acts sig hpk ¬bootstrap ¬msb4
  with handleProposalSpec.contract! 0 pm (msgPool pre) (peerStates pre pid)
...| handleProposalSpec.mkContract _ invalidProposal _ vac _ _
  -- TODO-2: DRY fail. This pattern arises several times in this file, where we need to know that
  -- the proposal being processed is valid, and to use handleProposalSpec to derive a contradiction
  -- if it is not. Some are identical, some are not.
  with BlockId-correct? (pm ^∙ pmProposal)
...| no ¬validProposal = ⊥-elim (sendVote∉actions {outs = handleOuts} {st = handlePre}
                                   (sym (proj₂ $ invalidProposal ¬validProposal)) m∈acts)
  where
    handlePre  = peerStates pre pid
    handleOuts = LBFT-outs (handle pid (P pm) 0) (peerStates pre pid)
...| yes refl
  with vac refl (nohc preach m∈pool pid ini (invariantsCorrect pid pre ini preach) refl refl)
...| Voting.mkVoteAttemptCorrectWithEpochReq (inj₁ (_ , voteUnsent)) sdEpoch≡? =
  ⊥-elim (¬voteUnsent voteUnsent)
  where
    handleOuts = LBFT-outs (handle pid (P pm) 0) (peerStates pre pid)
    -- An unsent-vote outcome contradicts the fact that a vote was sent (m∈acts).
    ¬voteUnsent : ¬ Voting.VoteUnsentCorrect (peerStates pre pid) _ _ _ _
    ¬voteUnsent (Voting.mkVoteUnsentCorrect noVoteMsgOuts _) =
      sendVote∉actions{outs = handleOuts}{st = peerStates pre pid}
        (sym noVoteMsgOuts) m∈acts
...| Voting.mkVoteAttemptCorrectWithEpochReq (inj₂ (Voting.mkVoteSentCorrect (VoteMsg∙new v' _) rcvr voteMsgOuts vgCorrect)) sdEpoch≡? =
  sentVoteIsPostLV
  where
    handlePost = LBFT-post (handle pid (P pm) 0) (peerStates pre pid)
    handleOuts = LBFT-outs (handle pid (P pm) 0) (peerStates pre pid)
    -- The vote recorded by the generation step is exactly the vote sent.
    sentVoteIsPostLV : LastVoteIs handlePost v
    sentVoteIsPostLV
      with Voting.VoteGeneratedCorrect.state vgCorrect
    ...| RoundManagerTransProps.mkVoteGenerated lv≡v _
      rewrite sym lv≡v
      = cong (just ∘ _^∙ vmVote) (sendVote∈actions{outs = handleOuts}{st = peerStates pre pid} (sym voteMsgOuts) m∈acts)
-- Handling a VoteMsg sends no votes (handleVote contract), contradiction.
newVote⇒lv≡{pre}{pid}{s' = s'}{v = v} preach (step-msg{sndr , V vm} m∈pool ini) vote∈vm m∈outs sig hpk ¬bootstrap ¬msb4 =
  ⊥-elim (sendVote∉actions{outs = hvOut}{st = hvPre} (sym noVotes) m∈outs)
  where
    hvPre = peerStates pre pid
    hvOut = LBFT-outs (handleVote 0 vm) hvPre
    open handleVoteSpec.Contract (handleVoteSpec.contract! 0 vm (msgPool pre) hvPre)
------------------------------------------------------------------------------
-- If an honestly-signed vote `v` is already represented in the msgPool, and
-- peer `pid`'s current epoch matches the vote's epoch, then the vote's round
-- is bounded by `pid`'s (meta) last-voted round.  Proceeds by induction on the
-- reachability proof.
oldVoteRound≤lvr
  : ∀ {pid pk v}{pre : SystemState}
  → (r : ReachableSystemState pre)
  → Meta-Honest-PK pk
  → (sig : WithVerSig pk v)
  → ¬ (∈BootstrapInfo-impl fakeBootstrapInfo (ver-signature sig))
  → MsgWithSig∈ pk (ver-signature sig) (msgPool pre)
  → PeerCanSignForPK pre v pid pk
  → (peerStates pre pid) ^∙ rmEpoch ≡ (v ^∙ vEpoch)
  → v ^∙ vRound ≤ Meta.getLastVoteRound ((peerStates pre pid) ^∙ pssSafetyData-rm)
oldVoteRound≤lvr{pid} (step-s preach step@(step-peer{pid'} sp@(step-cheat cmc))) hpk sig ¬bootstrap mws∈pool pcsfpk epoch≡
  -- `pid`'s state is untouched by this step
  rewrite cheatStepDNMPeerStates₁{pid = pid'}{pid' = pid} sp unit
  = oldVoteRound≤lvr preach hpk sig ¬bootstrap mws∈prePool pcsfpkPre epoch≡
  where
    -- The cheat step could not have been where the signed message was introduced,
    -- so there must be a signed message in the pool prior to this
    mws∈prePool = ¬cheatForgeNew sp refl unit hpk mws∈pool (¬subst ¬bootstrap (msgSameSig mws∈pool))
    -- `pid` can sign for the message in the previous system state
    pcsfpkPre = PeerCanSignForPKProps.msb4 preach step pcsfpk hpk sig mws∈prePool
oldVoteRound≤lvr{pid}{v = v} step*@(step-s{pre = pre}{post = post@._} preach step@(step-peer{pid'} sp@(step-honest{st = ppost}{outs} sps))) hpk sig ¬bootstrap mws∈pool pcsfpk epoch≡
  with msgSameSig mws∈pool
...| refl
  with newMsg⊎msgSentB4 preach sps hpk (msgSigned mws∈pool) ¬bootstrap (msg⊆ mws∈pool) (msg∈pool mws∈pool)
...| Right msb4 = helpSentB4
  where
    pcsfpkPre : PeerCanSignForPK pre v pid _
    pcsfpkPre = PeerCanSignForPKProps.msb4 preach step pcsfpk hpk sig msb4
    -- Induction hypothesis, conditioned on the epoch agreeing in the pre-state.
    ovrHyp : peerStates pre pid ^∙ rmEpoch ≡ v ^∙ vEpoch → v ^∙ vRound ≤ Meta.getLastVoteRound ((peerStates pre pid) ^∙ pssSafetyData-rm)
    ovrHyp ep≡ = oldVoteRound≤lvr{pre = pre} preach hpk sig ¬bootstrap msb4 pcsfpkPre ep≡
    helpSentB4 : v ^∙ vRound ≤ Meta.getLastVoteRound ((peerStates post pid) ^∙ pssSafetyData-rm)
    helpSentB4
      with pid ≟ pid'
    -- A step by `pid'` step cannot affect `pid`'s state
    ...| no pid≢
      rewrite sym (pids≢StepDNMPeerStates{pre = pre} sps pid≢)
      = ovrHyp epoch≡
    ...| yes refl = ≤-trans (ovrHyp epochPre≡) lvr≤
      where
        -- If a vote signed by a peer exists in the past, and that vote has an
        -- epoch id associated to it that is the same as the peer's post-state
        -- epoch, then the peer has that same epoch id in its immediately preceding
        -- pre-state.
        epochPre≡ : peerStates pre pid ^∙ rmEpoch ≡ v ^∙ vEpoch
        epochPre≡ =
          ReachableSystemStateProps.mws∈pool⇒epoch≡{v = v}{ppost}{outs} preach sps
            pcsfpkPre hpk sig ¬bootstrap msb4 epoch≡'
          where
            open ≡-Reasoning
            epoch≡' : ppost ^∙ rmEpoch ≡ v ^∙ vEpoch
            epoch≡' = begin
              ppost ^∙ rmEpoch ≡⟨ cong (_^∙ rmEpoch) (StepPeer-post-lemma sp) ⟩
              peerStates (StepPeer-post{pre = pre} sp) pid' ^∙ rmEpoch ≡⟨ epoch≡ ⟩
              v ^∙ vEpoch ∎
        ini : initialised pre pid' ≡ initd
        ini = ReachableSystemStateProps.mws∈pool⇒initd preach pcsfpkPre hpk sig ¬bootstrap msb4
        -- The last-voted round never decreases across an honest step.
        lvr≤ : Meta.getLastVoteRound ((peerStates pre pid) ^∙ pssSafetyData-rm) ≤ Meta.getLastVoteRound ((peerStates post pid) ^∙ pssSafetyData-rm)
        lvr≤
          rewrite sym (StepPeer-post-lemma{pre = pre} sp)
          = lastVotedRound-mono pid' pre preach ini sps
              (trans epochPre≡ (sym epoch≡))
-- The vote was newly sent this round
...| Left (m∈outs , pcsfpkPost , ¬msb4)
  -- ... and it really is the same vote, because there has not been a hash collision
  with sameSig⇒sameVoteData (msgSigned mws∈pool) sig (msgSameSig mws∈pool)
...| Left nonInjSHA256 = ⊥-elim (PerReachableState.meta-no-collision step* nonInjSHA256)
...| Right refl
  with PeerCanSignForPKProps.pidInjective pcsfpk pcsfpkPost refl
...| refl = ≡⇒≤ vr≡lvrPost
  where
    -- A newly sent vote becomes the peer's last vote (newVote⇒lv≡), so its
    -- round equals the post-state last-voted round.
    vr≡lvrPost : v ^∙ vRound ≡ Meta.getLastVoteRound ((peerStates (StepPeer-post sp) pid) ^∙ pssSafetyData-rm)
    vr≡lvrPost
      rewrite sym (StepPeer-post-lemma sp)
      with newVote⇒lv≡{pre = pre}{pid = pid} preach sps (msg⊆ mws∈pool) m∈outs (msgSigned mws∈pool) hpk ¬bootstrap ¬msb4
    ...| lastVoteIsJust
      with ppost ^∙ pssSafetyData-rm ∙ sdLastVote
    ...| nothing = absurd (just _ ≡ nothing) case lastVoteIsJust of λ ()
    ...| just _ rewrite just-injective (sym lastVoteIsJust) = refl
------------------------------------------------------------------------------
-- Helper for sameERasLV⇒sameId: given a message with v'’s signature already in
-- the pre-state pool, transport all the relevant facts (signing capability,
-- epoch and round agreement, proposed id) back to the pre-state message part.
sameERasLV⇒sameId-lem₁ :
  ∀ {pid pid' pk s acts}{pre : SystemState}
  → ReachableSystemState pre
  → (sp : StepPeer pre pid' s acts)
  → ∀ {v v'} → Meta-Honest-PK pk
  → PeerCanSignForPK (StepPeer-post sp) v pid pk
  → (sig' : WithVerSig pk v') → ¬ (∈BootstrapInfo-impl fakeBootstrapInfo (ver-signature sig'))
  → (mws : MsgWithSig∈ pk (ver-signature sig') (msgPool pre))
  → v ≡L v' at vEpoch → v ≡L v' at vRound
  → Σ[ mws ∈ MsgWithSig∈ pk (ver-signature sig') (msgPool pre) ]
      (¬ ∈BootstrapInfo-impl fakeBootstrapInfo (ver-signature ∘ msgSigned $ mws)
      × PeerCanSignForPK pre v pid pk
      × v ≡L msgPart mws at vEpoch
      × v ≡L msgPart mws at vRound
      × msgPart mws ≡L v' at vProposedId)
sameERasLV⇒sameId-lem₁{pid}{pid'}{pk}{pre = pre} rss sp {v}{v'} hpk pcsfpk sig' ¬bootstrap mws ≡epoch ≡round =
  mws , ¬bootstrap' , pcsfpkPre
      , trans ≡epoch (cong (_^∙ vdProposed ∙ biEpoch) (sym ≡voteData))
      , trans ≡round (cong (_^∙ vdProposed ∙ biRound) (sym ≡voteData))
      , cong (_^∙ vdProposed ∙ biId) ( ≡voteData)
  where
    -- That message has the same signature as `v'`, so it has the same vote data
    -- (unless there was a collision, which we currently assume does not occur).
    ≡voteData : msgPart mws ≡L v' at vVoteData
    ≡voteData = ⊎-elimˡ (PerReachableState.meta-no-collision rss) (sameSig⇒sameVoteData sig' (msgSigned mws) (sym ∘ msgSameSig $ mws))
    ¬bootstrap' : ¬ ∈BootstrapInfo-impl fakeBootstrapInfo (ver-signature ∘ msgSigned $ mws)
    ¬bootstrap' rewrite msgSameSig mws = ¬bootstrap
    -- The peer can sign for `v` now, so it can sign for `v` in the preceeding
    -- step, because there is an honestly signed message part for the peer's pubkey in the
    -- current epoch already in the pool.
    pcsfpkPre : PeerCanSignForPK pre v pid pk
    pcsfpkPre = PeerCanSignForPKProps.msb4-eid≡ rss (step-peer sp) hpk pcsfpk ≡epoch sig' mws
-- If `v` is the last vote recorded by `pid` and `v'` is an honestly-signed
-- vote in the pool with the same epoch and round, then the two votes propose
-- the same block id.  Proceeds by induction on the reachability proof, case
-- splitting on the kind of the final step.
sameERasLV⇒sameId
  : ∀ {pid pid' pk}{st : SystemState}
  → ReachableSystemState st
  → ∀{v v' m'} → Meta-Honest-PK pk
  → just v ≡ peerStates st pid ^∙ pssSafetyData-rm ∙ sdLastVote
  → PeerCanSignForPK st v pid pk
  → v' ⊂Msg m' → (pid' , m') ∈ (msgPool st)
  → (sig' : WithVerSig pk v') → ¬ (∈BootstrapInfo-impl fakeBootstrapInfo (ver-signature sig'))
  → v ≡L v' at vEpoch → v ≡L v' at vRound
  → v ≡L v' at vProposedId
-- Cheat steps cannot be where an honestly signed message originated.
sameERasLV⇒sameId{pid}{pid'}{pk} (step-s{pre = pre} rss (step-peer sp@(step-cheat cmc))) {v}{v'}{m'} hpk ≡pidLV pcsfpk v'⊂m' m'∈pool sig' ¬bootstrap ≡epoch ≡round
  with sameERasLV⇒sameId-lem₁ rss sp hpk pcsfpk sig' ¬bootstrap mws ≡epoch ≡round
  where
    -- Track down the honestly signed message which existed before.
    mws : MsgWithSig∈ pk (ver-signature sig') (msgPool pre)
    mws = ¬cheatForgeNew sp refl unit hpk (mkMsgWithSig∈ m' v' v'⊂m' pid' m'∈pool sig' refl) ¬bootstrap
...| mws , ¬bootstrap' , pcsfpkPre , ≡epoch' , ≡round' , v'id≡ =
  trans (sameERasLV⇒sameId rss hpk ≡pidLVPre pcsfpkPre (msg⊆ mws) (msg∈pool mws) (msgSigned mws) ¬bootstrap' ≡epoch' ≡round') v'id≡
  where
    -- The state of `pid` is unchanged
    ≡pidLVPre : just v ≡ peerStates pre pid ^∙ pssSafetyData-rm ∙ sdLastVote
    ≡pidLVPre = trans ≡pidLV (cong (_^∙ pssSafetyData-rm ∙ sdLastVote) (cheatStepDNMPeerStates₁ sp unit))
-- Initialization steps cannot be where an honestly signed message originated
sameERasLV⇒sameId {pid} {pid'} {pk}
  (step-s rss step@(step-peer{pre = pre} sp@(step-honest{pid“} sps@(step-init {rm} rm×acts uni))))
  {v} {v'} {m'} hpk ≡pidLV pcsfpk v'⊂m' m'∈pool sig' ¬bootstrap ≡epoch ≡round
  with pid ≟ pid“
-- If this isn't `pid`, the step does not affect `pid`'s state
...| no pid≢
  rewrite sym $ pids≢StepDNMPeerStates{pre = pre} sps pid≢
  = sameERasLV⇒sameId rss hpk ≡pidLV pcsfpkPre v'⊂m' (m'∈poolb4 v'⊂m') sig' ¬bootstrap ≡epoch ≡round
  where
    -- The init handler sends no signed votes, so m' was already in the pool.
    m'∈poolb4 : v' ⊂Msg m' → (pid' , m') ∈ (msgPool pre)
    m'∈poolb4 v'⊂m'
      with Any-++⁻ _ m'∈pool
    ...| inj₂ x = x
    ...| inj₁ x
      with initHandlerSpec.contract pid“ fakeBootstrapInfo rm×acts
    ...| init-contract
      with initHandlerSpec.ContractOk.isInitPM init-contract (proj₁ (senderMsgPair∈⇒send∈ _ x))
    ...| (pm , refl , noSigs)
      with v'⊂m'
    ...| vote∈qc vs∈qc v≈rbld qc∈nm
      = ⊥-elim (noSigs vs∈qc qc∈nm)
    mws : MsgWithSig∈ pk (ver-signature sig') (msgPool pre)
    mws = mkMsgWithSig∈ _ _ v'⊂m' _ (m'∈poolb4 v'⊂m') sig' refl
    pcsfpkPre : PeerCanSignForPK pre v pid pk
    pcsfpkPre = PeerCanSignForPKProps.msb4-eid≡ rss step hpk pcsfpk ≡epoch sig' mws
-- If this is `pid`, the last vote cannot be a `just`!
...| yes refl
  rewrite sym (StepPeer-post-lemma sp)
  with initHandlerSpec.contract pid fakeBootstrapInfo rm×acts
...| init-contract
  with initHandlerSpec.ContractOk.sdLVnothing init-contract
...| lv≡nothing
  = absurd just v ≡ nothing case trans ≡pidLV lv≡nothing of λ ()
sameERasLV⇒sameId{pid}{pid'}{pk} (step-s rss (step-peer{pre = pre} sp@(step-honest{pid“} sps@(step-msg{sndr , m} m∈pool ini)))) {v}{v'} hpk ≡pidLV pcsfpk v'⊂m' m'∈pool sig' ¬bootstrap ≡epoch ≡round
  with newMsg⊎msgSentB4 rss sps hpk sig' ¬bootstrap v'⊂m' m'∈pool
-- The message has been sent before
...| Right mws'
  with sameERasLV⇒sameId-lem₁ rss sp hpk pcsfpk sig' ¬bootstrap mws' ≡epoch ≡round
...| mws , ¬bootstrap' , pcsfpkPre , ≡epoch' , ≡round' , v'id≡
  with pid ≟ pid“
-- If this isn't `pid`, the step does not affect `pid`'s state
...| no pid≢
  rewrite sym $ pids≢StepDNMPeerStates{pre = pre} sps pid≢
  = trans (sameERasLV⇒sameId rss hpk ≡pidLV pcsfpkPre (msg⊆ mws) (msg∈pool mws) (msgSigned mws) ¬bootstrap' ≡epoch' ≡round') v'id≡
-- This is `pid`, so we need to know what message it was processing
...| yes refl
  rewrite sym $ StepPeer-post-lemma{pre = pre} sp
  = trans (sameERasLV⇒sameId rss hpk (≡pidLVPre m m∈pool ≡pidLV) pcsfpkPre (msg⊆ mws) (msg∈pool mws) (msgSigned mws) ¬bootstrap' ≡epoch' ≡round') v'id≡
  where
    -- Push the "v is pid's last vote" fact from the handler post-state back
    -- to the pre-state, by cases on the message being handled.
    ≡pidLVPre : (m : NetworkMsg) → (sndr , m) ∈ msgPool pre → just v ≡ LBFT-post (handle pid m 0) (peerStates pre pid) ^∙ pssSafetyData-rm ∙ sdLastVote → just v ≡ peerStates pre pid ^∙ pssSafetyData-rm ∙ sdLastVote
    -- Last vote doesn't change when processing a vote message
    ≡pidLVPre (V vm) m∈pool ≡pidLV = begin
      just v ≡⟨ ≡pidLV ⟩
      hvPos ^∙ pssSafetyData-rm ∙ sdLastVote ≡⟨ cong (_^∙ sdLastVote) (sym noSDChange) ⟩
      hvPre ^∙ pssSafetyData-rm ∙ sdLastVote ∎
      where
        open ≡-Reasoning
        hvPre = peerStates pre pid
        hvPos = LBFT-post (handleVote 0 vm) hvPre
        hvOut = LBFT-outs (handleVote 0 vm) hvPre
        open handleVoteSpec.Contract (handleVoteSpec.contract! 0 vm (msgPool pre) hvPre)
    -- Commit messages are only for reasoning about correctness
    ≡pidLVPre (C cm) m∈pool ≡pidLV = ≡pidLV
    ≡pidLVPre (P pm) m∈pool ≡pidLV = analyzeVoteAttempt
      where
        hpPre = peerStates pre pid“
        hpPos = LBFT-post (handleProposal 0 pm) hpPre
        open handleProposalSpec.Contract (handleProposalSpec.contract! 0 pm (msgPool pre) hpPre)
          renaming (rmInv to rmInvP)
        open Invariants.RoundManagerInv (invariantsCorrect pid“ pre ini rss)
        -- when the last vote is the same in pre and post states
        module OldVote (lv≡ : hpPre ≡L hpPos at pssSafetyData-rm ∙ sdLastVote) where
          open ≡-Reasoning
          ≡pidLVPre₁ : just v ≡ hpPre ^∙ pssSafetyData-rm ∙ sdLastVote
          ≡pidLVPre₁ = begin
            just v ≡⟨ ≡pidLV ⟩
            hpPos ^∙ pssSafetyData-rm ∙ sdLastVote ≡⟨ sym lv≡ ⟩
            hpPre ^∙ pssSafetyData-rm ∙ sdLastVote ∎
        -- When a new vote is generated, its round is strictly greater than that of the previous vote we attempted to send.
        module NewVote
          (vote : Vote) (lv≡v : just vote ≡ hpPos ^∙ pssSafetyData-rm ∙ sdLastVote)
          (lvr< : hpPre [ _<_ ]L hpPos at pssSafetyData-rm ∙ sdLastVotedRound)
          (lvr≡ : vote ^∙ vRound ≡ hpPos ^∙ pssSafetyData-rm ∙ sdLastVotedRound)
          (sdEpoch≡ : hpPre ^∙ pssSafetyData-rm ∙ sdEpoch ≡ pm ^∙ pmProposal ∙ bEpoch)
          (blockTriggered : Voting.VoteMadeFromBlock vote (pm ^∙ pmProposal))
          where
          -- `vote` comes from the peer handler contract
          v≡vote : v ≡ vote
          v≡vote = just-injective $ begin
            just v ≡⟨ ≡pidLV ⟩
            hpPos ^∙ pssSafetyData-rm ∙ sdLastVote ≡⟨ sym lv≡v ⟩
            just vote ∎
            where open ≡-Reasoning
          -- The round of `v'` must be less than the round of the vote stored in `sdLastVote`
          rv'≤lvrPre : v' ^∙ vRound ≤ Meta.getLastVoteRound (hpPre ^∙ pssSafetyData-rm)
          rv'≤lvrPre = oldVoteRound≤lvr rss hpk sig' ¬bootstrap mws pcsfpkPre'
            (ReachableSystemStateProps.mws∈pool⇒epoch≡ rss (step-msg m∈pool ini)
              pcsfpkPre' hpk sig' ¬bootstrap mws ≡epoch“)
            where
              pcsfpkPre' = peerCanSignEp≡ pcsfpkPre ≡epoch
              open ≡-Reasoning
              ≡epoch“ : hpPos ^∙ rmEpoch ≡ v' ^∙ vEpoch
              ≡epoch“ = begin
                hpPos ^∙ rmEpoch ≡⟨ sym noEpochChange ⟩
                hpPre ^∙ rmEpoch ≡⟨ rmEpochsMatch ⟩
                hpPre ^∙ pssSafetyData-rm ∙ sdEpoch ≡⟨ sdEpoch≡ ⟩
                pm ^∙ pmProposal ∙ bEpoch ≡⟨ sym $ Voting.VoteMadeFromBlock.epoch≡ blockTriggered ⟩
                vote ^∙ vEpoch ≡⟨ cong (_^∙ vEpoch) (sym v≡vote) ⟩
                v ^∙ vEpoch ≡⟨ ≡epoch ⟩
                v' ^∙ vEpoch ∎
          rv'<rv : v' [ _<_ ]L v at vRound
          rv'<rv = begin
            (suc $ v' ^∙ vRound) ≤⟨ s≤s rv'≤lvrPre ⟩
            (suc $ Meta.getLastVoteRound (hpPre ^∙ pssSafetyData-rm)) ≤⟨ s≤s lvRound≤ ⟩
            (suc $ hpPre ^∙ pssSafetyData-rm ∙ sdLastVotedRound) ≤⟨ lvr< ⟩
            hpPos ^∙ pssSafetyData-rm ∙ sdLastVotedRound ≡⟨ sym lvr≡ ⟩
            vote ^∙ vRound ≡⟨ sym (cong (_^∙ vRound) v≡vote) ⟩
            v ^∙ vRound ∎
            where
              open ≤-Reasoning
              open SafetyDataInv (SafetyRulesInv.sdInv rmSafetyRulesInv)
        -- Case split on how (and whether) the vote attempt produced a vote.
        analyzeVoteAttempt : just v ≡ peerStates pre pid ^∙ pssSafetyData-rm ∙ sdLastVote
        analyzeVoteAttempt
          with BlockId-correct? (pm ^∙ pmProposal)
        ...| no ¬validProposal rewrite sym (proj₁ (invalidProposal ¬validProposal)) = ≡pidLV
        ...| yes refl
          with voteAttemptCorrect refl (nohc rss m∈pool pid ini (invariantsCorrect pid pre ini rss) refl refl)
        ...| Voting.mkVoteAttemptCorrectWithEpochReq (Left (_ , Voting.mkVoteUnsentCorrect noVoteMsgOuts nvg⊎vgusc)) sdEpoch≡?
          with nvg⊎vgusc
        ...| Left (mkVoteNotGenerated lv≡ lvr≤) = OldVote.≡pidLVPre₁ lv≡
        ...| Right (Voting.mkVoteGeneratedUnsavedCorrect vote (Voting.mkVoteGeneratedCorrect (mkVoteGenerated lv≡v voteSrc) blockTriggered))
          with voteSrc
        ...| Left (mkVoteOldGenerated lvr≡ lv≡) = OldVote.≡pidLVPre₁ lv≡
        ...| Right (mkVoteNewGenerated lvr< lvr≡) =
          ⊥-elim (<⇒≢ (NewVote.rv'<rv vote lv≡v lvr< lvr≡ sdEpoch≡? blockTriggered) (sym ≡round))
        analyzeVoteAttempt | yes refl | Voting.mkVoteAttemptCorrectWithEpochReq (Right (Voting.mkVoteSentCorrect vm pid voteMsgOuts vgCorrect)) sdEpoch≡?
          with vgCorrect
        ...| Voting.mkVoteGeneratedCorrect (mkVoteGenerated lv≡v voteSrc) blockTriggered
          with voteSrc
        ...| Left (mkVoteOldGenerated lvr≡ lv≡) = OldVote.≡pidLVPre₁ lv≡
        ...| Right (mkVoteNewGenerated lvr< lvr≡) =
          ⊥-elim (<⇒≢ (NewVote.rv'<rv (vm ^∙ vmVote) lv≡v lvr< lvr≡ sdEpoch≡? blockTriggered) (sym ≡round))
-- This is the origin of the message
sameERasLV⇒sameId{pid}{pid'}{pk} (step-s rss (step-peer{pre = pre} sp@(step-honest{pid“} sps@(step-msg{sndr , m} m∈pool ini)))) {v}{v'} hpk ≡pidLV pcsfpk v'⊂m' m'∈pool sig' ¬bootstrap ≡epoch ≡round
  | Left (m'∈acts , pcsfpk' , ¬msb4)
  -- So `pid“` must be `pid`
  with PeerCanSignForPKProps.pidInjective pcsfpk pcsfpk' ≡epoch
...| refl
  with v'⊂m'
-- QC vote signatures have been sent before
...| vote∈qc{qc = qc} vs∈qc v≈ qc∈m'
  rewrite cong _vSignature v≈
  = ⊥-elim ∘′ ¬msb4 $ qcVoteSigsSentB4-handle pid rss sps m'∈acts qc∈m' sig' vs∈qc v≈ ¬bootstrap
...| vote∈vm
  rewrite sym $ StepPeer-post-lemma{pre = pre} sp
  = sameId m m∈pool m'∈acts ≡pidLV
  where
    handlePre = peerStates pre pid
    handleOuts : NetworkMsg → List Output
    handleOuts m = LBFT-outs (handle sndr m 0) handlePre
    handlePst : NetworkMsg → RoundManager
    handlePst m = LBFT-post (handle sndr m 0) handlePre
    -- Only a proposal can cause a vote to be sent; show the sent vote is the
    -- recorded last vote, hence v ≡ v'.
    sameId : ∀ {sndr} m → (sndr , m) ∈ msgPool pre
      → send (V (VoteMsg∙new v' _)) ∈ outputsToActions{State = handlePre} (handleOuts m)
      → just v ≡ handlePst m ^∙ pssSafetyData-rm ∙ sdLastVote → v ≡L v' at vProposedId
    sameId (P pm) m∈pool m'∈acts ≡pidLV = analyzeVoteAttempt
      where
        open handleProposalSpec.Contract (handleProposalSpec.contract! 0 pm (msgPool pre) handlePre)
        analyzeVoteAttempt : v ≡L v' at vProposedId
        analyzeVoteAttempt
          with BlockId-correct? (pm ^∙ pmProposal)
        ...| no ¬validProposal = ⊥-elim (sendVote∉actions {outs = handleOuts (P pm)} {st = handlePre}
                                           (sym (proj₂ $ invalidProposal ¬validProposal)) m'∈acts)
        ...| yes refl
          with voteAttemptCorrect refl (nohc rss m∈pool pid ini (invariantsCorrect pid pre ini rss) refl refl)
        ...| Voting.mkVoteAttemptCorrectWithEpochReq (Left (_ , vuc)) sdEpoch≡? =
          ⊥-elim (sendVote∉actions {outs = handleOuts (P pm)} {st = handlePre} (sym $ Voting.VoteUnsentCorrect.noVoteMsgOuts vuc) m'∈acts)
        ...| Voting.mkVoteAttemptCorrectWithEpochReq (Right (Voting.mkVoteSentCorrect vm pid voteMsgOuts vgCorrect)) sdEpoch≡?
          with vgCorrect
        ...| Voting.mkVoteGeneratedCorrect (mkVoteGenerated lv≡v voteSrc) blockTriggered = cong (_^∙ vProposedId) v≡v'
          where
            open ≡-Reasoning
            v≡v' : v ≡ v'
            v≡v' = just-injective $ begin
              just v
                ≡⟨ ≡pidLV ⟩
              (handlePst (P pm) ^∙ pssSafetyData-rm ∙ sdLastVote)
                ≡⟨ sym lv≡v ⟩
              just (vm ^∙ vmVote)
                ≡⟨ cong (just ∘ _^∙ vmVote) (sym $ sendVote∈actions{outs = handleOuts (P pm)}{st = handlePre} (sym voteMsgOuts) m'∈acts) ⟩
              just v' ∎
    sameId (V vm) _ m'∈acts ≡pidLV =
      ⊥-elim (sendVote∉actions {outs = hvOuts} {st = peerStates pre pid} (sym noVotes) m'∈acts)
      where
        hvOuts = LBFT-outs (handleVote 0 vm) (peerStates pre pid)
        open handleVoteSpec.Contract (handleVoteSpec.contract! 0 vm (msgPool pre) handlePre)
    sameId (C x) _ ()
------------------------------------------------------------------------------
-- First "VotesOnce" obligation: if an honest peer sends a new vote `v` and an
-- honestly-signed vote `v'` for the same epoch is already in the pool, then
-- either v' has a strictly smaller round, or there is already a vote in the
-- pool for v's round/epoch with v's proposed id.
votesOnce₁ : Common.IncreasingRoundObligation Handle.InitHandler.initAndHandlers 𝓔
-- Initialisation case: the init handler sends no signed votes.
votesOnce₁ {pid = pid} {pid'} {pk = pk} {pre = pre} preach
  (step-init {rm} rm×acts uni)
  {v} {m} {v'} {m'} hpk v⊂MsgPpm m∈acts sig ¬bootstrap ¬msb pcspkv v'⊂m' m'∈pool sig' ¬bootstrap' eid≡
  with initHandlerSpec.contract pid fakeBootstrapInfo rm×acts
...| init-contract
  with initHandlerSpec.ContractOk.isInitPM init-contract m∈acts
...| (_ , _ , noSigs)
  with v⊂MsgPpm
...| vote∈qc vs∈qc v≈rbld qc∈nm
  = ⊥-elim (noSigs vs∈qc qc∈nm)
-- QC-carried vote: its signature was sent before, contradicting ¬msb.
votesOnce₁ {pid = pid} {pid'} {pk = pk} {pre = pre} preach sps@(step-msg {sndr , P pm} m∈pool ini) {v} {m} {v'} {m'} hpk (vote∈qc {vs} {qc} vs∈qc v≈rbld qc∈m) m∈acts sig ¬bootstrap ¬msb pcspkv v'⊂m' m'∈pool sig' ¬bootstrap' eid≡
  with cong _vSignature v≈rbld
...| refl = ⊥-elim ∘′ ¬msb $ qcVoteSigsSentB4-handle pid preach sps m∈acts qc∈m sig vs∈qc v≈rbld ¬bootstrap
-- Vote sent directly while handling a proposal: the interesting case.
votesOnce₁ {pid = pid} {pid'} {pk = pk} {pre = pre} preach sps@(step-msg {sndr , P pm} m∈pool ini) {v} {.(V (VoteMsg∙new v _))} {v'} {m'} hpk vote∈vm m∈acts sig ¬bootstrap ¬msb pcspkv v'⊂m' m'∈pool sig' ¬bootstrap' eid≡
  with handleProposalSpec.contract! 0 pm (msgPool pre) (peerStates pre pid)
...| handleProposalSpec.mkContract _ invProp noEpochChange vac _ _
  with BlockId-correct? (pm ^∙ pmProposal)
...| no ¬validProposal = ⊥-elim (sendVote∉actions {outs = hpOut} {st = hpPre} (sym (proj₂ $ invProp ¬validProposal)) m∈acts )
  where
    hpPre = peerStates pre pid
    hpOut = LBFT-outs (handleProposal 0 pm) hpPre
...| yes refl
  with vac refl (nohc preach m∈pool pid ini (invariantsCorrect pid pre ini preach) refl refl)
...| Voting.mkVoteAttemptCorrectWithEpochReq (inj₁ (_ , Voting.mkVoteUnsentCorrect noVoteMsgOuts nvg⊎vgusc)) sdEpoch≡? =
  ⊥-elim (sendVote∉actions{outs = LBFT-outs (handleProposal 0 pm) (peerStates pre pid)}{st = peerStates pre pid} (sym noVoteMsgOuts) m∈acts)
...| Voting.mkVoteAttemptCorrectWithEpochReq (inj₂ (Voting.mkVoteSentCorrect vm pid₁ voteMsgOuts vgCorrect)) sdEpoch≡?
  with sendVote∈actions{outs = LBFT-outs (handleProposal 0 pm) (peerStates pre pid)}{st = peerStates pre pid} (sym voteMsgOuts) m∈acts
...| refl = ret
  where
    -- Some definitions
    step = step-peer (step-honest sps)
    rmPre  = peerStates pre pid
    rmPost = peerStates (StepPeer-post{pre = pre} (step-honest sps)) pid
    -- State invariants
    rmInvs = invariantsCorrect pid pre ini preach
    open RoundManagerInv rmInvs
    -- Properties of `handleProposal`
    postLV≡ : just v ≡ (rmPost ^∙ pssSafetyData-rm ∙ sdLastVote)
    postLV≡ =
      trans (RoundManagerTransProps.VoteGenerated.lv≡v ∘ Voting.VoteGeneratedCorrect.state $ vgCorrect)
        (cong (_^∙ pssSafetyData-rm ∙ sdLastVote) (StepPeer-post-lemma (step-honest sps)))
    -- The proof
    m'mwsb : MsgWithSig∈ pk (ver-signature sig') (msgPool pre)
    m'mwsb = mkMsgWithSig∈ m' v' v'⊂m' pid' m'∈pool sig' refl
    pcspkv'-pre : PeerCanSignForPK pre v' pid pk
    pcspkv'-pre = PeerCanSignForPKProps.msb4 preach step (peerCanSignEp≡{v' = v'} pcspkv eid≡) hpk sig' m'mwsb
    rv'≤rv : v' ^∙ vRound ≤ v ^∙ vRound
    rv'≤rv =
      ≤-trans
        (oldVoteRound≤lvr preach hpk sig' ¬bootstrap' m'mwsb pcspkv'-pre (trans rmPreEsEpoch≡ eid≡))
        realLVR≤rv
      where
        open ≡-Reasoning
        -- TODO-1 : `rmPreSdEpoch≡` can be factored out into a lemma.
        -- Something like: for any reachable state where a peer sends a vote, the
        -- epoch for that vote is the peer's sdEpoch / esEpoch.
        rmPreSdEpoch≡ : rmPre ^∙ pssSafetyData-rm ∙ sdEpoch ≡ v ^∙ vEpoch
        rmPreSdEpoch≡
          with Voting.VoteGeneratedCorrect.state vgCorrect
             | Voting.VoteGeneratedCorrect.blockTriggered vgCorrect
        ...| RoundManagerTransProps.mkVoteGenerated lv≡v (Left (RoundManagerTransProps.mkVoteOldGenerated lvr≡ lv≡)) | _
          with SafetyDataInv.lvEpoch≡ ∘ SafetyRulesInv.sdInv $ rmSafetyRulesInv
        ...| sdEpochInv rewrite trans lv≡ (sym lv≡v) = sym sdEpochInv
        rmPreSdEpoch≡
          | RoundManagerTransProps.mkVoteGenerated lv≡v (Right (RoundManagerTransProps.mkVoteNewGenerated lvr< lvr≡)) | bt =
          trans sdEpoch≡? (sym ∘ proj₁ ∘ Voting.VoteMadeFromBlock⇒VoteEpochRoundIs $ bt)
        rmPreEsEpoch≡ : rmPre ^∙ rmEpochState ∙ esEpoch ≡ v ^∙ vEpoch
        rmPreEsEpoch≡ =
          begin rmPre ^∙ rmEpochState ∙ esEpoch   ≡⟨ rmEpochsMatch ⟩
                rmPre ^∙ pssSafetyData-rm ∙ sdEpoch ≡⟨ rmPreSdEpoch≡ ⟩
                v ^∙ vEpoch ∎
    realLVR≤rv : Meta.getLastVoteRound (rmPre ^∙ pssSafetyData-rm) ≤ v ^∙ vRound
    realLVR≤rv
      with Voting.VoteGeneratedCorrect.state vgCorrect
    ...| RoundManagerTransProps.mkVoteGenerated lv≡v (inj₁ (RoundManagerTransProps.mkVoteOldGenerated lvr≡ lv≡))
      rewrite trans lv≡ (sym lv≡v)
      = ≤-refl
    ...| RoundManagerTransProps.mkVoteGenerated lv≡v (inj₂ (RoundManagerTransProps.mkVoteNewGenerated lvr< lvr≡))
      with rmPre ^∙ pssSafetyData-rm ∙ sdLastVote
         | SafetyDataInv.lvRound≤ ∘ SafetyRulesInv.sdInv $ rmSafetyRulesInv
    ...| nothing | _      = z≤n
    ...| just lv | round≤ = ≤-trans (≤-trans round≤ (<⇒≤ lvr<)) (≡⇒≤ (sym lvr≡))
    -- Trichotomy on the two rounds; equality uses sameERasLV⇒sameId to show
    -- the proposed ids agree, and rv' > rv is impossible by rv'≤rv.
    ret : v' [ _<_ ]L v at vRound ⊎ Common.VoteForRound∈ Handle.InitHandler.initAndHandlers 𝓔 pk (v ^∙ vRound) (v ^∙ vEpoch) (v ^∙ vProposedId) (msgPool pre)
    ret
      with <-cmp (v' ^∙ vRound) (v ^∙ vRound)
    ...| tri< rv'<rv _ _ = Left rv'<rv
    ...| tri≈ _ rv'≡rv _
      = Right (Common.mkVoteForRound∈ _ v' v'⊂m' pid' m'∈pool sig' (sym eid≡) rv'≡rv
          (sym (sameERasLV⇒sameId (step-s preach step) hpk postLV≡ pcspkv v'⊂m' (Any-++ʳ _ m'∈pool) sig' ¬bootstrap' eid≡ (sym rv'≡rv) )))
    ...| tri> _ _ rv'>rv = ⊥-elim (≤⇒≯ rv'≤rv rv'>rv)
-- Handling a VoteMsg sends no votes, so this case is impossible.
votesOnce₁{pid = pid}{pid'}{pk = pk}{pre = pre} preach sps@(step-msg{sndr , V vm} m∈pool ini){v}{m}{v'}{m'} hpk v⊂m m∈acts sig ¬bootstrap ¬msb vspk v'⊂m' m'∈pool sig' ¬bootstrap' eid≡
  with v⊂m
...| vote∈qc vs∈qc v≈rbld qc∈m rewrite cong _vSignature v≈rbld =
  ⊥-elim ∘′ ¬msb $ qcVoteSigsSentB4-handle pid preach sps m∈acts qc∈m sig vs∈qc v≈rbld ¬bootstrap
...| vote∈vm =
  ⊥-elim (sendVote∉actions{outs = hvOut}{st = hvPre} (sym noVotes) m∈acts)
  where
    hvPre = peerStates pre pid
    hvOut = LBFT-outs (handleVote 0 vm) hvPre
    open handleVoteSpec.Contract (handleVoteSpec.contract! 0 vm (msgPool pre) hvPre)
------------------------------------------------------------------------------
-- Second "VotesOnce" obligation: two votes sent by the same honest step for
-- the same epoch and round propose the same block id.
votesOnce₂ : VO.ImplObligation₂ Handle.InitHandler.initAndHandlers 𝓔
-- Initialisation case: the init handler sends no signed votes.
votesOnce₂ {pid} {pk = pk} {pre} rss
  (step-init {rm} rm×acts uni)
  hpk v⊂m m∈acts sig ¬bootstrap ¬msb4 pcsfpk v'⊂m' m'∈acts sig' ¬bootstrap' ¬msb4' pcsfpk' ≡epoch ≡round
  with initHandlerSpec.contract pid fakeBootstrapInfo rm×acts
...| init-contract
  with initHandlerSpec.ContractOk.isInitPM init-contract m∈acts
...| (_ , refl , noSigs)
  with v⊂m
...| vote∈qc vs∈qc v≈rbld qc∈pm = ⊥-elim (noSigs vs∈qc qc∈pm)
-- Step-msg case: dismiss QC-carried votes (their signatures were sent
-- before), leaving only the case where both votes are sent in VoteMsgs.
votesOnce₂{pid}{pk = pk}{pre} rss (step-msg{sndr , m“} m“∈pool ini){v}{v' = v'} hpk v⊂m m∈acts sig ¬bootstrap ¬msb4 pcsfpk v'⊂m' m'∈acts sig' ¬bootstrap' ¬msb4' pcsfpk' ≡epoch ≡round
  with v⊂m
...| vote∈qc vs∈qc v≈rbld qc∈m rewrite cong _vSignature v≈rbld =
  ⊥-elim ∘′ ¬msb4 $ qcVoteSigsSentB4-handle pid rss (step-msg m“∈pool ini) m∈acts qc∈m sig vs∈qc v≈rbld ¬bootstrap
...| vote∈vm
  with v'⊂m'
...| vote∈qc vs∈qc' v≈rbld' qc∈m' rewrite cong _vSignature v≈rbld' =
  ⊥-elim ∘′ ¬msb4' $ qcVoteSigsSentB4-handle pid rss (step-msg m“∈pool ini) m'∈acts qc∈m' sig' vs∈qc' v≈rbld' ¬bootstrap'
...| vote∈vm
  with m“
-- A proposal handler sends at most one vote, so both sent votes coincide.
...| P pm = cong (_^∙ vProposedId) v≡v'
  where
    hpPool = msgPool pre
    hpPre  = peerStates pre pid
    hpOut  = LBFT-outs (handleProposal 0 pm) hpPre
    open handleProposalSpec.Contract (handleProposalSpec.contract! 0 pm hpPool hpPre)
    v≡v' : v ≡ v'
    v≡v'
      with BlockId-correct? (pm ^∙ pmProposal)
    ...| no ¬validProposal = ⊥-elim (sendVote∉actions {outs = hpOut} {st = hpPre} (sym (proj₂ $ invalidProposal ¬validProposal)) m∈acts)
    ...| yes refl
      with voteAttemptCorrect refl (nohc rss m“∈pool pid ini (invariantsCorrect pid pre ini rss) refl refl )
    ...| Voting.mkVoteAttemptCorrectWithEpochReq (Left (_ , Voting.mkVoteUnsentCorrect noVoteMsgOuts _)) _ =
      ⊥-elim (sendVote∉actions{outs = hpOut}{st = hpPre} (sym noVoteMsgOuts) m∈acts)
    ...| Voting.mkVoteAttemptCorrectWithEpochReq (Right (Voting.mkVoteSentCorrect vm pid voteMsgOuts _)) _ = begin
      v            ≡⟨ cong (_^∙ vmVote) (sendVote∈actions{outs = hpOut}{st = hpPre} (sym voteMsgOuts) m∈acts) ⟩
      vm ^∙ vmVote ≡⟨ (sym $ cong (_^∙ vmVote) (sendVote∈actions{outs = hpOut}{st = hpPre} (sym voteMsgOuts) m'∈acts)) ⟩
      v' ∎
      where
        open ≡-Reasoning
-- A vote handler sends no votes, contradiction.
...| V vm = ⊥-elim (sendVote∉actions{outs = hvOut}{st = hvPre} (sym noVotes) m∈acts)
  where
    hvPre = peerStates pre pid
    hvOut = LBFT-outs (handle pid (V vm) 0) hvPre
    open handleVoteSpec.Contract (handleVoteSpec.contract! 0 vm (msgPool pre) hvPre)
|
{"hexsha": "8e071ba4b8532447d7491e3c8f57d7223e714738", "size": 33911, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/LibraBFT/Impl/Properties/VotesOnce.agda", "max_stars_repo_name": "LaudateCorpus1/bft-consensus-agda", "max_stars_repo_head_hexsha": "a4674fc473f2457fd3fe5123af48253cfb2404ef", "max_stars_repo_licenses": ["UPL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/LibraBFT/Impl/Properties/VotesOnce.agda", "max_issues_repo_name": "LaudateCorpus1/bft-consensus-agda", "max_issues_repo_head_hexsha": "a4674fc473f2457fd3fe5123af48253cfb2404ef", "max_issues_repo_licenses": ["UPL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/LibraBFT/Impl/Properties/VotesOnce.agda", "max_forks_repo_name": "LaudateCorpus1/bft-consensus-agda", "max_forks_repo_head_hexsha": "a4674fc473f2457fd3fe5123af48253cfb2404ef", "max_forks_repo_licenses": ["UPL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.3803030303, "max_line_length": 228, "alphanum_fraction": 0.6522072484, "num_tokens": 11909}
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Jinsong Liu <jinsongliu@utexas.edu>
#
# Distributed under terms of the GNU-License license.
"""
"""
import sys
sys.path.append('/Users/jinsongliu/Box Sync/Dissertation_UT/OMAE2018/UQ_FOWT')
import os
import csv
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from statsmodels.distributions.empirical_distribution import ECDF
import matplotlib.cm as cm
from math import atan2
from utility.dataIO import *
# Marker and color cycles shared by every MUSE plot; indexed by file number
# so repeated figures use a consistent palette.
MUSEmarker = ['o','s','p','*','x','d','h']
MUSEcolors = ['b','g','r','c','m','y','k']
# labels = {
#         'QUAD' : 'QuadPts: ',
#         'QUANT' : 'LHS Quantile: ',
#         'MC' : 'MCS: ',
#         'ED' : 'DOE: '
#         }
def setMSUEPlot():
    """Apply the shared MUSE plotting defaults and clear the current figure.

    Sets an 18-pt font and a 10x10-inch default figure size via matplotlib
    rc parameters, then calls ``plt.clf()`` so the caller starts clean.
    """
    font_config = {'family': 'normal',
                   'size': 18}
    figure_config = {'figsize': (10, 10)}
    mpl.rc('font', **font_config)
    mpl.rc('figure', **figure_config)
    plt.clf()
def ECPlot(prefix, space,figHandle=None, figname='Norway5EC2DHT'):
setMSUEPlot()
if figHandle:
fig, ax = figHandle
else:
fig = plt.figure() # create a figure object
ax = fig.add_subplot(1, 1, 1) # create an axes object in the figure
if space.upper()=='ZETA':
space2plot = np.arange(0,2)
pos = np.array([[1,1,1,1,1,1],[3,5.5,8,10,12,13]]).T
elif space.upper()=='PHY':
space2plot = np.arange(2,4)
pos = np.array([[2,2,1.8,1.8,1.8,1.8],[15,20,23,27,30,32]]).T
else:
ValueError('Space is Zeta or Phy')
filelist = [f for f in os.listdir(os.getcwd()) if f.startswith(prefix)]
print prefix, " Number of files:", len(filelist)
for i, filename in enumerate(filelist):
print "Processing file:", i
p = float(filename[21:-4])
# data = np.genfromtxt(filename,delimiter=',')
data = iter_loadtxt(filename)
data = data[:,space2plot]
mu = np.mean(data, axis=0)
data = data - mu
data = data.tolist()
data.sort(key=lambda c:atan2(c[0], c[1]))
data = np.array(data)
data = data + mu
# plt.plot(data[:,0],data[:,1],'-', label='50-year EC' + filename[21:-4])
ax.plot(data[:,0],data[:,1],'-',color='Gray')
ax.text(pos[i,0],pos[i,1], 'p3='+'{:.0e}'.format(p),fontsize=10,color='Gray')
if space.upper() == 'ZETA':
ax.set_title("Environmental Contour in $\zeta$ Space")
ax.set_xlabel("$\zeta_1$")
ax.set_ylabel("$\zeta_2$")
ax.set_xlim(0,20)
ax.set_ylim(0,20)
# plt.axis('equal')
else:
ax.set_title("Environmental Contour in Physical Space")
ax.set_ylabel("Peak period, $T_{p} (s)$")
ax.set_xlabel("Significant wave height, $H_s (m)$")
ax.grid('on')
ax.legend(loc=0)
fig.savefig(figname+'_'+space+'.eps')
return (fig,ax)
def sampPlot(space, prefix,labels=[], figHandle=None):
if figHandle:
fig, ax = figHandle
else:
fig = plt.figure() # create a figure object
ax = fig.add_subplot(1, 1, 1) # create an axes object in the figure
if space.upper()=='ZETA':
space2plot = np.arange(0,2)
pos = np.array([[1,1,1,1,1,1],[3,5.5,8,10,12,13]]).T
elif space.upper()=='PHY':
space2plot = np.arange(-2,0)
pos = np.array([[2,2,1.8,1.8,1.8,1.8],[15,20,23,27,30,32]]).T
else:
ValueError('Space is Zeta or Phy')
filelist = [f for f in os.listdir(os.getcwd()) if f.startswith(prefix)]
print prefix, "Number of files:", len(filelist)
for i, filename in enumerate(filelist):
print "Processing file:",filename
# data = np.genfromtxt(filename,delimiter=',')
data = iter_loadtxt(filename)
data = data[:,space2plot]
if labels:
ax.scatter(data[:,0],data[:,1],s=50,marker = MUSEmarker[i],color=MUSEcolors[i],label=labels[i])
else:
ax.scatter(data[:,0],data[:,1],s=50,marker = MUSEmarker[i],color=MUSEcolors[i])
if space.upper() == 'ZETA':
ax.set_title("Traning Samples in $\zeta$ Space")
ax.set_xlabel("$\zeta_1$")
ax.set_ylabel("$\zeta_2$")
ax.set_xlim(0,20)
ax.set_ylim(0,20)
# plt.axis('equal')
else:
ax.set_title("Training Samples in Physical Space")
ax.set_ylabel("Peak period, $T_{p} (s)$")
ax.set_xlabel("Significant wave height, $H_s (m)$")
ax.set_xlim(0,22)
ax.set_ylim(0,50)
ax.grid()
ax.legend(loc=0)
fig.savefig('TrainingSamples_'+space+'.eps')
return (fig,ax)
def ExPlot(data,q=1e-4, R=1,labels=[],color='k',figHandle=None,figname='ExceedencePlot'):
if figHandle:
fig, ax = figHandle
else:
fig = plt.figure() # create a figure object
ax = fig.add_subplot(1, 1, 1) # create an axes object in the figure
# data = iter_loadtxt(filename)
M = int(len(data)/R)
if M < 1.0/q:
raise ValueError('Not enough samples to get specified quantile ', str(q))
data = data.reshape((M,R))
conf=[]
for i in xrange(R):
ecdf = ECDF(data[:,i])
conf.append(next(ecdf.x[i] for i, xx in enumerate(1-ecdf.y) if xx<q))
x,y = ecdf.x, ecdf.y
M1 = int(M * 0.9)
M2 = M - M1
ind1 = np.linspace(0,M1,num=int(M1/10),dtype=int)
ind2 = np.linspace(M1+1, M, num=M2, dtype=int)
ind = np.append(ind1, ind2)
if labels:
ax.plot(x, 1-y, color=color, label=labels[i])
else:
ax.plot(x, 1-y, color=color)
# for i, filename in enumerate(filelist):
# print "Processing file:", i
# data = np.genfromtxt(filename,delimiter=',')
# # print data
# ecdf = ECDF(data[:,4])
# conf.append(next(ecdf.x[i] for i, xx in enumerate(1-ecdf.y) if xx<1e-4))
# x,y = ecdf.x,ecdf.y
# if len(x) > 1e6:
# ind = np.linspace(0,1e5,1e4,dtype=np.int32)
# ind = np.append(ind, np.arange(1e5,1e6,dtype=np.int32))
# x=x[ind]
# y=y[ind]
# plt.plot(x, 1-y)
conf.sort()
q0 = 1.0/M
print "Exceedence interval: [", conf[0], conf[-1], " ]"
ax.plot([conf[0],conf[0]],[q0,q], '--', color='Gray')
ax.plot([conf[-1],conf[-1]],[q0,q], '--', color='Gray')
ax.plot([0, conf[-1]],[q,q], '--', color='Gray')
ax.text(2,1e-5,'$10^{-'+ '{:.1E}'.format(q)[-1] +'}$' +' Exceedence Interval:\n ['+ '{:.2f}'.format(conf[0]) +' , '+ '{:.2f}'.format(conf[-1])+']')
# ax.text(2,1e-5, '{:.1E}'.format(q) +' Exceedence Interval:\n ['+ '{:.2f}'.format(conf[0]) +' , '+ '{:.2f}'.format(conf[-1])+']')
ax.set_yscale('log')
ax.set_xlabel('QoI: $f^{T}_{max}$')
ax.set_xlim(0,22)
ax.set_ylabel('Exceedence')
ax.set_title('Exceedence plot of SDOF system with fixed phases')
plt.savefig(figname + '.eps')
return (fig,ax)
# def CVPlot(x,y,*arg)
# plt.plot(x,y, arg)
# fig = plt.figure()
# ax = fig.gca(projection='3d')
# ax.plot(data[:,2], data[:,3],valiData.Y, 'o',label='Validate Data')
# ax.plot(data[:,2], data[:,3],f_cv[:,0], 'o',label='5')
# # ax.plot(valiData.X[:,0], valiData.X[:,1],f_cv[:,1], 'o', label='6')
# ax.legend()
# plt.show()
|
{"hexsha": "e3986cbd3e554174173aebd1f7b070c8e465d657", "size": 7445, "ext": "py", "lang": "Python", "max_stars_repo_path": "uqra/uqplot/MUSEPlot.py", "max_stars_repo_name": "Jinsongl/UQRA", "max_stars_repo_head_hexsha": "09c7042f8c35a262a942224e2367540b5fd2b077", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "uqra/uqplot/MUSEPlot.py", "max_issues_repo_name": "Jinsongl/UQRA", "max_issues_repo_head_hexsha": "09c7042f8c35a262a942224e2367540b5fd2b077", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "uqra/uqplot/MUSEPlot.py", "max_forks_repo_name": "Jinsongl/UQRA", "max_forks_repo_head_hexsha": "09c7042f8c35a262a942224e2367540b5fd2b077", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9424778761, "max_line_length": 151, "alphanum_fraction": 0.5417058428, "include": true, "reason": "import numpy,from statsmodels", "num_tokens": 2318}
|
# Documenter build script for Stipple.jl: assembles the HTML documentation
# site and, on CI, deploys it to the repository's gh-pages branch.
using Documenter

# Make the package source findable when building docs from a local checkout.
push!(LOAD_PATH, "../../src")

using Stipple, Stipple.Elements, Stipple.Layout, Stipple.Typography

# Build the site.  `prettyurls = false` emits plain .html links, which are
# easier to browse straight from the local filesystem.
makedocs(
    sitename = "Stipple - data dashboards and reactive UIs for Julia",
    format = Documenter.HTML(prettyurls = false),
    pages = [
        "Home" => "index.md",
        "Stipple API" => [
            "Elements" => "api/elements.md",
            "Layout" => "api/layout.md",
            "NamedTuples" => "api/namedtuples.md",
            "Stipple" => "api/stipple.md",
            "Typography" => "api/typography.md",
        ]
    ],
)

# Publish the built docs (no-op outside a supported CI environment).
deploydocs(
    repo = "github.com/GenieFramework/Stipple.jl.git",
)
|
{"hexsha": "a5ab07072505141a1a3a0d2c94acfe2f7d21e264", "size": 627, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "jeremiedb/Stipple.jl", "max_stars_repo_head_hexsha": "4dafaa54219a9b837db7e040f3121acd64c94351", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-15T21:37:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-15T21:37:00.000Z", "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "essenciary/Stipple.jl", "max_issues_repo_head_hexsha": "a7ae7d83f9f3cd67a7fd1a7e46414962a32525e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "essenciary/Stipple.jl", "max_forks_repo_head_hexsha": "a7ae7d83f9f3cd67a7fd1a7e46414962a32525e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.08, "max_line_length": 70, "alphanum_fraction": 0.5837320574, "num_tokens": 171}
|
//
// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BEAST_UNIT_TEST_SUITE_LIST_HPP
#define BEAST_UNIT_TEST_SUITE_LIST_HPP
#include <beast/unit_test/suite_info.hpp>
#include <beast/unit_test/detail/const_container.hpp>
#include <boost/assert.hpp>
#include <typeindex>
#include <set>
#include <unordered_set>
namespace beast {
namespace unit_test {
/// A container of test suites.
///
/// Wraps a sorted std::set of suite_info records behind a read-only
/// container interface.  In debug builds it additionally tracks the names
/// and C++ types of registered suites so duplicates can be asserted on.
class suite_list
    : public detail::const_container <std::set <suite_info>>
{
private:
#ifndef NDEBUG
    // Debug-only bookkeeping used by insert() to catch duplicate
    // registrations.
    std::unordered_set<std::string> names_;        // "library.module.name" keys
    std::unordered_set<std::type_index> classes_;  // registered suite types
#endif
public:
    /** Insert a suite into the set.

        The suite must not already exist (asserted in debug builds).
        All arguments are forwarded to make_suite_info<Suite>.
    */
    template<class Suite>
    void
    insert(
        char const* name,
        char const* module,
        char const* library,
        bool manual,
        int priority);
};
//------------------------------------------------------------------------------
// Register a suite; in debug builds, assert that neither the fully
// qualified name nor the suite's C++ type was registered before.
template<class Suite>
void
suite_list::insert(
    char const* name,
    char const* module,
    char const* library,
    bool manual,
    int priority)
{
#ifndef NDEBUG
    {
        // Reject duplicate "library.module.name" registrations.
        std::string const key =
            std::string(library) + "." + module + "." + name;
        auto const inserted = names_.insert(key);
        BOOST_ASSERT(inserted.second); // Duplicate name
    }
    {
        auto const inserted =
            classes_.insert(std::type_index(typeid(Suite)));
        BOOST_ASSERT(inserted.second); // Duplicate type
    }
#endif
    cont().emplace(make_suite_info<Suite>(
        name, module, library, manual, priority));
}
} // unit_test
} // beast
#endif
|
{"hexsha": "41526a2d052b21144b3ac98a4ea9a772a2eb866c", "size": 1801, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/beast/extras/beast/unit_test/suite_list.hpp", "max_stars_repo_name": "sneh19337/rippled", "max_stars_repo_head_hexsha": "442205bdf270d26f0936f7ece5f03bcc952a6a8b", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 3804.0, "max_stars_repo_stars_event_min_datetime": "2015-01-02T01:50:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T23:28:19.000Z", "max_issues_repo_path": "src/beast/extras/beast/unit_test/suite_list.hpp", "max_issues_repo_name": "madilraza/rippled", "max_issues_repo_head_hexsha": "023f5704d07d09e70091f38a0d4e5df213a3144b", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 3131.0, "max_issues_repo_issues_event_min_datetime": "2015-01-01T04:00:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T19:41:33.000Z", "max_forks_repo_path": "src/beast/extras/beast/unit_test/suite_list.hpp", "max_forks_repo_name": "madilraza/rippled", "max_forks_repo_head_hexsha": "023f5704d07d09e70091f38a0d4e5df213a3144b", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1349.0, "max_forks_repo_forks_event_min_datetime": "2015-01-04T04:36:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:56:50.000Z", "avg_line_length": 22.5125, "max_line_length": 80, "alphanum_fraction": 0.6313159356, "num_tokens": 409}
|
import os
import collections
import logging
import yaml
import torch
import torchvision
import numpy as np
from skimage import io
from mathtools import utils, torchutils, metrics
logger = logging.getLogger(__name__)
class ImageClassifier(torch.nn.Module):
    """CNN feature extractor followed by a linear classification head.

    If no ``feature_extractor`` is given, a pretrained torchvision model is
    loaded and truncated at ``feature_extractor_layer``.
    """

    def __init__(
            self, out_dim,
            feature_dim=None, feature_extractor=None, finetune_extractor=True,
            feature_extractor_name='resnet50', feature_extractor_layer=-1):
        """
        Parameters
        ----------
        out_dim : int
            Number of output classes.
        feature_dim : int, optional
            Dimensionality of the extractor's output features.  Required when
            ``feature_extractor`` is passed explicitly; inferred otherwise.
        feature_extractor : torch.nn.Module, optional
            Module mapping image batches to feature vectors.  Built from a
            pretrained torchvision model when omitted.
        finetune_extractor : bool
            If False, the extractor's parameters are frozen.
        feature_extractor_name : str
            torchvision model name to load (resnet family expected).
        feature_extractor_layer : int
            Children of the pretrained model are kept up to (exclusive) this
            index; the default -1 drops only the final fc layer.
        """
        super().__init__()

        self.out_dim = out_dim

        if feature_extractor is None:
            Extractor = getattr(torchvision.models, feature_extractor_name)
            pretrained_model = Extractor(pretrained=True, progress=True)
            layers = list(pretrained_model.children())[:feature_extractor_layer]
            feature_extractor = torch.nn.Sequential(*layers)
            # BUG FIX: feature_dim was hard-coded to 512, which is only
            # correct for resnet18/34; resnet50 (the default) produces
            # 2048-dim features.  Read the width from the model's fc layer,
            # falling back to the old value for models without one.
            fc = getattr(pretrained_model, 'fc', None)
            feature_dim = fc.in_features if fc is not None else 512

        if not finetune_extractor:
            for param in feature_extractor.parameters():
                param.requires_grad = False

        self.feature_extractor = feature_extractor
        self.classifier = torch.nn.Linear(feature_dim, out_dim)

    def forward(self, inputs):
        """Map a batch of images to class scores of shape (batch, out_dim)."""
        # Extractor output carries trailing 1x1 spatial dims; drop them.
        features = self.feature_extractor(inputs).squeeze(-1).squeeze(-1)
        outputs = self.classifier(features)
        return outputs

    def predict(self, outputs):
        """Return the arg-max class index for each row of ``outputs``."""
        preds = outputs.argmax(dim=1)
        return preds
class VideoDataset(torch.utils.data.Dataset):
    """A dataset wrapping image sequences stored on disk.

    Frames are loaded lazily (by filename) in ``__getitem__``; labels are
    held in memory as torch tensors.  When ``batch_mode == 'flatten'``,
    each dataset item is a fixed-size window of one sequence instead of a
    whole sequence.

    Attributes
    ----------
    _frame_fns : iterable( array_like of string, len (sequence_len) )
    _labels : tuple( torch.Tensor, shape (sequence_len, ...) )
    _device : torch.Device
    """

    def __init__(
            self, frame_fns, labels, device=None, labels_dtype=None,
            transpose_data=False, seq_ids=None, batch_size=None, batch_mode=None):
        """
        Parameters
        ----------
        frame_fns : iterable( array_like of string, len (sequence_len) )
        labels : iterable( array_like of int, shape (sequence_len,) )
        device :
        labels_dtype : torch data type
            If passed, labels will be converted to this type
        transpose_data : bool
            Stored on the instance; not used by this class directly.
        seq_ids : iterable( int )
            Required; one ID per sequence.
        batch_size : int, optional
            Window length used when ``batch_mode == 'flatten'``.
        batch_mode : {'flatten', 'sample', None}, optional
            'flatten' makes each item a fixed-size window of one sequence;
            anything else makes each item a whole sequence.
        """
        if seq_ids is None:
            raise ValueError("This class must be initialized with seq_ids")

        # Infer the number of label classes: one-hot style 2-D labels carry
        # it in the second dimension; 1-D labels are counted by unique value.
        if len(labels[0].shape) == 2:
            self.num_label_types = labels[0].shape[1]
        elif len(labels[0].shape) < 2:
            self.num_label_types = len(np.unique(np.hstack(labels)))
        else:
            err_str = f"Labels have a weird shape: {labels[0].shape}"
            raise ValueError(err_str)

        self.transpose_data = transpose_data
        self.batch_size = batch_size
        self.batch_mode = batch_mode

        self._device = device
        self._seq_ids = seq_ids

        self._frame_fns = frame_fns
        self._labels = tuple(
            map(lambda x: torch.tensor(x, device=device, dtype=labels_dtype), labels)
        )
        self._seq_lens = tuple(x.shape[0] for x in self._labels)

        if self.batch_size is not None and self.batch_mode == 'flatten':
            # Flat index of (sequence, window-start) pairs: item i of the
            # dataset is the window self.unflatten[i].
            self.unflatten = tuple(
                (seq_index, win_index)
                for seq_index, seq_len in enumerate(self._seq_lens)
                for win_index in range(0, seq_len, self.batch_size)
            )

        logger.info('Initialized VideoDataset.')
        logger.info(f"{self.num_label_types} unique labels")

    def __len__(self):
        if self.batch_mode == 'flatten':
            return len(self.unflatten)
        return len(self._seq_ids)

    def __getitem__(self, i):
        # BUG FIX: this branch previously tested only `batch_size is not
        # None`, but `self.unflatten` exists only when batch_mode ==
        # 'flatten' (the condition __len__ uses); the mismatch raised
        # AttributeError for other batch modes.  The conditions now agree.
        if self.batch_size is not None and self.batch_mode == 'flatten':
            seq_idx, win_idx = self.unflatten[i]
            label_seq = self._labels[seq_idx]
            frame_fns = self._frame_fns[seq_idx]
            start_idx = win_idx
            end_idx = start_idx + self.batch_size
            frame_fns = frame_fns[start_idx:end_idx]
            label_seq = label_seq[start_idx:end_idx]
        else:
            label_seq = self._labels[i]
            frame_fns = self._frame_fns[i]

        data_seq = torch.tensor(
            self._load_frames(frame_fns),
            device=self._device, dtype=torch.float
        )

        # shape (batch_size, num_rows, num_cols, num_channels) -->
        # (batch_size, num_channels, num_rows, num_cols)
        data_seq = data_seq.permute(0, 3, 1, 2)

        return data_seq, label_seq, i

    def _load_frames(self, frame_fns):
        # Read each image from disk and stack into one (num_frames, ...) array.
        frames = np.stack(tuple(io.imread(fn) for fn in frame_fns), axis=0)
        return frames
def main(
        out_dir=None, data_dir=None, prefix='trial=',
        model_name=None, gpu_dev_id=None, batch_size=None, learning_rate=None,
        file_fn_format=None, label_fn_format=None,
        start_from=None, stop_at=None,
        model_params={}, cv_params={}, train_params={}, viz_params={},
        num_disp_imgs=None, viz_templates=None, results_file=None, sweep_param_name=None):
    """Train and evaluate an ImageClassifier over cross-validation folds.

    For each fold: build train/val/test datasets of frame files, train the
    model, evaluate on the test split, and save predictions, scores, model
    weights, and training-curve plots under ``out_dir``.

    Parameters
    ----------
    out_dir : str
        Output directory (created if needed); holds data/, figures/, log.txt.
    data_dir : str
        Directory containing the input sequences and the saved 'vocab'.
    prefix : str
        Filename prefix identifying sequence IDs in ``data_dir``.
    model_name : str
        Tag used when saving each fold's trained model.
    gpu_dev_id :
        Device selector passed to ``torchutils.selectDevice``.
    batch_size : int
        Window length used to flatten sequences into fixed-size batches.
    learning_rate : float
        Adam learning rate.
    file_fn_format, label_fn_format : str
        Filename templates for feature and label files.
    start_from, stop_at : int, optional
        Restrict which CV fold indices are run (useful for resuming).
    model_params, cv_params, train_params, viz_params : dict
        Keyword args forwarded to the model constructor, the CV splitter,
        the training loop, and visualization, respectively.
    num_disp_imgs, viz_templates :
        Accepted for config parity; not used in this function body.
    results_file : str, optional
        Metrics CSV; defaults to ``out_dir/results.csv``.
    sweep_param_name : str
        Name of the swept hyperparameter, recorded alongside results.
    """
    data_dir = os.path.expanduser(data_dir)
    out_dir = os.path.expanduser(out_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))

    if results_file is None:
        results_file = os.path.join(out_dir, 'results.csv')
    else:
        results_file = os.path.expanduser(results_file)

    # Output layout: figures/ (plots), figures/model-io/, data/ (variables).
    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)
    io_dir = os.path.join(fig_dir, 'model-io')
    if not os.path.exists(io_dir):
        os.makedirs(io_dir)
    out_data_dir = os.path.join(out_dir, 'data')
    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    def saveVariable(var, var_name, to_dir=out_data_dir):
        # Shorthand: persist a variable into the output data directory.
        utils.saveVariable(var, var_name, to_dir)

    # Load data
    trial_ids = utils.getUniqueIds(data_dir, prefix=prefix, to_array=True)
    vocab = utils.loadVariable('vocab', data_dir)
    saveVariable(vocab, 'vocab')

    # Define cross-validation folds
    data_loader = utils.CvDataset(
        trial_ids, data_dir, vocab=vocab, prefix=prefix,
        feature_fn_format=file_fn_format, label_fn_format=label_fn_format
    )
    cv_folds = utils.makeDataSplits(len(data_loader.trial_ids), **cv_params)

    device = torchutils.selectDevice(gpu_dev_id)
    labels_dtype = torch.long
    criterion = torch.nn.CrossEntropyLoss()
    metric_names = ('Loss', 'Accuracy')

    def make_dataset(fns, labels, ids, batch_mode='sample', shuffle=True):
        # Wrap sequences in a VideoDataset plus a one-item-per-batch loader.
        dataset = VideoDataset(
            fns, labels,
            device=device, labels_dtype=labels_dtype, seq_ids=ids,
            batch_size=batch_size, batch_mode=batch_mode
        )
        loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=shuffle)
        return dataset, loader

    for cv_index, cv_fold in enumerate(cv_folds):
        # Optionally skip folds outside [start_from, stop_at].
        if start_from is not None and cv_index < start_from:
            continue
        if stop_at is not None and cv_index > stop_at:
            break
        train_data, val_data, test_data = data_loader.getFold(cv_fold)
        train_set, train_loader = make_dataset(*train_data, batch_mode='flatten', shuffle=True)
        test_set, test_loader = make_dataset(*test_data, batch_mode='flatten', shuffle=False)
        val_set, val_loader = make_dataset(*val_data, batch_mode='flatten', shuffle=True)
        logger.info(
            f'CV fold {cv_index + 1} / {len(cv_folds)}: {len(data_loader.trial_ids)} total '
            f'({len(train_set)} train, {len(val_set)} val, {len(test_set)} test)'
        )
        model = ImageClassifier(len(vocab), **model_params)
        optimizer_ft = torch.optim.Adam(
            model.parameters(), lr=learning_rate,
            betas=(0.9, 0.999), eps=1e-08,
            weight_decay=0, amsgrad=False
        )
        # gamma=1.00 keeps the learning rate constant; the scheduler is a no-op.
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_ft, step_size=1, gamma=1.00)
        train_epoch_log = collections.defaultdict(list)
        val_epoch_log = collections.defaultdict(list)
        metric_dict = {name: metrics.makeMetric(name) for name in metric_names}
        model, last_model_wts = torchutils.trainModel(
            model, criterion, optimizer_ft, lr_scheduler,
            train_loader, val_loader,
            device=device,
            metrics=metric_dict,
            train_epoch_log=train_epoch_log,
            val_epoch_log=val_epoch_log,
            **train_params
        )
        # Test model
        metric_dict = {name: metrics.makeMetric(name) for name in metric_names}
        test_io_history = torchutils.predictSamples(
            model.to(device=device), test_loader,
            criterion=criterion, device=device,
            metrics=metric_dict, data_labeled=True, update_model=False,
            seq_as_batch=train_params['seq_as_batch'],
            return_io_history=True
        )
        metric_str = ' '.join(str(m) for m in metric_dict.values())
        logger.info('[TST] ' + metric_str)
        utils.writeResults(
            results_file, {name: m.value for name, m in metric_dict.items()},
            sweep_param_name, model_params
        )
        # Persist per-batch predictions/scores/labels for offline analysis.
        # NOTE: `prefix` below shadows the keyword argument; safe because the
        # argument is no longer used past this point.
        for pred_seq, score_seq, feat_seq, label_seq, batch_id in test_io_history:
            prefix = f'cvfold={cv_index}_batch={batch_id}'
            saveVariable(pred_seq.cpu().numpy(), f'{prefix}_pred-label-seq')
            saveVariable(score_seq.cpu().numpy(), f'{prefix}_score-seq')
            saveVariable(label_seq.cpu().numpy(), f'{prefix}_true-label-seq')
        saveVariable(test_set.unflatten, f'cvfold={cv_index}_test-set-unflatten')
        saveVariable(model, f'cvfold={cv_index}_{model_name}-best')
        if train_epoch_log:
            torchutils.plotEpochLog(
                train_epoch_log,
                subfig_size=(10, 2.5),
                title='Training performance',
                fn=os.path.join(fig_dir, f'cvfold={cv_index}_train-plot.png')
            )
        if val_epoch_log:
            torchutils.plotEpochLog(
                val_epoch_log,
                subfig_size=(10, 2.5),
                title='Heldout performance',
                fn=os.path.join(fig_dir, f'cvfold={cv_index}_val-plot.png')
            )
if __name__ == "__main__":
    # Parse command-line args and config file
    cl_args = utils.parse_args(main)
    config, config_fn = utils.parse_config(cl_args, script_name=__file__)

    # Create output directory, instantiate log file and write config options
    out_dir = os.path.expanduser(config['out_dir'])
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # Snapshot the resolved config and this script for reproducibility.
    with open(os.path.join(out_dir, config_fn), 'w') as outfile:
        yaml.dump(config, outfile)
    utils.copyFile(__file__, out_dir)

    main(**config)
|
{"hexsha": "cf3c54d80000970785cbe2afd0152ec47f67db98", "size": 11183, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/predict_video_pytorch.py", "max_stars_repo_name": "jd-jones/kinemparse", "max_stars_repo_head_hexsha": "279a9989981aa2a0eef1e46e5d02833f51ae352d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/predict_video_pytorch.py", "max_issues_repo_name": "jd-jones/kinemparse", "max_issues_repo_head_hexsha": "279a9989981aa2a0eef1e46e5d02833f51ae352d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/predict_video_pytorch.py", "max_forks_repo_name": "jd-jones/kinemparse", "max_forks_repo_head_hexsha": "279a9989981aa2a0eef1e46e5d02833f51ae352d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.545751634, "max_line_length": 95, "alphanum_fraction": 0.6370383618, "include": true, "reason": "import numpy", "num_tokens": 2522}
|
#!/usr/bin/env python
'''
@package prototype.coverage.record_set
@file prototype/coverage/record_set.py
@author David Stuebe
@author Tim Giguere
@brief https://confluence.oceanobservatories.org/display/CIDev/R2+Construction+Data+Model
'''
import numpy
class IterableExpression(dict):
    """A dict of arange parameters paired with the materialized sequence.

    The constructor records (start, stop, stride, dtype) as dictionary
    entries and exposes the corresponding ``numpy.arange`` result as
    ``self.sequence``.

    Current interface:
    ie = IterableExpression(1.0, 10.0)
    1.0 == ie.sequence[0]
    for val in ie.sequence:
        ...
    """

    def __init__(self, start=None, stop=None, stride=None, dtype=None):
        params = dict(start=start, stop=stop, stride=stride, dtype=dtype)
        dict.__init__(self, **params)
        # Materialize the expression eagerly as a numpy array.
        self.sequence = numpy.arange(start, stop, stride, dtype)
# Quick demo: materialize [0.0, 1.0, ..., 99.0] (default stride) and print
# each value.  (Python 2 print statement, matching the rest of the module.)
time = IterableExpression(start=0.0,stop=100.0)
for t in time.sequence:
    print t
|
{"hexsha": "b6edb32c18309b46b1810fb3975458e41115c34e", "size": 864, "ext": "py", "lang": "Python", "max_stars_repo_path": "prototype/coverage/iterable_expression.py", "max_stars_repo_name": "ooici/pyon", "max_stars_repo_head_hexsha": "122c629290d27f32f2f41dafd5c12469295e8acf", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2015-06-09T16:07:09.000Z", "max_stars_repo_stars_event_max_datetime": "2015-07-28T10:06:31.000Z", "max_issues_repo_path": "prototype/coverage/iterable_expression.py", "max_issues_repo_name": "ooici/pyon", "max_issues_repo_head_hexsha": "122c629290d27f32f2f41dafd5c12469295e8acf", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-07-22T15:14:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-13T19:35:06.000Z", "max_forks_repo_path": "prototype/coverage/iterable_expression.py", "max_forks_repo_name": "ooici/pyon", "max_forks_repo_head_hexsha": "122c629290d27f32f2f41dafd5c12469295e8acf", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.0731707317, "max_line_length": 118, "alphanum_fraction": 0.6944444444, "include": true, "reason": "import numpy", "num_tokens": 220}
|
# -*- coding: utf-8 -*-
''' Smooth Component (1)
This module contains the class for the convex heuristic for a piecewise linear
function. A piecewise constant function has a sparse second-order difference;
many changes in slope are exactly zero and a small number of them can be large.
A convex approximation of this problem is minimizing the L1-norm of the second-
order difference:
minimize || D_2 x ||_1
This is an extension of the concept of Total Variation filtering, applied to the
differences of a discrete signal, rather than the values.
Author: Bennet Meyers
'''
import cvxpy as cvx
import osqp
import scipy.sparse as sp
from functools import partial
import numpy as np
from scipy.signal import find_peaks
import warnings
from osd.classes.component import Component
from osd.utilities import compose
from osd.masking import make_masked_identity_matrix
class SparseSecondDiffConvex(Component):
    """Convex heuristic for a piecewise-linear signal component.

    A piecewise-linear signal has a sparse second-order difference, so the
    component cost is the convex relaxation

        minimize || internal_scale * D_2 x ||_1

    i.e. total-variation-style filtering applied to the discrete first
    differences rather than to the values themselves.
    """

    def __init__(self, internal_scale=1., prox_polish=False,
                 max_bp=None, solver=None, **kwargs):
        """
        :param internal_scale: scale factor applied to the second difference
            inside the L1 norm.
        :param prox_polish: if True, polish prox results by refitting an
            explicit piecewise-linear model (used by the OSQP-based prox
            variant; the CVXPY prox below does not consult it).
        :param max_bp: maximum number of breakpoints kept when polishing.
        :param solver: solver name forwarded to ``cvxpy.Problem.solve``.
        """
        super().__init__(**kwargs)
        self._prox_prob = None          # cached CVXPY prox problem (lazy)
        self._rho_over_lambda = None    # retained for the OSQP-based variant
        self.internal_scale = internal_scale
        self.prox_polish = prox_polish
        self.max_bp = max_bp
        self._last_set = None           # mask the cached problem was built for
        self._it = 0                    # number of prox evaluations so far
        self._solver = solver
        return

    @property
    def is_convex(self):
        return True

    def _get_cost(self):
        # cost(x) = sum(abs(internal_scale * diff(x, k=2)))
        diff2 = partial(cvx.diff, k=2)
        cost = compose(cvx.sum, cvx.abs, lambda x: self.internal_scale * x, diff2)
        return cost

    def prox_op(self, v, weight, rho, use_set=None, verbose=False,
                prox_counts=None):
        """Evaluate the proximal operator at ``v``:

            minimize  weight * ||ic * D_2 x||_1
                      + (rho / 2) * ||x[use_set] - v[use_set]||_2^2

        subject to any box/average/periodicity/first-value constraints
        configured on the component.  The CVXPY problem is parameterized in
        (v, weight, rho) and cached; it is rebuilt only when the observation
        mask changes.
        """
        if use_set is None:
            use_set = np.ones_like(v, dtype=bool)
        problem = self._prox_prob
        ic = self.internal_scale
        if self._last_set is not None:
            # BUG FIX: np.alltrue was deprecated and removed in NumPy 2.0;
            # the equivalent is np.all, negated with plain `not` rather
            # than bitwise `~`.
            set_change = not np.all(use_set == self._last_set)
        else:
            set_change = True
        if problem is None or set_change:
            x = cvx.Variable(len(v))
            Mv = cvx.Parameter(np.sum(use_set), value=v[use_set], name='Mv')
            w = cvx.Parameter(value=weight, name='weight', nonneg=True)
            r = cvx.Parameter(value=rho, name='rho', nonneg=True)
            objective = cvx.Minimize(
                w * cvx.norm1(ic * cvx.diff(x, k=2)) + r / 2 * cvx.sum_squares(
                    x[use_set] - Mv
                )
            )
            # Optional constraints inherited from the Component configuration.
            c = []
            if self.vmin is not None:
                c.append(x >= self.vmin)
            if self.vmax is not None:
                c.append(x <= self.vmax)
            if self.vavg is not None:
                n = x.size
                c.append(cvx.sum(x) / n == self.vavg)
            if self.period is not None:
                p = self.period
                c.append(x[:-p] == x[p:])
            if self.first_val is not None:
                c.append(x[0] == self.first_val)
            problem = cvx.Problem(objective, c)
            self._prox_prob = problem
            self._last_set = use_set
        else:
            # Re-use the cached problem; just refresh the parameter values.
            params = problem.param_dict
            params['Mv'].value = v[use_set]
            if not np.isclose(weight, params['weight'].value, atol=1e-3):
                params['weight'].value = weight
            if not np.isclose(rho, params['rho'].value, atol=1e-3):
                params['rho'].value = rho
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            problem.solve(solver=self._solver)
        return problem.variables()[0].value
def make_P(len_x, rho_over_lambda, use_set=None):
    """Build the sparse quadratic-cost matrix P for the OSQP prox problem.

    The stacked decision vector is [x (len_x), r (len_x - 2), z (len_x)];
    only the z block carries a diagonal quadratic penalty, scaled by
    ``rho_over_lambda``.  When ``use_set`` is given, z entries outside the
    mask get no penalty.
    """
    num_r = len_x - 2
    num_z = len_x
    diag_vals = rho_over_lambda * np.ones(num_z)
    diag_idx = len_x + num_r + np.arange(num_z)
    if use_set is not None:
        diag_vals = diag_vals[use_set]
        diag_idx = diag_idx[use_set]
    dim = len_x + num_r + num_z
    P = sp.coo_matrix((diag_vals, (diag_idx, diag_idx)), shape=(dim, dim))
    return P.tocsc()
def make_q(len_x):
    """Linear cost vector: ones on the r block (the L1 epigraph variables),
    zeros on the x and z blocks."""
    num_r = len_x - 2
    return np.concatenate([np.zeros(len_x), np.ones(num_r), np.zeros(len_x)])
def make_A(len_x, internal_scale=1, use_set=None):
    """Build the sparse constraint matrix A for the OSQP prox problem.

    Rows encode:  D2 x + r  (>= 0),  -D2 x + r  (>= 0),  and  M x - z  (= v),
    where D2 is the (scaled) second-order difference operator and M selects
    the entries in ``use_set`` (identity when the mask is None).
    """
    num_r = len_x - 2
    # Second-order difference operator D2, shape (num_r, len_x), scaled.
    d0 = sp.eye(m=num_r, n=len_x, k=0)
    d1 = sp.eye(m=num_r, n=len_x, k=1)
    d2 = sp.eye(m=num_r, n=len_x, k=2)
    D2 = internal_scale * (d0 - 2 * d1 + d2)
    ident_r = sp.eye(num_r)
    if use_set is None:
        sel = sp.eye(len_x)
    else:
        sel = make_masked_identity_matrix(use_set)
    A = sp.bmat([
        [D2, ident_r, None],
        [-1 * D2, ident_r, None],
        [sel, None, -1 * sel],
    ])
    return A.tocsc()
def make_lu(v, len_x):
    """Lower/upper bound vectors for the OSQP prox problem.

    The two r-block constraint groups are one-sided (0 <= . <= inf); the
    final block pins z to the data vector ``v`` (equality: l == u == v).
    """
    num_r = len_x - 2
    lower = np.concatenate([np.zeros(2 * num_r), v])
    upper = np.concatenate([np.full(2 * num_r, np.inf), v])
    return lower, upper
def make_all(v, rho_over_lambda, internal_scale=1, use_set=None):
    """Assemble every OSQP ingredient (P, q, A, l, u) for the prox problem
    defined by the data vector ``v``."""
    n = len(v)
    lower, upper = make_lu(v, n)
    return (
        make_P(n, rho_over_lambda, use_set=use_set),
        make_q(n),
        make_A(n, internal_scale=internal_scale, use_set=use_set),
        lower,
        upper,
    )
def fit_pwl(x, mask, breakpoints):
    """Least-squares fit of a piecewise-linear function to the signal x.

    The basis is a constant, a linear ramp, and one hinge max(t - bp, 0)
    per breakpoint. Only entries where `mask` is True enter the fit; the
    fitted function is then evaluated over the full index range.

    :param np.ndarray x: Signal to fit.
    :param mask: Boolean mask of entries to use, or None for all entries.
    :param breakpoints: Scalar or sequence of hinge locations (indices).
    :return np.ndarray: Fitted values at every index.
    """
    if mask is None:
        mask = np.ones_like(x, dtype=bool)
    bps = np.atleast_1d(breakpoints)
    t = np.arange(len(x))
    hinges = [np.clip(t - bp, 0, np.inf) for bp in bps]
    basis = np.column_stack([np.ones(len(x)), t] + hinges)
    # rcond=-1 keeps the legacy lstsq cutoff behavior.
    theta = np.linalg.lstsq(basis[mask], x[mask], rcond=-1)[0]
    return basis @ theta
|
{"hexsha": "e9ffa5f5816e1e31dd03642d2627e6c0bfba438a", "size": 7717, "ext": "py", "lang": "Python", "max_stars_repo_path": "osd/classes/norm1_second.py", "max_stars_repo_name": "bmeyers/optimal-signal-decomposition", "max_stars_repo_head_hexsha": "14376d38e3b2965e0ccdaf4a8a1c3683697c146c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2022-02-22T00:43:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T22:00:43.000Z", "max_issues_repo_path": "osd/classes/norm1_second.py", "max_issues_repo_name": "cvxgrp/signal-decomposition", "max_issues_repo_head_hexsha": "14376d38e3b2965e0ccdaf4a8a1c3683697c146c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-04T21:28:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T21:28:55.000Z", "max_forks_repo_path": "osd/classes/norm1_second.py", "max_forks_repo_name": "cvxgrp/signal-decomposition", "max_forks_repo_head_hexsha": "14376d38e3b2965e0ccdaf4a8a1c3683697c146c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1460176991, "max_line_length": 89, "alphanum_fraction": 0.5609692886, "include": true, "reason": "import numpy,import scipy,from scipy,import cvxpy", "num_tokens": 2227}
|
# -*- coding: utf-8 -*-
"""
Author: @gabvaztor
StartDate: 04/03/2017
This file contains the next information:
- Libraries to import with installation comment and reason.
- Data Mining Algorithm.
- Sets (train,validation and test) information.
- ANN Architectures.
- Many utility methods you will find useful.
The code's structure is:
- Imports
- Global Variables
- Interface
- Reading data algorithms
- Data Mining
- Training and test
- Show final conclusions
Style: "Google Python Style Guide"
https://google.github.io/styleguide/pyguide.html
Notes:
* This file use TensorFlow version >1.0.
"""
"""
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# IMPORTS
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
"""
'''LOCAL IMPORTS
* UtilsFunctions is a library that contains a lot of functions which will help us
to code expressively, clearly and efficiently.
* TensorFlowGUI's library contains all GUI's methods. Contains EasyGUI.
Here you can download the library: https://pypi.python.org/pypi/easygui#downloads
It had been used the version: 0.98.1
'''
import TFBoost.TFReader as tfr
import TFBoost.TFDataMining as tfd
from TFBoost.TFEncoder import Dictionary
from UsefulTools.UtilsFunctions import *
import TFBoost.TFModels as models
import SettingsObject
''' TensorFlow: https://www.tensorflow.org/
To upgrade TensorFlow to last version:
*CPU: pip3 install --upgrade tensorflow
*GPU: pip3 install --upgrade tensorflow-gpu
'''
import tensorflow as tf
print("TensorFlow: " + tf.__version__)
''' Numpy is an extension to the Python programming language, adding support for large,
multi-dimensional arrays and matrices, along with a large library of high-level
mathematical functions to operate on these arrays.
It is mandatory to install 'Numpy+MKL' before scipy.
Install 'Numpy+MKL' from here: http://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy
http://www.numpy.org/
https://en.wikipedia.org/wiki/NumPy '''
import numpy as np
'''
# You need to install the 64bit version of Scipy, at least on Windows.
# It is mandatory to install 'Numpy+MKL' before scipy.
# http://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy
# We can find scipi in the url: http://www.lfd.uci.edu/~gohlke/pythonlibs/#scipy'''
import scipy.io as sio
''' Matlab URL: http://matplotlib.org/users/installing.html
python -m pip3 install matplotlib'''
import matplotlib.pyplot as plt
''' TFLearn library. License MIT.
Git Clone : https://github.com/tflearn/tflearn.git
To install: pip3 install tflearn'''
import tflearn
'''
Sklearn(scikit-learn): Simple and efficient tools for data mining and data analysis
To install: pip3 install -U scikit-learn
'''
from sklearn.model_selection import train_test_split
"""
To install pandas: pip3 install pandas
"""
import pandas as pd
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# ---- GLOBAL VARIABLES ----
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
"""
"""
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# ---- USER INTERFACE ----
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
"""Creating user interface
#properties = eg.EasyGui()
#uf.pt("Typos GUI",properties.types)
"""
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# ---- READING DATA ----
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
"""
Creating Reader Features
"""
# Settings for the German traffic-sign problem (paths to data, etc.).
setting_object = SettingsObject.Settings(Dictionary.string_settings_german_signal_path)
option_problem = Dictionary.string_option_signals_images_problem
# [problem id, ?, 60, 60] — presumably the 60x60 input image size; the
# meaning of the second element is not visible here. TODO confirm.
options = [option_problem, 0, 60, 60]
path_train_and_test_images = [setting_object.train_path,setting_object.test_path]
number_of_classes = 59 # Labels start at 0 — presumably classes 0..58; TODO confirm off-by-one.
percentages_sets = None # Optional train/validation/test split fractions; None uses the given sets.
labels_set = [Dictionary.string_labels_type_option_hierarchy]
is_an_unique_csv = False # If True, a single CSV file would be passed and split into
# trainSet, validationSet (if necessary) and testSet.
known_data_type = '' # Type of the data when a file holds a single data type
# (e.g. numbers or chars); empty means unknown/mixed.
"""
Creating Reader Features
"""
reader_features = tfr.ReaderFeatures(set_data_files = path_train_and_test_images, number_of_classes = number_of_classes,
labels_set = labels_set,
is_unique_csv = is_an_unique_csv, known_data_type = known_data_type,
percentages_sets = percentages_sets)
"""
Creating Reader from ReaderFeatures
"""
tf_reader = tfr.Reader(type_problem=option_problem, reader_features=reader_features) # Reader object exposing train/test sets.
"""
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# ---- DATA MINING ----
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
"""
"""
Manipulate Reader with DataMining and update it.
"""
"""
Getting train, validation (if necessary) and test set.
"""
train_set = tf_reader.train_set # Indexed [0]=inputs, [1]=labels below — TODO confirm structure.
test_set = tf_reader.test_set # Same [inputs, labels] layout as train_set.
# Free the reader objects; only the extracted sets are needed from here on.
del reader_features
del tf_reader
# NOTE(review): this rebinds the name `models`, shadowing the module
# imported as `import TFBoost.TFModels as models` — it works because the
# module is not used again, but a distinct name would be clearer.
models = models.TFModels(setting_object=setting_object, option_problem=options,
input_data=train_set[0],test=test_set[0],
input_labels=train_set[1],test_labels=test_set[1],
number_of_classes=number_of_classes, type=None,
validation=None, validation_labels=None,
load_model_configuration=False)
#with tf.device('/cpu:0'): # CPU
with tf.device('/gpu:0'): # GPU — pin the convolution training to the first GPU
    models.convolution_model_image()
|
{"hexsha": "a2ac16b7fcb5012ddf3099913435e1b50c79b433", "size": 6557, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/projects/German_Signal/TFBooster_.py", "max_stars_repo_name": "Gabvaztor/TFBoost__", "max_stars_repo_head_hexsha": "a37b906f5cb47becc3275def8282ff395d06ef45", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/projects/German_Signal/TFBooster_.py", "max_issues_repo_name": "Gabvaztor/TFBoost__", "max_issues_repo_head_hexsha": "a37b906f5cb47becc3275def8282ff395d06ef45", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/projects/German_Signal/TFBooster_.py", "max_forks_repo_name": "Gabvaztor/TFBoost__", "max_forks_repo_head_hexsha": "a37b906f5cb47becc3275def8282ff395d06ef45", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0451977401, "max_line_length": 122, "alphanum_fraction": 0.5526917798, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1199}
|
# Convert a CoNLL-style NER file (space-separated, tag in column 4) into a
# word<TAB>tag format: an "I-" tag that actually starts an entity is
# stripped to the bare type, while an "I-" continuing a previous "B-"/"I-"
# tag is collapsed to the placeholder "_". Output is written to "a.out"
# in the current directory.
function conv(path::String)
    data = []
    lines = open(readlines, path)
    lines = map(chomp, lines)
    for i = 1:length(lines)
        line = lines[i]
        # Blank lines separate sentences; keep them as empty records.
        if isempty(line)
            push!(data, "")
            continue
        end
        items = split(line, " ")
        word = String(items[1])
        tag = String(items[4])  # assumes 4-column CoNLL input — TODO confirm
        if startswith(tag, "I-")
            # An "I-" on the first line or right after a blank line starts
            # an entity, so drop the prefix.
            # NOTE: 3-argument replace(s, pat, rep) is Julia <= 0.6 syntax;
            # modern Julia requires replace(s, pat => rep).
            if i == 1 || isempty(lines[i-1])
                tag = replace(tag, "I-", "")
            else
                ptag = String(split(lines[i-1])[4])
                if ptag == "O"
                    # Previous token is outside any entity: this "I-" starts one.
                    tag = replace(tag, "I-", "")
                elseif startswith(ptag, "B-")
                    # Continuation of an entity: collapse to placeholder.
                    tag = "_"
                elseif startswith(ptag, "I-")
                    tag = "_"
                else
                    throw("$ptag")
                end
            end
        elseif startswith(tag, "B-")
        # "B-" and "O" tags pass through unchanged.
        elseif tag == "O"
        else
            throw("Invalid tag: $tag.")
        end
        push!(data, "$(word)\t$(tag)")
    end
    open("a.out", "w") do f
        foreach(x -> println(f,x), data)
    end
end
# Convert an IOBES-tagged, tab-separated file to the simplified scheme:
# entity-initial tags ("B-"/"S-") keep only the bare type, continuation
# tags ("I-"/"E-") become the placeholder "_", and "O" passes through.
# The converted lines are written to "a.out" in the current directory.
function convIOBES(path::String)
    out = String[]
    for raw in map(chomp, open(readlines, path))
        if isempty(raw)
            # Blank line: sentence boundary, preserved as-is.
            push!(out, "")
            continue
        end
        fields = split(raw, "\t")
        w = String(fields[1])
        t = String(fields[2])
        if startswith(t, "B-") || startswith(t, "S-")
            t = t[3:end]
        elseif startswith(t, "I-") || startswith(t, "E-")
            t = "_"
        end
        push!(out, "$(w)\t$(t)")
    end
    open("a.out", "w") do f
        foreach(x -> println(f, x), out)
    end
end
# `workspace()` resets Module Main's state; it exists only up to Julia 0.6,
# which dates this script.
workspace()
# Convert the IOBES-tagged training data; output goes to "a.out".
convIOBES(joinpath(dirname(@__FILE__), ".data/eng.train.IOBES"))
|
{"hexsha": "e503653a62fd26612543194267244c7c15dc4256", "size": 1848, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "ner/preprocess/convert.jl", "max_stars_repo_name": "hshindo/Merlin-Examples", "max_stars_repo_head_hexsha": "a12fd471d5271b99f6d9680d8c768661dca1ea31", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-25T00:34:51.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-25T00:34:51.000Z", "max_issues_repo_path": "ner2/preprocess/convert.jl", "max_issues_repo_name": "hshindo/Merlin-Examples", "max_issues_repo_head_hexsha": "a12fd471d5271b99f6d9680d8c768661dca1ea31", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ner2/preprocess/convert.jl", "max_forks_repo_name": "hshindo/Merlin-Examples", "max_forks_repo_head_hexsha": "a12fd471d5271b99f6d9680d8c768661dca1ea31", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1764705882, "max_line_length": 64, "alphanum_fraction": 0.4431818182, "num_tokens": 478}
|
From mathcomp Require Import
all_ssreflect.
From AUChain
Require Import
BlockTree
Blocks
Messages
Parameters
LocalState
StateMonad.
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
(** * Protocol
Execution plan for each party pr. round:
This consists of two parts:
1) Recieve and process messages:
- Recieve messages for this party and extend the blocktree with the
received blocks.
2) Execute consensus algorithm:
- Check if leader.
- If leader then bake block and add return messages that has to be submitted.
**)
Section Protocol.
(* Extend party state l's blocktree with block b; the key pk is unchanged. *)
Definition extend_tree_l (l: LocalState) (b : Block) : LocalState :=
mkLocalState (pk l) (extendTree (tree l) b).
(* A message carries exactly one block; processing it extends the tree. *)
Definition process_msg (m: Message) (l: LocalState) : LocalState :=
let: BlockMsg b := m in extend_tree_l l b.
(* Fold every delivered message into the local state (state-monad update). *)
Definition process_msgs (msgs: Messages) : State LocalState unit :=
modify (fun l => foldr process_msg l msgs).
(* Notice that if a party actually bakes a block it will be added
directly to the blocktree of this party. *)
(* If this party wins slot sl, bake a new block on top of the best chain up
to slot sl-1, add it to the local tree, and emit it as the single outgoing
message; otherwise emit nothing. *)
Definition honest_bake (sl : Slot) (txs : Transactions) : State LocalState Messages :=
local_state <- get;
if Winner (pk local_state) sl
then let: bestChain := bestChain (sl-1) (tree local_state) in
let: hashPrev := HashB (head GenesisBlock bestChain) in
let: newBlock := MkBlock sl txs hashPrev (pk local_state) in
modify (fun l => extend_tree_l l newBlock);;
pure [:: BlockMsg newBlock]
else pure [::].
(* Receiving step: just process the incoming messages; sl is unused here. *)
Definition honest_rcv (msgs : Messages) (sl : Slot) : State LocalState unit :=
process_msgs msgs.
End Protocol.
|
{"author": "AU-COBRA", "repo": "PoS-NSB", "sha": "8cb62e382f17626150a4b75e44af4d270474d3e7", "save_path": "github-repos/coq/AU-COBRA-PoS-NSB", "path": "github-repos/coq/AU-COBRA-PoS-NSB/PoS-NSB-8cb62e382f17626150a4b75e44af4d270474d3e7/Protocol/Protocol.v"}
|
import numpy as np
# 6x6 symbol grid (A–Z, digits 1–9, '_' for space). Presumably the
# row/column stimulus matrix of a P300 speller paradigm — TODO confirm
# against the stimulus-presentation code that consumes it.
PARADIGM = np.array([['A','B','C','D','E','F'],
                    ['G','H','I','J','K','L'],
                    ['M','N','O','P','Q','R'],
                    ['S','T','U','V','W','X'],
                    ['Y','Z','1','2','3','4'],
                    ['5','6','7','8','9','_']])
|
{"hexsha": "9fc891d646f710bf2f8261d773a08d5d48c208cb", "size": 324, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data/paradigm.py", "max_stars_repo_name": "Yuchen-Wang-SH/Electroencephalography-EEG-Signal-Classification-using-Deep-Learning", "max_stars_repo_head_hexsha": "55a3100182b7b5340ada375d46dd9ca0ad00ae21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2019-06-19T10:13:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T16:57:53.000Z", "max_issues_repo_path": "src/data/paradigm.py", "max_issues_repo_name": "Yuchen-Wang-SH/Electroencephalography-EEG-Signal-Classification-using-Deep-Learning", "max_issues_repo_head_hexsha": "55a3100182b7b5340ada375d46dd9ca0ad00ae21", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/data/paradigm.py", "max_forks_repo_name": "Yuchen-Wang-SH/Electroencephalography-EEG-Signal-Classification-using-Deep-Learning", "max_forks_repo_head_hexsha": "55a3100182b7b5340ada375d46dd9ca0ad00ae21", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-21T10:08:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-21T10:08:43.000Z", "avg_line_length": 36.0, "max_line_length": 47, "alphanum_fraction": 0.2037037037, "include": true, "reason": "import numpy", "num_tokens": 97}
|
"""GLUT replacement for the original checker.py demonstration code
Note:
Has no navigation code ATM.
"""
# This is statement is required by the build system to query build info
if __name__ == '__build__':
raise Exception
__version__='$Revision: 1.1.1.1 $'[11:-2]
__date__ = '$Date: 2007/02/15 19:25:11 $'[6:-2]
import OpenGL
OpenGL.ERROR_ON_COPY = True
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import time, sys
try:
from numpy import *
except ImportError, err:
try:
from Numeric import *
except ImportError, err:
print "This demo requires the numpy or Numeric extension, sorry"
import sys
sys.exit()
def drawCheckerBoard( N=5, white=GLfloat_3(1,1,1), black=GLfloat_3(0,0,0) ):
    """Draw a 2N x 2N checkerboard of unit squares in the given colours.

    Lighting is disabled while the flat-coloured quads are drawn and is
    re-enabled afterwards even if a GL call raises.
    """
    glDisable(GL_LIGHTING)
    try:
        for col in range(-N, N):
            for row in range(-N, N):
                colour = white if (col + row) % 2 == 0 else black
                glColor3fv(colour)
                glRectf(col, row, col + 1, row + 1)
    finally:
        glEnable(GL_LIGHTING)
def drawSphere( center=(0,0,1), radius=1.0, sides=20 ):
    """Draw a solid sphere at `center`, restoring the matrix stack after."""
    cx, cy, cz = center
    glPushMatrix()
    try:
        glTranslatef(cx, cy, cz)
        glutSolidSphere(radius, sides, sides)
    finally:
        glPopMatrix()
def display( swap=1, clear=1):
    """GLUT display callback: clear, set up camera and light, draw the scene.

    :param swap: when truthy, swap the double buffers after drawing.
    :param clear: when truthy, clear the colour and depth buffers first.
    """
    if clear:
        glClearColor(0.5, 0.5, 0.5, 0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    # establish the projection matrix (perspective)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    x,y,width,height = glGetDoublev(GL_VIEWPORT)
    gluPerspective(
        45, # field of view in degrees
        width/float(height or 1), # aspect ratio ("or 1" guards height 0)
        .25, # near clipping plane
        200, # far clipping plane
    )
    # and then the model view matrix
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    gluLookAt(
        0,1,20, # eyepoint
        0,0,0, # center-of-view
        0,1,0, # up-vector
    )
    # Light parameters are re-specified every frame, after the modelview
    # reset and before the scene rotation is applied.
    glLightfv( GL_LIGHT0, GL_DIFFUSE, GLfloat_3(.8,.8,.3) )
    glLightfv( GL_LIGHT0, GL_POSITION, GLfloat_4(1,1,3,0) )
    glEnable( GL_LIGHT0)
    rotation()  # time-based y-axis rotation of everything drawn below
    drawCheckerBoard()
    drawSphere()
    if swap:
        glutSwapBuffers()
def idle( ):
    """GLUT idle callback: request a repaint to drive the animation.

    NOTE(review): the main block registers `glutIdleFunc(display)`, not this
    function, so `idle` appears unused in this file.
    """
    glutPostRedisplay()
# Wall-clock reference for the rotation animation, captured at import time.
starttime = time.time()
def rotation( period = 10):
    """Apply a y-axis rotation completing one full turn every `period` seconds.

    Returns the angle in degrees that was applied.
    """
    elapsed = (time.time() - starttime) % period
    angle = (elapsed / period) * 360
    glRotate( angle, 0,1,0)
    return angle
def key_pressed(*args):
    """GLUT keyboard callback: quit the program when ESC is pressed."""
    key = args[0]
    if key == '\033':  # escape key
        sys.exit()
if __name__ == "__main__":
    # Python-2 print statement: this script targets Python 2.
    print """You should see a sphere+checker-board rotating about the origin."""
    import sys
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    glutCreateWindow('Rotating Checkerboard')
    glutDisplayFunc(display)
    glutKeyboardFunc(key_pressed)
    # NOTE(review): the idle callback is `display`, not `idle`, so the scene
    # is redrawn continuously; the separate `idle` function goes unused.
    glutIdleFunc(display)
    # Depth testing is required to render the faceted geometry correctly.
    glEnable( GL_DEPTH_TEST )
    glutMainLoop()
|
{"hexsha": "4d0bbd301381536d05fe0c47cbb92376ca2b8f79", "size": 2898, "ext": "py", "lang": "Python", "max_stars_repo_path": "002-pyopengl/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/GLUT/tom/checker.py", "max_stars_repo_name": "lhl/vrdev", "max_stars_repo_head_hexsha": "fc1a9af2b51d159c99c8779349ef3392a70ed9ed", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2015-12-02T02:36:36.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-20T17:14:24.000Z", "max_issues_repo_path": "002-pyopengl/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/GLUT/tom/checker.py", "max_issues_repo_name": "lhl/vrdev", "max_issues_repo_head_hexsha": "fc1a9af2b51d159c99c8779349ef3392a70ed9ed", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "002-pyopengl/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/GLUT/tom/checker.py", "max_forks_repo_name": "lhl/vrdev", "max_forks_repo_head_hexsha": "fc1a9af2b51d159c99c8779349ef3392a70ed9ed", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2016-11-02T11:17:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-21T07:42:19.000Z", "avg_line_length": 24.5593220339, "max_line_length": 77, "alphanum_fraction": 0.7108350587, "include": true, "reason": "from numpy", "num_tokens": 842}
|
"""
Molecular depiction features.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "BSD 3-clause"
import io
import numpy as np
from PIL import Image
def load(string):
    """
    Load an image from a file or binary string.

    Parameters
    ----------
    string : str
        Filename or binary string.
    """
    try:
        return Image.open(string)
    except TypeError:
        # Image.open rejects raw bytes with TypeError; wrap them in a
        # file-like buffer and retry.
        return Image.open(io.BytesIO(string))
def get_pixels(image, mode=None):
    """
    Extract pixels from an image as an array, possibly after converting
    the image to the given mode.

    Parameters
    ----------
    image : PIL Image
        Image.
    mode : str, optional
        Image mode. For example, 'RGB' or 'P' (8-bit).
    """
    needs_convert = mode is not None and image.mode != mode
    source = image.convert(mode) if needs_convert else image
    return np.asarray(source)
def downscale(image, max_size):
    """
    Shrink an image while maintaining aspect ratio. Returns a copy of the
    original image (unchanged if it already fits within max_size).

    Parameters
    ----------
    image : Image
        Image to rescale.
    max_size : int
        Maximum image size in any dimension.
    """
    if max(image.size) <= max_size:
        return image.copy()
    im = image.copy()
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer code
    # should use Image.LANCZOS (the same filter). Left unchanged here to
    # avoid breaking very old PIL installs — confirm the Pillow version.
    im.thumbnail((max_size, max_size), resample=Image.ANTIALIAS)
    return im
def pad(image, shape, fill=255):
    """
    Pad an image, where the first two axes are height and width,
    respectively. Returns a copy of the original image.

    Padding is split as evenly as possible between the two sides of each
    axis; for an odd difference the extra pixel goes to the second side.

    Parameters
    ----------
    image : PIL Image
        Image.
    shape : tuple of ints
        Desired height and width. Must be >= the current size.
    fill : int, optional (default 255)
        Intensity value for added pixels.
    """
    pixels = get_pixels(image)
    current = pixels.shape[:2]
    assert current[0] <= shape[0] and current[1] <= shape[1]
    pad_width = []
    # Fix: the original used the Python-2-only `xrange`, which raises
    # NameError on Python 3; `range` behaves identically here.
    for i in range(2):
        diff = shape[i] - current[i]
        n, m = divmod(diff, 2)
        m += n  # second side takes the remainder for odd diffs
        pad_width.append((n, m))
    # Any remaining axes (e.g. colour channels) get no padding.
    while len(pad_width) < pixels.ndim:
        pad_width.append((0, 0))
    padded = np.pad(pixels, pad_width, 'constant', constant_values=fill)
    im = Image.fromarray(padded, mode=image.mode)
    return im
|
{"hexsha": "cd41912b76e50ca23b14445c3e70b7206375b683", "size": 2243, "ext": "py", "lang": "Python", "max_stars_repo_path": "vs_utils/utils/image_utils.py", "max_stars_repo_name": "rbharath/pande-gas", "max_stars_repo_head_hexsha": "7a947d087ba2dd77c4bbbb89b604cf83acdff5f3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2015-09-02T22:08:12.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-15T16:04:03.000Z", "max_issues_repo_path": "vs_utils/utils/image_utils.py", "max_issues_repo_name": "rbharath/pande-gas", "max_issues_repo_head_hexsha": "7a947d087ba2dd77c4bbbb89b604cf83acdff5f3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2015-03-06T02:13:55.000Z", "max_issues_repo_issues_event_max_datetime": "2016-01-20T01:24:39.000Z", "max_forks_repo_path": "vs_utils/utils/image_utils.py", "max_forks_repo_name": "rbharath/pande-gas", "max_forks_repo_head_hexsha": "7a947d087ba2dd77c4bbbb89b604cf83acdff5f3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2015-03-05T18:53:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-02T15:51:11.000Z", "avg_line_length": 23.3645833333, "max_line_length": 73, "alphanum_fraction": 0.6023183237, "include": true, "reason": "import numpy", "num_tokens": 552}
|
[STATEMENT]
lemma gc_W_empty_invL[intro]:
notes fun_upd_apply[simp]
shows
"\<lbrace> handshake_invL \<^bold>\<and> obj_fields_marked_invL \<^bold>\<and> gc_W_empty_invL \<^bold>\<and> LSTP valid_W_inv \<rbrace>
gc
\<lbrace> gc_W_empty_invL \<rbrace>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrace>\<lambda>s. handshake_invL s \<and> obj_fields_marked_invL s \<and> gc_W_empty_invL s \<and> valid_W_inv s\<down>\<rbrace> gc \<lbrace>gc_W_empty_invL\<rbrace>
[PROOF STEP]
apply (vcg_jackhammer; (clarsimp elim: gc_W_empty_mut_inv_load_W simp: WL_def)?)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>s s'. \<lbrakk>s'\<down> = s\<down>; taken gc mark_loop_get_work_done_loop s'; \<forall>p''\<in>- {gc}. GST s' p'' = GST s p''; valid_W_inv s\<down>; \<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>; sys_hs_type s\<down> = ht_GetWork; gc_W s\<down> = {}; gc_ghost_honorary_grey s\<down> = {}; \<forall>x. sys_ghost_hs_in_sync x s\<down>; \<forall>x. \<not> sys_hs_pending x s\<down>; sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep; at gc mark_loop_get_work_done_loop s; gc_muts s\<down> = {}; sys_W s\<down> = {}\<rbrakk> \<Longrightarrow> no_grey_refs s\<down>
2. \<And>s s'. \<lbrakk>s'\<down> = s\<down>; taken gc mark_loop_get_roots_done_loop s'; \<forall>p''\<in>- {gc}. GST s' p'' = GST s p''; valid_W_inv s\<down>; \<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>; sys_hs_type s\<down> = ht_GetRoots; gc_W s\<down> = {}; gc_ghost_honorary_grey s\<down> = {}; \<forall>x. sys_ghost_hs_in_sync x s\<down>; \<forall>x. \<not> sys_hs_pending x s\<down>; sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep; at gc mark_loop_get_roots_done_loop s; gc_muts s\<down> = {}; sys_W s\<down> = {}\<rbrakk> \<Longrightarrow> no_grey_refs s\<down>
[PROOF STEP]
proof vcg_name_cases
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>s s'. \<lbrakk>s'\<down> = s\<down>; taken gc mark_loop_get_work_done_loop s'; \<forall>p''\<in>- {gc}. GST s' p'' = GST s p''; valid_W_inv s\<down>; \<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>; sys_hs_type s\<down> = ht_GetWork; gc_W s\<down> = {}; gc_ghost_honorary_grey s\<down> = {}; \<forall>x. sys_ghost_hs_in_sync x s\<down>; \<forall>x. \<not> sys_hs_pending x s\<down>; sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep; at gc mark_loop_get_work_done_loop s; gc_muts s\<down> = {}; sys_W s\<down> = {}\<rbrakk> \<Longrightarrow> no_grey_refs s\<down>
2. \<And>s s'. \<lbrakk>s'\<down> = s\<down>; taken gc mark_loop_get_roots_done_loop s'; \<forall>p''\<in>- {gc}. GST s' p'' = GST s p''; valid_W_inv s\<down>; \<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>; sys_hs_type s\<down> = ht_GetRoots; gc_W s\<down> = {}; gc_ghost_honorary_grey s\<down> = {}; \<forall>x. sys_ghost_hs_in_sync x s\<down>; \<forall>x. \<not> sys_hs_pending x s\<down>; sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep; at gc mark_loop_get_roots_done_loop s; gc_muts s\<down> = {}; sys_W s\<down> = {}\<rbrakk> \<Longrightarrow> no_grey_refs s\<down>
[PROOF STEP]
case (mark_loop_get_work_done_loop s s')
[PROOF STATE]
proof (state)
this:
s'\<down> = s\<down>
taken gc mark_loop_get_work_done_loop s'
\<forall>p''\<in>- {gc}. GST s' p'' = GST s p''
valid_W_inv s\<down>
\<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>
sys_hs_type s\<down> = ht_GetWork
gc_W s\<down> = {}
gc_ghost_honorary_grey s\<down> = {}
\<forall>x. sys_ghost_hs_in_sync x s\<down>
\<forall>x. \<not> sys_hs_pending x s\<down>
sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep
at gc mark_loop_get_work_done_loop s
gc_muts s\<down> = {}
sys_W s\<down> = {}
goal (2 subgoals):
1. \<And>s s'. \<lbrakk>s'\<down> = s\<down>; taken gc mark_loop_get_work_done_loop s'; \<forall>p''\<in>- {gc}. GST s' p'' = GST s p''; valid_W_inv s\<down>; \<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>; sys_hs_type s\<down> = ht_GetWork; gc_W s\<down> = {}; gc_ghost_honorary_grey s\<down> = {}; \<forall>x. sys_ghost_hs_in_sync x s\<down>; \<forall>x. \<not> sys_hs_pending x s\<down>; sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep; at gc mark_loop_get_work_done_loop s; gc_muts s\<down> = {}; sys_W s\<down> = {}\<rbrakk> \<Longrightarrow> no_grey_refs s\<down>
2. \<And>s s'. \<lbrakk>s'\<down> = s\<down>; taken gc mark_loop_get_roots_done_loop s'; \<forall>p''\<in>- {gc}. GST s' p'' = GST s p''; valid_W_inv s\<down>; \<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>; sys_hs_type s\<down> = ht_GetRoots; gc_W s\<down> = {}; gc_ghost_honorary_grey s\<down> = {}; \<forall>x. sys_ghost_hs_in_sync x s\<down>; \<forall>x. \<not> sys_hs_pending x s\<down>; sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep; at gc mark_loop_get_roots_done_loop s; gc_muts s\<down> = {}; sys_W s\<down> = {}\<rbrakk> \<Longrightarrow> no_grey_refs s\<down>
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
s'\<down> = s\<down>
taken gc mark_loop_get_work_done_loop s'
\<forall>p''\<in>- {gc}. GST s' p'' = GST s p''
valid_W_inv s\<down>
\<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>
sys_hs_type s\<down> = ht_GetWork
gc_W s\<down> = {}
gc_ghost_honorary_grey s\<down> = {}
\<forall>x. sys_ghost_hs_in_sync x s\<down>
\<forall>x. \<not> sys_hs_pending x s\<down>
sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep
at gc mark_loop_get_work_done_loop s
gc_muts s\<down> = {}
sys_W s\<down> = {}
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
s'\<down> = s\<down>
taken gc mark_loop_get_work_done_loop s'
\<forall>p''\<in>- {gc}. GST s' p'' = GST s p''
valid_W_inv s\<down>
\<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>
sys_hs_type s\<down> = ht_GetWork
gc_W s\<down> = {}
gc_ghost_honorary_grey s\<down> = {}
\<forall>x. sys_ghost_hs_in_sync x s\<down>
\<forall>x. \<not> sys_hs_pending x s\<down>
sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep
at gc mark_loop_get_work_done_loop s
gc_muts s\<down> = {}
sys_W s\<down> = {}
goal (1 subgoal):
1. no_grey_refs s\<down>
[PROOF STEP]
by (simp add: WL_def gc_W_empty_mut_inv_load_W valid_W_inv_sys_ghg_empty_iff)
[PROOF STATE]
proof (state)
this:
no_grey_refs s\<down>
goal (1 subgoal):
1. \<And>s s'. \<lbrakk>s'\<down> = s\<down>; taken gc mark_loop_get_roots_done_loop s'; \<forall>p''\<in>- {gc}. GST s' p'' = GST s p''; valid_W_inv s\<down>; \<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>; sys_hs_type s\<down> = ht_GetRoots; gc_W s\<down> = {}; gc_ghost_honorary_grey s\<down> = {}; \<forall>x. sys_ghost_hs_in_sync x s\<down>; \<forall>x. \<not> sys_hs_pending x s\<down>; sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep; at gc mark_loop_get_roots_done_loop s; gc_muts s\<down> = {}; sys_W s\<down> = {}\<rbrakk> \<Longrightarrow> no_grey_refs s\<down>
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>s s'. \<lbrakk>s'\<down> = s\<down>; taken gc mark_loop_get_roots_done_loop s'; \<forall>p''\<in>- {gc}. GST s' p'' = GST s p''; valid_W_inv s\<down>; \<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>; sys_hs_type s\<down> = ht_GetRoots; gc_W s\<down> = {}; gc_ghost_honorary_grey s\<down> = {}; \<forall>x. sys_ghost_hs_in_sync x s\<down>; \<forall>x. \<not> sys_hs_pending x s\<down>; sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep; at gc mark_loop_get_roots_done_loop s; gc_muts s\<down> = {}; sys_W s\<down> = {}\<rbrakk> \<Longrightarrow> no_grey_refs s\<down>
[PROOF STEP]
case (mark_loop_get_roots_done_loop s s')
[PROOF STATE]
proof (state)
this:
s'\<down> = s\<down>
taken gc mark_loop_get_roots_done_loop s'
\<forall>p''\<in>- {gc}. GST s' p'' = GST s p''
valid_W_inv s\<down>
\<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>
sys_hs_type s\<down> = ht_GetRoots
gc_W s\<down> = {}
gc_ghost_honorary_grey s\<down> = {}
\<forall>x. sys_ghost_hs_in_sync x s\<down>
\<forall>x. \<not> sys_hs_pending x s\<down>
sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep
at gc mark_loop_get_roots_done_loop s
gc_muts s\<down> = {}
sys_W s\<down> = {}
goal (1 subgoal):
1. \<And>s s'. \<lbrakk>s'\<down> = s\<down>; taken gc mark_loop_get_roots_done_loop s'; \<forall>p''\<in>- {gc}. GST s' p'' = GST s p''; valid_W_inv s\<down>; \<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>; sys_hs_type s\<down> = ht_GetRoots; gc_W s\<down> = {}; gc_ghost_honorary_grey s\<down> = {}; \<forall>x. sys_ghost_hs_in_sync x s\<down>; \<forall>x. \<not> sys_hs_pending x s\<down>; sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep; at gc mark_loop_get_roots_done_loop s; gc_muts s\<down> = {}; sys_W s\<down> = {}\<rbrakk> \<Longrightarrow> no_grey_refs s\<down>
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
s'\<down> = s\<down>
taken gc mark_loop_get_roots_done_loop s'
\<forall>p''\<in>- {gc}. GST s' p'' = GST s p''
valid_W_inv s\<down>
\<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>
sys_hs_type s\<down> = ht_GetRoots
gc_W s\<down> = {}
gc_ghost_honorary_grey s\<down> = {}
\<forall>x. sys_ghost_hs_in_sync x s\<down>
\<forall>x. \<not> sys_hs_pending x s\<down>
sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep
at gc mark_loop_get_roots_done_loop s
gc_muts s\<down> = {}
sys_W s\<down> = {}
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
s'\<down> = s\<down>
taken gc mark_loop_get_roots_done_loop s'
\<forall>p''\<in>- {gc}. GST s' p'' = GST s p''
valid_W_inv s\<down>
\<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>
sys_hs_type s\<down> = ht_GetRoots
gc_W s\<down> = {}
gc_ghost_honorary_grey s\<down> = {}
\<forall>x. sys_ghost_hs_in_sync x s\<down>
\<forall>x. \<not> sys_hs_pending x s\<down>
sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep
at gc mark_loop_get_roots_done_loop s
gc_muts s\<down> = {}
sys_W s\<down> = {}
goal (1 subgoal):
1. no_grey_refs s\<down>
[PROOF STEP]
by (simp add: WL_def gc_W_empty_mut_inv_load_W valid_W_inv_sys_ghg_empty_iff)
[PROOF STATE]
proof (state)
this:
no_grey_refs s\<down>
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4532, "file": "ConcurrentGC_Noninterference", "length": 12}
|
[STATEMENT]
lemma Standard_hnorm [simp]: "x \<in> Standard \<Longrightarrow> hnorm x \<in> Standard"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> Standard \<Longrightarrow> hnorm x \<in> Standard
[PROOF STEP]
by (simp add: hnorm_def)
|
{"llama_tokens": 88, "file": null, "length": 1}
|
/*
* Copyright (c) 2015, The Regents of the University of California (Regents).
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Please contact the author(s) of this library if you have any questions.
* Authors: David Fridovich-Keil ( dfk@eecs.berkeley.edu )
* Erik Nelson ( eanelson@eecs.berkeley.edu )
*/
#include <Eigen/Core>
#include <Eigen/LU>
#include <gflags/gflags.h>
#include <geometry/rotation.h>
#include <math/random_generator.h>
#include <gtest/gtest.h>
namespace bsfm {

using Eigen::Matrix3d;
using Eigen::Vector3d;

// Round-trip test: Euler angles -> rotation matrix -> Euler angles.
TEST(Rotation, TestEulerAnglesAndMatrices) {
  // For each trial, draw random Euler angles, build the corresponding
  // rotation matrix, and check that (1) det(R) == 1 and (2) converting the
  // matrix back recovers the original angles.
  // NOTE(review): rng is never referenced below — presumably constructing it
  // seeds the random state used by setRandom(); confirm before removing.
  math::RandomGenerator rng(0);
  for (int trial = 0; trial < 1000; ++trial) {
    Vector3d euler;
    euler.setRandom();

    // Recovering Euler angles from a rotation matrix is only unambiguous
    // when phi, theta and psi are all < 0.5*PI in magnitude; outside that
    // range several angle triplets map to the same matrix, and our function
    // can only return one of them.
    euler *= 0.5 * M_PI;

    const Matrix3d R = EulerAnglesToMatrix(euler);
    EXPECT_NEAR(1.0, R.determinant(), 1e-8);

    const Vector3d recovered = MatrixToEulerAngles(R);
    EXPECT_NEAR(0.0, S1Distance(euler(0), recovered(0)), 1e-8);
    EXPECT_NEAR(0.0, S1Distance(euler(1), recovered(1)), 1e-8);
    EXPECT_NEAR(0.0, S1Distance(euler(2), recovered(2)), 1e-8);
    EXPECT_TRUE(R.isApprox(EulerAnglesToMatrix(recovered), 1e-4));
  }
}

}  // namespace bsfm
|
{"hexsha": "4e0dda356be6e1642fed0d731292b8eae84367bf", "size": 3059, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/test_rotation.cpp", "max_stars_repo_name": "jamesdsmith/berkeley_sfm", "max_stars_repo_head_hexsha": "de3ae6b104602c006d939b1f3da8c497b86d39ff", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 21.0, "max_stars_repo_stars_event_min_datetime": "2016-01-14T13:52:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-03T19:30:33.000Z", "max_issues_repo_path": "test/test_rotation.cpp", "max_issues_repo_name": "jamesdsmith/berkeley_sfm", "max_issues_repo_head_hexsha": "de3ae6b104602c006d939b1f3da8c497b86d39ff", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2015-10-17T17:01:46.000Z", "max_issues_repo_issues_event_max_datetime": "2015-10-22T20:59:43.000Z", "max_forks_repo_path": "test/test_rotation.cpp", "max_forks_repo_name": "erik-nelson/berkeley_sfm", "max_forks_repo_head_hexsha": "5bf0b45fac176ff7abfca0ff690893c1afc73c51", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2016-01-22T06:23:59.000Z", "max_forks_repo_forks_event_max_datetime": "2018-01-16T03:54:33.000Z", "avg_line_length": 38.7215189873, "max_line_length": 80, "alphanum_fraction": 0.7129780974, "num_tokens": 771}
|
using DEBTrait, Test

# Unit tests for ThermoStoichWizard using the compound C4H9NO2 as electron donor.
donor = "C4H9NO2"
# Biomass composition vector passed to the anabolic/lambda routines.
biomass_formula = [1, 1.8, 0.2, 0.5, 0, 0, 0]

# Elemental composition parsed from the formula, ordered CHNOSP.
composition = DEBTrait.ThermoStoichWizard.extract_composition(donor) # CHNOSP
@test composition == [4,9,1,2,0,0]

# Half-reaction stoichiometry of the electron donor.
donor_stoich = DEBTrait.ThermoStoichWizard.get_stoich_electron_donor(donor)
@test donor_stoich == [-1, -10, 4, 1, 0, 0, 21, 18, 0, 0]

# Half-reaction stoichiometry of the electron acceptor (donor-independent).
acceptor_stoich = DEBTrait.ThermoStoichWizard.get_stoich_electron_acceptor()
@test acceptor_stoich == [0., 2., 0., 0., 0., 0., -4., -4., -1., 0.]

# Combined catabolic reaction stoichiometry.
catabolic_stoich = DEBTrait.ThermoStoichWizard.get_stoich_catabolic_reaction(donor)
@test catabolic_stoich == [-1., -1., 4., 1., 0., 0., 3., 0., -4.5, 0.]

# Anabolic reaction stoichiometries returned for the O2 and HCO3 variants.
anabolic_O2, anabolic_HCO3 = DEBTrait.ThermoStoichWizard.get_stoich_anabolic_reaction(donor, biomass_formula)
@test anabolic_O2 ≈ [-0.25, 0.15, 0.0, 0.05, 0., 0., -0.05, 0., -0.075, 1.0] atol = 1e-4
@test anabolic_HCO3 ≈ [-0.23333333, 0.16666667, -0.06666667, 0.03333333, 0., 0., -0.1, 0., 0., 1.] atol = 1e-4

# Smoke check: the lambda computation runs without raising.
out = DEBTrait.ThermoStoichWizard.get_lambda(donor, biomass_formula)
|
{"hexsha": "12b781547ff7bc3c7c90a15dd990d2ab2623c8c5", "size": 1080, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_ThermoStoichWizard.jl", "max_stars_repo_name": "giannamars/DEBTrait.jl", "max_stars_repo_head_hexsha": "3ce3dae8224f8226f727d43c5f2a05bd94c9a93f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_ThermoStoichWizard.jl", "max_issues_repo_name": "giannamars/DEBTrait.jl", "max_issues_repo_head_hexsha": "3ce3dae8224f8226f727d43c5f2a05bd94c9a93f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_ThermoStoichWizard.jl", "max_forks_repo_name": "giannamars/DEBTrait.jl", "max_forks_repo_head_hexsha": "3ce3dae8224f8226f727d43c5f2a05bd94c9a93f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.0, "max_line_length": 121, "alphanum_fraction": 0.7092592593, "num_tokens": 480}
|
\documentclass{article}
\title{AATOM - An Agent-based Airport Terminal Operations Model Simulator}
\author{Stef Janssen
\\\href{mailto:s.a.m.janssen@tudelft.nl}{\textit{s.a.m.janssen@tudelft.nl}}
\\\textit{Delft University of Technology}
}
\date{September 2017}
\usepackage{natbib}
\usepackage{graphicx}
\usepackage{listings}
\usepackage[margin=1.2in]{geometry}
\usepackage{hyperref}
\usepackage{fancyvrb}
\usepackage{todonotes}
\begin{document}
\maketitle
\section{Introduction}
This document gives an overview of the functionalities of the AATOM simulator. AATOM is Java-based and is used to simulate airport terminal operations in an agent-based way. It provides the user with a large set of basic functions useful for experimentation. This document is outlined as follows. An installation guide is found in Section \ref{sec:installation}, a set of tutorials is provided in Section \ref{sec:tutorials}. Finally, frequently asked questions are stated in Section \ref{sec:faq}. The full Java documentation for the simulator can be found at \href{https://stefjanssen.github.io/AATOM/}{https://stefjanssen.github.io/AATOM/}. Furthermore, the code used in this work can also be found on Github.
\section{Installation} \label{sec:installation}
AATOM can be downloaded from \href{https://github.com/StefJanssen/AATOM}{https://github.com/StefJanssen/AATOM}. After downloading, include the .jar file into the path of a java project in the IDE of your choosing. A tutorial for Eclipse can be found \href{https://stackoverflow.com/questions/3280353/how-to-import-a-jar-in-eclipse}{\underline{here}}, for Intellij it can be found \href{https://stackoverflow.com/questions/1051640/correct-way-to-add-external-jars-lib-jar-to-an-intellij-idea-project}{\underline{here}}.
The .jar file contains all functionality of AATOM and can be extended to create your own models in. It also contains some basic example airports that you can simulate yourself.
Basic introductions to Java are readily available from the web. You can for instance take a look \href{https://www.tutorialspoint.com/java/}{here} or \href{http://math.hws.edu/javanotes/}{here}. Refer to the tutorials section below for ways to use AATOM.
\section{Tutorials} \label{sec:tutorials}
This section contains a set of tutorials that show the basics of AATOM. The tutorials gradually increases in complexity, until you designed your own airport. Code for all tutorials are provided on Github.
\subsection{The first simulation}
AATOM contains two airports that can be used for simulation: Eindhoven Airport and Rotterdam The Hague Airport. To simulate Eindhoven Airport, use the following code in the main method of your project. This main method you have to create yourself. If you do not know what this means or how to do this, please refer to the Java introductions websites of the previous section.
\begin{verbatim}
tutorial1();
\end{verbatim}
The first command (on line 1) in this method generates a Simulator called \textit{sim} from the ModelBuilder class. This class contains the prebuilt simulations for Eindhoven Airport and Rotterdam The Hague Airport. The inputs are a Boolean value stating if a GUI should be displayed and an integer stating the time step of the simulation (in milliseconds). The command on line 2 creates a thread and starts the simulation.
To be able to run this code, you need to import the correct classes in your own class. In an IDE such as Eclipse or Intellij, you can automatically do this by resolving the errors it shows. If multiple classes can be imported, a general rule of thumb is to choose the class that does \textit{not} start with \textit{`java.'}.
When the program is run, you see a visualization of Eindhoven Airport. In addition, passengers are generated and execute different activities in the airport. Processes like check-in and security are also present. Replacing \mbox{\textit{eindhovenAirport}} by \mbox{\textit{rotterdamTheHagueAirport}} in the code on line 1 generates a simulation for Rotterdam The Hague Airport.
\subsection{Building the Environment}
You can design your own environment by adding \textit{environment objects} to the simulation. An example is given on how to add a wall to the simulation, but all environment objects are added in the same way. An overview of existing environment objects can be found in the documentation.
\begin{verbatim}
tutorial2a();
\end{verbatim}
The first line creates a simulator, much like in the tutorial above. In this example, the base ending conditions are used and the GUI is set to true. These ending conditions state that the simulation should end after a specified number of seconds. In the example this is 20 seconds.
Next, on line 2, a wall is added to the simulator. The input arguments for the wall are the top left x coordinate, the top left y coordinate, the width and the height in meters respectively. Finally, on line 3, a threat is created and the simulation is started the same way as in the example above. Running the simulation shows a single wall for 10 seconds, after which the simulation is stopped.
We extend this example by creating a queuing area.
\begin{verbatim}
tutorial2b();
\end{verbatim}
Line 3 shows how a queue is created and added to the simulator. A queue is essentially a collection of queue separators and a queue area that are generated by the \textit{ModelComponentBuilder}. This is why \textit{addAll} is used: more objects are added to the simulator at the same time. The first parameter of the queue method specifies the top left corner position of the queue, the second parameter specifies the number of horizontal lanes. The third parameter specifies the width of the queue, and the fourth parameter adds a `blocking wall' to the simulation. Finally, the last parameter specifies the rotation of the queue. Running this simulation shows a wall (the one we saw before) and a queue leading towards that wall.
\subsection{Creating the First Airport}
This tutorial shows how to create your first airport in AATOM. As in the previous examples, first, we create a simulator with the base ending conditions and a GUI. Further, we create a map builder, as was done in the previous examples as well.
Then, we create a gate area at the top left corner of our map, followed by a set of chairs in the gate area. Then, a check-in area and checkpoint area is generated. These are accessed from the map builder, and include all necessary parts for simulation of these elements. The next lines are used for the creation of a flight in the environment. The flight needs a number of input arguments that are found on 11-16, while the creation and addition of the flight to the simulation happens on lines 17-19. Line 20-21 creates a passenger and adds it to the simulation. Finally, line 22 starts the simulation. This is all included in the following method.
\begin{verbatim}
tutorial3();
\end{verbatim}
When you start the simulation, you see a single passenger that moves from check-in, to the checkpoint and finally sits down on one of the chairs in the sitting area. The passenger is implemented following the AATOM architecture. You can create your own implementation of a passenger by extending the passenger class and extending the different layers (strategic, tactical and operational). Each of these layers has its own modules that need to be defined.
\subsection{Working with the Agent Generator}
This tutorial extends the tutorial described above (`Creating the First Airport'). To work with the agent generator, replace line 1 of this tutorial by the lines below. Further, remove lines 20-21 from the example. This is the call that starts with \textit{sim.add(new Passenger(...))}. This is already done in the next method.
\begin{verbatim}
tutorial4();
\end{verbatim}
The agent generator generates an agent with expected inter-arrival times of 30 seconds. The generator ensures that a random flight is chosen from the set of flights that leave in at least 30 minutes and at most 3 hours. You can create your own agent generator by extending the existing AgentGenerator class.
\subsection{Adding Analyzers}
This tutorial extends the tutorial described above (`Working with the Agent Generator'). Here, we add analyzers to the simulation. Analyzers show data of the simulation over time. Add the lines below right before you start the simulation.
\begin{verbatim}
sim.add(new QueueAnalyzer());
sim.add(new TimeInQueueAnalyzer());
sim.add(new ActivityDistributionAnalyzer());
sim.add(new TimeToGateAnalyzer());
sim.add(new AgentNumberAnalyzer());
sim.add(new MissedFlightsAnalyzer());
sim.add(new DistanceAnalyzer());
\end{verbatim}
This is already done in the following method.
\begin{verbatim}
tutorial5a();
\end{verbatim}
By adding these analyzers, you see graphs in the Graphs tab of the visualization. On top of that, the simulator automatically logs the graphs you add to a .txt file. See Section \ref{sec:analyzing} for more information on how to analyze this data.
You can also create your own analyzer to analyze some parameters of the simulation. You can do so, by extending the abstract class Analyzer. In this example, we create an analyzer that tracks how many check-in operators are currently active. To extend the Analyzer, you have to create a new file called \textit{`TutorialAnalyzer.java'} and fill it with the content below.
\begin{verbatim}
public class TutorialAnalyzer extends Analyzer {
@Override
public String[] getLineNames() {
return new String[] { "# of operators active" };
}
@Override
public String getTitle() {
return "# of check-in operators active";
}
@Override
public double[] getValues() {
double numberOfActiveOperators = 0;
for (OperatorAgent operator :
getSimulator().getMap().getMapComponents(OperatorAgent.class)) {
if (operator.getAssignment() instanceof OperatorCheckInActivity) {
if (!operator.getActiveActivities().isEmpty())
numberOfActiveOperators++;
}
}
return new double[] { numberOfActiveOperators };
}
@Override
public String getYAxis() {
return "# of operators active";
}
}
\end{verbatim}
Four methods need to be implemented to extend the Analyzer class. Three methods are used to give names to various elements of the analyzer (the title, the lines and the y axis), while the fourth is used to determine the values of the lines. In this example, a single line is used to indicate the number of active check-in operators. Make sure you add it to the simulation as well by adding the line below in your main code.
\begin{verbatim}
sim.add(new TutorialAnalyzer());
\end{verbatim}
This is already done in the following method.
\begin{verbatim}
tutorial5b();
\end{verbatim}
\subsection{Creating Your Own Passenger} \label{sec:creating}
In this tutorial we will extend the standard passenger class to create our own passenger. This class can be created in a different .java file called \textit{TutorialPassenger.java}. The only difference with the current passenger is that this extended passenger also logs a `1' if it is sitting. To create this passenger, use the code showed below.
\begin{verbatim}
public class TutorialPassenger extends Passenger {
public TutorialPassenger(Map map, Flight flight, boolean checkedIn,
Class<? extends Facility> facility, Position position,
double radius, double mass, Luggage luggage, Color color) {
super(map, flight, checkedIn, facility, position, radius, mass, luggage, color);
}
public void update(int timeStep) {
super.update(timeStep);
if (isSitting())
setLog(new String[] { "1" });
}
}
\end{verbatim}
This code consists of two parts: a constructor and an update method. The constructor is needed to create the passenger, while the update method specifies the behaviour of the passenger. The constructor has only one function: passing parameters to the super (Passenger) class. The update method consists of two parts. The first part ($super.update(timeStep)$) makes sure the standard passenger behaviour is executed, while the second part logs a 1 if the agent is sitting. As we only created the Tutorial Passenger, and did not yet add it to the simulator, the behaviour will not yet show. To achieve this, we extend the current BaseAgentGenerator class so that TutorialPassengers are added to the simulation. The code (created in a different \textit{TutorialAgentGenerator.java} file) to do this, is shown below.
\begin{verbatim}
public class TutorialAgentGenerator extends BaseAgentGenerator {
public TutorialAgentGenerator(double interArrivalTime) {
super(interArrivalTime);
}
@Override
public HumanAgent generateAgent(long numberOfSteps, int timeStep, boolean forced) {
if (forced || canGenerate(timeStep)) {
Luggage luggage = new Luggage(LuggageType.CARRY_ON,
Utilities.RANDOM_GENERATOR.nextDouble(),
Utilities.RANDOM_GENERATOR.nextDouble());
if (areas.isEmpty())
return null;
EntranceArea area = areas.get(Utilities.RANDOM_GENERATOR.
nextInt(areas.size()));
Position start = Utilities.generatePosition(area.getShape());
Flight flight = getEligibleFlight();
if (flight != null) {
return new TutorialPassenger(simulator.getMap(),
flight, false, null, start, 0.2, 80, luggage,
Color.RED);
}
}
return null;
}
}
\end{verbatim}
The constructor passes an argument to the super class, while the $generateAgent(numberOfSteps,$ $timeStep, forced)$ method ensures that a TutorialPassenger is generated with random parameters if possible.
We now adapt the previous tutorial to ensure that the tutorial agents generator is used, instead of the base agent generator.
\begin{verbatim}
Simulator sim = new Simulator(true, 100, new BaseEndingConditions(3600),
new TutorialAgentGenerator(30));
\end{verbatim}
This is already done in the following method.
\begin{verbatim}
tutorial6();
\end{verbatim}
After running this example, you will see that the $agentLog.txt$ file is not empty anymore and contains the logged data.
\subsection{Analyzing Data} \label{sec:analyzing}
By default, data of the different analyzers is saved in a text file. To add the traces of each agent to the log, a different type of logger has to be used. This can be done by changing the previous example as follows.
\begin{verbatim}
tutorial7();
\end{verbatim}
After a simulation run, a collection of log files are generated in a subfolder of the \textit{`logfiles'} folder. This folder can be found in the main folder of your Java project by default. For each simulation run, a folder that is named based on time is generated. Four files can be found in this folder: \textit{`agentLog.txt', `returnValues.txt', `agentTrace.txt'} and \textit{`trackedParameters.txt'}. These files contain the logs that you added yourself, the values returned by the EndingConditions, position history of passengers and the data of the analyzers respectively. These text files can be imported with your favorite data analysis tool.
Matlab code to read and save the .txt file is provided in the analytics section on Github. These Matlab scripts and functions can be used to import and analyze data generated by AATOM.
To use this code, run the \textit{`importAndVisualize.m'} script in Matlab and select the \textit{folder} that contains the log files of a simulation run. So for instance, select \textit{`logfiles/1503561764574\_9956'}, and not \textit{`logfiles/1503561764574\_9956/agentTrace.txt'}. This script visualizes the data of the \textit{`agentTrace.txt'} and \textit{`trackedParameters.txt'} files. Further, it saves all data into a data format that can be used for further analysis.
\subsection{Customized Views}
You can add customized views for MapComponents that you created. In this example, we create a custom view for the TutorialPassenger as defined in Section~\ref{sec:creating}. To do this, we extend the MapComponentView class in a new TutorialPassengerView class, as shown below.
\begin{verbatim}
public class TutorialPassengerView extends MapComponentView {
private TutorialPassenger passenger;
public TutorialPassengerView(TutorialPassenger passenger) {
this.passenger = passenger;
}
@Override
public String getAboutString() {
return "<html><i>Hello</i> world, my hashcode is: " + passenger.hashCode() + "</html>";
}
@Override
public void paintComponent() {
ShapeDrawer.drawCircle(Color.GREEN, passenger.getPosition(), passenger.getRadius());
// Set the bounds for the about box.
setBounds(ShapeDrawer.getRectangle(new Position(passenger.getPosition().x
- passenger.getRadius(), passenger.getPosition().y - passenger.getRadius()),
2 * passenger.getRadius(), 2 * passenger.getRadius()));
}
}
\end{verbatim}
To create a custom view for a MapComponent, the class name should exactly match the class name of that MapComponent, with `View' appended at the end. Furthermore, the constructor takes exactly one argument, which is the MapComponent itself. Then, the $paintComponent()$ method has to be implemented. This method can be implemented using the $ShapeDrawer$ class, which automatically draws shapes at the right place of the GUI.
Finally, mapComponents can be clicked in the GUI. When they are clicked, an about box is shown if the $getAboutString()$ method was implemented. For this method to function correctly, the $setBounds(r)$ method has to be called in the $paintComponent()$ method. This method sets the bounds of the mapComponent, such that it generates an about box when clicked within this box. Finally, the $getAboutString()$ has to be filled in with any String that you would like to show in the about box. You can format the String following HTML standards to format the about box. In our example, we show a green circle as a representation for the TutorialPassenger, and show the hash code of the agent in the about box.
\subsection{Running Experiments} \label{sec:experiments}
This final tutorial shows how you can perform experiments with the simulator. To be able to do this, you have to create a new main method and class. This main method starts a simulation like we have done in all previous tutorials. The main difference with the tutorials before, is that we use the $args$ field of the main method. This args field can be used to pass arguments to the simulator to specify the experiment conditions. In our case, we extend the tutorials above by adding two input arguments. The first argument specifies the interarrival time, while the second argument specifies the random seed that is being used. For an experiment, we of course do not need visualization, so we turn the GUI off. Finally, we set the name of the simulation to the two input arguments, separated with an underscore. This name will be the last line of the `agentLog.txt' file, and will later be useful to analyze the simulation outcomes. A snippet of the class is shown below.
\begin{verbatim}
public class ExperimenterMain {
public static void main(String[] args) {
int interarrivalTime = Integer.parseInt(args[0]);
long seed = Long.parseLong(args[1]);
Simulator sim = new Simulator.Builder<>().setSimulationName(args[0] + "_" + args[1])
.setAgentGenerator(new TutorialAgentGenerator(interarrivalTime))
.setEndingConditions(new BaseEndingConditions(3700)).setGui(false)
.setLogger(new BaseLogger(true)).setRandomSeed(seed).build();
[...]
}
\end{verbatim}
This class is now used to start the experiment using the Experimenter class. This class takes three input arguments: a list of parameters to experiment with, the main class and the number of parallel tasks to be run. The third argument defaults to the number of available cores in the machine.
\begin{verbatim}
List<String[]> inputs = new ArrayList<>();
inputs.add(new String[] { "30", "111111" });
inputs.add(new String[] { "30", "222222" });
inputs.add(new String[] { "40", "111111" });
inputs.add(new String[] { "40", "222222" });
Experimenter experimenter = new Experimenter(inputs, ExperimenterMain.class);
new Thread(experimenter).start();
\end{verbatim}
\section{Frequently Asked Questions} \label{sec:faq}
Frequently asked questions are shown here. If you have a question that this document does not answer, please contact the author of this document. Contact details are provided at the top of this document.
\subsection*{How can I zoom in and out in the GUI?}
Zooming in works by dragging your mouse over a specific area. When you do so, you will see a black rectangle appear that indicates the area that you will zoom into. By clicking on the right mouse button, you zoom out to the original view.
\subsection*{Why can't I see the logged files of my simulation?}
There are a few common causes and corresponding solutions for this problem.
\begin{enumerate}
\item You forcefully closed the simulation. This means you pressed \textit{ctrl + c} in a command window or closed the program from the task manager of your operating system/IDE. To prevent this from happening, ensure that your ending conditions are defined properly, close the simulation using the GUI, or wait until the simulation run has ended.
\item You explicitly mentioned that the simulator should not log your simulation by using a \textit{null} parameter in the constructor of the simulator. To prevent this, remove the \textit{null} parameter from the constructor.
\item You do not have writing permission in the operating system. To solve this, run the program in administrator mode.
\item The simulation is still writing the last logs. Depending on the computing speed of your computer, it can take a short while after you have closed the simulation before the .txt files are ready to be opened.
\end{enumerate}
\subsection*{Can I edit the Eindhoven Airport example of the first tutorial?}
You cannot edit the example that is present directly, however you can make a copy of the source code for this example and edit this copied code. The code for this example can be found on \href{https://github.com/StefJanssen/AATOM/blob/master/Simulator/src/simulation/simulationBuilder/SimulationBuilder.java}{\underline{Github}}. Find the eindhovenAirport method, and copy it into a class of your choosing. You can now change the example.
\subsection*{How can I generate multiple agent types in my own agent generator?}
You can do so by making sure there are multiple return statements (returning different agent types) in your $generateAgent()$ method. Based on your own defined conditions, one return statement returning the first agent type can be reached, while other agents can be reached based on other conditions.
\end{document}
|
{"hexsha": "0491d8ff766c3ff67c16777228c170a29d2ad77b", "size": 22969, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Tutorials/Tutorial.tex", "max_stars_repo_name": "StefJanssen/SeRiMa-ABM", "max_stars_repo_head_hexsha": "3fdf9f64037b1684b23a4c91323ca38589d86524", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-09-17T00:58:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-08T03:40:22.000Z", "max_issues_repo_path": "Tutorials/Tutorial.tex", "max_issues_repo_name": "StefJanssen/SeRiMa-ABM", "max_issues_repo_head_hexsha": "3fdf9f64037b1684b23a4c91323ca38589d86524", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tutorials/Tutorial.tex", "max_forks_repo_name": "StefJanssen/SeRiMa-ABM", "max_forks_repo_head_hexsha": "3fdf9f64037b1684b23a4c91323ca38589d86524", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-09-17T00:58:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-09T08:02:14.000Z", "avg_line_length": 72.0031347962, "max_line_length": 972, "alphanum_fraction": 0.7696025077, "num_tokens": 5057}
|
import numpy as np
from .base import BaseOptimizer
class GradientDescent(BaseOptimizer):
    """Plain (vanilla) gradient descent.

    Each update subtracts ``learning_rate`` times the gradient from every
    trainable layer's parameters, with no momentum or adaptive scaling.
    """

    def __init__(self, trainable_layers):
        """
        :param trainable_layers: Layers whose parameters this optimizer updates.
        """
        super().__init__(trainable_layers)

    def initialize(self):
        """No per-layer state is needed for plain gradient descent."""
        pass

    def update(self, learning_rate, w_grads, b_grads, step):
        """Apply one gradient-descent step to every trainable layer.

        :param learning_rate: Step size.
        :param w_grads: Mapping from layer to its weight gradient.
        :param b_grads: Mapping from layer to its bias gradient.
        :param step: Iteration counter (unused here; kept so all optimizers
            share the same ``update`` signature).
        """
        for layer in self.trainable_layers:
            scaled_dw = learning_rate * w_grads[layer]
            scaled_db = learning_rate * b_grads[layer]
            layer.update_params(dw=scaled_dw, db=scaled_db)
class RMSProp(BaseOptimizer):
    """RMSProp optimizer.

    Maintains an exponential moving average of squared gradients for each
    layer and scales every gradient by the inverse square root of the
    (bias-corrected) average.
    """

    def __init__(self, trainable_layers, beta=0.9, epsilon=1e-8):
        """
        :param trainable_layers: Layers whose parameters this optimizer updates.
        :param beta: Decay rate of the squared-gradient moving average.
        :param epsilon: Small constant that prevents division by zero.
        """
        super().__init__(trainable_layers)
        # Moving averages of squared gradients, keyed by ("dw"/"db", layer).
        self.s = {}
        self.beta = beta
        self.epsilon = epsilon

    def initialize(self):
        """Allocate zeroed squared-gradient accumulators for every layer."""
        for layer in self.trainable_layers:
            w, b = layer.get_params()
            self.s[("dw", layer)] = np.zeros(w.shape)
            self.s[("db", layer)] = np.zeros(b.shape)

    def update(self, learning_rate, w_grads, b_grads, step):
        """Perform one RMSProp update on every trainable layer.

        :param learning_rate: Step size.
        :param w_grads: Mapping from layer to its weight gradient.
        :param b_grads: Mapping from layer to its bias gradient.
        :param step: 1-based iteration counter used for bias correction.
        """
        decay = self.beta
        # Correction term compensating the zero-initialized averages.
        bias_fix = 1 - np.power(decay, step)
        for layer in self.trainable_layers:
            key_w = ("dw", layer)
            key_b = ("db", layer)
            self.s[key_w] = decay * self.s[key_w] + (1 - decay) * np.square(
                w_grads[layer]
            )
            self.s[key_b] = decay * self.s[key_b] + (1 - decay) * np.square(
                b_grads[layer]
            )
            # Bias-corrected second-moment estimates.
            s_w_hat = self.s[key_w] / bias_fix
            s_b_hat = self.s[key_b] / bias_fix
            dw = learning_rate * (
                w_grads[layer] / (np.sqrt(s_w_hat) + self.epsilon)
            )
            db = learning_rate * (
                b_grads[layer] / (np.sqrt(s_b_hat) + self.epsilon)
            )
            layer.update_params(dw, db)
class Adam(BaseOptimizer):
    """Adam optimizer: bias-corrected first and second moment estimates
    of the gradients drive the parameter updates.
    """

    def __init__(self, trainable_layers, beta1=0.9, beta2=0.999, epsilon=1e-8):
        super(Adam, self).__init__(trainable_layers)
        self.v = {}             # first moment (moving mean of gradients)
        self.s = {}             # second moment (moving mean of squared gradients)
        self.beta1 = beta1      # decay rate for the first moment
        self.beta2 = beta2      # decay rate for the second moment
        self.epsilon = epsilon  # numerical stabilizer in the denominator

    def initialize(self):
        # Zero-filled moment accumulators, one pair per parameter tensor.
        for layer in self.trainable_layers:
            w, b = layer.get_params()
            for moments in (self.v, self.s):
                moments[("dw", layer)] = np.zeros(w.shape)
                moments[("db", layer)] = np.zeros(b.shape)

    def update(self, learning_rate, w_grads, b_grads, step):
        # Bias corrections compensate for the zero initialization of v and s.
        v_corr = 1 - np.power(self.beta1, step)
        s_corr = 1 - np.power(self.beta2, step)
        for layer in self.trainable_layers:
            key_w, key_b = ("dw", layer), ("db", layer)

            # First moment: exponential moving average of the gradients.
            self.v[key_w] = self.beta1 * self.v[key_w] + (1 - self.beta1) * w_grads[layer]
            self.v[key_b] = self.beta1 * self.v[key_b] + (1 - self.beta1) * b_grads[layer]
            v_hat_w = self.v[key_w] / v_corr
            v_hat_b = self.v[key_b] / v_corr

            # Second moment: exponential moving average of squared gradients.
            self.s[key_w] = self.beta2 * self.s[key_w] + (1 - self.beta2) * np.square(w_grads[layer])
            self.s[key_b] = self.beta2 * self.s[key_b] + (1 - self.beta2) * np.square(b_grads[layer])
            s_hat_w = self.s[key_w] / s_corr
            s_hat_b = self.s[key_b] / s_corr

            layer.update_params(
                learning_rate * v_hat_w / (np.sqrt(s_hat_w) + self.epsilon),
                learning_rate * v_hat_b / (np.sqrt(s_hat_b) + self.epsilon),
            )
# -- Assign to the short forms --
# Lowercase aliases so callers can reference the optimizers through
# snake_case names while the canonical classes keep CamelCase names.
adam = Adam
rmsprop = RMSProp
gradient_descent = GradientDescent
|
{"hexsha": "6a4b81d58886a1237147a5f7ebf04637a8c627d3", "size": 4237, "ext": "py", "lang": "Python", "max_stars_repo_path": "mini_keras/optimizer.py", "max_stars_repo_name": "Deep-Alchemy/Mini-Keras", "max_stars_repo_head_hexsha": "07aeb129c2530d39dc4dcea139c6fba72268d535", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-04-20T09:55:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T13:12:54.000Z", "max_issues_repo_path": "mini_keras/optimizer.py", "max_issues_repo_name": "Deep-Alchemy/Mini-Keras", "max_issues_repo_head_hexsha": "07aeb129c2530d39dc4dcea139c6fba72268d535", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mini_keras/optimizer.py", "max_forks_repo_name": "Deep-Alchemy/Mini-Keras", "max_forks_repo_head_hexsha": "07aeb129c2530d39dc4dcea139c6fba72268d535", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-18T14:05:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-06T14:13:44.000Z", "avg_line_length": 33.626984127, "max_line_length": 103, "alphanum_fraction": 0.5612461647, "include": true, "reason": "import numpy", "num_tokens": 1043}
|
"""
A Stage to load data from a CSV datarelease format file into a PISA pi ContainerSet
"""
from __future__ import absolute_import, print_function, division
import numpy as np
import pandas as pd
from pisa import FTYPE
from pisa.core.pi_stage import PiStage
from pisa.utils.profiler import profile
from pisa.core.container import Container
from pisa.core.events_pi import EventsPi
class csv_data_hist(PiStage):
    """
    CSV file loader PISA Pi class

    Reads a CSV data-release file once (in `setup_function`) and fills a
    single PISA container with its event counts and reconstructed
    quantities, then histograms the weights onto the output binning.

    Parameters
    ----------
    events_file : csv file path
    """
    def __init__(self,
                 events_file,
                 data=None,
                 params=None,
                 input_names=None,
                 output_names=None,
                 debug_mode=None,
                 input_specs=None,
                 calc_specs=None,
                 output_specs=None,
                 ):
        # Instantiation arg that should not change over the stage lifetime.
        self.events_file = events_file

        # This stage exposes no tunable parameters.
        expected_params = ()
        # A copy of the initial weights is kept, to be modified by later stages.
        input_apply_keys = ('weights',)
        output_apply_keys = ('weights',)

        # init base class
        super().__init__(
            data=data,
            params=params,
            expected_params=expected_params,
            input_names=input_names,
            output_names=output_names,
            debug_mode=debug_mode,
            input_specs=input_specs,
            calc_specs=calc_specs,
            output_specs=output_specs,
            input_apply_keys=input_apply_keys,
            output_apply_keys=output_apply_keys,
        )

        # A histogram loader only makes sense with binned output.
        assert self.output_mode == 'binned'

    def setup_function(self):
        # Load the whole release file and push the relevant columns into a
        # single 'data' container, cast to the configured float type.
        events = pd.read_csv(self.events_file)

        container = Container('data')
        container.data_specs = 'events'
        for key, column in (('weights', 'count'),
                            ('reco_energy', 'reco_energy'),
                            ('reco_coszen', 'reco_coszen'),
                            ('pid', 'pid')):
            container[key] = events[column].values.astype(FTYPE)
        self.data.add_container(container)

        # check created at least one container
        if len(self.data.names) == 0:
            raise ValueError(
                'No containers created during data loading for some reason.'
            )

        container.array_to_binned('weights', self.output_specs)
|
{"hexsha": "5d3f670055807c9067dd675aed247aa7a90448dd", "size": 2473, "ext": "py", "lang": "Python", "max_stars_repo_path": "pisa/stages/data/csv_data_hist.py", "max_stars_repo_name": "wym109/pisa", "max_stars_repo_head_hexsha": "696803320f577d241651df900726b76a770d072a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pisa/stages/data/csv_data_hist.py", "max_issues_repo_name": "wym109/pisa", "max_issues_repo_head_hexsha": "696803320f577d241651df900726b76a770d072a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-05-03T15:46:07.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-03T17:57:17.000Z", "max_forks_repo_path": "pisa/stages/data/csv_data_hist.py", "max_forks_repo_name": "wym109/pisa", "max_forks_repo_head_hexsha": "696803320f577d241651df900726b76a770d072a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-15T13:48:48.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-15T13:48:48.000Z", "avg_line_length": 28.1022727273, "max_line_length": 83, "alphanum_fraction": 0.5976546704, "include": true, "reason": "import numpy", "num_tokens": 489}
|
module remesh_smoothing_examples
using FinEtools
using FinEtools.MeshExportModule
using FinEtools.TetRemeshingModule

"""
    remesh1()

Coarsen a tetrahedral block mesh and check that the volume of the
coarsened mesh still matches the analytical volume of the block.
"""
function remesh1()
    L= 0.3;
    W = 0.3;
    a = 0.15;
    nL=46; nW=46; na=36;
    fens,fes = T4block(a,L,W,nL,nW,na,:a);
    t = deepcopy(connasarray(fes));
    v = deepcopy(fens.xyz);
    tmid = ones(Int, size(t,1));
    desired_ts =a;
    bfes = meshboundary(fes);
    f = connectednodes(bfes);
    bv = zeros(Bool, size(v,1));
    # BUGFIX: use broadcast assignment; `bv[f] = true` (scalar assigned to
    # a vector of indices) is an error in Julia >= 1.0.
    bv[f] .= true;
    println("Mesh size: initial = $(size(t,1))")
    t0 = time()
    t, v, tmid = TetRemeshingModule.coarsen(t, v, tmid; bv = bv, desired_ts = desired_ts);
    println("Mesh size: final = $(size(t,1)) [$(time() - t0) sec]")
    fens.xyz = deepcopy(v)
    fes = fromarray!(fes, t)
    setlabel!(fes, tmid)
    geom = NodalField(fens.xyz)
    femm = FEMMBase(IntegData(fes, SimplexRule(3, 1)))
    # Integrating the constant 1 over the mesh yields its volume.
    V = integratefunction(femm, geom, (x) -> 1.0)
    println("V = $(V) compared to $(L * W * a)")
    # File = "test1.vtk"
    # MeshExportModule.vtkexportmesh(File, t, v, MeshExportModule.T4)
    # @async run(`"paraview.exe" $File`)
end # remesh1

"""
    mesh_smoothing()

Distort the boundary of a triangle mesh sinusoidally, then recover mesh
quality with Taubin smoothing while keeping the boundary nodes fixed.
"""
function mesh_smoothing()
    println("""
    Meshing, deforming and smoothing
    """)
    A = 100. # strip width
    N = 16
    tolerance = A / N / 1.0e5
    fens, fes = T3block(A, A, N, N)
    bnl = connectednodes(meshboundary(fes))
    # Sinusoidal distortion applied only to the boundary nodes.
    for ixxxx = 1:length(bnl)
        x, y = fens.xyz[bnl[ixxxx], :]
        fens.xyz[bnl[ixxxx], 1] += A / N * sin(2 * pi * y / A)
        fens.xyz[bnl[ixxxx], 2] += -A / N * sin(2 * pi * x / A)
    end
    File = "mesh_smoothing_before.vtk"
    vtkexportmesh(File, fens, fes);
    @async run(`"paraview.exe" $File`)
    println("$(fens.xyz[Int(N^2 / 2), :] )")
    fixedv = falses(count(fens))
    # BUGFIX: broadcast assignment, see remesh1 above.
    fixedv[bnl] .= true
    fens = meshsmoothing(fens, fes; fixedv=fixedv, method=:taubin, npass=100)
    println("$(fens.xyz[Int(N^2 / 2), :] )")
    geom = NodalField(fens.xyz)
    File = "mesh_smoothing_after.vtk"
    vtkexportmesh(File, fes.conn, geom.values, FinEtools.MeshExportModule.T3);
    @async run(`"paraview.exe" $File`)
    println("Done")
    true
end # mesh_smoothing

"""
    allrun()

Run all the examples of this module in sequence.
"""
function allrun()
    println("#####################################################")
    println("# remesh1 ")
    remesh1()
    println("#####################################################")
    println("# mesh_smoothing ")
    mesh_smoothing()
    return true
end # function allrun

end # module remesh_smoothing_examples
|
{"hexsha": "22838fb3207beba5336e7a0d690ccc80c8b7237f", "size": 2554, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/meshing/remesh_smoothing_examples.jl", "max_stars_repo_name": "KristofferC/FinEtools.jl", "max_stars_repo_head_hexsha": "b0ce27e6a8aa63d1057307bcf36919239b1f3279", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/meshing/remesh_smoothing_examples.jl", "max_issues_repo_name": "KristofferC/FinEtools.jl", "max_issues_repo_head_hexsha": "b0ce27e6a8aa63d1057307bcf36919239b1f3279", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/meshing/remesh_smoothing_examples.jl", "max_forks_repo_name": "KristofferC/FinEtools.jl", "max_forks_repo_head_hexsha": "b0ce27e6a8aa63d1057307bcf36919239b1f3279", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.170212766, "max_line_length": 90, "alphanum_fraction": 0.5575567737, "num_tokens": 842}
|
classdef Residuals < Exportable
%--- * --. --- --. .--. ... * ---------------------------------------------
% ___ ___ ___
% __ _ ___ / __| _ | __|
% / _` / _ \ (_ | _|__ \
% \__, \___/\___|_| |___/
% |___/ v 1.0RC1
%
%--------------------------------------------------------------------------
% Copyright (C) 2021 Geomatics Research & Development srl (GReD)
% Written by: Andrea Gatti
% Contributors: Andrea Gatti, ...
% A list of all the historical goGPS contributors is in CREDITS.nfo
%--------------------------------------------------------------------------
%
% This program is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
%
%--------------------------------------------------------------------------
% 01100111 01101111 01000111 01010000 01010011
%--------------------------------------------------------------------------
%% CONSTANTS
    properties (Constant)
        % Human-readable description of the supported residual types
        RES_TYPE = {'0: no residuals', '1: PREPRO', '2: U1 engine', '3: U2 engine'};
    end
    properties
        type       % residual type: 0, 1, 2, 3 - see RES_TYPE
        time       % epochs of the residuals as GPS_Time [1 x 1] storing n_epoch epochs
        value      % matrix of residuals [n_epoch x n_entry] (rows = epochs, columns = entries)
        prn        % prn of the satellite of each column (1 x col of pr/ph)
        obs_code   % type of tracking of the column (e.g. GL1C, GL1CL2WI, ...)
        rec_coo    % <optional> Coordinates of the receiver
    end
    methods
        % Creator
        function this = Residuals()
            % Object creator
            % Starts from a clean state: every property is set to its
            % default / empty value (see reset).
            this.reset();
        end
    end
% =========================================================================
%% METHODS OBJ MANAGEMENT
% =========================================================================
methods % Public Access
function is_empty = isEmpty(this)
% Return the empty status
%
% SYNTAX
% is_empty = this.isEmpty()
is_empty = this.type == 0 || this.time.isEmpty;
end
function reset(this)
% Reset the stored residuals
% Empty the object
%
% SYNTAX
% this.reset
this.type = 0;
this.time = GPS_Time();
this.value = [];
this.prn = [];
this.obs_code = '';
this.rec_coo = Coordinates();
end
        function import(this, type, time, value, prn, obs_code, rec_coo)
            % Import new residuals (and delete the previous content)
            %
            % Thin wrapper: delegates entirely to init (defined elsewhere
            % in this class, not visible here), which replaces the stored
            % content with the supplied arrays.
            %
            % INPUT
            %   type       % 0,1,2,3 see RES_TYPE
            %   time       % time as GPS_Time GPS_Time [1 x 1] stores n_epoch
            %   value      % matrix of residuals
            %
            %   prn        % prn of the satellite (1 x col of pr/ph)
            %   obs_code   % type of tracking of the column (e.g. GL1C, GL1CL2WI, ...)
            %
            %   rec_coo    % <optional> Coordinates of the receiver
            %
            % SYNTAX
            %   this.import(type, time, value, prn, obs_code, rec_coo)
            this.init(type, time, value, prn, obs_code, rec_coo)
        end
function append(this, type, time, value, prn, obs_code, rec_coo)
% Append new residuals to the one already stored
%
% INPUT
% type % 0,1,2,3 see RES_TYPE
% time % time as GPS_Time GPS_Time [1 x 1] stores n_epoch
% value % matrix of residuals
%
% prn % prn of the satellite (1 x col of pr/ph)
% obs_code % type of tracking of the column (e.g. GL1C, GL1CL2WI, ...)
%
% rec_coo % <optional> Coordinates of the receiver
%
% SYNTAX
% this.import(type, time, value, prn, obs_code, rec_coo)
res = Residuals();
res.init(type, time, value, prn, obs_code, rec_coo);
this.injest(res);
end
        function injest(this, res)
            % Merge (import and append) the residuals of another Residuals
            % object into this one: overlapping epochs of the stored data
            % are removed and replaced, columns are matched by their
            % numeric observation code and new columns appended at the end.
            %
            % INPUT
            %   res   Residuals object to be merged into this one
            %
            % SYNTAX
            %   this.injest(res)

            % 1) Remove the old overlapped residuals
            % Find the first epoch to remove from the existing res
            if ~res.time.isEmpty
                if isempty(this.time)
                    % NOTE(review): isempty() on the GPS_Time handle -
                    % presumably overloaded to mean "no epochs stored";
                    % elsewhere the code queries this.time.length - confirm
                    id_start = this.time.length + 1;
                    id_stop = 0;
                else
                    % First/last stored epoch falling inside the new span
                    id_start = find(this.time.getNominalTime.getRefTime(res.time.getNominalTime.first.getMatlabTime) > 0, 1, 'first');
                    id_stop = find(this.time.getNominalTime.getRefTime(res.time.getNominalTime.last.getMatlabTime) <= 0, 1, 'last');
                    if isempty(id_start)
                        % every stored epoch precedes the new data
                        id_start = this.time.length + 1;
                    end
                    if isempty(id_stop)
                        id_stop = 0;
                    end
                end
                % Find the last epoch to remove from the existing res
                id_ko = id_start : id_stop;
                if ~isempty(id_ko)
                    this.remEpoch(id_ko);
                end

                % 2) Insert data
                % Insert time: old epochs before, then new ones, then old ones after
                time = this.time.getEpoch(1 : id_start-1);
                time.append(res.time);
                time.append(this.time.getEpoch(id_start : this.time.length));

                % Numeric codes uniquely identify each column (tracking + prn)
                if isempty(this.prn)
                    code_old = [];
                else
                    code_old = Constellation_Collector.obsCode2num(this.obs_code, this.prn);
                    this.remEntry(code_old == 0); % drop unrecognized entries
                end
                if isempty(res.prn)
                    code_new = [];
                else
                    % uniform the size of obs_code (pad the shorter with blanks)
                    if size(res.obs_code,2) < size(this.obs_code,2)
                        n_el = size(this.obs_code,2);
                        res.obs_code = [res.obs_code, ' ' * char(ones(size(res.obs_code,1), n_el - size(res.obs_code,2), 'uint8'))];
                    end
                    if size(res.obs_code,2) > size(this.obs_code,2)
                        n_el = size(res.obs_code,2);
                        this.obs_code = [this.obs_code, ' ' * char(ones(size(this.obs_code,1), n_el - size(this.obs_code,2), 'uint8'))];
                    end
                    code_new = Constellation_Collector.obsCode2num(res.obs_code, res.prn);
                    res.remEntry(code_new == 0);
                end
                % new satellites to add
                [code_add, id_add] = setdiff(code_new, code_old);
                [code_common, id_new, id_old] = intersect( code_new, code_old);
                n_obs_new = size(res.value, 1);
                % resize data to add new epochs
                this.value = [this.value(1 : id_start-1, :); nan(n_obs_new, size(this.value, 2)); this.value(id_start : this.time.length, :)];
                % resize data to add columns for the new observables
                this.value = [this.value nan(size(this.value, 1), numel(code_add))];
                % add new data
                this.value(id_start + (0 : n_obs_new - 1) , [id_old; ((end - numel(code_add) + 1) : end)']) = res.value(:, [id_new; id_add]);
                % Append the obs_code of the new columns, padding with
                % blanks (char 32) when code lengths differ
                nch_old = size(this.obs_code,2);
                nch_new = size(res.obs_code,2);
                if nch_old == nch_new % length of obs code might not be compatible
                    this.obs_code = [this.obs_code; res.obs_code(id_add, :)];
                elseif nch_old > nch_new
                    this.obs_code = [this.obs_code ; [res.obs_code(id_add, :) char(32*ones(size(res.obs_code,1),nch_old-nch_new,'uint8'))]];
                else
                    this.obs_code = [[this.obs_code char(32*ones(size(this.obs_code,1),nch_new-nch_old,'uint8'))]; res.obs_code(id_add, :)];
                end
                this.prn = [this.prn; res.prn(id_add)];
                this.time = time;
                this.type = res.type; % save the last type
                this.rec_coo = res.rec_coo.getCopy; % save the last coo
            end
        end
function remEpoch(this, lid_ko)
% Remove an epoch from the residuals
%
% INPUT
% lid_ko logical array of ko epochs
%
% SYNTAX
% this.remEpoch(lid_ko);
if any(lid_ko)
if ~islogical(lid_ko)
id_ko = lid_ko;
lid_ko = false(1,this.time.length);
lid_ko(id_ko) = true;
end
id_ok = ~lid_ko;
this.time = this.time.getEpoch(id_ok);
this.value(lid_ko, :) = [];
end
end
function remEntry(this, lid_ko)
% Remove an entry from the residuals
%
% INPUT
% lid_ko logical array of ko entry
%
% SYNTAX
% this.remEntry(lid_ko);
if any(lid_ko)
if ~islogical(lid_ko)
id_ko = lid_ko;
lid_ko = false(1, size(this.value,2));
lid_ko(id_ko) = true;
end
this.value(:, lid_ko) = [];
this.prn(lid_ko) = [];
this.obs_code(lid_ko, :) = [];
end
end
        function cutEpochs(this, new_lim, end_lim)
            % Keep only the residuals falling inside the given time span,
            % removing every epoch before the first limit and after the last
            %
            % INPUT
            %   new_lim   GPS_Time with the new limits (or the start limit)
            %   end_lim   <optional> GPS_Time with the stop limit
            %
            % SYNTAX
            %   this.cutEpochs(new_limits)
            %   this.cutEpochs(lim_start, lim_stop)
            if nargin == 3
                % two-argument form: build a single limits object
                new_lim = new_lim.getCopy;
                new_lim.append(end_lim);
            end
            time_res = this.time.getNominalTime();
            % The limits are snapped to the nominal sampling grid before the
            % comparison (round to multiples of 1 / (86400 * rate) days).
            % NOTE(review): assumes getRate is expressed so that
            % 86400 * rate converts days to sample counts - confirm
            sss_start = find(time_res.getMatlabTime >= round(new_lim.first.getMatlabTime * 86400 * time_res.getRate) / (86400 * time_res.getRate), 1, 'first');
            sss_stop = find(time_res.getMatlabTime > round(new_lim.last.getMatlabTime * 86400 * time_res.getRate) / (86400 * time_res.getRate), 1, 'first');
            % Flag everything before the start and from the stop onwards
            lid = false(this.time.length(), 1);
            if ~isempty(sss_start)
                lid(1 : (sss_start - 1)) = true;
            end
            if ~isempty(sss_stop)
                lid(sss_stop : end) = true;
            end
            this.remEpoch(lid);
        end
end
% =========================================================================
%% GETTERS
% =========================================================================
methods
function [res, obs_code, prn] = getU1(this, sys_c, freq_c)
% Get residual matrix of the combined/single_freq processing
%
% SYNTAX
% [res, obs_code, prn] = this.getU1()
if this.type < 3
id_ok = true(numel(this.prn), 1);
if nargin > 1 && ~isempty(sys_c)
id_ok(this.obs_code(:,1) ~= sys_c) = false;
end
if nargin > 2 && ~isempty(freq_c)
id_ok(this.obs_code(:,3) ~= freq_c) = false;
end
prn = this.prn(id_ok);
obs_code = this.obs_code(id_ok,:);
res = this.value(:, id_ok);
else
prn = [];
obs_code = '';
res = [];
end
end
function [res, obs_code, prn] = getPrU2(this, sys_c, freq_c)
% Get residual matrix of the uncombined processing
% Pseudo-codes residuals
%
% SYNTAX
% [res, obs_code, prn] = = this.getPrU2()
if this.type == 3
id_ok = this.obs_code(:,2) == 'C';
if nargin > 1 && ~isempty(sys_c)
id_ok(this.obs_code(:,1) ~= sys_c) = false;
end
if nargin > 2 && ~isempty(freq_c)
id_ok(this.obs_code(:,3) ~= freq_c) = false;
end
prn = this.prn(id_ok);
obs_code = this.obs_code(id_ok,:);
res = this.value(:, id_ok);
else
prn = [];
obs_code = '';
res = [];
end
end
function [is_ph] = isPhase(this)
% get an index that tell which residual are phase
%
% SYNTAX:
% [is_ph] = this.isPhase()
if isempty(this.obs_code)
is_ph = false(size(this.obs_code,1),1);
else
is_ph = this.obs_code(:,2) == 'L';
end
end
function [is_co] = isCombined(this)
% get an index that tell which residual are combined phases
%
% SYNTAX:
% [is_co] = this.isCombined()
if size(this.obs_code,2) > 4
is_co = this.obs_code(:,5) ~= ' ';
else
is_co = false(size( this.obs_code,1),1);
end
end
function [res, obs_code, prn] = getPhU2(this, sys_c, freq_c)
% Get residual matrix of the combined processing
% Carrier-phase residuals
%
% SYNTAX
% [res, obs_code, prn] = this.getPhU2()
if this.type == 3
id_ok = this.obs_code(:,2) == 'L';
if nargin > 1 && ~isempty(sys_c)
id_ok(this.obs_code(:,1) ~= sys_c) = false;
end
if nargin > 2 && ~isempty(freq_c)
id_ok(this.obs_code(:,3) ~= freq_c) = false;
end
prn = this.prn(id_ok);
obs_code = this.obs_code(id_ok,:);
res = this.value(:, id_ok);
else
prn = [];
obs_code = '';
res = [];
end
end
function [res, obs_code, prn, time] = getRangeResiduals(this, sys_c)
% Get range residuals
%
% SYNTAX
% [res, obs_code, prn] = this.getU1()
if this.type < 3
if nargin == 1
[res, obs_code, prn] = this.getU1();
else
[res, obs_code, prn] = this.getU1(sys_c);
end
time = this.time;
else
% To be done!!! in case of uncombined residuals
prn = [];
obs_code = '';
res = [];
time = GPS_Time;
end
end
        function [res, obs_code, prn, type] = get(this, sys_c, freq_c)
            % Get residual matrix stored in residuals
            %
            % Dispatches on the stored type: U1 residuals for prepro /
            % combined engines, phases (falling back to pseudo-ranges when
            % no phases are present) for the uncombined engine.
            %
            % INPUT
            %   sys_c    single character describing the constellation e.g. 'G', 'R', 'E', ...
            %   freq_c   single character describing the frequency number e.g. '1', '2', ....
            %
            % SYNTAX
            %   [res, obs_code, prn, type] = this.get(sys_c, freq_c)
            type = this.type;
            switch type
                case 0
                    % no residuals stored
                    prn = [];
                    obs_code = '';
                    res = [];
                case {1, 2} % Prepro or combined residuals (both use U1)
                    if nargin == 1
                        [res, obs_code, prn] = this.getU1();
                    elseif nargin == 2
                        [res, obs_code, prn] = this.getU1(sys_c);
                    elseif nargin == 3
                        [res, obs_code, prn] = this.getU1(sys_c, freq_c);
                    end
                case 3 % if I have uncombined residuals, return just phases
                    if nargin == 1
                        [res, obs_code, prn] = this.getPhU2();
                        if isempty(res)
                            % no phases: fall back to pseudo-ranges
                            [res, obs_code, prn] = this.getPrU2();
                        end
                    elseif nargin == 2
                        [res, obs_code, prn] = this.getPhU2(sys_c);
                        if isempty(res)
                            [res, obs_code, prn] = this.getPrU2(sys_c);
                        end
                    elseif nargin == 3
                        [res, obs_code, prn] = this.getPhU2(sys_c, freq_c);
                        if isempty(res)
                            [res, obs_code, prn] = this.getPrU2(sys_c, freq_c);
                        end
                    end
            end
        end
        function res = getCopy(this)
            % Get a (deep) copy of the object
            %
            % SYNTAX
            %   res = this.getCopy();
            res = Residuals;
            res.importFromStruct(this.toStruct);
            % Handle-object properties must be duplicated explicitly,
            % otherwise the copy would share them with the original
            res.time = this.time.getCopy;
            res.rec_coo = this.rec_coo.getCopy;
        end
function sigma = getStd(this)
% Get std of all the stored residuals
% WARNING this is still a very rough estimation,
% different frequencies have different noise
%
% SINTAX
% sigma = this.getStd()
sigma = std(zero2nan(serialize(this.get())), 'omitnan');
end
end
% =========================================================================
%% AUXILLIARY
% =========================================================================
methods
        function [az, el, sat_coo, sat_name, go_id] = getAzimuthElevation(this, id_ok)
            % Get azimuth and elevation of each satellite stored in residuals
            %
            % INPUT
            %   id_ok   <optional> epoch selection (subset of this.time)
            %
            % SYNTAX
            %   [az, el, sat_coo, sat_name, go_id] = this.getAzimuthElevation();
            core = Core.getCurrentCore;
            sky = core.sky;
            % Without ephemeris files the sky object cannot compute
            % positions: fetch navigation files for the residuals time span
            if isempty(core.state.eph_name)
                fw = File_Wizard();
                fw.conjureNavFiles(this.time.first, this.time.last);
            end
            if nargin == 2
                time = this.time.getEpoch(id_ok);
            else
                time = this.time;
            end
            % NOTE(review): the session limits go from the (possibly subset)
            % first epoch to the FULL series last epoch (this.time.last, not
            % time.last) - looks intentional for ephemeris coverage, confirm
            lim = time.first.getCopy;
            lim.append(this.time.last);
            flag_no_clock = true;
            core.initSkySession(lim, flag_no_clock);
            cc = core.getConstellationCollector;
            % One entry per distinct satellite present in the residuals
            go_id = unique(cc.getIndex(this.obs_code(:,1), this.prn));
            sat_name = cc.getSatName(go_id);
            [az, el, sat_coo] = sky.getAzimuthElevation(this.rec_coo, time, go_id);
        end
end
% =========================================================================
%% MULTIPATH
% =========================================================================
methods
function ant_mp = computeMultiPath(this, marker_name, sys_grp, l_max, flag_reg, is_ph, mode, time_lim)
% Get multi path maps in different modes
%
% z_map Zernike
% r_map Zernike + (the methods specified on mode)
% g_map Simple Gridding of size [stk_grid_step]
% c_map Congruent cells gridding of size [stk_grid_step]
% g1_map Simple Gridding of size [1x1]
% c1_map Congruent cells gridding of size [1x1]
%
%
% INPUT
% marker_name name of the station
% sys_grp constellation grouping for MP estimation
% l_max maximum degree of the 3 steps of the zernike interpolation [l_max1, l_max2, l_max3]
% to disable Zernike use l_max = 0
% flag_reg add regularization points (pseudo obs at 0) in the empty areas of the sky
% is_ph use phases instead of pseudo-ranged (default = true)
% mode - 0 use Zernike + staking maps using congruent cells (variable azimuthal resolution)
% n_step_az = (360/max_n_step_az * cosd(el))
% - [0, n, m] use Zernike + stacking maps using congruent cells with maximum size of [n x m] note that the output will always be 0.5 x 0.5 degrees
% - 1 use Zernike + stacking map of [5 x 1]
% - [1, n, m] use Zernike + stacking map of [n x m]
% NOTE
% For mode 0 and 1 the output matrix will always be 0.5 x 0.5 degrees
%
% SYNTAX
% this.computeMultiPath(marker_name, sys_grp, <l_max=[43,43,43]>, <flag_reg=true>, <is_ph=true>, <mode=[0 5 1]>)
state = Core.getCurrentSettings;
flag_discard_co = false;
if nargin < 7 || isempty(mode)
mode = 0; % Z + stacking
end
if numel(mode) == 3
r_grid_step = mode(2,3);
mode = mode(1);
else
r_grid_step = state.mp_zcongruent_up_nxm;
end
n_min = state.mp_n_min; % minimum number of points per cell
n_min = 15; % <== DEBUG
% z_map Zernike
% r_map Zernike + (the methods specified on mode)
% g_map Simple Gridding of size [stk_grid_step]
% c_map Congruent cells gridding of size [stk_grid_step]
% g1_map Simple Gridding of size [1x1]
% c1_map Congruent cells gridding of size [1x1]
ltype_of_grids = [...
sum(state.mp_l_max) > 0 ...
state.mp_zcongruent_up_nxm(1) > 0 ...
state.mp_regular_up_nxm(1) > 0 ...
state.mp_congruent_up_nxm(1) > 0 ...
state.mp_regular_nxm(1) > 0 ...
state.mp_congruent_nxm(1) > 0];
log = Core.getLogger();
ant_mp = struct();
if this.isEmpty
log.addWarning('Residuals have not been computed');
else
flag_debug = false;
if flag_debug
% Enable all the grid types
ltype_of_grids = logical([1 1 1 1 1 1]); % All enabled
end
if nargin < 4 || isempty(l_max)
l_max = state.mp_l_max;
end
% Legacy support
if numel(l_max) == 3
l_max = [l_max(1) l_max];
end
if numel(l_max) == 1
l_max = [l_max 0 0 0];
end
% Depending on the maximum zernike degree change the map resolution
if max(l_max) > 180
grid_step = 0.1;
elseif max(l_max) > 90
grid_step = 0.25;
else
grid_step = 0.5;
end
if nargin < 2
marker_name = 'UNKN';
end
if nargin < 5 || isempty(flag_reg)
flag_reg = true;
end
if nargin < 6 || isempty(is_ph)
% If there are phases use phases
is_ph = any((serialize(this.obs_code(:,2:3:end-1))) == 'L');
end
if is_ph
name = 'Carrier-phase residuals';
search_obs = 'L';
else
name = 'Pseudo-ranges residuals';
search_obs = 'C';
end
deg2rad = pi/180;
cc = Core.getConstellationCollector();
log.addMarkedMessage(sprintf('Computing azimuth and elevation for "%s"', marker_name));
if nargin == 8 && ~isempty(time_lim)
id_span = this.time.getNominalTime(1) >= time_lim.first & this.time.getNominalTime(1) <= time_lim.last;
[az, el, ~, ~, go_id] = this.getAzimuthElevation(id_span);
else
id_span = true(this.time.length, 1);
[az, el, ~, ~, go_id] = this.getAzimuthElevation();
end
sys_c_list = cc.getAvailableSys;
log = Core.getLogger;
log.addMarkedMessage(sprintf('Computing multipath mitigation coefficients for "%s"', marker_name));
obs_code = this.obs_code;
if Core.getCurrentSettings.FLAG_MP_IGNORE_TRK
for i = 1 : size(obs_code, 1)
obs_code(i, 4:3:end) = '_';
end
end
for sys_c = sys_c_list(:)'
ids = find(ismember(obs_code(:,1), sys_c) & any((obs_code(:,2:3:end-1)) == search_obs, 2));
if ~any(ids)
log.addWarning(sprintf('No %s found in %s for constellation %s', name, marker_name, cc.getSysName(sys_c)));
else
obs_id_num = cc.obsCode2num([repmat('G', numel(ids),1) obs_code(ids, 2:end)], zeros(size(ids, 1), 1)); % get all the data of the same frequency - all the satellites
uobs_id = unique(obs_id_num);
for t = 1 : numel(uobs_id)
id = ids(obs_id_num == uobs_id(t)); % tracking for the specific obs_code
trk_code = obs_code(id(1),2:end);
if (numel(trk_code) > 5) && (all((trk_code(5:end)) == '_'))
trk_code = trk_code(1:4);
end
% recompute id if grouping is present
if numel(sys_grp.(sys_c)) > 1
ext_ids = find(ismember(obs_code(:,1), sys_grp.(sys_c)) & any((obs_code(:,2:3:end-1)) == search_obs, 2));
obs_id_num = cc.obsCode2num([repmat('G', numel(ext_ids),1) obs_code(ext_ids, 2:end)], zeros(size(ext_ids, 1), 1)); % get all the data of the same frequency - all the satellites
id = ext_ids(obs_id_num == uobs_id(t)); % tracking for the specific obs_code
end
data_found = false;
res = zero2nan(this.value(id_span, id));
% time filtering, search for badly estimated residuals
tmp = zero2nan(movmedian(std(res, 0, 2, 'omitnan'), 21, 'omitnan'));
thr = 5 * median(tmp, 'omitnan');
id_ko = flagMerge(tmp > thr, 11);
res(id_ko, :) = nan;
res_go_id = cc.getIndex(obs_code(id, 1), this.prn(id));
% Get all the data to interpolate
[~, id_sat] = ismember(res_go_id,go_id);
az_all = [];
el_all = [];
% Propagate orbit nans
for s = 1 : numel(res_go_id)
id_ko = isnan(el(:,id_sat(s))) | isnan(az(:,id_sat(s)));
res(id_ko, s) = nan;
end
res_all = res(~isnan(res(:)));
res_smt = Receiver_Commons.smoothMat(res, 'spline', 120/this.time.getRate);
res_smt = res_smt(~isnan(res(:)));
go_id_list = [];
for s = 1 : numel(res_go_id)
id_ok = ~isnan(res(:,s));
if any(id_ok)
data_found = true;
% res_all = [res_all; serialize(res(id_ok, s))]; %#ok<AGROW>
az_all = [az_all; az(id_ok, id_sat(s)) .* deg2rad]; %#ok<AGROW>
el_all = [el_all; el(id_ok, id_sat(s)) .* deg2rad]; %#ok<AGROW>
go_id_list = [go_id_list; res_go_id(s)]; %#ok<AGROW>
end
end
if data_found
m_max = l_max;
% Remove outliers
id_ok = Core_Utils.polarCleaner(az_all, el_all, res_all, [360, 1; 3 3]) & Core_Utils.polarCleaner(az_all, el_all, res_smt, [360, 1; 3 3]);
log.addMessage(log.indent(sprintf('1. Outlier rejection (%.3f%%)', (sum(~id_ok) / numel(id_ok)) * 100), 9));
if flag_debug
figure; plot(el_all/pi*180, res_all*1e3, '.'); hold on; plot(el_all(~id_ok)/pi*180, res_all(~id_ok)*1e3, 'o');
legend('residuals', 'outliers');
title((sprintf('Residuals of %s %s%s [mm]', marker_name, sys_c, trk_code))); drawnow
grid on;
end
clear res_smt;
az_all = az_all(id_ok);
el_all = el_all(id_ok);
res_all = res_all(id_ok);
n_obs = numel(res_all);
if flag_reg
log.addMessage(log.indent('2. Preparing regularization', 9));
% Get regularization points based on empty sky areas
[data_map, n_data_map, az_grid, el_grid] = Core_Utils.hemiGridder(az_all, el_all, res_all, [1 1]);
[az_grid, el_grid] = meshgrid(az_grid, el_grid);
az_reg = az_grid(n_data_map <= n_min);
el_reg = el_grid(n_data_map <= n_min);
% In the knots with few data add zero
az_all = [az_all; az_reg];
el_all = [el_all; el_reg];
res_all = [res_all; zeros(size(el_reg))];
if flag_discard_co
% First approach
% Do not consider observations under
% cut-off => map the radius starting from cut-off
id_ko = (el_all * 180/pi) < Core.getState.getCutOff;
az_all(id_ko) = [];
el_all(id_ko) = [];
res_all(id_ko) = [];
else
% Second approach
% Add additional points at the border close to radius 1
% This regularization is needed if the mapping of the radius
% have a cut-off
for i = 0 : 0.5 : (Core.getState.getCutOff - 2.5)
az_all = [az_all; (-pi : 0.05 : pi)'];
el_all = [el_all; i/180*pi + (-pi : 0.05 : pi)'*0];
res_all = [res_all; (-pi : 0.05 : pi)'*0];
end
end
end
% Ignore data under cut-off
if flag_discard_co
Zernike.setCutOff(max(0, (Core.getState.getCutOff - 0.5)));
end
res_work = res_all;
if ~any(ltype_of_grids(1:2))
% Zernike maps are not requested
z_map = 0;
r_map = 0;
else
step = 2 + flag_reg*1;
% Perform the first of 3 Zernike steps
Zernike.setMode(0); % Set Zernike engine to recursive
if l_max(1) > 0
log.addMessage(log.indent(sprintf('%d. Zernike coef. estimation (l_max = %d) (1/4)', step, l_max(1)), 9));
Zernike.setModeMF(0);
el2radius = Zernike.getElFun;
[z_par, l, m] = Zernike.analysisAllBlock(l_max(1), m_max(1), az_all, el2radius(el_all), res_work, 1e-5);
[z_map1, az_grid, el_grid] = Zernike.synthesisGrid(l, m, z_par, grid_step);
z_map1((el_grid * 180/pi) < Core.getState.getCutOff, :) = 0; % remove cutoff;
res_work = res_work - Core_Utils.hgrid2scatter(az_all, el_all, z_map1, false, 'spline');
step = step + 1;
else
z_map1 = 0;
end
Zernike.setMode(0); % Set Zernike engine to recursive
if l_max(2) > 0
log.addMessage(log.indent(sprintf('%d. Zernike coef. estimation (l_max = %d) (2/4)', step, l_max(1)), 9));
Zernike.setModeMF(1);
el2radius = Zernike.getElFun;
[z_par, l, m] = Zernike.analysisAllBlock(l_max(2), m_max(2), az_all, el2radius(el_all), res_work, 1e-5);
[z_map2, az_grid, el_grid] = Zernike.synthesisGrid(l, m, z_par, grid_step);
z_map2((el_grid * 180/pi) < Core.getState.getCutOff, :) = 0; % remove cutoff;
res_work = res_work - Core_Utils.hgrid2scatter(az_all, el_all, z_map2, false, 'spline');
step = step + 1;
else
z_map2 = 0;
end
% Perform the second of 3 Zernike steps
if l_max(3) > 0
log.addMessage(log.indent(sprintf('%d. Zernike coef. estimation (l_max = %d) (3/4)', step, l_max(2)), 9));
Zernike.setModeMF(2);
Zernike.setCutOff(0); % This mapping function is more unstable at low elevations => use polar regularization
el2radius = Zernike.getElFun;
[z_par, l, m] = Zernike.analysisAllBlock(l_max(3), m_max(3), az_all, el2radius(el_all), res_work, 1e-5);
[z_map3, az_grid, el_grid] = Zernike.synthesisGrid(l, m, z_par, grid_step);
z_map3((el_grid * 180/pi) < Core.getState.getCutOff, :) = 0; % remove cutoff;
res_work = res_work - Core_Utils.hgrid2scatter(az_all, el_all, z_map3, false, 'spline');
step = step + 1;
else
z_map3 = 0;
end
% Perform the third of 3 Zernike steps
if l_max(4) > 0
log.addMessage(log.indent(sprintf('%d. Zernike coef. estimation (l_max = %d) (4/4)', step, l_max(3)), 9));
Zernike.setModeMF(3);
Zernike.setCutOff(0); % This mapping function is more unstable at low elevations => use polar regularization
[z_par, l, m] = Zernike.analysisAllBlock(l_max(4), m_max(4), az_all, el2radius(el_all), res_work, 1e-5);
[z_map4, az_grid, el_grid] = Zernike.synthesisGrid(l, m, z_par, grid_step);
z_map4((el_grid * 180/pi) < Core.getState.getCutOff, :) = 0; % remove cutoff;
res_work = res_work - Core_Utils.hgrid2scatter(az_all, el_all, z_map4, false, 'spline');
step = step + 1;
else
z_map4 = 0;
end
% Generate maps
log.addMessage(log.indent(sprintf('%d. Compute mitigation grids', step), 9));
step = step + 1;
z_map = z_map1 + z_map2 + z_map3 + z_map4; % z_map Zernike only
if ~ltype_of_grids(2) % r_map Zernike + (the methods specified on mode)
r_map = 0;
else
% In this mode a gridding on the residuals is performed
% to retrieve the high frequency multipath a double step procedure is performed,
% first low-res than high-res
if mode == 1
flag_congruent = false;
else % if mode == 0
flag_congruent = true;
end
% low-res step
grid_step1 = [max(r_grid_step(1), 1.5) max(r_grid_step(end), 0.5)];
out_step1 = [min([grid_step1(1), grid_step(1), 0.5]) min([grid_step1(end), grid_step(end) 0.5])];
% high-res step
grid_step2 = r_grid_step;
out_step2 = [min([r_grid_step(1), grid_step(1)]) min([r_grid_step(end), grid_step(end)])];
% Set the final size of the r_grid (it is generally [0.25 x 0.1] deg
out_size = [90 360] ./ fliplr(out_step2);
% Restore data with no regularization
res_work = res_all(1 : n_obs);
res_work = res_work - Core_Utils.hgrid2scatter(az_all(1 : n_obs), el_all(1 : n_obs), Core_Utils.resize2(z_map, out_size));
% Compute low-res grid - STEP 1
log.addMessage(log.indent(sprintf(' - Zernike + LoRes Congruent %g x %g', grid_step1(1), grid_step1(end)), 9));
[r_map1, n_data_map, az_grid, el_grid] = Core_Utils.hemiGridder(az_all(1 : n_obs), el_all(1 : n_obs), res_work, grid_step1, out_step1, flag_congruent, n_min);
% Resize it to the final grid size
r_map = Core_Utils.resize2(r_map1, out_size);
res_work = res_work - Core_Utils.hgrid2scatter(az_all(1 : n_obs), el_all(1 : n_obs), r_map);
% Compute high-res grid - STEP 2 -- this is not CONGRUENT!
% note that high latitude cells are usually under n_min thr
log.addMessage(log.indent(sprintf(' - Zernike + HiRes %g x %g', grid_step2(1), grid_step2(end)), 9));
[r_map2, n_data_map, az_grid, el_grid] = Core_Utils.hemiGridder(az_all(1 : n_obs), el_all(1 : n_obs), res_work, grid_step2, grid_step2, false, 7);
r_map = Core_Utils.resize2(z_map, out_size) + r_map + Core_Utils.resize2(r_map2, out_size);
end
end
% Compute normal and congruent maps as comparison (no regularization)
if ltype_of_grids(3) % g_map Simple Gridding of size
log.addMessage(log.indent(sprintf(' - Regular grid %g x %g', state.mp_regular_up_nxm(1), state.mp_regular_up_nxm(end)), 9));
g_map = Core_Utils.hemiGridder(az_all, el_all, res_all, state.mp_regular_up_nxm , grid_step, false, n_min);
else
g_map = 0;
end
if ltype_of_grids(4) % c_map Congruent cells gridding of size [stk_grid_step]
log.addMessage(log.indent(sprintf(' - Congruent grid %g x %g', state.mp_congruent_up_nxm(1), state.mp_congruent_up_nxm(end)), 9));
c_map = Core_Utils.hemiGridder(az_all, el_all, res_all, state.mp_congruent_up_nxm, grid_step, true, n_min);
else
c_map = 0;
end
if ltype_of_grids(5) % g1_map Simple Gridding of size [1x1]
log.addMessage(log.indent(sprintf(' - Regular grid %g x %g', state.mp_regular_nxm(1), state.mp_regular_nxm(end)), 9));
g1_map = Core_Utils.hemiGridder(az_all(res_all ~= 0), el_all(res_all ~= 0), res_all(res_all ~= 0), state.mp_regular_nxm, [], false, n_min);
else
g1_map = 0;
end
if ltype_of_grids(6) % c1_map Congruent cells gridding of size [1x1]
log.addMessage(log.indent(sprintf(' - Congruent grid %g x %g', state.mp_congruent_nxm(1), state.mp_congruent_nxm(end)), 9));
c1_map = Core_Utils.hemiGridder(az_all(res_all ~= 0), el_all(res_all ~= 0), res_all(res_all ~= 0), state.mp_congruent_nxm, state.mp_congruent_nxm, true, n_min);
else
c1_map = 0;
end
if flag_debug
clim = [-1 1] * max(-perc(1e3*(r_map(:)), 0.003),perc(1e3*(r_map(:)), 0.997));
mp_map = z_map1;
[az_grid, el_grid] = Core_Utils.getPolarGrid(360 / size(mp_map, 2), 90 / size(mp_map, 1));
az_grid = Core_Utils.deg2rad(az_grid)';
el_grid = Core_Utils.deg2rad(el_grid);
%figure; imagesc(1e3*(z_map)); colormap((Cmap.get('PuOr', 2^11))); caxis([-5 5]); colorbar;
if numel(z_map1) > 1
figure; polarImagesc(az_grid, (pi/2 - el_grid), 1e3*(z_map1)); colormap((Cmap.get('PuOr', 2^11))); caxis(clim); colorbar;
end
title((sprintf('Zernike expansion (1) of %s %s%s [mm]', marker_name, sys_c, trk_code)), 'interpreter', 'none'); drawnow
if numel(z_map2) > 1
figure; polarImagesc(az_grid, (pi/2 - el_grid), 1e3*(z_map2)); colormap((Cmap.get('PuOr', 2^11))); caxis(clim); colorbar;
end
title((sprintf('Zernike expansion (2) of %s %s%s [mm]', marker_name, sys_c, trk_code)), 'interpreter', 'none'); drawnow
if numel(z_map3) > 1
figure; polarImagesc(az_grid, (pi/2 - el_grid), 1e3*(z_map3)); colormap((Cmap.get('PuOr', 2^11))); caxis(clim); colorbar;
end
title((sprintf('Zernike expansion (3) of %s %s%s [mm]', marker_name, sys_c, trk_code)), 'interpreter', 'none'); drawnow
if numel(z_map4) > 1
figure; polarImagesc(az_grid, (pi/2 - el_grid), 1e3*(z_map4)); colormap((Cmap.get('PuOr', 2^11))); caxis(clim); colorbar;
end
title((sprintf('Zernike expansion (4) of %s %s%s [mm]', marker_name, sys_c, trk_code)), 'interpreter', 'none'); drawnow
figure; polarImagesc(az_grid, (pi/2 - el_grid), 1e3*(z_map)); colormap((Cmap.get('PuOr', 2^11))); caxis(clim); colorbar;
title((sprintf('Zernike expansion of %s %s%s [mm]', marker_name, sys_c, trk_code)), 'interpreter', 'none'); drawnow
mp_map = r_map;
[az_grid, el_grid] = Core_Utils.getPolarGrid(360 / size(mp_map, 2), 90 / size(mp_map, 1));
az_grid = Core_Utils.deg2rad(az_grid)';
el_grid = Core_Utils.deg2rad(el_grid);
figure; polarImagesc(az_grid, (pi/2 - el_grid), 1e3*r_map); colormap((Cmap.get('PuOr', 2^11))); caxis(clim); colorbar;
title((sprintf('Final map of %s %s%s [mm]', marker_name, sys_c, trk_code)), 'interpreter', 'none'); drawnow
mp_map = c_map;
[az_grid, el_grid] = Core_Utils.getPolarGrid(360 / size(mp_map, 2), 90 / size(mp_map, 1));
az_grid = Core_Utils.deg2rad(az_grid)';
el_grid = Core_Utils.deg2rad(el_grid);
figure; polarImagesc(az_grid, (pi/2 - el_grid), 1e3*(mp_map)); colormap((Cmap.get('PuOr', 2^11))); caxis(clim); colorbar;
title((sprintf('Gridded map with congruent cells of %s %s%s [mm]', marker_name, sys_c, trk_code)), 'interpreter', 'none'); drawnow
mp_map = g_map;
[az_grid, el_grid] = Core_Utils.getPolarGrid(360 / size(mp_map, 2), 90 / size(mp_map, 1));
az_grid = Core_Utils.deg2rad(az_grid)';
el_grid = Core_Utils.deg2rad(el_grid);
figure; polarImagesc(az_grid, (pi/2 - el_grid), 1e3*(mp_map)); colormap((Cmap.get('PuOr', 2^11))); caxis(clim); colorbar;
title((sprintf('Gridded map of %s %s%s [mm]', marker_name, sys_c, trk_code)), 'interpreter', 'none'); drawnow
mp_map = c1_map;
[az_grid, el_grid] = Core_Utils.getPolarGrid(360 / size(mp_map, 2), 90 / size(mp_map, 1));
az_grid = Core_Utils.deg2rad(az_grid);
el_grid = Core_Utils.deg2rad(el_grid);
figure; polarImagesc(az_grid, (pi/2 - el_grid), 1e3*(mp_map)); colormap((Cmap.get('PuOr', 2^11))); caxis(clim); colorbar;
title((sprintf('Gridded map with congruent cells [1 x 1] of %s %s%s [mm]', marker_name, sys_c, trk_code)), 'interpreter', 'none'); drawnow
mp_map = g1_map;
[az_grid, el_grid] = Core_Utils.getPolarGrid(360 / size(mp_map, 2), 90 / size(mp_map, 1));
az_grid = Core_Utils.deg2rad(az_grid)';
el_grid = Core_Utils.deg2rad(el_grid);
figure; polarImagesc(az_grid, (pi/2 - el_grid), 1e3*(mp_map)); colormap((Cmap.get('PuOr', 2^11))); caxis(clim); colorbar;
title((sprintf('Gridded map [1 x 1] of %s %s%s [mm]', marker_name, sys_c, trk_code)), 'interpreter', 'none'); drawnow
end
if ~isfield(ant_mp, sys_c)
ant_mp.(sys_c) = struct;
end
if ~isfield(ant_mp.(sys_c), trk_code)
if (numel(trk_code) == 8) && sum(trk_code(5:end) == '_') == 4
trk_code = trk_code(1:4);
end
trk_code = strrep(trk_code, ' ', '_'); % structures do not support spaces
ant_mp.(sys_c).(trk_code) = struct;
end
% Keep multiple solutions in the struct
% decide a-posteriori what it's better
% Save grids of multi-path
ant_mp.(sys_c).(trk_code).z_map = single(z_map); % Zernike map
ant_mp.(sys_c).(trk_code).r_map = single(r_map); % Zernike map + gridded residuals
ant_mp.(sys_c).(trk_code).g_map = single(g_map); % Simple Gridding of size [stk_grid_step]
ant_mp.(sys_c).(trk_code).c_map = single(c_map); % Congruent cells gridding of size [stk_grid_step]
ant_mp.(sys_c).(trk_code).g1_map = single(g1_map); % Simple Gridding of size [1x1]
ant_mp.(sys_c).(trk_code).c1_map = single(c1_map); % c1_map Congruent cells gridding of size [1x1]
% Save Zernike coefficients
%ant_mp.(sys_c).(trk_code).z_par = [z_par1 z_par2];
%ant_mp.(sys_c).(trk_code).l = l;
%ant_mp.(sys_c).(trk_code).m = m;
else
if ~data_found
log = Core.getLogger;
log.addWarning(sprintf('No %s %s found in %s for constellation %s', name, trk_code, marker_name, cc.getSysName(sys_c)));
end
end
end
end
end
% Get the time limit of the map solution
ant_mp.time_lim = this.time.getEpoch(minMax(find(id_span)));
end
end
end
% =========================================================================
%% SHOW
% =========================================================================
methods
function fh_list = showResSkyCartScatter(this, marker_name, sys_c_list, is_ph)
    % Plot residuals of the solution on cartesian axes (azimuth vs elevation)
    %
    % INPUT
    %   marker_name   receiver name used in titles / exported figure names
    %   sys_c_list    constellation id chars (default: all present in obs_code)
    %   is_ph         true = carrier-phase [mm]; false = pseudo-range [cm]
    %
    % OUTPUT
    %   fh_list       handles of the generated figures
    %
    % SYNTAX
    %   this.showResSkyCartScatter(marker_name, sys_c_list, is_ph)
    log = Core.getLogger();
    fh_list = [];
    if this.isEmpty
        log.addWarning('Residuals have not been computed');
    else
        if nargin < 3 || isempty(sys_c_list)
            sys_c_list = unique(this.obs_code(:,1));
        end
        if nargin < 4 || isempty(is_ph)
            % If there are phases use phases
            is_ph = any((serialize(this.obs_code(:,2:3:end-1))) == 'L');
        end
        if is_ph
            name = 'Carrier-phase residuals';
            search_obs = 'L';
            scale = 1e3; % m -> mm
        else
            name = 'Pseudo-ranges residuals';
            search_obs = 'C';
            scale = 1e2; % m -> cm
        end
        if nargin < 2
            marker_name = '';
        end
        cc = Core.getConstellationCollector();
        [az, el, ~, ~, go_id] = this.getAzimuthElevation();
        for sys_c = sys_c_list(:)'
            ids = find(this.obs_code(:,1) == sys_c & any((this.obs_code(:,2:3:end-1)) == search_obs, 2));
            if ~any(ids)
                log.addWarning(sprintf('No %s found in %s for constellation %s', name, marker_name, cc.getSysName(sys_c)));
            else
                % Group the residual columns by tracking code
                obs_id_num = cc.obsCode2num(this.obs_code(ids,:), zeros(size(ids, 1), 1));
                uobs_id = unique(obs_id_num);
                for t = 1 : numel(uobs_id)
                    id = ids(obs_id_num == uobs_id(t)); % tracking for the specific obs_code
                    trk_code = this.obs_code(id(1),:);
                    res = this.value(:, id) * scale;
                    res_go_id = cc.getIndex(this.obs_code(id, 1), this.prn(id));
                    fh = figure('Visible', 'off'); fh.Name = sprintf('%03d: %s Res Cart %s', fh.Number, marker_name, strtrim(trk_code)); fh.NumberTitle = 'off';
                    Core_UI.beautifyFig(fh);
                    fh_list = [fh_list; fh]; %#ok<AGROW>
                    % BUG FIX: the format had four %s but only three arguments,
                    % truncating the exported figure name; pass the tracking code too
                    fig_name = sprintf('Res_cart_%s_%s_%s_%s', marker_name, cc.getSysName(sys_c), strtrim(trk_code(2:end)), this.time.first.toString('yyyymmdd_HHMM'));
                    fh.UserData = struct('fig_name', fig_name);
                    % BUG FIX: intersect returns indices for the sorted intersection and
                    % can misalign id_sat(s) with res_go_id(s); ismember keeps the pairing
                    % (same approach as showResSkyPolarScatter)
                    [~, id_sat] = ismember(res_go_id, go_id);
                    figure(fh); % get focus;
                    hold on;
                    go_id_list = [];
                    for s = 1 : numel(res_go_id)
                        id_ok = find(res(:,s) ~= 0);
                        if any(id_ok)
                            % Draw the largest residuals last so they stay visible
                            [~, id_sort] = sort(abs(res(id_ok,s)));
                            id_ok = id_ok(id_sort);
                            line = scatter(az(id_ok, id_sat(s)), el(id_ok, id_sat(s)), 45, serialize(res(id_ok, s)), 'filled');
                            line.UserData = res_go_id(s);
                            go_id_list = [go_id_list; res_go_id(s)]; %#ok<AGROW>
                        end
                    end
                    if isempty(go_id_list)
                        delete(fh);
                    else
                        ylim([0 90]); xlim([-180 180]);
                        caxis([-1 1] * max(2, min(6*std(noZero(res),'omitnan'), max(abs(noZero(res(:)))))));
                        colormap((Cmap.get('PuOr', 2^11)));
                        fh.Color = [.95 .95 .95]; cb = colorbar(); cbt = title(cb, iif(scale == 1e2, '[cm]', '[mm]')); cbt.Parent.UserData = cbt; ax = gca; ax.Color = 'none';
                        h = title(sprintf('Satellites residuals - receiver %s - %s\\fontsize{5} \n', strrep(marker_name,'_','\_'), cc.getSysExtName(sys_c))); h.FontWeight = 'bold';
                        hl = xlabel('Azimuth [deg]'); hl.FontWeight = 'bold';
                        hl = ylabel('Elevation [deg]'); hl.FontWeight = 'bold';
                        Core_UI.addSatMenu(fh, go_id_list);
                        Core_UI.beautifyFig(fh);
                        Core_UI.addExportMenu(fh);
                        Core_UI.addBeautifyMenu(fh);
                        fh.Visible = 'on'; drawnow;
                    end
                end
            end
        end
        if isempty(fh_list)
            log.addWarning('Residuals have not been computed');
        end
    end
end
function fh_list = showResSkyPolarScatter(this, marker_name, sys_c_list, is_ph)
    % Plot residuals of the solution on polar (sky) axes
    %
    % INPUT
    %   marker_name   receiver name used in titles / exported figure names
    %   sys_c_list    constellation id chars (default: all present in obs_code)
    %   is_ph         true = carrier-phase [mm]; false = pseudo-range [cm]
    %
    % OUTPUT
    %   fh_list       handles of the generated figures
    %
    % SYNTAX
    %   this.showResSkyPolarScatter(marker_name, sys_c_list, is_ph)
    log = Core.getLogger();
    fh_list = [];
    if this.isEmpty
        log.addWarning('Residuals have not been computed');
    else
        if nargin < 3 || isempty(sys_c_list)
            sys_c_list = unique(this.obs_code(:,1));
        end
        if nargin < 4 || isempty(is_ph)
            % If there are phases use phases
            is_ph = any((serialize(this.obs_code(:,2:3:end-1))) == 'L');
        end
        if is_ph
            name = 'Carrier-phase residuals';
            search_obs = 'L';
            scale = 1e3; % m -> mm
        else
            name = 'Pseudo-ranges residuals';
            search_obs = 'C';
            scale = 1e2; % m -> cm
        end
        if nargin < 2
            marker_name = '';
        end
        cc = Core.getConstellationCollector();
        [az, el, ~, ~, go_id] = this.getAzimuthElevation();
        for sys_c = sys_c_list(:)'
            ids = find(this.obs_code(:,1) == sys_c & any((this.obs_code(:,2:3:end-1)) == search_obs, 2));
            if ~any(ids)
                log.addWarning(sprintf('No %s found in %s for constellation %s', name, marker_name, cc.getSysName(sys_c)));
            else
                % Group the residual columns by tracking code
                obs_id_num = cc.obsCode2num(this.obs_code(ids,:), zeros(size(ids, 1), 1));
                uobs_id = unique(obs_id_num);
                for t = 1 : numel(uobs_id)
                    id = ids(obs_id_num == uobs_id(t)); % tracking for the specific obs_code
                    trk_code = this.obs_code(id(1),:);
                    res = this.value(:, id) * scale;
                    res_go_id = cc.getIndex(this.obs_code(id, 1), this.prn(id));
                    fh = figure('Visible', 'off'); fh.Name = sprintf('%03d: %s Res Polar %s', fh.Number, marker_name, strtrim(trk_code)); fh.NumberTitle = 'off';
                    Core_UI.beautifyFig(fh);
                    fh_list = [fh_list; fh]; %#ok<AGROW>
                    % BUG FIX: the format had four %s but only three arguments,
                    % truncating the exported figure name; pass the tracking code too
                    fig_name = sprintf('Res_polar_%s_%s_%s_%s', marker_name, cc.getSysName(sys_c), strtrim(trk_code(2:end)), this.time.first.toString('yyyymmdd_HHMM'));
                    fh.UserData = struct('fig_name', fig_name);
                    [~, id_sat] = ismember(res_go_id, go_id);
                    figure(fh); % get focus;
                    go_id_list = [];
                    for s = 1 : numel(res_go_id)
                        id_ok = find(res(:,s) ~= 0);
                        if any(id_ok)
                            % Draw the largest residuals last so they stay visible
                            [~, id_sort] = sort(abs(res(id_ok,s)));
                            id_ok = id_ok(id_sort);
                            line = polarScatter(az(id_ok, id_sat(s))/180*pi, (90 -el(id_ok, id_sat(s)))/180*pi, 45, serialize(res(id_ok, s)), 'filled');
                            line.UserData = res_go_id(s);
                            hold on;
                            go_id_list = [go_id_list; res_go_id(s)]; %#ok<AGROW>
                        end
                    end
                    if isempty(go_id_list)
                        delete(fh);
                    else
                        caxis([-1 1] * max(2, min(6*std(noZero(res),'omitnan'), max(abs(noZero(res(:)))))));
                        colormap((Cmap.get('PuOr', 2^11)));
                        fh.Color = [.95 .95 .95]; cb = colorbar(); cbt = title(cb, iif(scale == 1e2, '[cm]', '[mm]')); cbt.Parent.UserData = cbt; ax = gca; ax.Color = 'none';
                        h = title(sprintf('Satellites residuals\nreceiver %s - %s %s\\fontsize{5} \n', strrep(marker_name,'_','\_'), cc.getSysExtName(sys_c), strtrim(trk_code(2:end)))); h.FontWeight = 'bold';
                        Core_UI.addSatMenu(fh, go_id_list);
                        Core_UI.beautifyFig(fh);
                        Core_UI.addExportMenu(fh);
                        Core_UI.addBeautifyMenu(fh);
                        fh.Visible = 'on'; drawnow;
                    end
                end
            end
        end
        if isempty(fh_list)
            log.addWarning('Residuals have not been computed');
        end
    end
end
function fh_list = showRes(this, marker_name, sys_c_list, is_ph)
% Plot residuals of the solution, one time series per satellite
%
% INPUT
%   marker_name   receiver name used in titles / exported figure names
%   sys_c_list    constellation id chars (default: all present in obs_code)
%   is_ph         true = carrier-phase [mm]; false = pseudo-range [cm]
%
% OUTPUT
%   fh_list       handles of the generated figures
%
% SYNTAX
%   fh_list = this.showRes(marker_name, sys_c_list, is_ph)
log = Core.getLogger();
fh_list = [];
if this.isEmpty
log.addWarning('Residuals have not been computed');
else
if nargin < 3 || isempty(sys_c_list)
sys_c_list = unique(this.obs_code(:,1));
end
if nargin < 4 || isempty(is_ph)
% If there are phases use phases
is_ph = any((serialize(this.obs_code(:,2:3:end-1))) == 'L');
end
if is_ph
name = 'Carrier-phase residuals';
search_obs = 'L';
scale = 1e3; % m -> mm
else
name = 'Pseudo-ranges residuals';
search_obs = 'C';
scale = 1e2; % m -> cm
end
if nargin < 2 || isempty(marker_name)
marker_name = '';
end
cc = Core.getConstellationCollector();
for sys_c = sys_c_list(:)'
% Columns of this constellation containing the searched observable
ids = find(this.obs_code(:,1) == sys_c & any((this.obs_code(:,2:3:end-1)) == search_obs, 2));
if ~any(ids)
log.addWarning(sprintf('No %s found in %s for constellation %s', name, marker_name, cc.getSysName(sys_c)));
else
% Group the residual columns by tracking code
obs_id_num = cc.obsCode2num(this.obs_code(ids,:), zeros(size(ids, 1), 1));
uobs_id = unique(obs_id_num);
for t = 1 : numel(uobs_id)
id = ids(obs_id_num == uobs_id(t)); % tracking for the specific obs_code
trk_code = this.obs_code(id(1),:);
res = zero2nan(this.value(:, id) * scale); % zero means "no residual" -> NaN
%res = Receiver_Commons.smoothMat(res, 'spline', 10);
res_go_id = cc.getIndex(this.obs_code(id, 1), this.prn(id));
fh = figure('Visible', 'off'); fh.Name = sprintf('%03d: %s Res %s %s', fh.Number, marker_name, cc.getSysName(sys_c), strtrim(trk_code(2:end))); fh.NumberTitle = 'off';
Core_UI.beautifyFig(fh); Core_UI.beautifyFig(fh); drawnow;
% NOTE(review): the 'Res_polar' prefix looks copy-pasted from the polar
% plot method - confirm before relying on the exported file name
fig_name = sprintf('Res_polar_%s_%s_%s_%s', marker_name, cc.getSysName(sys_c), strtrim(trk_code(2:end)), this.time.first.toString('yyyymmdd_HHMM'));
fh.UserData = struct('fig_name', fig_name);
time = this.time.getMatlabTime;
go_id_list = [];
sat_name_list = {};
figure(fh); % get focus;
for s = 1 : numel(res_go_id)
id_ok = ~isnan(res(:,s));
if any(id_ok)
line = Core_Utils.plotSep(time(id_ok), serialize(res(id_ok, s)), '.-', 'Color', Core_UI.getColor(s, numel(res_go_id)));
line.UserData = res_go_id(s);
hold on;
go_id_list = [go_id_list; res_go_id(s)];
sat_name_list = [sat_name_list {cc.getSatName(res_go_id(s))}];
end
end
if isempty(go_id_list)
delete(fh);
else
xlim(minMax(time));
ylim([-1 1] * max(abs(ylim))); % symmetric y-axis around zero
setTimeTicks();
h = title(sprintf('Receiver %s - %s %s\\fontsize{5} \n', strrep(marker_name,'_','\_'), cc.getSysExtName(sys_c), strtrim(trk_code(2:end)))); h.FontWeight = 'bold';
[~, icons] = legend(sat_name_list, 'Location', 'NorthEastOutside');
% Keep only the marker handles of the legend entries to restyle them
icons = icons(numel(sat_name_list) + 2 : 2 : end);
for i = 1 : numel(icons)
icons(i).MarkerSize = 18;
icons(i).LineWidth = 2;
end
ylabel(sprintf('Satellite Residuals %s', iif(scale == 1e2, '[cm]', '[mm]')));
Core_UI.addSatMenu(fh, go_id_list);
Core_UI.beautifyFig(fh);
Core_UI.addExportMenu(fh);
Core_UI.addBeautifyMenu(fh);
fh_list = [fh_list; fh]; %#ok<AGROW>
fh.Visible = 'on'; drawnow;
end
end
end
end
if isempty(fh_list)
log.addWarning('Residuals have not been computed');
end
end
end
function fh_list = showResPerSat(this, marker_name, sys_c_list, is_ph)
% Plot the residuals of phase per tracking:
% left panel = per-epoch scatter (epoch vs PRN, colored by residual),
% right panel = per-satellite mean +/- std
%
% INPUT
%   marker_name   receiver name used in titles / exported figure names
%   sys_c_list    constellation id chars (default: all present in obs_code)
%   is_ph         true = carrier-phase [mm]; false = pseudo-range [cm]
%
% OUTPUT
%   fh_list       handles of the generated figures
%
% SYNTAX
%   fh_list = this.showResPerSat(marker_name, sys_c_list, is_ph)
log = Core.getLogger();
fh_list = [];
if this.isEmpty
log.addWarning('Residuals have not been computed');
else
if nargin < 3 || isempty(sys_c_list)
sys_c_list = unique(this.obs_code(:,1));
end
if nargin < 4 || isempty(is_ph)
% If there are phases use phases
is_ph = any((serialize(this.obs_code(:,2:3:end-1))) == 'L');
end
if is_ph
name = 'Carrier-phase residuals';
search_obs = 'L';
scale = 1e3; % m -> mm
else
name = 'Pseudo-ranges residuals';
search_obs = 'C';
scale = 1e2; % m -> cm
end
if nargin < 2
marker_name = '';
end
cc = Core.getConstellationCollector();
for sys_c = sys_c_list(:)'
ids = find(this.obs_code(:,1) == sys_c & any((this.obs_code(:,2:3:end-1)) == search_obs, 2));
if ~any(ids)
log.addWarning(sprintf('No %s found in %s for constellation %s', name, marker_name, cc.getSysName(sys_c)));
else
% Group the residual columns by tracking code
obs_id_num = cc.obsCode2num(this.obs_code(ids,:), zeros(size(ids, 1), 1));
uobs_id = unique(obs_id_num);
for t = 1 : numel(uobs_id)
id = ids(obs_id_num == uobs_id(t)); % tracking for the specific obs_code
trk_code = this.obs_code(id(1),:);
res = this.value(:, id) * scale;
fh = figure('Visible', 'off'); fh.Name = sprintf('%03d: %s Res %s', fh.Number, marker_name, trk_code); fh.NumberTitle = 'off';
Core_UI.beautifyFig(fh); drawnow;
fh_list = [fh_list; fh]; %#ok<AGROW>
fig_name = sprintf('Res_Per_Sat_%s_%s_%s_%s_%s', trk_code(2:end), marker_name, cc.getSysName(sys_c), trk_code, this.time.first.toString('yyyymmdd_HHMM'));
fh.UserData = struct('fig_name', fig_name);
% ax1 (cols 1-16) = scatter panel, ax2 (cols 19-24) = mean/std panel
ax2 = subplot(1, 24, 19:24);
ax1 = subplot(1, 24, 1:16);
data_found = false;
figure(fh); % get focus;
for s = 1 : numel(id)
id_ok = find(~isnan(zero2nan(res(:,s))));
if any(id_ok)
data_found = true;
% Draw the largest residuals last so they stay visible
[~, id_sort] = sort(abs(res(id_ok, s)));
scatter(ax1, id_ok(id_sort), this.prn(id(s)) * ones(size(id_ok)), 80, (res(id_ok(id_sort), s)), 'filled');
hold(ax1, 'on');
err = std(zero2nan(res(:,s)), 'omitnan');
% Horizontal errorbar is only available from MATLAB R2018a (9.4)
if verLessThan('matlab', '9.4')
plot(ax2, mean(zero2nan(res(:,s)), 'omitnan') + [-err err], this.prn(id(s)) * [1 1], '.-', 'MarkerSize', 15, 'LineWidth', 3, 'Color', [0.6 0.6 0.6]);
plot(ax2, mean(zero2nan(res(:,s)), 'omitnan'), this.prn(id(s)), '.', 'MarkerSize', 30, 'Color', [0.6 0.6 0.6]);
else
errorbar(ax2, mean(zero2nan(res(:,s)), 'omitnan'), this.prn(id(s)), err, '.', 'horizontal', 'MarkerSize', 30, 'LineWidth', 3, 'Color', [0.6 0.6 0.6]);
end
hold(ax2, 'on');
end
end
if ~data_found
close(fh)
log = Core.getLogger;
log.addWarning(sprintf('No %s %s found in %s for constellation %s', name, trk_code, marker_name, cc.getSysName(sys_c)));
else
% Symmetric color axis around zero
cax = caxis(ax1); caxis(ax1, [-1 1] * max(abs(cax)));
colormap(Cmap.get('PuOr', 2^11));
if min(abs(cax)) > 5
setColorMap('PuOr', caxis(), 0.90, [-5 5])
end
cb = colorbar(ax1); cb.UserData = title(cb, iif(scale == 1e2, '[cm]', '[mm]')); ax1.Color = [0.9 0.9 0.9];
prn_ss = unique(cc.prn(cc.system == sys_c));
xlim(ax1, [1 size(res,1)]);
ylim(ax1, [min(prn_ss) - 1 max(prn_ss) + 1]);
h = ylabel(ax1, 'PRN'); h.FontWeight = 'bold';
ax1.YTick = prn_ss;
grid(ax1, 'on');
h = xlabel(ax1, 'epoch'); h.FontWeight = 'bold';
h = title(ax1, sprintf('%s %s %s\\fontsize{5} \n', cc.getSysName(sys_c), strrep(marker_name, '_', '\_'), trk_code(2:end)), 'interpreter', 'tex'); h.FontWeight = 'bold';
ylim(ax2, [min(prn_ss) - 1 max(prn_ss) + 1]);
xlim(ax2, [-1 1] * (max(max(abs(mean(zero2nan(res(:,:)), 'omitnan'))), ...
max(std(zero2nan(res(:,:)), 'omitnan'))) + 1));
ax2.YTick = prn_ss; ax2.Color = [1 1 1];
grid(ax2, 'on');
xlabel(ax2, sprintf('mean %s', iif(scale == 1e2, 'cm', 'mm')));
h = title(ax2, sprintf('mean\\fontsize{5} \n'), 'interpreter', 'tex'); h.FontWeight = 'bold';
linkaxes([ax1, ax2], 'y');
Core_UI.beautifyFig(fh, 'dark');
Core_UI.addBeautifyMenu(fh);
fh.Visible = 'on'; drawnow;
end
end
end
end
end
end
end
% =========================================================================
%% PRIVATE
% =========================================================================
methods (Access = private)
function init(this, type, time, value, prn, obs_code, rec_coo)
    % Reset the content of the object with a new set of residuals
    % (any previous content is discarded)
    %
    % INPUT
    %   type       residual type: 0,1,2,3 (see RES_TYPE)
    %   time       epochs as GPS_Time [1 x 1] storing n_epoch
    %   value      matrix of residuals (one column per satellite/tracking)
    %   prn        satellite PRN of each column
    %   obs_code   tracking code of each column (e.g. GL1C, GL1CL2WI, ...)
    %   rec_coo    <optional> coordinates of the receiver
    %
    % SYNTAX
    %   this.init(type, time, value, prn, obs_code, rec_coo)
    this.type     = type;
    this.time     = time;
    this.value    = value;
    this.prn      = prn;
    this.obs_code = obs_code;
    this.rec_coo  = rec_coo;
    % Drop any column whose observation code cannot be converted
    % to a valid numeric code
    invalid_code = Constellation_Collector.obsCode2num(this.obs_code, this.prn);
    this.remEntry(invalid_code == 0);
end
end
% =========================================================================
%% STATIC
% =========================================================================
methods (Static)
end
end
|
{"author": "goGPS-Project", "repo": "goGPS_MATLAB", "sha": "30644df61d2459e3347ac5f3e31b71d9f69f4b01", "save_path": "github-repos/MATLAB/goGPS-Project-goGPS_MATLAB", "path": "github-repos/MATLAB/goGPS-Project-goGPS_MATLAB/goGPS_MATLAB-30644df61d2459e3347ac5f3e31b71d9f69f4b01/source/obj/Residuals.m"}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 08:41:10 2017
@author: Yann Roussel and Tuan Bui
Edited by: Emine Topcu on Sep 2021
"""
from collections import Counter
import Const
import json
import matplotlib.image as mpimg
import numpy as np
# Import pandas for data saving
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import rcParams, animation
from numpy import zeros
from Analysis_tools import angles_
def saveToJSON(filename, content):
    """Serialize *content* as JSON and write it to *filename*.

    Uses a context manager so the file handle is closed even if
    serialization or the write raises.
    """
    with open(filename, "w") as f:
        json.dump(content, f)
def readFromJSON(filename):
    """Read *filename* and return its deserialized JSON content.

    Uses a context manager so the file handle is closed even if
    parsing raises.
    """
    with open(filename, "r") as f:
        return json.load(f)
# The keys of the LeftValues and RightValues dictionaries are the cell names, like "IC", "MN" etc
# The column names in the csv file start with Left_IC, Right_MN, etc and end with cell number
# There are no gaps between the columns of the same neuron type and side.
# For each cell type, first left cells, than right cells are saved
def saveToCSV(filename, Time, LeftValues, RightValues):
    """Save left/right cell traces to a CSV file indexed by Time.

    Column names are '<Side>_<cellname><index>' (e.g. Left_IC0, Right_MN3).
    For each cell type all left columns are written first, then all right
    columns, matching the layout that readFromCSV expects.

    Silently returns when filename or Time is missing, or when the two
    dictionaries do not describe the same cell types.

    FIXES vs previous version:
    - the right-side loop reused the LEFT group's cell count, which raised
      IndexError (or dropped columns) when the sides had different sizes;
      each side is now iterated over its own length
    - columns are collected in a list and concatenated once instead of
      calling pd.concat inside the loop (which is quadratic)
    """
    if filename is None or Time is None or \
            dict(LeftValues).keys() != dict(RightValues).keys():
        return
    columns = []
    for cellname in dict(LeftValues).keys():
        for side, groupValues in (("Left", LeftValues[cellname]),
                                  ("Right", RightValues[cellname])):
            for j, values in enumerate(groupValues):
                header_name = side + "_" + cellname + str(j)
                columns.append(pd.DataFrame(index=Time, data=values,
                                            columns=[header_name]))
    if columns:
        Sim_data = pd.concat(columns, axis=1)
    else:
        Sim_data = pd.DataFrame(index=Time)
    Sim_data.to_csv(filename, index_label='Time')
# cell_names is the list of neurons, like "IC", "MN" etc
# Assumption 1: The column names in the csv file start with Left_IC, Right_MN, etc and end with cell number
# Assumption 2: There are no gaps between the columns of the same neuron type and side
def readFromCSV(filename, cell_names):
    """Load simulation traces previously written by saveToCSV.

    Returns (Time, LeftValues, RightValues) where the dictionaries map a
    cell type name to a 2-D array of traces (one row per cell).

    Assumption 1: column names start with Left_<type> / Right_<type> and
    end with the cell number (e.g. Left_IC0).
    Assumption 2: the columns of one neuron type and side are contiguous.
    """
    if filename is None:
        return
    frame = pd.read_csv(filename)
    headers = list(frame.columns.values.tolist())
    # Transpose so each row of `matrix` is one column of the CSV
    matrix = np.transpose(np.ascontiguousarray(frame))
    Time = matrix[0]
    LeftValues = dict()
    RightValues = dict()
    for cell in cell_names:
        # Indices of the contiguous run of Left_<cell>* columns
        left_idx = [i for i, name in enumerate(headers)
                    if name.startswith("Left_" + cell)]
        if not left_idx:
            continue
        LeftValues[cell] = matrix[left_idx[0]:left_idx[-1] + 1]
        right_idx = [i for i, name in enumerate(headers)
                     if name.startswith("Right_" + cell)]
        if not right_idx:
            continue
        RightValues[cell] = matrix[right_idx[0]:right_idx[-1] + 1]
    return Time, LeftValues, RightValues
# nMuscle: the number of somites
# dt: the discretization time
def saveAnimation(filename, nMuscle, VLMuscle, VRMuscle, Time, dt):
    """Render the musculoskeletal animation and write it to *filename*.

    nMuscle is the number of somites; dt the discretization time step.
    Returns silently if any argument is missing.
    """
    required = (filename, nMuscle, VLMuscle, VRMuscle, Time, dt)
    if any(arg is None for arg in required):
        return
    # Uncomment the line below if ffmpeg.exe is not already in your system environment variable PATH
    # plt.rcParams['animation.ffmpeg_path'] = "c:/Program Files/ffmpeg/ffmpeg.exe" #Change if ffmpeg.exe is in another location
    n_points = len(Time)  # number of time points
    animation_obj = angles_(Time, nMuscle, n_points, VRMuscle, VLMuscle, dt)
    animation_obj.save(filename)  # , fps=30)#, extra_args=['-vcodec', 'libx264'])
#This function creates the multipanel animation combining musculoskeletal model with cell firing
#Assumption: leftValues and rightValues are dictionaries holding the membrane potential values of neurons and muscle cells
#The key of the dictionary is the type of neuron ("IC", "MN", etc) or "Muscle"
#leftColors and rightColors are dictionaries mapping each cell type to the trace color used for that side
def multipanel_anim(Time, nmax, leftValues, rightValues, leftColors, rightColors, dt, imgfile, title):
    """
    Build a multi-panel matplotlib animation of the swimming model.

    Layout: an optional static diagram (from ``imgfile``) on the left, one
    time-trace subplot per non-'Muscle' key of ``leftValues``/``rightValues``
    (left and right traces overlaid), and an animated chain of body segments
    whose angles are integrated from the 'Muscle' outputs via a damped
    oscillator.

    :param Time: array of time points (ms); drives frames and trace x-axes.
    :param nmax: number of time steps to integrate/animate.
    :param leftValues: dict of 2D arrays (cells x time); must contain 'Muscle'.
    :param rightValues: same keys/shapes as leftValues, right-side data.
    :param leftColors: color per non-'Muscle' key for the left traces.
    :param rightColors: color per non-'Muscle' key for the right traces.
    :param dt: integration time step.
    :param imgfile: path to a diagram image, or None for a 2-column layout.
    :param title: title shown above the animated model panel.
    :return: the FuncAnimation object (caller must keep a reference alive).
    """
    plt.rc('lines', linewidth=Const.MULTIPANEL_LINEWIDTH)
    # Change default font to Arial
    rcParams['font.sans-serif'] = "Arial"
    # Then, "ALWAYS use sans-serif fonts"
    rcParams['font.family'] = "sans-serif"
    rcParams['mathtext.fontset'] = 'custom'
    rcParams['mathtext.bf'] = 'Arial:italic:bold'
    figheight = 9
    figwidth = 15
    plotindex_angles = 133 # On a 1x3 image, 3rd position
    plotindex_diagram = 131 # On a 1x3 image, 1st position
    numofcols = 3
    firingplotinc = 2
    # Without a diagram image, fall back to a narrower 1x2 layout.
    if (imgfile is None):
        figwidth = 10
        plotindex_angles = 122 # On a 1x2 image, 2nd position
        numofcols = 2
        firingplotinc = 1
    # Declare figure and subplot
    fig = plt.figure(figsize=(figwidth, figheight))
    fig_angles = fig.add_subplot(plotindex_angles) # musculoskeletal model
    fig_sublist = dict()
    left_firing = dict()
    right_firing = dict()
    # Number of cells per population (rows of the 'Muscle' arrays).
    nMuscle = len(leftValues['Muscle'][:, 0])
    VLMuscle = leftValues['Muscle']
    VRMuscle = rightValues['Muscle']
    # One firing-trace subplot per non-'Muscle' population.
    numoffiring = len(list(filter(lambda x: x != 'Muscle', dict(leftValues).keys())))
    for k in dict(leftValues).keys():
        if k != "Muscle":
            figsub = fig.add_subplot(numoffiring, numofcols, firingplotinc)
            firingplotinc += numofcols
            fig_sublist[k] = figsub
            #Declare the various left and right traces to be plotted
            firing, = figsub.plot([], [], lw=1, color = leftColors[k])
            left_firing[k] = firing
            firing, = figsub.plot([], [], lw=1, color = rightColors[k])
            right_firing[k] = firing
    fig_angles.set_title(title)
    if imgfile is not None:
        # insert double coiling diagram
        fig_diagram = fig.add_subplot(plotindex_diagram)
        img = mpimg.imread(imgfile)
        fig_diagram.imshow(img)
        fig_diagram.axis('off')
    Muscle_angles, = fig_angles.plot([], [], 'o-', lw=3, color = 'Black')
    Muscle_angles_highlight, = fig_angles.plot([], [], 'o-', lw=3, color = 'Red')
    # Allocating arrays for velocity and position
    vel = np.zeros((nMuscle, nmax))
    pos = np.zeros((nMuscle, nmax))
    # Setting constants and initial values for vel. and pos.
    khi = 3.0 #damping cste , high khi =0.5/ low = 0.1
    w0 = 2.5 #2.5 #20Hz = 125.6
    vel0 = 0.0
    pos0 = 0.0
    #Wd = w0
    # Explicit-Euler integration of a damped oscillator per segment; the
    # left/right muscle outputs act as opposing forcing terms.
    for k in range (0,nMuscle):
        vel[k,0] = vel0 #Sets the initial velocity
        pos[k,0] = pos0 #Sets the initial position
        pos[nMuscle-1,0] = 0.0
        for i in range(1,nmax):
            vel[k,i] = -(w0**2)*pos[k,i-1]*dt + vel[k,i-1]*(1-(2*dt*khi*w0)) + 0.1*VRMuscle[k,i-1]*dt - 0.1*VLMuscle[k,i-1]*dt
            pos[k,i] = dt*vel[k,i-1] + pos[k,i-1]
    ### DYNAMIC PLOTING
    # Convert per-segment angles into cumulative angles and x/y coordinates
    # of a unit-length segment chain hanging from the origin.
    x = np.zeros((nMuscle,nmax))
    y = np.zeros((nMuscle,nmax))
    for i in range (0,nmax):
        x[0,i] = 0
        y[0,i] = 0
        pos[0,i] = 0
        for k in range (1,nMuscle):
            pos[k,i] = pos[k-1,i] + pos[k,i]
            x[k,i] = x[k-1,i] + np.sin(pos[k,i])
            y[k,i] = y[k-1,i] - np.cos(pos[k,i])
    #Declare x and y-axis limits for the various figures
    fig_angles.grid()
    fig_angles.set_ylim(-15, 5)
    fig_angles.set_xlim(-10, 10)
    for k in fig_sublist.keys():
        figsub = fig_sublist[k]
        figsub.set_ylim(-80, 20)
        figsub.set_xlim(0, nmax*dt)
    # declare time text
    time_template = 'time = %.1fms'
    time_text = fig_angles.text(0.05, 0.1, '', transform=fig_angles.transAxes)
    fig_angles.legend()
    fig_angles.set_xticks([])
    for k in fig_sublist.keys():
        figsub = fig_sublist[k]
        #Set up legend
        leg=figsub.legend(handles=[left_firing[k], right_firing[k]], labels=['L '+ k,'R '+ k], loc='upper right',
                          handlelength=Const.MULTIPANEL_LINELENGTH, fontsize=Const.MULTIPANEL_SMALLER_SIZE)
        leg.legendHandles[0].set_color(leftColors[k])
        leg.legendHandles[1].set_color(rightColors[k])
        for line in leg.get_lines():
            line.set_linewidth(Const.MULTIPANEL_LINEWIDTH)
        figsub.set_ylabel(r"$\mathbf{Vm}$" + " (mV)", fontsize= Const.MULTIPANEL_SMALL_SIZE, fontweight=Const.MULTIPANEL_FONT_STYLE) #y-axis title
        figsub.set_ylim([Const.MULTIPANEL_LOWER_Y, Const.MULTIPANEL_UPPER_Y]) #y-axis limits
        # Remove borders
        figsub.spines['top'].set_visible(False)
        figsub.spines['right'].set_visible(False)
        figsub.spines['bottom'].set_visible(False)
        figsub.spines['left'].set_visible(False)
        #Set up ticks
        figsub.tick_params(axis='both', which='both', length=0)
        for item in ([figsub.title, figsub.xaxis.label, figsub.yaxis.label] +
                     figsub.get_xticklabels() + figsub.get_yticklabels()):
            item.set_fontsize(Const.MULTIPANEL_SMALL_SIZE)
        figsub.set_yticks([i*50 + -50 for i in range(0,2)])
        figsub.set_xticks([i*5000 for i in range(0,5)])
        figsub.set_xlabel('Time (ms)', fontsize= Const.MULTIPANEL_SMALL_SIZE, fontweight='bold') #x-axis title
        figsub.set_xlim([Time[0], Time[-1]]) #x-axis limits
    #This function initializes the animation
    def init():
        # Clears all artists; no return value is needed since blit=False below.
        Muscle_angles.set_data([], [])
        for k in left_firing.keys():
            left_firing[k].set_data([], [])
            right_firing[k].set_data([], [])
        time_text.set_text('')
    #This function drives the animation by updating every time point
    def animate(i):
        thisx = [x[k,i] for k in range(nMuscle)]
        thisy = [y[k,i] for k in range(nMuscle)]
        Muscle_angles.set_data(thisx, thisy)
        # NOTE(review): scalar set_data args are deprecated in newer
        # matplotlib; segment 3 is hard-coded as the highlighted one.
        Muscle_angles_highlight.set_data(x[3,i], y[3,i])
        time_text.set_text(time_template % (Time[i]))
        for k in left_firing.keys():
            # Traces show cell index 3 of each population up to frame i.
            left_firing[k].set_data(Time[0:i], leftValues[k][3, 0:i])
            right_firing[k].set_data(Time[0:i], rightValues[k][3, 0:i])
        return Muscle_angles, left_firing, right_firing, time_text
    # Every 10th time point becomes a frame; blit=False, so the return values
    # of init/animate are not used for redrawing.
    ani = animation.FuncAnimation(fig, animate, np.arange(1, len(Time), 10),
                                  interval=10, blit=False, init_func=init)
    plt.show()
    return ani
def plotProgress(tstart, tend, timeArray, leftValues, rightValues, onSamePlot = False, width = 15, height = 5, colorMode = 0):
    """
    Plot left/right membrane potentials of every cell type over a time window.

    leftValues and rightValues are dictionaries mapping a cell-type name to a
    2D array (cells x time) of membrane potentials; both must have identical
    key sets.

    :param tstart: index of the first time point to plot.
    :param tend: index one past the last time point to plot.
    :param timeArray: full array of time values (ms).
    :param onSamePlot: if True, left and right traces share one column.
        With a single cell type and onSamePlot=False, left is plotted on the
        top row and right on the bottom row.
    :param width: figure width; ``height`` is the height of each subplot row.
    :param colorMode: 0 -> fixed Left/Right colormaps are used;
        1 -> neuron-type based colormaps from Const.*_COLOR_MAPS are used.
    :return: (fig, ax) on success, or 0 when the two dicts' keys differ.
    """
    if dict(leftValues).keys() != dict(rightValues).keys():
        return 0
    numofplots = len(dict(leftValues).keys())
    x_axis = timeArray[tstart: tend]
    numofrows = numofplots
    numofcols = 1 if onSamePlot else 2
    if numofplots == 1 and not onSamePlot:
        # Single cell type on two stacked rows: left on top, right below.
        numofrows = 2
        numofcols = 1
    fig, ax = plt.subplots(numofrows, numofcols, sharex=True, figsize=(width, height * numofrows))
    rowind = 0
    # FIX: the inner loops previously reused the name `k` for the cell index,
    # shadowing the outer cell-type key; renamed to `cell` (no behavior change).
    for k in dict(leftValues).keys():
        nCells = len(leftValues[k][:, 0])
        listLeft = leftValues[k]
        listRight = rightValues[k]
        colorLeft = Const.IPSI_COLOR_MAPS[k] if colorMode == 1 else Const.IPSI_COLOR_MAPS['Left']
        colorRight = Const.CONTRA_COLOR_MAPS[k] if colorMode == 1 else Const.CONTRA_COLOR_MAPS['Right']
        if numofplots == 1 and onSamePlot:
            ax.plot([0], [0], c=colorLeft(0.5))  # dummy trace to seed the axes
            ax.set_title(k)
            for cell in range (0, nCells):
                ax.plot(x_axis, listLeft[cell,tstart: tend], c=colorLeft((cell+1)/nCells)) # adding a color gradiant, darker color -> rostrally located
                ax.plot(x_axis, listRight[cell,tstart: tend], c=colorRight((cell+1)/nCells))
        elif numofplots == 1:
            ax[0].plot([0], [0], c=colorLeft(0.5))
            ax[0].set_title(k)
            for cell in range (0, nCells):
                ax[0].plot(x_axis, listLeft[cell,tstart: tend], c=colorLeft((cell+1)/nCells)) # adding a color gradiant, darker color -> rostrally located
                ax[1].plot(x_axis, listRight[cell,tstart: tend], c=colorRight((cell+1)/nCells))
        elif numofcols == 1:
            # NOTE(review): the dummy trace always targets ax[0] even for later
            # rows — possibly meant ax[rowind]; behavior preserved as-is.
            ax[0].plot([0], [0], c=colorLeft(0.5))
            ax[rowind].set_title(k)
            for cell in range (0, nCells):
                ax[rowind].plot(x_axis, listLeft[cell,tstart: tend], c=colorLeft((cell+1)/nCells)) # adding a color gradiant, darker color -> rostrally located
                ax[rowind].plot(x_axis, listRight[cell,tstart: tend], c=colorRight((cell+1)/nCells))
            rowind += 1
        else:
            colRight = 0 if onSamePlot else 1
            ax[0, 0].plot([0], [0], c=colorLeft(0.5))
            ax[rowind, 0].set_title(k)
            for cell in range (0, nCells):
                ax[rowind, 0].plot(x_axis, listLeft[cell,tstart: tend], c=colorLeft((cell+1)/nCells)) # adding a color gradiant, darker color -> rostrally located
                ax[rowind, colRight].plot(x_axis, listRight[cell,tstart: tend], c=colorRight((cell+1)/nCells))
            rowind += 1
    plt.xlabel('Time (ms)')
    # NOTE(review): assumes tend < len(timeArray); tend == len(timeArray)
    # would raise IndexError here — confirm against callers.
    plt.xlim([timeArray[tstart], timeArray[tend] + 1])
    plt.show()
    return fig, ax
#import subprocess #Required to play sound in Mac - uncomment appropriate lines in PlaySound() function
import winsound
def PlaySound():
    """Play a 440 Hz, 1 s notification beep (Windows-only: uses winsound)."""
    #settings for Windows
    duration = 1000  # milliseconds
    freq = 440  # Hz
    winsound.Beep(freq, duration)
    #subprocess.call(['afplay', 'Sound.wav']) #Put a wave file of your choice for the sound
|
{"hexsha": "5b083aa8f51809567ad97b0924bb9bbe878a002d", "size": 14699, "ext": "py", "lang": "Python", "max_stars_repo_path": "Zebrafish spinal locomotor circuit/Version 2/Util.py", "max_stars_repo_name": "Bui-lab/Code", "max_stars_repo_head_hexsha": "6ce5972a4bd0c059ab167522ab1d945f3b0f5707", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Zebrafish spinal locomotor circuit/Version 2/Util.py", "max_issues_repo_name": "Bui-lab/Code", "max_issues_repo_head_hexsha": "6ce5972a4bd0c059ab167522ab1d945f3b0f5707", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Zebrafish spinal locomotor circuit/Version 2/Util.py", "max_forks_repo_name": "Bui-lab/Code", "max_forks_repo_head_hexsha": "6ce5972a4bd0c059ab167522ab1d945f3b0f5707", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-25T08:14:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T12:56:17.000Z", "avg_line_length": 40.2712328767, "max_line_length": 156, "alphanum_fraction": 0.6347370569, "include": true, "reason": "import numpy,from numpy", "num_tokens": 4053}
|
#!/usr/bin/env python
#import numpy as np
import boards
import rules
def print_initial_state(grid):
    """Dump the starting board and its statistics, framed by separator lines."""
    separator = "-" * 50
    print(separator)
    print("initial board:")
    grid.print_found()
    grid.print_statistics()
    print(separator)
def print_end_state(grid):
    """Dump the remaining candidates and the final board, framed by separators."""
    separator = "-" * 50
    print(separator)
    print("remaining candidates:")
    grid.print_candidates()
    print("end board:")
    grid.print_found()
    print(separator)
def apply_rule_pretty(grid, rule):
    """Announce *rule* by name, run it on *grid*, then print grid statistics."""
    rule_name = rule.__name__
    print("apply_rule: %s ..." % rule_name)
    rule(grid)
    grid.print_statistics()
def get_all_rules():
    """Return the solving rules, in the order they should be applied."""
    return [
        rules.apply_rule_one_number_per_unit,
        rules.apply_rule_sole_candidate_in_unit_clears_others,
        rules.apply_rule_all_instances_in_box_on_one_line_clears_rest_of_line,
        rules.apply_rule_tuple_candidates_repeated_n_times_contain_no_other_candidates,
    ]
def solve(grid):
    """Repeatedly apply every rule to *grid* until no candidate is eliminated."""
    rule_list = get_all_rules()
    iteration_count = 0
    while True:
        candidates_before = grid.get_candidate_count()
        iteration_count += 1
        for rule in rule_list:
            apply_rule_pretty(grid, rule)
        print('Iteration %i done.' % iteration_count)
        print("-" * 50)
        print("")
        # Fixed point reached: this pass removed nothing, so stop.
        if candidates_before == grid.get_candidate_count():
            break
def solve_pretty(grid):
    """Solve *grid*, printing its state before and after the run."""
    print_initial_state(grid)
    solve(grid)
    print_end_state(grid)
if __name__ == "__main__":
    # Entry point: build the expert-level board and solve it with full logging.
    grid = boards.generate_board_expert_2()
    solve_pretty(grid)
|
{"hexsha": "50cd08a3524cf56cee70dec23c3e0437ed3a5574", "size": 1518, "ext": "py", "lang": "Python", "max_stars_repo_path": "solver.py", "max_stars_repo_name": "christiana/sudoku_solver", "max_stars_repo_head_hexsha": "5066cbba736dc07f465f20e6509542ac729651ad", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "solver.py", "max_issues_repo_name": "christiana/sudoku_solver", "max_issues_repo_head_hexsha": "5066cbba736dc07f465f20e6509542ac729651ad", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "solver.py", "max_forks_repo_name": "christiana/sudoku_solver", "max_forks_repo_head_hexsha": "5066cbba736dc07f465f20e6509542ac729651ad", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8852459016, "max_line_length": 90, "alphanum_fraction": 0.6469038208, "include": true, "reason": "import numpy", "num_tokens": 336}
|
from pathlib import Path
from functools import partial
import joblib
import torch
import numpy as np
import pandas as pd
import sentencepiece as spm
from tqdm import tqdm
from sklearn.model_selection import StratifiedShuffleSplit
from helperbot import setup_differential_learning_rates, freeze_layers
from helperbot.lr_scheduler import TriangularLR
from dekisugi.dataset import TextDataset
from dekisugi.sequence_model import get_sequence_model, SequenceRegressorBot
from dekisugi.sampler import SortishSampler, SortSampler
from dekisugi.dataloader import DataLoader
# Token-id constants (per their names: unknown and beginning-of-sequence);
# not referenced elsewhere in this visible chunk.
UNK = 0
BEG = 1
# Embedding dimensionality used by the sequence models below.
EMB_DIM = 500
# Directory for model checkpoints and training logs.
MODEL_PATH = Path("data/cache/douban_dk_seg/")
# Flag baked into data/model file names below — presumably selects the
# word-segmented preprocessing variant; confirm against the data-prep scripts.
WORD_SEG = True
# Training device; the scripts below assume a CUDA GPU is available.
DEVICE = "cuda:0"
def truncate_tokens(tokens, max_len=100):
    """
    Clip every token sequence in *tokens* to at most *max_len* leading tokens.

    :param tokens: iterable of token-id sequences (e.g. lists of ints).
    :param max_len: maximum number of tokens to keep per sequence.
    :return: object-dtype numpy array of the clipped sequences.
    """
    # FIX: dtype=object is required — the clipped sequences are generally
    # ragged, and NumPy >= 1.24 raises instead of implicitly building an
    # object array from ragged nested sequences.
    return np.array([seq[:max_len] for seq in tokens], dtype=object)
def filter_entries(tokens, df_ratings, min_len=1, max_len=1000):
    """
    Drop token sequences (and their rating rows) outside [min_len, max_len].

    :param tokens: object array of token-id sequences.
    :param df_ratings: DataFrame aligned row-for-row with *tokens*.
    :return: (filtered tokens, filtered copy of df_ratings).
    """
    keep = np.array([min_len <= len(seq) <= max_len for seq in tokens],
                    dtype=bool)
    return tokens[keep], df_ratings.loc[keep].copy()
def prepare_dataset():
    """
    Load (or build and cache) tokenized Douban ratings and split them.

    Tokenizes review comments with a SentencePiece model, filters and
    truncates the token sequences, rescales ratings from [1, 5] to [-1, 1],
    then makes a stratified 60/20/20 train/validation/test split.

    :return: (train, validation, test) TextDataset triple.
    """
    cache_path = Path(f"/tmp/douban_sentiment_tokens_{WORD_SEG}.pkl")
    if cache_path.exists():
        tokens, df_ratings = joblib.load(cache_path)
    else:
        sp = spm.SentencePieceProcessor()
        sp.Load(f"data/rating_unigram_{WORD_SEG}.model")
        df_ratings = pd.read_csv(f"data/ratings_prepared_{WORD_SEG}.csv")
        tokens = []
        for _, row in tqdm(df_ratings.iterrows(), total=df_ratings.shape[0]):
            tokens.append(sp.EncodeAsIds(row["comment"]))
        assert len(tokens) == df_ratings.shape[0]
        # Drop empty comments, then cap every sequence at 100 tokens.
        tokens, df_ratings = filter_entries(
            np.array(tokens), df_ratings, min_len=1)
        tokens = truncate_tokens(tokens, max_len=100)
        joblib.dump([tokens, df_ratings], cache_path)
    # df_ratings["rating"] = (df_ratings["rating"] - 1).astype("float32")
    # df_ratings["rating"] = df_ratings["rating"].astype("float32")
    # Rescale ratings 1..5 to [-1, 1] for regression.
    df_ratings["rating"] = ((df_ratings["rating"] - 3) / 2).astype("float32")
    # Split the dataset
    sss = StratifiedShuffleSplit(n_splits=1, test_size=0.4, random_state=888)
    train_idx, test_idx = next(sss.split(df_ratings, df_ratings.rating))
    tokens_train, tokens_test = tokens[train_idx], tokens[test_idx]
    y_train = df_ratings.iloc[train_idx][["rating"]].copy().values
    y_test = df_ratings.iloc[test_idx][["rating"]].copy().values
    # Split the held-out 40% in half: 20% validation, 20% test.
    sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=888)
    val_idx, test_idx = next(sss.split(y_test, y_test))
    tokens_valid, tokens_test = tokens_test[val_idx], tokens_test[test_idx]
    y_valid, y_test = y_test[val_idx], y_test[test_idx]
    del df_ratings
    trn_ds = TextDataset(tokens_train, y_train)
    val_ds = TextDataset(tokens_valid, y_valid)
    tst_ds = TextDataset(tokens_test, y_test)
    print(len(trn_ds), len(val_ds), len(tst_ds))
    return trn_ds, val_ds, tst_ds
def regressor():
    """
    Fine-tune a pretrained LSTM language-model encoder for rating regression.

    Uses gradual unfreezing: first trains the head alone, then head plus the
    embedding group, then all layer groups — each phase with its own
    triangular learning-rate schedule.  Finally evaluates the best checkpoint
    on the test split and logs the test loss.
    """
    batch_size = 32
    trn_ds, val_ds, tst_ds = prepare_dataset()
    model = get_sequence_model(
        7500,
        emb_sz=500,
        pad_idx=2,
        dropoute=0,
        rnn_hid=500,
        rnn_layers=3,
        bidir=False,
        dropouth=0.2,
        dropouti=0.2,
        wdrop=0.05,
        qrnn=False,
        fcn_layers=[50, 1],
        fcn_dropouts=[0.1, 0.1]
    )
    model = model.to(DEVICE)
    # Sortish sampling groups similar-length sequences to reduce padding.
    trn_samp = SortishSampler(
        trn_ds.x, key=lambda x: len(trn_ds.x[x]), bs=batch_size)
    val_samp = SortSampler(
        val_ds.x, key=lambda x: len(val_ds.x[x]))
    trn_loader = DataLoader(
        trn_ds, batch_size, transpose=True,
        num_workers=1, pad_idx=2, sampler=trn_samp)
    val_loader = DataLoader(
        val_ds, batch_size * 2, transpose=True,
        num_workers=1, pad_idx=2, sampler=val_samp)
    optimizer_constructor = partial(
        torch.optim.Adam, betas=(0.7, 0.99))
    # Discriminative learning rates: earlier layer groups get smaller LRs.
    optimizer = setup_differential_learning_rates(
        optimizer_constructor,
        model,
        (np.array([2**-4, 2**-3, 2**-2, 2**-1, 1]) * 2e-4).tolist()
    )
    bot = SequenceRegressorBot(
        model, trn_loader, val_loader,
        optimizer=optimizer,
        clip_grad=25.,
        log_dir=MODEL_PATH / "logs_reg",
        checkpoint_dir=MODEL_PATH,
        echo=True,
        use_tensorboard=False,
        avg_window=len(trn_loader) // 10 * 2,
        device=DEVICE
    )
    # Initialize the encoder from the pretrained language-model weights.
    bot.load_encoder(
        prefix="lstm_500x3_emb_7500x500_")
    bot.logger.info(str(model))
    # Phase 1: train only the last group (the regression head).
    freeze_layers(model.get_layer_groups(), [True] * 4 + [False])
    bot.count_model_parameters()
    n_steps = len(trn_loader) * 1
    bot.train(
        n_steps,
        log_interval=len(trn_loader) // 10,
        snapshot_interval=len(trn_loader) // 10 * 5,
        min_improv=1e-3,
        scheduler=TriangularLR(
            optimizer, max_mul=8, ratio=2,
            steps_per_cycle=n_steps)
    )
    bot.remove_checkpoints(keep=1)
    bot.best_performers = []
    # Phase 2: train the last group and the first group
    freeze_layers(model.get_layer_groups(), [False] + [True] * 3 + [False])
    bot.count_model_parameters()
    n_steps = len(trn_loader) * 2
    bot.step = 0
    bot.train(
        n_steps,
        log_interval=len(trn_loader) // 10,
        snapshot_interval=len(trn_loader) // 10 * 5,
        min_improv=1e-3,
        scheduler=TriangularLR(
            optimizer, max_mul=8, ratio=2,
            steps_per_cycle=n_steps)
    )
    bot.remove_checkpoints(keep=1)
    bot.best_performers = []
    # Phase 3: train all groups
    freeze_layers(model.get_layer_groups(), [False] * 5)
    bot.count_model_parameters()
    n_steps = len(trn_loader) * 10
    bot.step = 0
    bot.train(
        n_steps,
        log_interval=len(trn_loader) // 10,
        snapshot_interval=len(trn_loader) // 10 * 5,
        min_improv=1e-3,
        scheduler=TriangularLR(
            optimizer, max_mul=64, ratio=8,
            steps_per_cycle=n_steps)
    )
    bot.remove_checkpoints(keep=1)
    # Evaluate the best surviving checkpoint on the test split.
    bot.load_model(bot.best_performers[0])
    tst_samp = SortSampler(
        tst_ds.x, key=lambda x: len(tst_ds.x[x]))
    tst_loader = DataLoader(
        tst_ds, batch_size * 2, transpose=True,
        num_workers=1, pad_idx=2, sampler=tst_samp)
    test_loss = bot.eval(tst_loader)
    bot.logger.info("Test loss: %.4f", test_loss)
def regressor_from_scratch():
    """
    Train the same rating-regression model end-to-end with random init.

    Baseline counterpart to regressor(): no pretrained encoder, no layer
    freezing — a single training run over all parameters, then evaluation of
    the best checkpoint on the test split.
    """
    model_path = Path("data/cache/douban_dk_from_scratch/")
    batch_size = 32
    trn_ds, val_ds, tst_ds = prepare_dataset()
    model = get_sequence_model(
        7500,
        emb_sz=500,
        pad_idx=2,
        dropoute=0,
        rnn_hid=500,
        rnn_layers=3,
        bidir=False,
        dropouth=0.2,
        dropouti=0.2,
        wdrop=0.05,
        qrnn=False,
        fcn_layers=[50, 1],
        fcn_dropouts=[0.1, 0.1]
    )
    model = model.to(DEVICE)
    # Sortish sampling groups similar-length sequences to reduce padding.
    trn_samp = SortishSampler(
        trn_ds.x, key=lambda x: len(trn_ds.x[x]), bs=batch_size)
    val_samp = SortSampler(
        val_ds.x, key=lambda x: len(val_ds.x[x]))
    trn_loader = DataLoader(
        trn_ds, batch_size, transpose=True,
        num_workers=1, pad_idx=2, sampler=trn_samp)
    val_loader = DataLoader(
        val_ds, batch_size * 2, transpose=True,
        num_workers=1, pad_idx=2, sampler=val_samp)
    optimizer = torch.optim.Adam(
        model.parameters(), lr=2e-4, betas=(0.7, 0.99))
    bot = SequenceRegressorBot(
        model, trn_loader, val_loader,
        optimizer=optimizer,
        clip_grad=25.,
        log_dir=model_path / "logs_reg",
        checkpoint_dir=model_path,
        echo=True,
        use_tensorboard=False,
        avg_window=len(trn_loader) // 10 * 2,
        device=DEVICE
    )
    bot.logger.info(str(model))
    # Train all groups
    n_steps = len(trn_loader) * 15
    bot.train(
        n_steps,
        log_interval=len(trn_loader) // 10,
        snapshot_interval=len(trn_loader) // 10 * 5,
        min_improv=1e-3,
        scheduler=TriangularLR(
            optimizer, max_mul=64, ratio=8,
            steps_per_cycle=n_steps)
    )
    bot.remove_checkpoints(keep=1)
    # Evaluate the best surviving checkpoint on the test split.
    bot.load_model(bot.best_performers[0])
    tst_samp = SortSampler(
        tst_ds.x, key=lambda x: len(tst_ds.x[x]))
    tst_loader = DataLoader(
        tst_ds, batch_size * 2, transpose=True,
        num_workers=1, pad_idx=2, sampler=tst_samp)
    test_loss = bot.eval(tst_loader)
    bot.logger.info("Test loss: %.4f", test_loss)
if __name__ == "__main__":
    # Default: fine-tune from the pretrained encoder; switch to the commented
    # call to train end-to-end from scratch instead.
    regressor()
    # regressor_from_scratch()
|
{"hexsha": "7ccdc14dfcfb7feb20e9841253e85275da2a3e81", "size": 8503, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/sentiment_analysis/douban_sentiment.py", "max_stars_repo_name": "ceshine/modern_chinese_nlp", "max_stars_repo_head_hexsha": "e1d5941f381431ac114f440472d3e0f976437777", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2018-08-21T05:31:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-30T02:00:05.000Z", "max_issues_repo_path": "scripts/sentiment_analysis/douban_sentiment.py", "max_issues_repo_name": "ceshine/modern_chinese_nlp", "max_issues_repo_head_hexsha": "e1d5941f381431ac114f440472d3e0f976437777", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/sentiment_analysis/douban_sentiment.py", "max_forks_repo_name": "ceshine/modern_chinese_nlp", "max_forks_repo_head_hexsha": "e1d5941f381431ac114f440472d3e0f976437777", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-08-21T09:04:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-28T06:25:28.000Z", "avg_line_length": 31.8464419476, "max_line_length": 77, "alphanum_fraction": 0.6445960249, "include": true, "reason": "import numpy", "num_tokens": 2346}
|
[STATEMENT]
lemma hequiv_names: \<open>hequiv H i j \<Longrightarrow> i \<in> names H\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hequiv H i j \<Longrightarrow> i \<in> names H
[PROOF STEP]
unfolding hequiv_def names_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Nom j at i in' H \<Longrightarrow> i \<in> {uu_. \<exists>ps i. uu_ = i \<and> (ps, i) \<in> H}
[PROOF STEP]
by blast
|
{"llama_tokens": 170, "file": "Hybrid_Logic_Hybrid_Logic", "length": 2}
|
import os.path as osp
PATH_TO_ROOT = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..', '..')
import sys
sys.path.append(PATH_TO_ROOT)
import pickle
import time
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import StepLR
from models.pointnet.src.utils import get_id, save_to_log, get_comment, get_data_path, data
from models.pointnet.src.models.pointnet2_segmentation import Net
from models.pointnet.main.pointnet2_segmentation import train, test, perform_final_testing
# Global variables
# The 18 segmentation label ids used throughout this script.
all_labels = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8 , 9, 10, 11, 12, 13, 14, 15, 16, 17])
# Points per mesh at each decimation level ('90' presumably unknown/variable).
num_points_dict = {'original': 32492, '50': 16247, '90': None}
# NOTE(review): PATH_TO_ROOT is redefined here with a trailing '/' (the earlier
# definition near the imports lacks it); downstream string concatenation relies
# on the slash-terminated form.
PATH_TO_ROOT = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..', '..') + '/'
PATH_TO_POINTNET = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..', '..', 'models', 'pointnet') + '/'
if __name__ == '__main__':
    # End-to-end PointNet++ segmentation experiment: load split indices,
    # build datasets/loaders, train for up to 149 epochs with per-epoch
    # validation, checkpoint the best models by accuracy and mean IoU,
    # then run final testing on both checkpoints.
    num_workers = 2
    local_features = ['corrected_thickness', 'curvature', 'sulcal_depth']
    global_features = None
    #################################################
    ########### EXPERIMENT DESCRIPTION ##############
    #################################################
    recording = True
    REPROCESS = True
    data_nativeness = 'native'
    data_compression = "5k"
    data_type = 'white'
    hemisphere = 'both'
    # data_nativeness = 'native'
    # data_compression = "20k"
    # data_type = 'white'
    # hemisphere = 'left'
    additional_comment = 'Baseline PointNet++ segmentn to compare with Randla-net segmentn'
    experiment_name = f'{data_nativeness}_{data_type}_{data_compression}_{hemisphere}_{additional_comment}'
    #################################################
    ############ EXPERIMENT DESCRIPTION #############
    #################################################
    # 1. Model Parameters
    ################################################
    lr = 0.001
    batch_size = 2
    gamma = 0.9875
    target_class = ""
    task = 'segmentation'
    ################################################
    ###### SPECIFY PATH TO YOUR DATA_SPLIT PICKLE #####
    # 2. Get the data splits indices
    with open(PATH_TO_POINTNET + 'src/names.pk', 'rb') as f:
        indices = pickle.load(f)
    # 4. Get experiment description
    comment = get_comment(data_nativeness, data_compression, data_type, hemisphere,
                          lr, batch_size, local_features, global_features, target_class)
    print('=' * 50 + '\n' + '=' * 50)
    print(comment)
    print('=' * 50 + '\n' + '=' * 50)
    ##### SPECIFY YOUR DATA_FOLDER AND FILES_ENDING #####
    # 5. Perform data processing.
    data_folder, files_ending = get_data_path(data_nativeness, data_compression, data_type, hemisphere=hemisphere)
    train_dataset, test_dataset, validation_dataset, train_loader, test_loader, val_loader, num_labels = data(
        data_folder,
        files_ending,
        data_type,
        target_class,
        task,
        REPROCESS,
        local_features,
        global_features,
        indices,
        batch_size,
        num_workers=2,
        data_nativeness=data_nativeness,
        data_compression=data_compression,
        hemisphere=hemisphere
    )
    # 6. Getting the number of features to adapt the architecture
    # NOTE(review): bare except silently falls back to 0 local features —
    # a narrower exception type would avoid masking unrelated errors.
    try:
        num_local_features = train_dataset[0].x.size(1)
    except:
        num_local_features = 0
    print(f'Unique labels found: {num_labels}')
    if not torch.cuda.is_available():
        print('You are running on a CPU.')
    # 7. Create the model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = Net(num_labels, num_local_features, num_global_features=None).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=gamma)
    # NOTE(review): `id` shadows the builtin of the same name for the rest of
    # the script.
    id = '0'
    writer = None
    if recording:
        # 9. Save to log_record.txt
        log_descr = get_comment(data_nativeness, data_compression, data_type, hemisphere,
                                lr, batch_size, local_features, global_features, target_class,
                                log_descr=True)
        save_to_log(log_descr, prefix=experiment_name)
        id = str(int(get_id(prefix=experiment_name)) - 1)
        writer = SummaryWriter(PATH_TO_POINTNET + f'new_runs/{experiment_name}ID' + id)
        writer.add_text(f'{experiment_name} ID #{id}', comment)
    best_val_acc = 0
    best_val_iou = 0
    best_model_acc = 0
    best_model_iou = 0
    # 10. ====== TRAINING LOOP ======
    for epoch in range(1, 150):
        # 1. Start recording time
        start = time.time()
        # 2. Make a training step
        train(model, train_loader, epoch, device, optimizer, num_labels, writer, recording=recording)
        if recording:
            writer.add_scalar('Training Time/epoch', time.time() - start, epoch)
        # 3. Validate the performance after each epoch
        loss, acc, iou, mean_iou = test(model, val_loader, comment + 'val' + str(epoch), device, num_labels, writer, epoch=epoch, id=id,
                                        experiment_name=experiment_name, recording=recording)
        print('Epoch: {:02d}, Val Loss/nll: {}, Val Acc: {:.4f}'.format(epoch, loss, acc))
        scheduler.step()
        # 4. Record validation metrics in Tensorboard
        if recording:
            # By Accuracy
            if acc > best_val_acc:
                best_val_acc = acc
                best_model_acc = epoch
                torch.save(model.state_dict(),
                           PATH_TO_POINTNET + f'experiment_data/new/{experiment_name}-{id}/' + 'best_acc_model' + '.pt')
            # By Mean IoU
            if mean_iou > best_val_iou:
                best_val_iou = mean_iou
                best_model_iou = epoch
                torch.save(model.state_dict(),
                           PATH_TO_POINTNET + f'experiment_data/new/{experiment_name}-{id}/' + 'best_iou_model' + '.pt')
            writer.add_scalar('Loss/val_nll', loss, epoch)
            writer.add_scalar('Accuracy/val', acc, epoch)
            for label, value in enumerate(iou):
                writer.add_scalar('IoU{}/validation'.format(label), value, epoch)
                print('\t\tValidation Label {}: {}'.format(label, value))
        print('=' * 60)
    if recording:
        # save the last model
        torch.save(model.state_dict(), PATH_TO_POINTNET + f'experiment_data/new/{experiment_name}-{id}/' + 'last_model' + '.pt')
        # Final evaluation on the test set with both best checkpoints.
        loss_acc, acc_acc, iou_acc, mean_iou_acc, loss_iou, acc_iou, iou_iou, mean_iou_iou = perform_final_testing(model,
                                                                                                                   writer,
                                                                                                                   test_loader,
                                                                                                                   experiment_name,
                                                                                                                   comment,
                                                                                                                   id,
                                                                                                                   num_labels,
                                                                                                                   device,
                                                                                                                   best_model_acc,
                                                                                                                   best_model_iou,
                                                                                                                   recording=recording)
|
{"hexsha": "cbbecb922401274e31912c3ad07cb8e5c2af2c9e", "size": 9507, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/segmentation/PointNet/run_pointnet_segmentation.py", "max_stars_repo_name": "devskroy1/ForkedBrainSurfaceTK", "max_stars_repo_head_hexsha": "774035ab5eae6c0a40eb96eab43d489d3f722eaa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/segmentation/PointNet/run_pointnet_segmentation.py", "max_issues_repo_name": "devskroy1/ForkedBrainSurfaceTK", "max_issues_repo_head_hexsha": "774035ab5eae6c0a40eb96eab43d489d3f722eaa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/segmentation/PointNet/run_pointnet_segmentation.py", "max_forks_repo_name": "devskroy1/ForkedBrainSurfaceTK", "max_forks_repo_head_hexsha": "774035ab5eae6c0a40eb96eab43d489d3f722eaa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.0643564356, "max_line_length": 146, "alphanum_fraction": 0.4299989481, "include": true, "reason": "import numpy", "num_tokens": 1655}
|
import csv
import re
import math
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append("..") # adds higher directory to python modules path
from LoaderPACK.Loader import testload_5min
import torch
# Build the 5-minute-window validation dataset from the pre-cut model data.
# NOTE(review): the path is machine-specific, and size/series_dict must match
# the files produced by the data-cutting step — confirm before reuse.
val_load_file = testload_5min(path = "/home/tyson/data_cutoff/val_model_data",
                              series_dict = 'val_series_length.pickle',
                              size = (28, 22, 549200),
                              device = "cpu")
# Wrap in a standard torch DataLoader, one series per batch, shuffled.
val_loader = torch.utils.data.DataLoader(val_load_file,
                                         batch_size=1,
                                         shuffle=True,
                                         num_workers=0)
# Smoke test: iterate the whole loader and print every batch.
for i in val_loader:
    print(i)
|
{"hexsha": "0eb4a6950491efd4f36f490786fae22677822ca1", "size": 775, "ext": "py", "lang": "Python", "max_stars_repo_path": "Testing loader/.ipynb_checkpoints/test-loader-checkpoint.py", "max_stars_repo_name": "marctimjen/Artefact-Rejection", "max_stars_repo_head_hexsha": "4e850d172fa8c08ba1776c46e760484673d7e7ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Testing loader/.ipynb_checkpoints/test-loader-checkpoint.py", "max_issues_repo_name": "marctimjen/Artefact-Rejection", "max_issues_repo_head_hexsha": "4e850d172fa8c08ba1776c46e760484673d7e7ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Testing loader/.ipynb_checkpoints/test-loader-checkpoint.py", "max_forks_repo_name": "marctimjen/Artefact-Rejection", "max_forks_repo_head_hexsha": "4e850d172fa8c08ba1776c46e760484673d7e7ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2916666667, "max_line_length": 78, "alphanum_fraction": 0.52, "include": true, "reason": "import numpy", "num_tokens": 141}
|
import os
from abc import ABC, abstractmethod
import numpy as np
import torch
import torch.optim as opt
from sklearn import metrics
from torch import nn
from classifiers import Net
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Classifier(nn.Module):
    """
    Base trainer/wrapper around a neural classifier.

    Subclasses implement batch_loss() and get_data_loader(); fit() then runs
    a generic training loop (Adam + exponential LR decay by default).

    NOTE(review): methods are marked @abstractmethod, but nn.Module does not
    use ABCMeta, so instantiation is not actually blocked — calling the
    abstract methods raises NotImplementedError at runtime instead.
    """
    def __init__(self, model=Net, neg_label=0):
        """
        :param model: neural classifier
        :param neg_label: label of negative class (normally 0, for SVM-like models -1)
        """
        super().__init__()
        self.model = model()  # instantiate the underlying network
        self.optimizers = None  # populated by get_optimizers() in fit()
        self.schedulers = None  # populated by get_schedulers() in fit()
        self.neg_label = neg_label
        self.val_data = None  # not used in this class; available to subclasses
        self.epoch = -1  # incremented at the start of each training epoch
    def forward(self, x):
        """Delegate the forward pass to the wrapped model."""
        return self.model(x)
    @abstractmethod
    def batch_loss(self, batch):
        """
        :return: loss on a batch
        """
        raise NotImplementedError()
    def preprocess_data(self, data):
        """
        Preprocessing hook applied to the training data before fit();
        identity by default.
        :param data: input data for preprocessing
        :return: preprocessed data
        """
        return data
    def postprocess(self):
        """
        Postprocessing hook called once at the end of fit(); no-op by default.
        """
        pass
    @abstractmethod
    def get_data_loader(self, train_data, batch_size):
        """
        :return: iterator over data
        """
        raise NotImplementedError()
    def _decision_function(self, x):
        """
        :param x: input
        :return: decision function for x
        """
        return self.model(x)
    def decision_function_loader(self, dataloader):
        """
        Calculate the decision function over batched data.
        Each batch is expected to be a (x, y, _) triple.
        :return: (predictions, true labels) as flat numpy arrays
        """
        y_pred = np.array([])
        y_true = np.array([])
        for (x, y, _) in dataloader:
            res = self._decision_function(x.to(device).float())
            y_pred = np.hstack((y_pred, res.squeeze().detach().cpu().numpy()))
            y_true = np.hstack((y_true, y.squeeze().detach().cpu().numpy()))
        return y_pred, y_true
    def decision_function(self, data):
        """
        Calculate the decision function on a non-batched dataset by wrapping
        it in a DataLoader first.
        :return: (predictions, true labels) as flat numpy arrays
        """
        dataloader = torch.utils.data.DataLoader(data,
                                                 batch_size=512,
                                                 shuffle=False)
        return self.decision_function_loader(dataloader)
    def get_optimizers(self, lr, **kwargs):
        # Default: a single Adam optimizer over all model parameters.
        self.optimizers = [opt.Adam(self.model.parameters(), lr=lr)]
    def optimizers_step(self, loss):
        # Zero all optimizers, backprop once, then step them all.
        for optim in self.optimizers:
            optim.zero_grad()
        loss.backward()
        for optim in self.optimizers:
            optim.step()
    def get_schedulers(self, gamma, **kwargs):
        # Exponential decay attached to the first optimizer only.
        self.schedulers = [opt.lr_scheduler.ExponentialLR(self.optimizers[0], gamma=gamma)]
    def schedulers_step(self):
        for scheduler in self.schedulers:
            scheduler.step()
    def fit(self,
            train_data,
            num_epochs=50,
            lr=1e-3,
            batch_size=512,
            gamma=0.96,
            verbose=False,
            test_data=None,
            **kwargs):
        """
        Generic training loop: preprocess data, build loaders/optimizers,
        then run num_epochs epochs of batch_loss + optimizer steps.

        NOTE(review): some subclasses' get_data_loader returns a one-shot
        zip iterator — epochs after the first would then see no batches;
        confirm whether the loader is re-created per epoch elsewhere.
        NOTE(review): test_loader is created under `if test_data and verbose`
        but used under `if test_data is not None and verbose` — a non-None
        but falsy test_data would raise NameError; the progress line is only
        printed when test_data is supplied.
        """
        self.to(device)
        train_data = self.preprocess_data(train_data)
        data_loader = self.get_data_loader(train_data, batch_size)
        if test_data and verbose:
            test_loader = torch.utils.data.DataLoader(test_data,
                                                      batch_size=512,
                                                      shuffle=False)
        self.get_optimizers(lr, **kwargs)
        self.get_schedulers(gamma, **kwargs)
        for epoch in range(num_epochs):
            self.epoch += 1
            self.train()
            running_loss = 0.0
            for batch in data_loader:
                loss = self.batch_loss(batch)
                self.optimizers_step(loss)
                running_loss += loss.item()
            self.schedulers_step()
            if verbose:
                print_line = f'[{epoch}/{num_epochs}]: loss={running_loss:.4f}'
            if test_data is not None and verbose:
                self.eval()
                y_pred, y_true = self.decision_function_loader(test_loader)
                auc = metrics.roc_auc_score(y_true, y_pred)
                print_line += f" auc={auc:.4f}"
                # if 'acc' in kwargs:
                #     acc = metrics.roc_auc_score(y_true, y_pred > 0.5)
                #     print_line += f" acc={acc:.4f}"
                print(print_line)
        self.postprocess()
    def save(self, path):
        """Save the wrapped model's weights to <path>/model.pt."""
        torch.save(self.model.state_dict(), os.path.join(path, 'model.pt'))
    def load(self, path):
        """Load the wrapped model's weights from <path>/model.pt."""
        self.model.load_state_dict(torch.load(os.path.join(path, 'model.pt')))
class OCModel(Classifier, ABC):
    """
    One-class models: trained on the positively-labelled samples only.
    """

    def get_data_loader(self, train_data, batch_size):
        """Return a shuffled loader over the positive (label == 1) subset."""
        positives = train_data.lab_data(lab=1)
        return torch.utils.data.DataLoader(positives,
                                           batch_size=batch_size,
                                           shuffle=True)
class PUModelEqualBatch(Classifier, ABC):
    """
    Class for PU models which sample data from labeled and unlabeled samples
    in equal batches.

    get_data_loader() yields (labeled_batch, unlabeled_batch) pairs; iteration
    stops with the shorter of the two loaders.
    """

    class _PairedLoader:
        """Re-iterable pairing of two DataLoaders.

        The original code returned a bare ``zip(...)``, which is a single-use
        iterator: ``fit()`` builds the loader once and iterates it every
        epoch, so every epoch after the first silently trained on nothing.
        Wrapping the zip in ``__iter__`` produces a fresh (re-shuffled)
        stream per epoch.
        """
        def __init__(self, lab_loader, unl_loader):
            self._lab_loader = lab_loader
            self._unl_loader = unl_loader

        def __iter__(self):
            return zip(self._lab_loader, self._unl_loader)

    def get_data_loader(self, train_data, batch_size):
        lab_data = train_data.lab_data(lab=1)
        unl_data = train_data.lab_data(lab=self.neg_label)
        lab_loader = torch.utils.data.DataLoader(lab_data,
                                                 batch_size=batch_size,
                                                 shuffle=True)
        unl_loader = torch.utils.data.DataLoader(unl_data,
                                                 batch_size=batch_size,
                                                 shuffle=True)
        return self._PairedLoader(lab_loader, unl_loader)
class PUModelRandomBatch(Classifier, ABC):
    """
    Class for PU models which sample data from all available data:
    every batch mixes labeled and unlabeled examples at random.
    """
    def get_data_loader(self, train_data, batch_size):
        # A single shuffled loader over the full (labeled + unlabeled) dataset.
        return torch.utils.data.DataLoader(train_data,
                                           batch_size=batch_size,
                                           shuffle=True)
|
{"hexsha": "30c67cedabba0c6d6f2623f024540297443b81e2", "size": 6369, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/base_models.py", "max_stars_repo_name": "jbr-ai-labs/PU-OC", "max_stars_repo_head_hexsha": "4030a67353594d864a2a9482dd3f5d206cbd28ae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/base_models.py", "max_issues_repo_name": "jbr-ai-labs/PU-OC", "max_issues_repo_head_hexsha": "4030a67353594d864a2a9482dd3f5d206cbd28ae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/base_models.py", "max_forks_repo_name": "jbr-ai-labs/PU-OC", "max_forks_repo_head_hexsha": "4030a67353594d864a2a9482dd3f5d206cbd28ae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.761682243, "max_line_length": 93, "alphanum_fraction": 0.5437274297, "include": true, "reason": "import numpy", "num_tokens": 1265}
|
// $Id$
/***********************************************************************
Moses - factored phrase-based, hierarchical and syntactic language decoder
Copyright (C) 2009 Hieu Hoang
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
***********************************************************************/
#include <algorithm>
#include <iostream>
#include "moses/Util.h"
#include "TargetPhrase.h"
#include "OnDiskWrapper.h"
#include "util/exception.hh"
#include <boost/algorithm/string.hpp>
using namespace std;
namespace OnDiskPt
{
// Construct an empty target phrase with room for `numScores` scores.
TargetPhrase::TargetPhrase(size_t numScores)
  :m_scores(numScores)
{
}
// Copy constructor: copies the word sequence (base Phrase) and the scores.
TargetPhrase::TargetPhrase(const TargetPhrase &copy)
  :Phrase(copy)
  ,m_scores(copy.m_scores)
{
}
// Nothing to release: members clean up themselves.
TargetPhrase::~TargetPhrase()
{
}
// Store the left-hand-side non-terminal. By convention it is appended as the
// phrase's last word (WriteToMemory relies on "lhs as last words").
void TargetPhrase::SetLHS(WordPtr lhs)
{
  AddWord(lhs);
}
// Parse a single "src-tgt" alignment point and append it to m_align.
// Throws if the string does not contain exactly two '-'-separated indices.
void TargetPhrase::Create1AlignFromString(const std::string &align1Str)
{
  vector<size_t> alignPoints;
  Moses::Tokenize<size_t>(alignPoints, align1Str, "-");
  UTIL_THROW_IF2(alignPoints.size() != 2, "Incorrectly formatted word alignment: " << align1Str);
  m_align.push_back(pair<size_t, size_t>(alignPoints[0], alignPoints[1]) );
}
void TargetPhrase::CreateAlignFromString(const std::string &alignStr)
{
vector<std::string> alignPairs;
boost::split(alignPairs, alignStr, boost::is_any_of("\t "));
for (size_t i = 0; i < alignPairs.size(); ++i) {
vector<size_t> alignPoints;
Moses::Tokenize<size_t>(alignPoints, alignPairs[i], "-");
m_align.push_back(pair<size_t, size_t>(alignPoints[0], alignPoints[1]) );
}
}
// Set score `ind`; the slot must already exist (sized in the constructor).
void TargetPhrase::SetScore(float score, size_t ind)
{
  assert(ind < m_scores.size());
  m_scores[ind] = score;
}
// Comparator ordering alignment pairs by source index only (stable w.r.t.
// target index is not guaranteed by std::sort).
class AlignOrderer
{
public:
  bool operator()(const AlignPair &a, const AlignPair &b) const {
    return a.first < b.first;
  }
};
// Sort the alignment pairs by source position before serialisation.
void TargetPhrase::SortAlign()
{
  std::sort(m_align.begin(), m_align.end(), AlignOrderer());
}
// Serialise the phrase into a malloc'd buffer; the caller frees it (Save()).
// Layout: [uint64 numTargetWords][target words..., LHS stored last]
//         [uint64 numSourceWords][source words...]
// On return, memUsed holds the number of bytes written.
char *TargetPhrase::WriteToMemory(OnDiskWrapper &onDiskWrapper, size_t &memUsed) const
{
  size_t phraseSize = GetSize();
  size_t targetWordSize = onDiskWrapper.GetTargetWordSize();
  const PhrasePtr sp = GetSourcePhrase();
  size_t spSize = sp->GetSize();
  size_t sourceWordSize = onDiskWrapper.GetSourceWordSize();
  size_t memNeeded = sizeof(uint64_t) // num of words
                     + targetWordSize * phraseSize // actual words. lhs as last words
                     + sizeof(uint64_t) // num source words
                     + sourceWordSize * spSize; // actual source words
  memUsed = 0;
  // NOTE(review): malloc result is not checked for NULL.
  uint64_t *mem = (uint64_t*) malloc(memNeeded);
  // write size
  mem[0] = phraseSize;
  memUsed += sizeof(uint64_t);
  // write each word
  for (size_t pos = 0; pos < phraseSize; ++pos) {
    const Word &word = GetWord(pos);
    char *currPtr = (char*)mem + memUsed;
    memUsed += word.WriteToMemory((char*) currPtr);
  }
  // write size of source phrase and all source words
  char *currPtr = (char*)mem + memUsed;
  uint64_t *memTmp = (uint64_t*) currPtr;
  memTmp[0] = spSize;
  memUsed += sizeof(uint64_t);
  for (size_t pos = 0; pos < spSize; ++pos) {
    const Word &word = sp->GetWord(pos);
    char *currPtr = (char*)mem + memUsed; // shadows the outer currPtr (harmless)
    memUsed += word.WriteToMemory((char*) currPtr);
  }
  // Every byte of the buffer must have been filled exactly once.
  assert(memUsed == memNeeded);
  return (char *) mem;
}
// Append the serialised phrase to the target-index file and remember its
// offset in m_filePos.
// Fix: the original read tellp() BEFORE seekp(0, ios::end), so m_filePos was
// stale whenever the put pointer was not already at EOF; the only guard was
// an assert that vanishes in NDEBUG builds. Seek first, then record.
void TargetPhrase::Save(OnDiskWrapper &onDiskWrapper)
{
  // save in target ind
  size_t memUsed;
  char *mem = WriteToMemory(onDiskWrapper, memUsed);

  std::fstream &file = onDiskWrapper.GetFileTargetInd();
  file.seekp(0, ios::end);
  uint64_t startPos = file.tellp();
  file.write(mem, memUsed);
#ifndef NDEBUG
  uint64_t endPos = file.tellp();
  assert(startPos + memUsed == endPos);
#endif
  m_filePos = startPos;
  free(mem);
}
// Serialise the non-word payload into a malloc'd buffer (caller frees).
// Layout: [uint64 phrase-file-pos][alignments][float scores]
//         [uint64 len + bytes sparse features][uint64 len + bytes property]
char *TargetPhrase::WriteOtherInfoToMemory(OnDiskWrapper &onDiskWrapper, size_t &memUsed) const
{
  // allocate mem
  size_t numScores = onDiskWrapper.GetNumScores()
                     ,numAlign = GetAlign().size();
  size_t sparseFeatureSize = m_sparseFeatures.size();
  size_t propSize = m_property.size();
  size_t memNeeded = sizeof(uint64_t) // file pos (phrase id)
                     + sizeof(uint64_t) + 2 * sizeof(uint64_t) * numAlign // align
                     + sizeof(float) * numScores // scores
                     + sizeof(uint64_t) + sparseFeatureSize // sparse features string
                     + sizeof(uint64_t) + propSize; // property string
  char *mem = (char*) malloc(memNeeded);
  //memset(mem, 0, memNeeded);
  memUsed = 0;
  // phrase id
  memcpy(mem, &m_filePos, sizeof(uint64_t));
  memUsed += sizeof(uint64_t);
  // align
  size_t tmp = WriteAlignToMemory(mem + memUsed);
  memUsed += tmp;
  // scores
  memUsed += WriteScoresToMemory(mem + memUsed);
  // sparse features
  memUsed += WriteStringToMemory(mem + memUsed, m_sparseFeatures);
  // property string
  memUsed += WriteStringToMemory(mem + memUsed, m_property);
  //DebugMem(mem, memNeeded);
  // Every byte must have been written exactly once.
  assert(memNeeded == memUsed);
  return mem;
}
// Write `str` at `mem` as [uint64_t length][raw bytes, no terminator].
// Returns the number of bytes written.
size_t TargetPhrase::WriteStringToMemory(char *mem, const std::string &str) const
{
  const uint64_t strSize = str.size();
  memcpy(mem, &strSize, sizeof(strSize));
  size_t memUsed = sizeof(strSize);

  memcpy(mem + memUsed, str.c_str(), strSize);
  memUsed += strSize;

  return memUsed;
}
// Write the alignments at `mem` as [uint64 count][src,tgt as uint64 pairs].
// Returns the number of bytes written.
size_t TargetPhrase::WriteAlignToMemory(char *mem) const
{
  size_t memUsed = 0;
  // num of alignments
  uint64_t numAlign = m_align.size();
  memcpy(mem, &numAlign, sizeof(numAlign));
  memUsed += sizeof(numAlign);
  // actual alignments
  AlignType::const_iterator iter;
  for (iter = m_align.begin(); iter != m_align.end(); ++iter) {
    const AlignPair &alignPair = *iter;
    memcpy(mem + memUsed, &alignPair.first, sizeof(alignPair.first));
    memUsed += sizeof(alignPair.first);
    memcpy(mem + memUsed, &alignPair.second, sizeof(alignPair.second));
    memUsed += sizeof(alignPair.second);
  }
  return memUsed;
}
// Write all scores at `mem` as consecutive raw floats, in order.
// Returns the number of bytes written.
size_t TargetPhrase::WriteScoresToMemory(char *mem) const
{
  float *out = (float*) mem;
  const size_t numScores = m_scores.size();
  for (size_t i = 0; i != numScores; ++i) {
    out[i] = m_scores[i];
  }
  return numScores * sizeof(float);
}
// Deserialise the non-word payload written by WriteOtherInfoToMemory():
// phrase file position, alignments, scores, sparse-feature and property
// strings. Returns the number of bytes consumed. `filePos` must equal the
// stream's current get position (checked by assert).
uint64_t TargetPhrase::ReadOtherInfoFromFile(uint64_t filePos, std::fstream &fileTPColl)
{
  assert(filePos == (uint64_t)fileTPColl.tellg());
  uint64_t memUsed = 0;
  fileTPColl.read((char*) &m_filePos, sizeof(uint64_t));
  memUsed += sizeof(uint64_t);
  assert(m_filePos != 0);
  memUsed += ReadAlignFromFile(fileTPColl);
  // Cross-check bookkeeping against the actual stream position.
  assert((memUsed + filePos) == (uint64_t)fileTPColl.tellg());
  memUsed += ReadScoresFromFile(fileTPColl);
  assert((memUsed + filePos) == (uint64_t)fileTPColl.tellg());
  // sparse features
  memUsed += ReadStringFromFile(fileTPColl, m_sparseFeatures);
  // properties
  memUsed += ReadStringFromFile(fileTPColl, m_property);
  return memUsed;
}
// Read a [uint64 length][bytes] string written by WriteStringToMemory().
// Returns the number of bytes consumed; leaves `outStr` untouched when the
// stored length is zero (matches the previous behaviour).
// Fix: the original built the result via `string(mem)`, which stops at the
// first NUL byte -- an embedded '\0' in the stored payload silently
// truncated the string. Assigning with an explicit length is NUL-safe.
uint64_t TargetPhrase::ReadStringFromFile(std::fstream &fileTPColl, std::string &outStr)
{
  uint64_t bytesRead = 0;

  uint64_t strSize;
  fileTPColl.read((char*) &strSize, sizeof(uint64_t));
  bytesRead += sizeof(uint64_t);

  if (strSize) {
    std::vector<char> buf(strSize);
    fileTPColl.read(&buf[0], strSize);
    outStr.assign(&buf[0], strSize);
    bytesRead += strSize;
  }

  return bytesRead;
}
// Deserialise the word sequence written by WriteToMemory(): seeks to the
// phrase's stored file position, reads the target words (LHS last), then the
// source phrase. Returns the number of bytes consumed after the seek.
uint64_t TargetPhrase::ReadFromFile(std::fstream &fileTP)
{
  uint64_t bytesRead = 0;
  fileTP.seekg(m_filePos);
  uint64_t numWords;
  fileTP.read((char*) &numWords, sizeof(uint64_t));
  bytesRead += sizeof(uint64_t);
  for (size_t ind = 0; ind < numWords; ++ind) {
    WordPtr word(new Word());
    bytesRead += word->ReadFromFile(fileTP);
    AddWord(word);
  }
  // read source words
  uint64_t numSourceWords;
  fileTP.read((char*) &numSourceWords, sizeof(uint64_t));
  bytesRead += sizeof(uint64_t);
  PhrasePtr sp(new SourcePhrase());
  for (size_t ind = 0; ind < numSourceWords; ++ind) {
    WordPtr word( new Word());
    bytesRead += word->ReadFromFile(fileTP);
    sp->AddWord(word);
  }
  SetSourcePhrase(sp);
  return bytesRead;
}
uint64_t TargetPhrase::ReadAlignFromFile(std::fstream &fileTPColl)
{
uint64_t bytesRead = 0;
uint64_t numAlign;
fileTPColl.read((char*) &numAlign, sizeof(uint64_t));
bytesRead += sizeof(uint64_t);
for (size_t ind = 0; ind < numAlign; ++ind) {
AlignPair alignPair;
fileTPColl.read((char*) &alignPair.first, sizeof(uint64_t));
fileTPColl.read((char*) &alignPair.second, sizeof(uint64_t));
m_align.push_back(alignPair);
bytesRead += sizeof(uint64_t) * 2;
}
return bytesRead;
}
// Read m_scores.size() raw floats, then convert each stored probability with
// TransformScore (log) and clip with FloorScore. Returns bytes consumed.
uint64_t TargetPhrase::ReadScoresFromFile(std::fstream &fileTPColl)
{
  UTIL_THROW_IF2(m_scores.size() == 0, "Translation rules must must have some scores");
  uint64_t bytesRead = 0;
  for (size_t ind = 0; ind < m_scores.size(); ++ind) {
    fileTPColl.read((char*) &m_scores[ind], sizeof(float));
    bytesRead += sizeof(float);
  }
  std::transform(m_scores.begin(),m_scores.end(),m_scores.begin(), Moses::TransformScore);
  std::transform(m_scores.begin(),m_scores.end(),m_scores.begin(), Moses::FloorScore);
  return bytesRead;
}
// Dump the phrase (words via the base class), then the alignment pairs as
// "src-tgt", then the scores -- all space-separated, for debugging only.
void TargetPhrase::DebugPrint(ostream &out, const Vocab &vocab) const
{
  Phrase::DebugPrint(out, vocab);

  AlignType::const_iterator alignIter;
  for (alignIter = m_align.begin(); alignIter != m_align.end(); ++alignIter) {
    out << alignIter->first << "-" << alignIter->second << " ";
  }
  out << ", ";

  for (size_t scoreInd = 0; scoreInd != m_scores.size(); ++scoreInd) {
    out << m_scores[scoreInd] << " ";
  }
}
// Stream operator: base-phrase words, then "src-tgt" alignment pairs, then
// scores, comma/space separated (mirrors DebugPrint without the vocab).
std::ostream& operator<<(std::ostream &out, const TargetPhrase &phrase)
{
  out << (const Phrase&) phrase << ", " ;
  for (size_t ind = 0; ind < phrase.m_align.size(); ++ind) {
    const AlignPair &alignPair = phrase.m_align[ind];
    out << alignPair.first << "-" << alignPair.second << " ";
  }
  out << ", ";
  for (size_t ind = 0; ind < phrase.m_scores.size(); ++ind) {
    out << phrase.m_scores[ind] << " ";
  }
  return out;
}
} // namespace
|
{"hexsha": "c232c9bc37d5eba61e2fd51944c13fbc7441710a", "size": 10654, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "model/mosesdecoder/OnDiskPt/TargetPhrase.cpp", "max_stars_repo_name": "saeedesm/UNMT_AH", "max_stars_repo_head_hexsha": "cc171bf66933b5c0ad8a0ab87e57f7364312a7df", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2019-12-02T14:53:29.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-12T18:01:49.000Z", "max_issues_repo_path": "model/mosesdecoder/OnDiskPt/TargetPhrase.cpp", "max_issues_repo_name": "saeedesm/UNMT_AH", "max_issues_repo_head_hexsha": "cc171bf66933b5c0ad8a0ab87e57f7364312a7df", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/mosesdecoder/OnDiskPt/TargetPhrase.cpp", "max_forks_repo_name": "saeedesm/UNMT_AH", "max_forks_repo_head_hexsha": "cc171bf66933b5c0ad8a0ab87e57f7364312a7df", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-11-26T05:27:16.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-17T01:53:43.000Z", "avg_line_length": 26.4367245658, "max_line_length": 97, "alphanum_fraction": 0.6699831049, "num_tokens": 2900}
|
import numpy as np
import cv2  # NOTE(review): imported but never used in this script.
import csv

# Hyper Parameters
# L: number of distinct players a passing pattern must involve.
L = 2

# Read the csv file.
# NOTE(review): the file handle from open() is never closed; acceptable for a
# one-shot analysis script.
csv_reader = csv.reader(open('./data/passingevents.csv'))
# The first match.(First match and self passing only.)
# Keep only rows whose column 1 (team id) is 'Huskies'.
passing_list = [row for row in csv_reader if row[1] == 'Huskies']
passing_cnt = len(passing_list)
# Analyzing the data.
# Assign each player (column 2 = passer, column 3 = receiver) a dense id.
player_map = {}
player_list = []
for row in passing_list:
    if player_map.get(row[2]) is None:
        player_map[row[2]] = len(player_list)
        player_list.append(row[2])
    if player_map.get(row[3]) is None:
        player_map[row[3]] = len(player_list)
        player_list.append(row[3])
player_cnt = len(player_list)
pass_data = []
# Count the passing cnt.
# Build uninterrupted passing chains: extend the last chain while the next
# pass starts with the player who just received the ball.
for row in passing_list:
    if len(pass_data) == 0 or pass_data[-1][-1] != row[2]:
        pass_data.append([row[2], row[3]])
    else:
        pass_data[-1].append(row[3])
# Find the most frequent methods.
# Encode every window as '-'-joined first-appearance indices so patterns are
# compared up to renaming of the players (e.g. A->B->A and C->D->C both map
# to "0-1-0").
pass_map = {}
for long_pass in pass_data:
    if len(long_pass) < L:
        continue
    for i in range(len(long_pass) - L + 1):
        cur_cnt = 0
        cur_pass = {}
        cur_ans = ''
        for j in range(i, len(long_pass) - L + 1):
            if cur_pass.get(long_pass[j]) is None:
                cur_ans += str(cur_cnt) + '-'
                cur_pass[long_pass[j]] = cur_cnt
                cur_cnt += 1
            else:
                cur_ans += str(cur_pass[long_pass[j]]) + '-'
            # NOTE(review): j comes from range(i, len(long_pass) - L + 1), so
            # j == len(long_pass) - L + 1 can never hold -- this branch looks
            # unreachable; confirm the intended loop/end condition.
            if j == len(long_pass) - L + 1 and cur_cnt == L:
                cur_ans = cur_ans[:-1]
                if pass_map.get(cur_ans) is None:
                    pass_map[cur_ans] = 1
                else:
                    pass_map[cur_ans] += 1
            elif cur_cnt > L:
                # Drop the last 3 characters (final index + two separators)
                # once more than L distinct players appear, then record the
                # pattern. NOTE(review): assumes single-digit indices -- with
                # 10+ players in one chain the slice is wrong; confirm.
                cur_ans = cur_ans[:-3]
                if pass_map.get(cur_ans) is None:
                    pass_map[cur_ans] = 1
                else:
                    pass_map[cur_ans] += 1
                break
# Print the patterns sorted by frequency, most common first.
sorted_list = sorted(pass_map.items(), key=lambda x: x[1], reverse=True)
for item in sorted_list:
    print(item)
|
{"hexsha": "4aa7761e7d91bac1af65faad25f65dca49a5a970", "size": 2024, "ext": "py", "lang": "Python", "max_stars_repo_path": "problem1_solve1.py", "max_stars_repo_name": "ligongzzz/MCM2020_Code", "max_stars_repo_head_hexsha": "7e5e6f9a6b09b3eb7e21774535c977ba6e974d79", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "problem1_solve1.py", "max_issues_repo_name": "ligongzzz/MCM2020_Code", "max_issues_repo_head_hexsha": "7e5e6f9a6b09b3eb7e21774535c977ba6e974d79", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "problem1_solve1.py", "max_forks_repo_name": "ligongzzz/MCM2020_Code", "max_forks_repo_head_hexsha": "7e5e6f9a6b09b3eb7e21774535c977ba6e974d79", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1111111111, "max_line_length": 72, "alphanum_fraction": 0.5538537549, "include": true, "reason": "import numpy", "num_tokens": 522}
|
import requests
from clint.textui import progress
import urllib2
from bs4 import BeautifulSoup
import threading
import pafy
import time
import sys
from pathlib import Path
import os
import vlc
import nltk
from nltk.stem.lancaster import LancasterStemmer
import json
import numpy as np
from pydub import AudioSegment
from pydub.utils import which
import scraper as sc
from datetime import datetime, time
from difflib import SequenceMatcher
import jellyfish
# Use pafy's pure-python backend (no external youtube-dl dependency).
pafy.BACK_END = "internal"
# Default download root: the directory containing this script.
DOWNLOAD_PATH = os.path.dirname(os.path.realpath(__file__))
class DownloadThread(threading.Thread):
    # Thin Thread wrapper that runs `target(*args)` on its own thread.
    # NOTE(review): Python-2 only -- on Python 3, Thread.__init__ manages
    # `self._target` itself and would clobber the value stored here (the
    # `print` statements below confirm this file targets Python 2).
    def __init__(self, target, *args):
        self._target = target
        self._args = args
        threading.Thread.__init__(self)
    def run(self):
        print "thread" + str(self._args) + "started"
        self._target(*self._args)
def is_restricted(url):
    """Return True when *url* is a YouTube link (must go through pafy,
    not a plain HTTP GET).

    The substring test is crude but matches the original behaviour.
    """
    return "youtube" in url
def download_restricted(url,pref_format="mp3"):
    # Download a YouTube video into ./youtube/<category>/, optionally
    # converting to mp3. For "Music" videos the user may choose audio-only.
    print "youtube download started"
    video = pafy.new(url, gdata=True)
    video_format = video.getbest()
    path = Path(DOWNLOAD_PATH+"/"+"youtube")
    if Path.exists(path):
        category_path = DOWNLOAD_PATH+"/"+"youtube/"+str(video.category)
        if str(video.category) == "Music":
            print "looks like you are trying to download a music video"
            user_resp = raw_input("would you like to download only audio file ? (y or n) \n")
            if user_resp == "y":
                print "finding the best audio quality..."
                # Prefer an mp3 stream when one is available.
                if video.getbestaudio(preftype="mp3") == None:
                    video_format = video.getbestaudio()
                else:
                    video_format = video.getbestaudio(preftype="mp3")
                print video_format
            else:
                pass
        #print category_path
        if Path.exists(Path(category_path)):
            file = video_format.download(filepath = DOWNLOAD_PATH+"/"+"youtube/"+str(video.category))
            if pref_format=="mp3":
                # NOTE(review): split("webm") (no dot) leaves a trailing '.' in
                # the name ("x..mp3"); download_playlist's last branch uses
                # ".webm" -- confirm which is intended.
                AudioSegment.from_file(file).export(file.split("webm")[0]+".mp3", format="mp3")
        else:
            os.makedirs(category_path)
            file = video_format.download(filepath = DOWNLOAD_PATH+"/"+"youtube/"+str(video.category))
            if pref_format=="mp3":
                AudioSegment.from_file(file).export(file.split("webm")[0]+".mp3", format="mp3")
    else:
        # NOTE(review): category_path is only assigned in the branch above --
        # this path raises NameError before makedirs runs; confirm.
        os.makedirs(category_path)
        file = video_format.download(filepath = DOWNLOAD_PATH+"/"+"youtube/"+str(video.category))
        if pref_format=="mp3":
            AudioSegment.from_file(file).export(file.split("webm")[0]+".mp3", format="mp3")
    os.remove(file)
def is_downloadable(url):
    """
    Does the url contain a downloadable resource

    A HEAD request is issued; anything whose content-type mentions
    text or html is treated as a web page, not a file.
    """
    h = requests.head(url, allow_redirects=True)
    header = h.headers
    # NOTE(review): content_type may be None when the server omits the
    # header; .lower() below would then raise AttributeError.
    content_type = header.get('content-type')
    print content_type
    if 'text' in content_type.lower():
        return False
    if 'html' in content_type.lower():
        return False
    return True
def download(url,download_path=DOWNLOAD_PATH):
    # Entry point: dispatches to playlist / direct-file / youtube download.
    # "list" in the url is taken to mean a YouTube playlist.
    if "list" in url:
        download_playlist(url)
    else:
        if is_downloadable(url):
            filename = url.split("/")[-1]
            # NOTE(review): `start` is computed but never used -- resuming is
            # handled by resume_download(), not here.
            start = get_current_size(filename)
            r = requests.get(url, stream=True)
            file_path = download_path+"/"+filename
            with open(file_path, 'wb') as f:
                total_length = int(r.headers.get('content-length'))
                # Stream in 1 KiB chunks with a progress bar.
                for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
                    if chunk:
                        f.write(chunk)
                        f.flush()
        else:
            if is_restricted(url):
                print "looks like a youtube url"
                download_restricted(url)
            else:
                print "the link is not downloadable"
## download season wise takes season url and starting episode as argument
def download_1_deep(pageurl,start=1):
    # Scrape every anchor on the page (from index `start` on) and download
    # each linked file on its own thread.
    if is_restricted(pageurl):
        print "dowloading single youtube video"
        currentthread = DownloadThread(download_restricted,pageurl)
        currentthread.start()
        currentthread.join()
    else:
        page = urllib2.urlopen(pageurl).read()
        soup = BeautifulSoup(page)
        soup.prettify()
        threads = []
        for anchor in soup.findAll('a', href=True)[start::]:
            # NOTE(review): naive concatenation -- assumes relative hrefs.
            downloadableurl = pageurl + anchor['href']
            currentthread = DownloadThread(download,downloadableurl)
            threads.append(currentthread)
        # Start all downloads in parallel, then wait for completion.
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
def recursive_download(url,download_path= DOWNLOAD_PATH,in_parts = 0):
    # Walk a directory-listing style site recursively, mirroring its layout
    # under download_path. in_parts=1 uses the 12-way parallel downloader.
    # NOTE(review): default `download_path=DOWNLOAD_PATH` is evaluated once at
    # import time (fine here since DOWNLOAD_PATH never changes).
    if is_downloadable(url):
        print "downloading " + url
        if in_parts == 1:
            fast_multi_thread_download(url)
        else:
            currentthread = DownloadThread(download,url,download_path)
            currentthread.start()
            currentthread.join()
    else:
        if is_restricted(url):
            print "dowloading single youtube video"
            currentthread = DownloadThread(download_restricted,url)
            currentthread.start()
            currentthread.join()
        else:
            page = urllib2.urlopen(url).read()
            soup = BeautifulSoup(page)
            soup.prettify()
            for anchor in soup.findAll('a', href=True)[1::]:
                path = Path(download_path+"/"+str(anchor.text))
                if is_downloadable(url + anchor['href']):
                    recursive_download(url + anchor['href'],download_path,in_parts)
                else:
                    # Sub-directory: mirror it locally, then recurse into it.
                    if Path.exists(path):
                        recursive_download(url + anchor['href'],download_path+"/"+str(anchor.text),in_parts)
                    else:
                        os.mkdir(download_path+"/"+str(anchor.text))
                        recursive_download(url + anchor['href'],download_path+"/"+str(anchor.text),in_parts)
def exp(url):
    # NOTE(review): debug stub -- prints a marker and ignores `url` entirely;
    # appears to be dead code.
    print "sa"
def get_current_size(filename):
    """Return the on-disk size (bytes) of *filename* under DOWNLOAD_PATH,
    or 0 when the file does not exist (used to resume partial downloads)."""
    target = Path(DOWNLOAD_PATH + "/" + filename)
    if not Path.exists(target):
        return 0
    return target.stat().st_size
def resume_download(url,start=0,end=0,part=0):
    # Resume (or range-) download a file. With start/end set, writes to
    # "<name>.partN" for fast_multi_thread_download(); otherwise resumes the
    # whole file from its current on-disk size.
    if is_downloadable(url):
        if end != 0 or start != 0:
            filename = url.split("/")[-1]+".part"+str(part)
            print filename
            rangestart = get_current_size(filename)
            # NOTE(review): the `start` parameter is never used -- the Range
            # header begins at the part file's current size (0 on a fresh
            # run), so every part fetches bytes 0..end; confirm intent.
            resume_header = {'Range': 'bytes=%d-%d' % (rangestart,end)}
            r = requests.get(url,stream = True)
            total_length = end
        else:
            filename = url.split("/")[-1]
            rangestart = get_current_size(filename)
            resume_header = {'Range': 'bytes=%d-' % rangestart}
            r = requests.get(url,stream = True)
            total_length = int(r.headers.get('content-length'))
        # NOTE(review): this second GET supersedes the one above (the first is
        # only used for content-length); verify=False disables TLS certificate
        # checking -- a security concern.
        r = requests.get(url,headers=resume_header, stream=True, verify=False, allow_redirects=True)
        with open(filename, 'ab') as f:
            for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
                if chunk:
                    f.write(chunk)
                else:
                    print "downloadcomplete"
    else:
        if is_restricted(url):
            print "looks like a youtube url"
            download_restricted(url)
        else:
            print "the link is not downloadable"
def fast_multi_thread_download(url):
    # Split the file into 12 byte ranges, download them on 12 threads, then
    # concatenate the parts and delete them.
    r = requests.get(url,stream = True)
    total_length = int(r.headers.get('content-length'))
    # NOTE(review): Python-2 integer division -- the 12 parts cover only
    # 12*(total_length/12) bytes, so up to 11 trailing bytes are never
    # fetched; the last part's end should be total_length.
    part_size = total_length/12
    Parts = {}
    for i in range(0,12):
        start = i*part_size
        end = (i+1)*part_size
        filename = url.split("/")[-1]+".part"+str(i+1)
        currentthread = DownloadThread(resume_download,url,start,end,i+1)
        Parts.setdefault(filename,currentthread)
    for part,thread in Parts.iteritems():
        print "starting part "+ str(part)
        thread.start()
    for part,thread in Parts.iteritems():
        print "starting part "+ str(part)
        thread.join()
    complete_file = url.split("/")[-1]
    # NOTE(review): Python-2 dicts have arbitrary iteration order, so parts
    # may be concatenated out of order here -- stitch_parts() sorts by the
    # numeric suffix and looks like the safe variant; confirm.
    with open(complete_file,"ab") as f:
        for part,thread in Parts.iteritems():
            with open(part,"r") as pf:
                f.write(pf.read())
    for part,thread in Parts.iteritems():
        print "deleting "+ str(part)
        if os.path.exists(part):
            os.remove(part)
        else:
            print "no file " + part
def stream_online(playurl):
    # Play a media URL directly with libVLC instead of downloading it.
    # NOTE(review): the player object goes out of scope when this returns;
    # playback lifetime depends on vlc's reference handling -- confirm.
    Instance = vlc.Instance()
    player = Instance.media_player_new()
    Media = Instance.media_new(playurl)
    Media.get_mrl()
    player.set_media(Media)
    player.play()
def download_playlist(url,is_audio=1):
    # Download every item of a YouTube playlist into ./youtube/Music,
    # converting to mp3 when is_audio == 1.
    playlist = pafy.get_playlist(url)
    for video in playlist['items']:
        video = video["pafy"]
        if is_audio == 1:
            video_format = video.getbestaudio()
        else:
            video_format = video.getbest()
        path = Path(DOWNLOAD_PATH+"/"+"youtube")
        if Path.exists(path):
            category_path = DOWNLOAD_PATH+"/"+"youtube/"+"Music"
            print "looks like you are trying to download a music video, so downloading only audio"
            if Path.exists(Path(category_path)):
                file = video_format.download(filepath = DOWNLOAD_PATH+"/"+"youtube/"+"Music")
                if is_audio == 1:
                    # NOTE(review): split("webm") vs split(".webm") in the last
                    # branch -- this variant leaves a stray '.' in the name.
                    AudioSegment.from_file(file).export(file.split("webm")[0]+".mp3", format="mp3")
            else:
                os.makedirs(category_path)
                file = video_format.download(filepath = DOWNLOAD_PATH+"/"+"youtube/"+"Music")
                if is_audio == 1:
                    AudioSegment.from_file(file).export(file.split("webm")[0]+".mp3", format="mp3")
        else:
            # NOTE(review): category_path is unbound on this path (only set in
            # the branch above) -- makedirs would raise NameError; confirm.
            os.makedirs(category_path)
            file = video_format.download(filepath = DOWNLOAD_PATH+"/"+"youtube/"+"Music")
            if is_audio == 1:
                AudioSegment.from_file(file).export(file.split(".webm")[0]+".mp3", format="mp3")
        os.remove(file)
def sort_util_generate_key(filename):
    """Sort key for '<name>.partN' files: the numeric suffix N."""
    _, _, suffix = filename.rpartition("part")
    return int(suffix)
def stitch_parts(filename):
    # Reassemble "<filename>.partN" pieces (numeric order) into <filename>,
    # deleting each part afterwards. Skips if the target already exists.
    files = os.listdir(DOWNLOAD_PATH)
    if filename in files:
        print "file already exists"
    else:
        req_list = [req_file for req_file in files if filename in req_file]
        # Sort by the numeric .partN suffix so bytes are concatenated in order.
        req_list.sort(key=sort_util_generate_key)
        print req_list
        with open(filename,"ab") as f:
            for req_file in req_list:
                with open(req_file,"r") as pf:
                    f.write(pf.read())
                os.remove(req_file)
def download_complete_series(tv_series):
    # Pick the fastest mirror for the series, derive its base directory URL,
    # then mirror everything under it recursively.
    base_url =get_base_url(tv_series,get_best_mirror(tv_series))
    print "download from "+str(base_url)
    recursive_download(base_url)
def get_best_mirror(query):
    # Probe the top 5 search results and return the URL with the smallest
    # measured probe-download time (failures are penalised with 99999).
    speed_dict = {}
    gresults = sc.g_search(query)[0:5]
    for result in gresults:
        print result.url
        try:
            # NOTE(review): the speed probe runs twice per mirror (once for
            # the print, once for the stored value) -- doubles probe traffic.
            print get_first_downloadable_link_speed(result.url,query)
            speed_dict[result.url] = get_first_downloadable_link_speed(result.url,query)
        except Exception as e:
            print e
            speed_dict[result.url] = 99999
    print speed_dict
    return min(speed_dict, key=speed_dict.get)
def get_related_links(query,anchor_list):
    # Keep anchors whose text is at least 50% similar (Jaro) to the
    # URL-quoted query. Python 2: filter() returns a list here.
    return filter(lambda x: similar(x,urllib2.quote(query)) >= 0.5 , anchor_list)
def get_first_downloadable_link_speed(url,query,fail =0,download_path= os.getcwd(),in_parts = 0,count = 0):
    # Follow query-related links (at most 3 hops) until a downloadable URL is
    # found, then time a small 512-byte probe download and return the seconds
    # taken. Returns 99999 when no downloadable link is reached.
    if is_downloadable(url):
        start = datetime.now()
        print "downloading " + url
        resume_download(url,0,512)
        end = datetime.now()
        print "start is " + str(start)
        print "end is " + str(end)
        return get_time_diff(start,end)
    else:
        if count >= 3:
            return 99999
        try:
            req = urllib2.Request(url)
        except Exception as e:
            if fail <=1 :
                # NOTE(review): `ssl` is referenced but never imported in this
                # module -- this retry path raises NameError; confirm.
                ssl._DEFAULT_CIPHERS = ('DES-CBC3-SHA')
                return get_first_downloadable_link_speed(url,query,fail+1,download_path= os.getcwd(),in_parts = 0,count = 0)
            else:
                # NOTE(review): falls through with `req` unbound -- the
                # add_header call below would raise NameError.
                pass
        req.add_header('User-agent', 'Mozilla 5.10')
        page = urllib2.urlopen(req).read()
        soup = BeautifulSoup(page)
        soup.prettify()
        anchor_list = soup.findAll('a', href=True)[1::]
        # NOTE(review): IndexError when no related link passes the 0.5
        # similarity threshold; get_best_mirror's except catches it.
        anchor = get_related_links(query,anchor_list)[0]
        return get_first_downloadable_link_speed(url + anchor['href'],query,fail+1,download_path,in_parts,count+1)
def get_time_diff(start, end):
    """Return the elapsed time from *start* to *end* in seconds (float).

    Fix: the previous ``diff.seconds`` discarded the day component and all
    sub-second precision, so the 512-byte probe download in
    get_first_downloadable_link_speed() almost always measured 0 seconds and
    mirror ranking degenerated to dict order. ``total_seconds()`` keeps
    microsecond resolution; callers only compare the value numerically.
    """
    return (end - start).total_seconds()
def similar(a, b):
    # Jaro string similarity in [0, 1]; 1.0 means identical.
    # Python 2 only: `unicode` builtin. jellyfish is a third-party dependency.
    return jellyfish.jaro_distance(unicode(a), unicode(b))
def get_base_url(query_string,url):
    # Truncate `url` just after the LAST path segment similar to the query
    # (iterating the segments in reverse keeps re-assigning res_list, so the
    # earliest matching segment wins) and return it with a trailing '/'.
    url_list = url.split("/")
    res_list = []
    for c,i in enumerate(url_list[::-1]):
        if similar(i,query_string) > 0.5:
            res_list = url_list[0:len(url_list)-c]
    # NOTE(review): when no segment matches, res_list stays empty and the
    # function returns just "/"; confirm that is acceptable to callers.
    base_url = "/".join(res_list)+"/"
    return base_url
############################################
# Classifier Functions using Weights.JSON
############################################
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); works on scalars and numpy arrays."""
    return 1.0 / (1.0 + np.exp(-x))
def cleanup_sentence(sentence):
    # Tokenize and stem a sentence into a list of lowercase stems.
    # NOTE(review): relies on a module-level `stemmer`, which is only ever
    # assigned as a LOCAL inside classify() -- calling this directly raises
    # NameError; see the fix note in classify().
    # tokenize the pattern
    sentence_words = nltk.word_tokenize(sentence)
    # stem each word
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words
def bag_of_words(sentence, words, show_details=False):
    """Binary bag-of-words vector (numpy array) for *sentence* over the
    vocabulary *words*; entry i is 1 iff words[i] appears in the sentence."""
    # tokenize + stem the pattern
    tokens = cleanup_sentence(sentence)
    bag = [0] * len(words)
    for token in tokens:
        for idx, vocab_word in enumerate(words):
            if vocab_word == token:
                bag[idx] = 1
                if show_details:
                    print("Found in bag %s" % vocab_word)
    return np.array(bag)
def think(sentence, show_details=False):
    # Forward pass of the 2-layer network: bag-of-words -> hidden -> output.
    # NOTE(review): reads module-level `words`, `synapse_0`, `synapse_1`,
    # which classify() binds only as locals -- see the fix note there.
    x = bag_of_words(sentence.lower(), words, show_details)
    if show_details:
        print ("sentence:", sentence, "\n bag_of_words:", x)
    # input layer is our bag of words
    l0 = x
    # matrix multiplication of input and hidden layer
    l1 = sigmoid(np.dot(l0, synapse_0))
    # output layer
    l2 = sigmoid(np.dot(l1, synapse_1))
    return l2
# Minimum class probability required for classify() to report a label.
ERROR_THRESHOLD = 0.2
def classify(sentence, show_details=False):
    """Classify *sentence* with the trained weights in weights.json.

    Returns [[class_name, probability], ...] sorted by descending
    probability, keeping only entries above ERROR_THRESHOLD.

    Fix: the original bound stemmer/words/synapse_0/synapse_1/classes as
    locals, but think() and cleanup_sentence() read those names at MODULE
    level, so every call died with NameError. Declaring them global makes
    the loaded weights visible to the helpers.
    """
    global stemmer, words, synapse_0, synapse_1, classes
    data_file = DOWNLOAD_PATH+'/weights.json'
    stemmer = LancasterStemmer()
    with open(data_file, 'r') as f:
        raw = json.load(f)
        words = raw['words']
        synapse_0 = np.array(raw['synapse0'])
        synapse_1 = np.array(raw['synapse1'])
        classes = raw['classes']
    print("Classifier between Movies and Music is running....")
    results = think(sentence, show_details)
    # Keep only confident predictions, strongest first.
    results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD]
    results.sort(key=lambda x: x[1], reverse=True)
    return_results = [[classes[r[0]], r[1]] for r in results]
    print ("%s \n classification: %s" % (sentence, return_results))
    return return_results
if __name__ == "__main__":
    # Usage: python tv_download.py <url>
    print os.path.dirname(os.path.realpath(__file__))
    download(sys.argv[1])
|
{"hexsha": "b70fde4beb3000907f788d149128b36275952240", "size": 15382, "ext": "py", "lang": "Python", "max_stars_repo_path": "tv_download.py", "max_stars_repo_name": "pmitra96/multi-threaded-downloader", "max_stars_repo_head_hexsha": "0b879cd05cf588ec497e3762456bdfd5678d00d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tv_download.py", "max_issues_repo_name": "pmitra96/multi-threaded-downloader", "max_issues_repo_head_hexsha": "0b879cd05cf588ec497e3762456bdfd5678d00d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-18T20:53:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-08T00:03:36.000Z", "max_forks_repo_path": "tv_download.py", "max_forks_repo_name": "pmitra96/multi-threaded-downloader", "max_forks_repo_head_hexsha": "0b879cd05cf588ec497e3762456bdfd5678d00d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-09-04T19:33:53.000Z", "max_forks_repo_forks_event_max_datetime": "2018-09-04T19:33:53.000Z", "avg_line_length": 33.9558498896, "max_line_length": 124, "alphanum_fraction": 0.5988818099, "include": true, "reason": "import numpy", "num_tokens": 3409}
|
! Copyright 2021 Ivan Pribec
! SPDX-License-Identifier: Apache-2.0
!> Semi-implicit Runge-Kutta method of third order
!>
!> This is a modernized version of the code originally given in
!>
!> Villadsen, J., & Michelsen, M. L. (1978). Solution of differential
!> equation models by polynomial approximation. Prentice-Hall, Inc.,
!> 1978.
!>
module stiff3_solver
use stiff3_linalg, only: lu, back
implicit none
private
public :: stiff3, stiff3_wp
public :: rhs_sub, jacobian_sub, output_sub
!> Kind parameter for working precision of stiff3 reals
integer, parameter :: stiff3_wp = kind(1.0d0)
!> Working precision with short name for internal use
integer, parameter :: wp = stiff3_wp
abstract interface
!> Function to evaluate the right-hand side of a system of ODEs.
!> It is assumed the system of ODEs is autonomous, meaning that
!> the independent variable x, does not appear explicitly.
subroutine rhs_sub(n,y,f)
import wp
integer, intent(in) :: n
real(wp), intent(in) :: y(n)
real(wp), intent(inout) :: f(n)
end subroutine
!> User supplied subprogram for evaluation of the Jacobian.
subroutine jacobian_sub(n,y,df)
import wp
integer, intent(in) :: n
real(wp), intent(in) :: y(n)
real(wp), intent(inout) :: df(n,n)
end subroutine
!> User supplied subprogram for output.
subroutine output_sub(x,y,iha,qa)
import wp
real(wp), intent(in) :: x
!! Current value of the independent variable
real(wp), intent(in) :: y(:)
!! Current value of the dependent variable vector
integer, intent(in) :: iha
      !! Number of bisections (unsuccessful integrations) in
!! the current step
real(wp), intent(in) :: qa
!! Step-length acceleration factor
end subroutine
end interface
contains
! TODO: Check if the the statement `h0 = h` should appear before
! or after exiting the routine.
!> Semi-implicit Runge-Kutta integrator routine
!
   subroutine stiff3(n,fun,dfun,out,nprint,x0,x1,h0,eps,w,y)
      integer, intent(in) :: n
      !! Number of equations to be integrated.
      procedure(rhs_sub) :: fun
      !! User supplied subprogram for function evaluation.
      procedure(jacobian_sub) :: dfun
      !! User supplied subprogram for evaluation of the Jacobian.
      procedure(output_sub) :: out
      !! User supplied subprogram for output.
      integer, intent(in) :: nprint
      !! Printing interval. For `nprint = k` the solution is only printed
      !! at every kth step.
      real(wp), intent(in) :: x0, x1
      !! Limits of the independent variable between which the differential
      !! equation is solved.
      real(wp), intent(inout) :: h0
      !! Suggested initial half-step length. On exit `h0` contains suggested
      !! value of half-step length for continued integration beyond `x1`.
      real(wp), intent(in) :: eps, w(n)
      !! Tolerance parameters.
      real(wp), intent(inout) :: y(n)
      !! Vector of dependent variables at `x0`. On exit `y` is the vector of
      !! dependent variables at `x1`.
      real(wp), dimension(n) :: yk1, yk2, ya, yold, yold1, f, fold
      !! Workspace for solution vector and right-hand side
      real(wp), dimension(n,n) :: df, dfold
      !! Workspace for jacobian arrays
      integer :: ip(n)
      !! Workspace for the pivot array
      integer :: icon, iha, i, j, nout
      real(wp) :: x, h, e, es, q, qa
      ! Strategy: each macro-step integrates over [x, x+2h] twice -- once
      ! with a single full step of size 2h (result kept in ya) and once
      ! with two half steps of size h (result kept in y). The difference
      ! drives step-size control and a local extrapolation of the result.
      ! icon = 0 except for last step which ends exactly at x1
      icon = 0
      nout = 0
      x = x0
      h = h0
      outer: do
         ! last step - or first step longer than interval
         if (x + 2.0_wp*h >= x1) then
            h = (x1 - x)/2.0_wp
            icon = 1
         end if
         ! other steps - limit to one quarter of remaining interval
         if ((icon == 0) .and. (x + 4.0_wp*h > x1)) then
            h = (x1 - x)/4.0_wp
         end if
         ! evaluate function and jacobian
         call fun(n,y,f)
         call dfun(n,y,df)
         ! keep values which are used in half-step integration
         ! (sirk3 overwrites f, y and df in place)
         do i = 1, n
            yold(i) = y(i)
            fold(i) = f(i)
            do j = 1, n
               dfold(i,j) = df(i,j)
            end do
         end do
         ! perform full integration step
         call sirk3(n,fun,ip,f,y,yk1,yk2,df,2*h)
         ! stash the full-step result in ya and restore the saved state
         do i = 1, n
            ya(i) = y(i)
            y(i) = yold(i)
            f(i) = fold(i)
            do j = 1, n
               df(i,j) = dfold(i,j)
            end do
         end do
         ! full step finished, start half-step integration
         ! iha counts number of steplength bisections
         iha = -1
         inner: do
            iha = iha + 1
            call sirk3(n,fun,ip,f,y,yk1,yk2,df,h)
            call fun(n,y,f)
            call dfun(n,y,df)
            yold1 = y
            call sirk3(n,fun,ip,f,y,yk1,yk2,df,h)
            ! half step integration finished
            ! compute deviation and compare with tolerance
            ! (weighted, relative to 1 + |y| so small components are safe)
            e = 0.0_wp
            do i = 1, n
               es = w(i)*abs(ya(i)-y(i))/(1.0_wp+abs(y(i)))
               e = max(e,es)
            end do
            q = e/eps
            qa = (4.0_wp*q)**0.25_wp
            if (q <= 1.0_wp) then
               exit inner
            end if
            ! deviation too large- return to half-step with smaller h
            do i = 1, n
               ya(i) = yold1(i)
               y(i) = yold(i)
               f(i) = fold(i)
               do j = 1, n
                  df(i,j) = dfold(i,j)
               end do
            end do
            h = h/2.0_wp
            icon = 0
         end do inner
         ! adjust y-vector
         ! (local extrapolation; the divisor 7 = 2**3 - 1 is consistent
         ! with the third-order method -- see module header reference)
         do i = 1, n
            y(i) = y(i) + (y(i) - ya(i))/7.0_wp
         end do
         x = x + 2*h
         ! compute new stepsize
         qa = 1.0_wp/(qa+1.0e-10_wp)
         if (qa > 3.0_wp) qa = 3.0_wp
         h = qa*h
         ! perform output if appropriate
         nout = nout + 1
         if (mod(nout,nprint) == 0 .or. icon == 1) then
            call out(x,y,iha,qa)
         end if
         ! exit main loop
         if (icon == 1) then
            h0 = h
            return
         end if
      end do outer
   end subroutine
!> Single-step semi-implicit integration
!
   subroutine sirk3(n,fun,ipiv,f,y,yk1,yk2,df,h)
      integer, intent(in) :: n
      !! Size of the system of ODEs
      procedure(rhs_sub) :: fun
      !! Function to evaluate the right hand side
      integer, intent(inout) :: ipiv(n)
      !! Integer workspace used to store pivots in the LU factorization
      real(wp), intent(inout) :: f(n)
      !! On input, array of rhs values at beginning of step
      real(wp), intent(inout) :: y(n)
      !! On input contains the current approximation of the dependent variables.
      !! On output contains the approximation at the new time.
      real(wp), intent(inout) :: yk1(n),yk2(n)
      !! Real workspace arrays used in the implicit Runge-Kutta rule
      real(wp), intent(inout) :: df(n,n)
      !! On input contains the Jacobian values J,
      !! On output contains the factorized matrix (I - h a J) = LU
      real(wp), intent(in) :: h
      !! Step size of the independent variable
      integer :: i
      ! Coefficients of the third-order semi-implicit Runge-Kutta rule;
      ! see the Villadsen & Michelsen reference in the module header.
      real(wp), parameter :: a = 0.4358665215084589_wp
      real(wp), parameter :: r1 = 1.037609496131859_wp
      real(wp), parameter :: r2 = 0.8349304838526377_wp
      real(wp), parameter :: r3 = -0.6302020887244523_wp
      real(wp), parameter :: r4 = -0.2423378912600452_wp
      real(wp), parameter :: DF_TOL = 1.0e-12_wp
      !! Jacobian cutoff (elements smaller than DF_TOL are set to zero)
      !
      ! form matrix (I - h a J)
      !
      where (abs(df) > DF_TOL)
         df = -h*a*df
      elsewhere
         df = 0.0_wp
      end where
      do i = 1, n
         df(i,i) = df(i,i) + 1.0_wp
      end do
      !
      ! perform triangular decomposition and evaluate k1
      ! (the same LU factors are reused for all three stage solves)
      !
      call lu(df,ipiv)
      call back(df,f,ipiv)
      do i = 1, n
         yk1(i) = h*f(i)
         yk2(i) = y(i) + 0.75_wp * yk1(i)
      end do
      call fun(n,yk2,f)
      call back(df,f,ipiv)
      !
      ! evaluate k2
      !
      do i = 1, n
         yk2(i) = h*f(i)
         y(i) = y(i) + r1 * yk1(i) + r2 * yk2(i)
         yk2(i) = r3 * yk1(i) + r4 * yk2(i)
      end do
      !
      ! evaluate k3
      ! for convenience stored in yk2
      !
      call back(df,yk2,ipiv)
      do i = 1, n
         y(i) = y(i) + yk2(i)
      end do
   end subroutine
end module
|
{"hexsha": "c95e7bf3659e5ddf66f78fb3e6d46c82ee0ec7ee", "size": 8095, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/stiff3_solver.f90", "max_stars_repo_name": "awvwgk/stiff3", "max_stars_repo_head_hexsha": "7ed7379e1a20d229848fddec62453602216c2074", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-08-12T01:46:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-08T09:04:21.000Z", "max_issues_repo_path": "src/stiff3_solver.f90", "max_issues_repo_name": "awvwgk/stiff3", "max_issues_repo_head_hexsha": "7ed7379e1a20d229848fddec62453602216c2074", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-08-13T02:47:51.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-14T09:52:37.000Z", "max_forks_repo_path": "src/stiff3_solver.f90", "max_forks_repo_name": "awvwgk/stiff3", "max_forks_repo_head_hexsha": "7ed7379e1a20d229848fddec62453602216c2074", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-12T16:55:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-13T06:32:42.000Z", "avg_line_length": 25.9455128205, "max_line_length": 80, "alphanum_fraction": 0.5775169858, "num_tokens": 2508}
|
'''
multilabel_confusion_matrix.py
Run MATCH with PeTaL data.
Last modified on 23 July 2021.
DESCRIPTION
multilabel_confusion_matrix.py plots multilabel confusion matrices
based on the data in MATCH/PeTaL/results.
In a multilabel confusion matrix, the rows correspond to
ground-truth labels $l_{true}$ and the columns correspond to
predicted labels $l_{pred}$. Each cell sports a colour representing
the average confidence score MATCH predicts for the label $l_{pred}$
across all papers bearing the actual label $l_{true}$. This colour
is brighter for averages closer to 1, and darker for averages closer
to 0.
In an ideal classifier, there would be a bright line streaking
across the diagonal from the top left to the bottom right. Cells on
the diagonal represent correct predictions; most cells off the
diagonal represent mispredictions.
Labels are sorted by their frequency of occurrence in the dataset;
labels at the top and left are more common; labels at the bottom and
right are rarer.
OPTIONS
-m, --match PATH/TO/MATCH
Path of MATCH folder.
-p, --plots PATH/TO/plots
Path of plots folder.
--leaf-only
        Only include leaf labels in the matrix. Defaults to false.
--threshold
Logits threshold for a positive prediction, between 0 and 1.
If it exists, we convert all confidence scores above threshold to 1
and all other confidence scores to 0. ("hard" prediction)
If not, we don't transform the confidence scores. ("soft" prediction)
-v, --verbose
Enables verbose output.
USAGE
python3 multilabel_confusion_matrix.py -m ../src/MATCH -p ../plots --verbose
Authors: Eric Kong (eric.l.kong@nasa.gov, erickongl@gmail.com)
'''
import click
import os
import numpy as np
from matplotlib import pyplot as plt, rc
from datetime import datetime
import logging
@click.command()
@click.option('--match', '-m', 'match_path', type=click.Path(exists=True), help='Path of MATCH folder.')
@click.option('--plots', '-p', 'plots_path', type=click.Path(exists=True), help='Path of plots folder.')
@click.option('--leaf-only', type=click.BOOL, is_flag=True, default=False, required=False, help='Leaf labels only.')
@click.option('--threshold', '-t', type=click.FLOAT, default=None, required=False, help='Logits threshold for a positive prediction. Between 0 and 1.')
@click.option('--verbose', '-v', type=click.BOOL, is_flag=True, default=False, required=False, help='Verbose output.')
def main(match_path, plots_path, leaf_only, threshold, verbose):
    """Plots multilabel confusion matrix.

    Args:
        match_path (str): Path of MATCH folder.
        plots_path (str): Path of plots folder.
        leaf_only (bool): Leaf labels only.
        threshold (float, optional): Logits threshold for a positive
            prediction, between 0 and 1. If None, raw confidence scores
            are accumulated ("soft" prediction); otherwise scores are
            binarized at the threshold ("hard" prediction).
        verbose (bool): Verbose output.
    """
    logging.basicConfig(
        level=logging.INFO,
        format="[%(asctime)s:%(name)s] %(message)s"
    )
    MCMlogger = logging.getLogger("MCM")
    DATASET = 'PeTaL'
    MODEL = 'MATCH'
    # Predicted labels and confidence scores from MATCH, plus the
    # ground-truth label lists for the train/test splits.
    res_labels = np.load(f"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy", allow_pickle=True)
    res_scores = np.load(f"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy", allow_pickle=True)
    test_labels = np.load(f"{match_path}/{DATASET}/test_labels.npy", allow_pickle=True)
    train_labels = np.load(f"{match_path}/{DATASET}/train_labels.npy", allow_pickle=True)
    # Any label appearing in the first column of taxonomy.txt is a parent
    # (non-leaf) label.
    parent_labels = set()
    with open(f'{match_path}/{DATASET}/taxonomy.txt', 'r') as tax_file:
        for line in tax_file:
            parent_labels.add(line.split()[0])
    # Count label occurrences over the whole dataset so the matrix axes can
    # be sorted by frequency (most common first).
    all_labels = np.concatenate([train_labels, test_labels], axis=0)
    label_list = np.array(list(set(label for label_list in all_labels for label in label_list)))
    label_count = {label: 0 for label in label_list}
    for label_list_ in all_labels:
        for label in label_list_:
            label_count[label] += 1
    if leaf_only:
        label_list = filter(lambda lbl: lbl not in parent_labels, label_list)
    labels = np.array(sorted(label_list,
                             key=lambda lbl: label_count[lbl],
                             reverse=True))
    label2idx = {label: idx for idx, label in enumerate(labels)}
    idx2label = {idx: label for idx, label in enumerate(labels)}
    # preds_for_test_label[t][p] accumulates the confidence MATCH assigns to
    # predicted label p over all test papers whose ground truth includes t.
    preds_for_test_label = {t: {p: 0 for p in label2idx} for t in label2idx}
    test_label_count = {t: 0 for t in label2idx}
    for test_label_list, res_label_list, res_score_list in zip(test_labels, res_labels, res_scores):
        for test_label in test_label_list:
            # Skip ground-truth labels filtered out of the matrix (e.g.
            # parent labels under --leaf-only); the original accumulated
            # into preds_for_test_label unconditionally, which raised a
            # KeyError for such labels.
            if test_label not in label2idx:
                continue
            test_label_count[test_label] += 1
            for res_label, res_score in zip(res_label_list, res_score_list):
                if res_label in label2idx:
                    # Compare against None explicitly so --threshold 0.0 is
                    # honored as a hard threshold (0.0 is falsy).
                    if threshold is None:
                        preds_for_test_label[test_label][res_label] += res_score
                    else:
                        preds_for_test_label[test_label][res_label] += 1 if res_score > threshold else 0
    num_labels = len(label2idx)
    # Average accumulated scores per ground-truth label; rows with no test
    # occurrences stay zero instead of dividing by zero.
    conf_matrix = np.array(
        [
            [
                preds_for_test_label[idx2label[i]][idx2label[j]] / test_label_count[idx2label[i]]
                if test_label_count[idx2label[i]] > 0 else 0
                for j in range(num_labels)
            ]
            for i in range(num_labels)
        ]
    )
    ########################################
    # PLOTTING!
    ########################################
    ALL_PLOTS_PATH = plots_path
    if not os.path.exists(ALL_PLOTS_PATH):
        os.mkdir(ALL_PLOTS_PATH)
    elif verbose:
        MCMlogger.info(f"You already have a plots directory at {ALL_PLOTS_PATH}.")
    now = datetime.now()
    date_str = now.strftime("%Y%m%d")
    time_str = now.strftime("%H%M%S")
    comment = "MCM"
    PLOTS_PATH = os.path.join(ALL_PLOTS_PATH, f"{date_str}_{comment}")
    if not os.path.exists(PLOTS_PATH):
        os.mkdir(PLOTS_PATH)
        if verbose:
            MCMlogger.info(f"New plots directory at {PLOTS_PATH}")
    elif verbose:
        MCMlogger.info(f"You already have a plots directory at {PLOTS_PATH}")
    rc('xtick', labelsize=8)
    rc('ytick', labelsize=8)
    rc('font', size=20)
    # Keep only the label_limit most frequent labels (labels are already
    # sorted by frequency). Clamp so datasets with fewer labels do not
    # index past the end of idx2label.
    label_limit = 25
    num_labels = min(num_labels, label_limit)
    conf_matrix = conf_matrix[:num_labels, :num_labels]
    row_labels = [idx2label[i] for i in range(num_labels)]
    col_labels = [idx2label[i] for i in range(num_labels)]
    if verbose:
        MCMlogger.info(f"Generating MCM with size {conf_matrix.shape[0]}x{conf_matrix.shape[1]}")
    plt.rcParams["figure.figsize"] = (10, 10)
    _, ax = plt.subplots()
    plt.matshow(conf_matrix, fignum=0)
    ax.set_title('Multilabel Confusion Matrix for MATCH on golden.json\nTop 25 of All Labels Sorted by Frequency', y=1.5, pad=0)
    ax.set_xlabel('Predicted labels')
    ax.set_ylabel('Ground truth labels')
    ax.xaxis.set_label_position('top')
    ax.xaxis.tick_top()
    plt.xticks(range(num_labels), col_labels, rotation='vertical')
    plt.yticks(range(num_labels), row_labels)
    plt.colorbar()
    PLOT_PATH = os.path.join(PLOTS_PATH, f'mcm_{time_str}')
    plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False, bbox_inches='tight')
    plt.clf()
    if verbose:
        MCMlogger.info(f"New plot at {PLOT_PATH}")
if __name__ == '__main__':
    main()
|
{"hexsha": "52115071b8b8ca4d0375c7a75165c8a3646d5b38", "size": 8978, "ext": "py", "lang": "Python", "max_stars_repo_path": "auto-labeler/MATCH/analysis/multilabel_confusion_matrix.py", "max_stars_repo_name": "nasa-petal/PeTaL-labeller", "max_stars_repo_head_hexsha": "b68d534c8c9f026860ce7fe869eef4c16fe35505", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-21T18:08:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-21T18:08:55.000Z", "max_issues_repo_path": "auto-labeler/MATCH/analysis/multilabel_confusion_matrix.py", "max_issues_repo_name": "nasa-petal/PeTaL-labeller", "max_issues_repo_head_hexsha": "b68d534c8c9f026860ce7fe869eef4c16fe35505", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2021-01-13T21:20:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-18T15:47:22.000Z", "max_forks_repo_path": "auto-labeler/MATCH/analysis/multilabel_confusion_matrix.py", "max_forks_repo_name": "nasa-petal/PeTaL-labeller", "max_forks_repo_head_hexsha": "b68d534c8c9f026860ce7fe869eef4c16fe35505", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-14T19:17:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-14T19:17:44.000Z", "avg_line_length": 39.3771929825, "max_line_length": 151, "alphanum_fraction": 0.6289819559, "include": true, "reason": "import numpy", "num_tokens": 2099}
|
(* Some results about real numbers *)
From intuitionism Require Import lib set seq spr fan func classic choice.
From intuitionism Require Import bcp bar.
(*
Describing intervals of real numbers as binary sequences
--------------------------------------------------------
We can describe the real numbers in [a_0, b_0] as binary sequences. At each step
the interval [a_n, b_n] is split into [a_n, (a_n+2b_n)/3], [(2a_n+b_n)/3, b_n].
A sequence α ∈ Bin selects in which intervals a point is located. Note that
splitting in half would require exact knowledge about the upper/lower bound of
some numbers, which is a problem for some constructions.
The next definitions use [a_0, b_0] = [0, 1].
*)
(* Compute left endpoint of the n-th interval of α with denominator 3^(n+1). *)
(* The recursion keeps a scaled numerator: at depth n the previous numerator
   is multiplied by 3, and 2^n is added when α selects the second interval
   (α n <> 0). *)
Fixpoint lbound α n :=
  (if α n =? 0 then 0 else 2^n) +
  match n with
  | 0 => 0
  | S m => 3 * lbound α m
  end.
(* Distance |m - n| between m and n. Since nat subtraction truncates at zero,
   at most one of the two terms is nonzero, so the sum is the absolute
   difference. The original (m - n) + (m - n) evaluated to 0 whenever m <= n,
   which is not symmetric and is not a distance. *)
Definition distance m n := (m - n) + (n - m).
(* α and β are within distance 1/(2^δ) of each other. *)
(* Note: only the existence of one resolution n witnessing the bound is
   required. *)
Definition within (δ : nat) (α β : dom Bin) :=
  ∃n, 2^δ * distance (lbound α n) (lbound β n) < 3^n.
(* Continuity of f : [0,1] -> [0,1] at x. *)
Definition point_continuous f x ε :=
  ∃δ, ∀x', within δ x x' -> within ε (f x) (f x').
(* Pointwise continuity of f : [0,1] -> [0,1]: δ may depend on x. *)
Definition pointwise_continuous f :=
  ∀x ε, point_continuous f x ε.
(* Uniform continuity of f : [0,1] -> [0,1]: δ depends only on ε, not on x. *)
Definition uniform_continuous f :=
  ∀ε, ∃δ, ∀x x', within δ x x' -> within ε (f x) (f x').
(* The intermediate value theorem. *)
Definition IntermediateValueTheorem :=
  ∀f, pointwise_continuous f /\ f (0^ω) = 0^ω /\ f (1^ω) = 1^ω ->
  ∀y, ∃x, f x = y.
(* Simple form of Brouwers fixed-point theorem. *)
Definition FixedPointTheorem :=
  ∀f, uniform_continuous f -> ∃x, f x = x.
(*
It would be nice if we can show that both the intermediate value theorem and
Brouwers fixed-point theorem imply LPO without resorting to a full definition
of real numbers. However it seems the binary sequences are not particularly
easy to reason about either, or do arithmetic with.
*)
|
{"author": "bergwerf", "repo": "intuitionism", "sha": "581ac55e8a5382d3f35cf8f9b09accb9b5f89ae8", "save_path": "github-repos/coq/bergwerf-intuitionism", "path": "github-repos/coq/bergwerf-intuitionism/intuitionism-581ac55e8a5382d3f35cf8f9b09accb9b5f89ae8/reals.v"}
|
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.datasets import cifar100,mnist,cifar10,fashion_mnist
from scipy.io import loadmat
import numpy as onp #original numpy
import jax.numpy as jnp #jax numpy
import itertools
#import custom_datasets
# TODO: Setup this function to take in a string for the data set
def setupMNIST():
    """Load and preprocess the MNIST dataset.

    Returns:
        tuple: ``(x_train, full_train_ds, train_ds, test_ds, classes)`` --
            the preprocessed training images, the full training set and the
            full test set as ``{'image', 'label'}`` dicts of jax arrays, an
            iterator of 128-example training batches, and the number of
            classes (10).
    """
    classes = 10
    subtract_pixel_mean = True
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # MNIST images are (28, 28); add a trailing channel axis so they match
    # the (H, W, C) layout used by the other datasets in this module.
    x_train = onp.expand_dims(x_train, axis=3)
    x_test = onp.expand_dims(x_test, axis=3)
    y_train = y_train.reshape([-1])
    y_test = y_test.reshape([-1])
    # Scale pixel values to [0, 1].
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    # Center both splits on the per-pixel mean of the training set.
    if subtract_pixel_mean:
        x_train_mean = onp.mean(x_train, axis=0)
        x_train -= x_train_mean
        x_test -= x_train_mean
    # NOTE(review): the original code built a fully-configured augmenting
    # ImageDataGenerator here and then immediately overwrote it with this
    # default one, so the augmentation settings were dead code; the dead
    # configuration has been removed (behavior unchanged).
    datagen = ImageDataGenerator()
    datagen.fit(x_train)
    train_flow = datagen.flow(x_train, y_train, batch_size=128)
    train_ds = map(lambda x: {'image': x[0].astype(onp.float32),
                              'label': x[1].astype(onp.int32)}, train_flow)
    test_ds = {'image': x_test.astype(jnp.float32),
               'label': y_test.astype(jnp.int32)}
    full_train_ds = {'image': x_train.astype(jnp.float32),
                     'label': y_train.astype(jnp.int32)}
    return x_train, full_train_ds, train_ds, test_ds, classes
def setupFashionMNIST():
    """Load and preprocess the Fashion-MNIST dataset.

    Returns:
        tuple: ``(x_train, full_train_ds, train_ds, test_ds, classes)`` --
            the preprocessed training images, the full training set and the
            full test set as ``{'image', 'label'}`` dicts of jax arrays, an
            iterator of 128-example training batches, and the number of
            classes (10).
    """
    classes = 10
    subtract_pixel_mean = True
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    # Images are (28, 28); add a trailing channel axis for (H, W, C) layout.
    x_train = onp.expand_dims(x_train, axis=3)
    x_test = onp.expand_dims(x_test, axis=3)
    y_train = y_train.reshape([-1])
    y_test = y_test.reshape([-1])
    # Scale pixel values to [0, 1].
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    # Center both splits on the per-pixel mean of the training set.
    if subtract_pixel_mean:
        x_train_mean = onp.mean(x_train, axis=0)
        x_train -= x_train_mean
        x_test -= x_train_mean
    datagen = ImageDataGenerator()
    datagen.fit(x_train)
    train_flow = datagen.flow(x_train, y_train, batch_size=128)
    train_ds = map(lambda x: {'image': x[0].astype(onp.float32),
                              'label': x[1].astype(onp.int32)}, train_flow)
    full_train_ds = {'image': x_train.astype(jnp.float32),
                     'label': y_train.astype(jnp.int32)}
    test_ds = {'image': x_test.astype(jnp.float32),
               'label': y_test.astype(jnp.int32)}
    return x_train, full_train_ds, train_ds, test_ds, classes
def setupCIFAR10():
    """Load and preprocess the CIFAR-10 dataset.

    Returns:
        tuple: ``(x_train, full_train_ds, train_ds, test_ds, classes)`` --
            the preprocessed training images, the full training set and the
            full test set as ``{'image', 'label'}`` dicts of jax arrays, an
            iterator of 128-example training batches, and the number of
            classes (10).
    """
    classes = 10
    subtract_pixel_mean = True
    # CIFAR-10 images are already (32, 32, 3); only the labels need
    # flattening.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    y_train = y_train.reshape([-1])
    y_test = y_test.reshape([-1])
    # Scale pixel values to [0, 1].
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    # Center both splits on the per-pixel mean of the training set.
    if subtract_pixel_mean:
        x_train_mean = onp.mean(x_train, axis=0)
        x_train -= x_train_mean
        x_test -= x_train_mean
    datagen = ImageDataGenerator()
    datagen.fit(x_train)
    train_flow = datagen.flow(x_train, y_train, batch_size=128)
    train_ds = map(lambda x: {'image': x[0].astype(onp.float32),
                              'label': x[1].astype(onp.int32)}, train_flow)
    full_train_ds = {'image': x_train.astype(jnp.float32),
                     'label': y_train.astype(jnp.int32)}
    test_ds = {'image': x_test.astype(jnp.float32),
               'label': y_test.astype(jnp.int32)}
    return x_train, full_train_ds, train_ds, test_ds, classes
def setupCIFAR100():
    """Load and preprocess the CIFAR-100 dataset.

    Returns:
        tuple: ``(x_train, full_train_ds, train_ds, test_ds, classes)`` --
            the preprocessed training images, the full training set and the
            full test set as ``{'image', 'label'}`` dicts of jax arrays, an
            iterator of 128-example training batches, and the number of
            classes (100).
    """
    classes = 100
    subtract_pixel_mean = True
    # CIFAR-100 images are already (32, 32, 3); only the labels need
    # flattening.
    (x_train, y_train), (x_test, y_test) = cifar100.load_data()
    y_train = y_train.reshape([-1])
    y_test = y_test.reshape([-1])
    # Scale pixel values to [0, 1].
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    # Center both splits on the per-pixel mean of the training set.
    if subtract_pixel_mean:
        x_train_mean = onp.mean(x_train, axis=0)
        x_train -= x_train_mean
        x_test -= x_train_mean
    datagen = ImageDataGenerator()
    datagen.fit(x_train)
    train_flow = datagen.flow(x_train, y_train, batch_size=128)
    train_ds = map(lambda x: {'image': x[0].astype(onp.float32),
                              'label': x[1].astype(onp.int32)}, train_flow)
    full_train_ds = {'image': x_train.astype(jnp.float32),
                     'label': y_train.astype(jnp.int32)}
    test_ds = {'image': x_test.astype(jnp.float32),
               'label': y_test.astype(jnp.int32)}
    return x_train, full_train_ds, train_ds, test_ds, classes
def setupSVHN():
    """Load and preprocess the SVHN (Street View House Numbers) dataset.

    Expects ``train_32x32.mat`` and ``test_32x32.mat`` in the current
    working directory.

    Returns:
        tuple: ``(x_train, full_train_ds, train_ds, test_ds, classes)`` --
            the preprocessed training images, the full training set and the
            full test set as ``{'image', 'label'}`` dicts of jax arrays, an
            iterator of 128-example training batches, and the number of
            classes (10).
    """
    classes = 10
    subtract_pixel_mean = True
    def load_data(path):
        """Helper function for loading a MAT-File."""
        data = loadmat(path)
        return data['X'], data['y']
    x_train, y_train = load_data('train_32x32.mat')
    x_test, y_test = load_data('test_32x32.mat')
    # Flatten the (N, 1) label arrays to (N,).
    # NOTE(review): the .mat labels are used as-is here (no 10 -> 0
    # remapping) -- confirm downstream code expects that.
    y_train = y_train.reshape([-1])
    y_test = y_test.reshape([-1])
    # The .mat files store images as (H, W, C, N); move the sample axis
    # first to get (N, H, W, C).
    x_train = onp.moveaxis(x_train, -1, 0)
    x_test = onp.moveaxis(x_test, -1, 0)
    # Scale pixel values to [0, 1].
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    # Center both splits on the per-pixel mean of the training set.
    if subtract_pixel_mean:
        x_train_mean = onp.mean(x_train, axis=0)
        x_train -= x_train_mean
        x_test -= x_train_mean
    datagen = ImageDataGenerator()
    datagen.fit(x_train)
    train_flow = datagen.flow(x_train, y_train, batch_size=128)
    train_ds = map(lambda x: {'image': x[0].astype(onp.float32),
                              'label': x[1].astype(onp.int32)}, train_flow)
    full_train_ds = {'image': x_train.astype(jnp.float32),
                     'label': y_train.astype(jnp.int32)}
    test_ds = {'image': x_test.astype(jnp.float32),
               'label': y_test.astype(jnp.int32)}
    return x_train, full_train_ds, train_ds, test_ds, classes
def setupTinyImageNet():
    """Load and preprocess the Tiny ImageNet dataset.

    NOTE(review): this function is unfinished and cannot run as written:
    it depends on ``custom_datasets`` whose import is commented out at the
    top of the file, and ``x_train``/``y_train``/``x_test``/``y_test`` are
    never assigned (the loading line below is commented out), so the first
    ``reshape`` raises NameError.
    """
    classes = 200
    subtract_pixel_mean = True
    dataset = custom_datasets.TINYIMAGENET('Data', train=True, download=True)
    print(dataset)
    #(x_train, y_train), (x_test, y_test) = cifar100.load_data()
    # NOTE(review): x_train/y_train/x_test/y_test are undefined from here on.
    y_train = y_train.reshape([-1])
    y_test = y_test.reshape([-1])
    # Input image dimensions.
    input_shape = x_train.shape[1:]
    # Normalize data.
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    # If subtract pixel mean is enabled
    if subtract_pixel_mean:
        x_train_mean = onp.mean(x_train, axis=0)
        x_train -= x_train_mean
        x_test -= x_train_mean
    orig_x_train = onp.array(x_train)
    orig_y_train = onp.array(y_train)
    datagen = ImageDataGenerator()
    datagen.fit(x_train)
    train_flow = datagen.flow(x_train, y_train, batch_size=128)
    train_ds = map(lambda x: {'image': x[0].astype(onp.float32),
                              'label': x[1].astype(onp.int32)},train_flow)
    full_train_ds = {'image': x_train.astype(jnp.float32),
                     'label': y_train.astype(jnp.int32)}
    test_ds = {'image': x_test.astype(jnp.float32),
               'label': y_test.astype(jnp.int32)}
    return x_train, full_train_ds, train_ds, test_ds, classes
|
{"hexsha": "48ae7516a75cb185f899718420d9eae5cc7c8cce", "size": 8739, "ext": "py", "lang": "Python", "max_stars_repo_path": "generate_data.py", "max_stars_repo_name": "anonymous-code-submission/ICML2021_anon_code_submission", "max_stars_repo_head_hexsha": "0c6b57c6170dd763e400e32392ce946ff6a86dfd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generate_data.py", "max_issues_repo_name": "anonymous-code-submission/ICML2021_anon_code_submission", "max_issues_repo_head_hexsha": "0c6b57c6170dd763e400e32392ce946ff6a86dfd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generate_data.py", "max_forks_repo_name": "anonymous-code-submission/ICML2021_anon_code_submission", "max_forks_repo_head_hexsha": "0c6b57c6170dd763e400e32392ce946ff6a86dfd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5623100304, "max_line_length": 74, "alphanum_fraction": 0.7195331274, "include": true, "reason": "import numpy,from scipy,import jax", "num_tokens": 2523}
|
"""
@file
@brief Shape object.
"""
import numpy
class BaseDimensionShape:
    """
    Base class to @see cl DimensionObject,
    @see cl ShapeOperator, @see cl ShapeObject.
    """
    def to_string(self, use_x=True):
        """
        Converts the object into a string.

        @param      use_x   if True, an unknown dimension is rendered as
                            ``'x'`` (see subclasses)
        @return             a string
        """
        raise NotImplementedError()
    def evaluate(self, **kwargs):
        """
        Evaluates the object, reduces the expression
        to a number or a string.

        @param      kwargs  values for the variables
        @return             a number or a string
        """
        raise NotImplementedError()  # pragma: no cover
class ShapeOperator(BaseDimensionShape):
    """
    Base class for all shapes operator.
    """
    def __init__(self, name, fct, fct_string, *args):
        """
        @param      name        display name of the operator
        @param      fct         function doing the operator
                                if argument are numeric
        @param      fct_string  function represented as a string
        @param      args        argument of the operator
        """
        self._name = name
        self._fct = fct
        self._fct_string = fct_string
        self._args = args
        # Operands must already be wrapped: reject raw ints/strings early.
        for a in self._args:
            if not isinstance(a, DimensionObject):
                raise TypeError(
                    "All arguments must be of type DimensionObject not '{}'.".format(type(a)))
    def __repr__(self):
        """
        usual
        """
        # The callable itself has no evaluable repr, so its source string
        # (_fct_string) is printed in both the fct and fct_string positions.
        return "{0}('{1}', {2}, '{2}', {3})".format(
            self.__class__.__name__, self._name,
            self._fct_string, self._args)
    def to_string(self, use_x=True):
        """
        Displays as a string.

        @return     a string
        """
        raise NotImplementedError(  # pragma: no cover
            "Operator '{}' does not implement 'to_string': {}.".format(
                self.__class__.__name__, repr(self)))
    def evaluate(self, **kwargs):
        """
        Evaluates the operator.

        @param      kwargs      value for the variables.
        @return                 string or integer
        """
        args = []
        has_string = False
        for a in self._args:
            a = DimensionObject._same_(a)
            v = a.evaluate(**kwargs)
            if isinstance(v, str):
                has_string = True
            args.append(v)
        if has_string:
            # At least one operand is still symbolic: delegate to the
            # subclass to build a string expression instead of computing.
            res = self._evaluate_string_(args, **kwargs)
        else:
            try:
                res = self._fct(*args)
            except TypeError as e:
                raise RuntimeError(
                    "Unable to evaluate operator {} due to {}".format(repr(self), e)) from e
        return res
    def _evaluate_string_(self, args, **kwargs):
        """
        Evaluates the operator assuming some of them are still strings.

        @param      args        arguments extracted by method *evaluate*
        @param      kwargs      value for the variables.
        @return                 string or integer
        """
        raise NotImplementedError(
            "This function must be overwritten.")  # pragma: no cover
class ShapeBinaryOperator(ShapeOperator):
    """
    Base class for shape binary operator.
    """
    def __init__(self, name, fct, fct_string, x, y):
        """
        @param      name        display name of the operator
        @param      fct         function doing the operator
                                if argument are numeric
        @param      fct_string  function represented as a string
        @param      x           first argument
        @param      y           second argument
        """
        ShapeOperator.__init__(self, name, fct, fct_string, x, y)
        if isinstance(x, tuple):
            raise TypeError('x cannot be a tuple')  # pragma: no cover
        if isinstance(y, tuple):
            raise TypeError('y cannot be a tuple')  # pragma: no cover
    def _to_string1(self, x, y):
        # Both dimensions are integers: fold the operator numerically.
        return DimensionObject(self._fct(x._dim, y._dim)).to_string()
    def _to_string2(self, x, y):
        # One symbolic operand: render a plain infix expression.
        return DimensionObject("{}{}{}".format(x._dim, self._name, y._dim)).to_string()
    def _to_string2b(self, x, y):
        # Both operands symbolic: parenthesize to keep precedence explicit.
        return DimensionObject("({}){}({})".format(x._dim, self._name, y._dim)).to_string()
    def _to_string3(self, x):
        # Unknown right operand rendered as 'x'.
        return DimensionObject("{}{}x".format(x._dim, self._name)).to_string()
    def to_string(self, use_x=True):
        """
        Applies binary operator to a dimension.

        @param      use_x   use `'x'` if dimension is unknown
        @return             a string
        """
        x, y = self._args  # pylint: disable=W0632
        if isinstance(x._dim, int):
            if isinstance(y, DimensionObject):
                if isinstance(y._dim, int):
                    return self._to_string1(x, y)
                if isinstance(y._dim, str):
                    return self._to_string2(x, y)
                if y._dim is None:
                    # Unknown right-hand dimension.
                    if use_x:
                        return self._to_string3(x)
                    return DimensionObject("{}{}DimensionObject()".format(
                        x._dim, self._name)).to_string()
                raise TypeError(  # pragma: no cover
                    "Unable to handle type '{}'.".format(type(y._dim)))
            raise TypeError(  # pragma: no cover
                "Unable to handle type '{}'.".format(type(y)))
        elif isinstance(x._dim, str):
            # Symbolic left operand: integer right keeps the plain infix
            # form, symbolic right gets parentheses.
            if isinstance(y._dim, int):
                return self._to_string2(x, y)
            if isinstance(y._dim, str):
                return self._to_string2b(x, y)
            raise TypeError(  # pragma: no cover
                "Unable to handle type '{}'.".format(type(y._dim)))
        raise TypeError(  # pragma: no cover
            "Unable to handle type '{}'.".format(type(x._dim)))
    def _evaluate_string_(self, args, **kwargs):
        """
        Evaluates the operator assuming some of them are still strings.

        @param      args        arguments extracted by method *evaluate*
        @param      kwargs      value for the variables.
        @return                 string or integer
        """
        # Join operands with the operator symbol, parenthesizing each one.
        return self._name.join(map(lambda s: '({})'.format(s), args))
class ShapeBinaryFctOperator(ShapeBinaryOperator):
    """
    Base class for shape binary operator defined by a function.
    """

    def _to_string2(self, x, y):
        # Function-style rendering: name(dim1,dim2).
        return DimensionObject(
            "{}({},{})".format(self._name, x._dim, y._dim)).to_string()

    def _to_string2b(self, x, y):
        # Parentheses around operands are not needed in the
        # function-style rendering: same output as _to_string2.
        return DimensionObject(
            "{}({},{})".format(self._name, x._dim, y._dim)).to_string()

    def _to_string3(self, x):
        # Unknown second operand rendered as 'x'.
        return DimensionObject(
            "{}({},x)".format(self._name, x._dim)).to_string()

    def _evaluate_string_(self, args, **kwargs):
        """
        Evaluates the operator assuming some of them are still strings.

        @param args arguments extracted by method *evaluate*
        @param kwargs value for the variables.
        @return string or integer
        """
        inner = ",".join(str(a) for a in args)
        return "{}({})".format(self._name, inner)
class ShapeOperatorAdd(ShapeBinaryOperator):
    """
    Shape addition.
    """

    def __init__(self, x, y):
        ShapeBinaryOperator.__init__(
            self, '+', lambda a, b: a + b, 'lambda a, b: a + b', x, y)

    def __repr__(self):
        """
        Displays a string.

        @return a string
        """
        left, right = self._args
        return "{}({}, {})".format(
            self.__class__.__name__, repr(left), repr(right))
class ShapeOperatorMul(ShapeBinaryOperator):
    """
    Shape multiplication.
    """

    def __init__(self, x, y):
        ShapeBinaryOperator.__init__(
            self, '*', lambda a, b: a * b, 'lambda a, b: a * b', x, y)

    def __repr__(self):
        """
        Displays a string.

        @return a string
        """
        left, right = self._args
        return "{}({}, {})".format(
            self.__class__.__name__, repr(left), repr(right))
class ShapeOperatorGreater(ShapeBinaryOperator):
    """
    Shape comparison.
    """

    def __init__(self, x, y):
        ShapeBinaryOperator.__init__(
            self, '>', lambda a, b: a > b, 'lambda a, b: a > b', x, y)

    def __repr__(self):
        """
        Displays a string.

        @return a string
        """
        left, right = self._args
        return "{}({}, {})".format(
            self.__class__.__name__, repr(left), repr(right))
class ShapeOperatorMax(ShapeBinaryFctOperator):
    """
    Best on each dimension.
    """

    def __init__(self, x, y):
        ShapeBinaryFctOperator.__init__(
            self, 'max', lambda a, b: max(a, b), 'max(a, b)', x, y)

    def __repr__(self):
        """
        Displays a string.

        @return a string
        """
        left, right = self._args
        return "{}({}, {})".format(
            self.__class__.__name__, repr(left), repr(right))
class DimensionObject(BaseDimensionShape):
    """
    One dimension of a shape.
    """

    def __init__(self, obj):
        """
        @param obj int or @see cl DimensionObject or None to
                   specify something unknown
        """
        # 0 and '?' are normalized to None, i.e. "unknown dimension".
        if obj is None or obj == 0 or obj == '?':
            self._dim = None
        elif isinstance(obj, (int, str, ShapeOperator, DimensionObject,
                              numpy.int32, numpy.int64)):
            self._dim = obj
        else:
            raise TypeError("Unexpected type for obj: {}".format(type(obj)))

    @property
    def dim(self):
        """
        Returns the dimension.
        """
        return self._dim

    def __repr__(self):
        """
        usual
        """
        # numpy integers are accepted by the constructor and must be
        # displayed like plain ints (not quoted like strings).
        if isinstance(self._dim, (int, numpy.integer)):
            return "DimensionObject({})".format(self._dim)
        if isinstance(self._dim, DimensionObject):
            return repr(self._dim)
        if isinstance(self._dim, ShapeOperator):
            return "DimensionObject({})".format(repr(self._dim))
        return "DimensionObject('{}')".format(self._dim)

    @staticmethod
    def _same_(obj):
        """
        Returns *obj* if *obj* is @see cl DimensionObject
        otherwise converts it.
        """
        if isinstance(obj, DimensionObject):
            return obj
        return DimensionObject(obj)

    def to_string(self, use_x=True):
        """
        Represents the dimension as a string.
        """
        # numpy integers are accepted by the constructor: treat them
        # like plain ints instead of falling through to the error.
        if isinstance(self._dim, (int, numpy.integer)):
            return '{}'.format(self._dim)
        if isinstance(self._dim, ShapeOperator):
            return self._dim.to_string()
        if isinstance(self._dim, str):
            return self._dim
        if self._dim is None:
            return 'x' if use_x else '?'
        raise NotImplementedError(  # pragma: no cover
            "Not implemented for '{}'.".format(repr(self)))

    def evaluate(self, **kwargs):
        """
        Evaluates the dimension.

        @param kwargs value for the variables.
        @return string or integer
        """
        # Fix: the previous implementation contained a broken branch
        # ``elif isinstance(self._dim, ):`` (single-argument isinstance
        # raises TypeError at runtime) and numpy integer dimensions,
        # although accepted by the constructor, fell into it.
        if isinstance(self._dim, (int, numpy.integer,
                                  ShapeOperator, DimensionObject)):
            res = self._dim
        elif isinstance(self._dim, str):
            # A variable: replaced by its value when provided.
            res = kwargs.get(self._dim, self._dim)
        elif self._dim is None:
            # Unknown dimension: produce a unique variable name
            # derived from the object identity.
            pref = str(hex(id(self)))[2:]
            res = "n{}".format(pref)
        else:
            raise NotImplementedError(  # pragma: no cover
                "Not implemented for '{}'.".format(repr(self)))
        if isinstance(res, (ShapeOperator, DimensionObject)):
            return res.evaluate(**kwargs)
        return res

    def __eq__(self, v):
        """
        usual
        """
        if isinstance(v, (int, str)):
            return self._dim == v
        if isinstance(v, DimensionObject):
            return v == self._dim
        if isinstance(v, ShapeOperator):
            ve = v.evaluate()
            return ve == self._dim
        if v is None:
            return self._dim is None
        raise TypeError(  # pragma: no cover
            "Unable to compare a DimensionObject to {}".format(type(v)))

    def __add__(self, obj):
        """
        usual
        """
        return DimensionObject(
            ShapeOperatorAdd(self, DimensionObject._same_(obj)))

    def __mul__(self, obj):
        """
        usual
        """
        return DimensionObject(
            ShapeOperatorMul(self, DimensionObject._same_(obj)))

    def __gt__(self, obj):
        """
        usual
        """
        if obj is None:
            # An unknown or symbolic dimension is considered
            # greater than nothing.
            return not isinstance(self._dim, int)
        if isinstance(self._dim, int) and isinstance(obj._dim, int):
            return self._dim > obj._dim
        return DimensionObject(
            ShapeOperatorGreater(self, DimensionObject._same_(obj)))
class ShapeObject(BaseDimensionShape):
    """
    Handles mathematical operations around shapes.
    It stores a type (:epkg:`numpy` type),
    and a name to somehow have an idea of where
    the shape comes from in the :epkg:`ONNX` graph.

    The shape itself is defined by a list of
    @see cl DimensionObject or @see cl ShapeOperator
    or *None* if the shape is unknown. A dimension is an
    integer or a variable encoded as a string. This variable
    is a way to tell the dimension may vary.

    .. runpython::
        :showcode:

        import numpy
        from mlprodict.onnxrt.shape_object import ShapeObject

        sh1 = ShapeObject((1, 2), dtype=numpy.float32)
        sh2 = ShapeObject((45, 2), dtype=numpy.float32)
        mx = max(sh1, sh2)
        print(mx)

        sh1 = ShapeObject((1, 2), dtype=numpy.float32)
        sh2 = ShapeObject((None, 2), dtype=numpy.float32)
        print(sh2)
        mx = max(sh1, sh2)
        print(mx.to_string())

        sh1 = ShapeObject((1, 2), dtype=numpy.float32)
        sh2 = ShapeObject(('n', 2), dtype=numpy.float32)
        print(sh2)
        mx = max(sh1, sh2)
        print(mx.evaluate(n=4))
    """

    def __init__(self, shape, dtype=None, use_n1=False, name=None):
        """
        @param shape tuple or `numpy.array`
        @param dtype dtype
        @param use_n1 use `'n'` if the first dimension is unknown
        @param name optional, for debugging purposes
        """
        self.name = name
        if isinstance(shape, numpy.ndarray):
            # Shape and dtype are taken directly from the array.
            self._shape = [DimensionObject(s) for s in shape.shape]
            self._dtype = shape.dtype
        elif isinstance(shape, dict) and 'type' in shape:
            # Dictionary description; presumably the format produced for
            # ONNX inputs/outputs -- verify against callers.
            tshape = shape['type']
            if tshape['kind'] == 'tensor':
                if tshape['shape'] == ('?', ):
                    # Fully unknown tensor shape.
                    self._shape = None
                else:
                    self._shape = [DimensionObject(s) for s in tshape['shape']]
                self._dtype = tshape['elem']
            elif tshape['kind'] == 'map':
                self._shape = []
                self._dtype = 'map'
            else:
                raise ValueError(  # pragma: no cover
                    "Wrong shape value {}".format(shape))
        elif isinstance(shape, (tuple, list)):
            self._shape = []
            for s in shape:
                self._shape.append(DimensionObject(s))
            self._dtype = dtype
        elif shape is None:
            # shape is unknown
            self._shape = None
            self._dtype = dtype
        else:
            raise TypeError(  # pragma: no cover
                "Unexpected type for shape: {}".format(type(shape)))
        if self._dtype is None:
            raise ValueError(
                "dtype cannot be None, shape type is {}\n{}".format(
                    type(shape), shape))
        # Normalize the many accepted dtype spellings to numpy types.
        if self._dtype in (float, 'double'):
            self._dtype = numpy.float64
        elif self._dtype in ('float32', 'float'):
            self._dtype = numpy.float32
        elif self._dtype in ('int32', ):
            self._dtype = numpy.int32
        elif self._dtype in (int, 'int', 'int64'):
            self._dtype = numpy.int64
        elif self._dtype in (str, 'str'):
            # NOTE(review): numpy.str is a deprecated alias of builtin str,
            # removed in numpy>=1.24 -- confirm the supported numpy range.
            self._dtype = numpy.str
        elif (hasattr(self._dtype, 'type') and self._dtype.type is numpy.string_):
            pass
        elif self._dtype in (bool, 'bool'):
            # NOTE(review): numpy.bool is also a deprecated alias of bool.
            self._dtype = numpy.bool
        elif self._dtype in (object, numpy.object_):
            pass
        elif self._dtype in (numpy.int8, 'int8', ):
            self._dtype = numpy.int8
        elif self._dtype in (numpy.uint8, 'uint8', ):
            self._dtype = numpy.uint8
        elif self._dtype not in {
                numpy.float32, numpy.float64, numpy.int32, numpy.int64,
                numpy.str, numpy.bool, None,
                'map'}:
            raise ValueError(  # pragma: no cover
                "dtype has an unexpected value: '{}'.".format(self._dtype))
        if self._shape is not None:
            # Every dimension must have been wrapped in a DimensionObject.
            for i, a in enumerate(self._shape):
                if not isinstance(a, DimensionObject):
                    raise TypeError(  # pragma: no cover
                        'Dimension {} has a wrong type {}'.format(
                            i, type(a)))
        if use_n1:
            # Give an unknown first dimension the conventional name 'n'.
            sh = self._shape[0] if self._shape else None
            if isinstance(sh, DimensionObject) and sh._dim is None:
                sh._dim = 'n'

    def reshape(self, shape):
        """
        Creates a new shape, checks the number of elements is the same.
        """
        # NOTE(review): the third positional parameter of ShapeObject is
        # *use_n1*; passing ``getattr(self, '_dim', None)`` here looks
        # suspicious -- confirm intent.
        sh = ShapeObject(shape, self.dtype, getattr(self, '_dim', None),
                         self.name)
        p1 = self.product().evaluate()
        p2 = sh.product().evaluate()
        # Element counts can only be compared when they evaluate to ints.
        if isinstance(p1, int) and p1 != p2:
            raise ValueError("Shape {} cannot be reshaped into {} "
                             "(p1={}, p2={}).".format(sh, shape, p1, p2))
        return sh

    def copy(self, dtype=None, name=None):
        """
        A copy not a deepcopy.

        @param dtype None or a value to rewrite the type.
        @param name overwrites the name
        @return @see cl ShapeObject
        """
        if self._shape is None:
            return ShapeObject(None, dtype=self.dtype, name=name or self.name)
        return ShapeObject(self._shape.copy(),
                           self.dtype if dtype is None else dtype,
                           name=name or self.name)

    def __getitem__(self, index):
        """
        Extracts a specific dimension.
        """
        if self._shape is None:
            return None
        # Out-of-range integer indices behave like implicit dimensions of 1.
        if isinstance(index, int) and index >= len(self._shape):
            return 1
        return self._shape[index]

    def __setitem__(self, index, value):
        """
        Changes a specific dimension.
        """
        if self._shape is None:
            return
        # Grow the shape with dimensions of 1 until *index* exists.
        while len(self._shape) <= index:
            self._shape.append(DimensionObject(1))
        self._shape[index] = value

    @property
    def shape(self):
        """
        Returns the stored shape.
        """
        if self._shape is None:
            return None
        return tuple(self._shape)

    def __len__(self):
        """
        Returns the number of dimensions.
        """
        if self._shape is None:
            return 0
        return len(self._shape)

    @property
    def dtype(self):
        """
        Returns the stored *dtype*.
        """
        return self._dtype

    def reduce(self, axis=1, keepdims=False, dtype=None):
        """
        Reduces the matrix. Removes one dimension.

        @param axis axis
        @param keepdims keep dimensions, replaces the removed
                        dimension by 1
        @param dtype if not None, changes the type
        @return new dimension
        """
        if self._shape is None:
            # Unknown shape stays unknown; only the name is tagged.
            if self.name is None:
                return self.copy()
            return self.copy(name="{}-RD".format(self.name))
        if 0 <= axis < len(self._shape):
            cp = self._shape.copy()
            if keepdims:
                cp[axis] = DimensionObject(1)
            else:
                del cp[axis]
            return ShapeObject(cp, self._dtype if dtype is None else dtype,
                               name="{}-RD".format(self.name))
        raise IndexError("axis={} is wrong, shape is {}-tuple and equal to "
                         "{}".format(axis, len(self._shape), self))

    def __repr__(self):
        """
        usual
        """
        st = str(self.dtype)
        if "'" in st:
            # Extract 'numpy.float32' from "<class 'numpy.float32'>".
            st = st.split("'")[1]
        if self.shape is None:
            if self.name is None:
                return "ShapeObject(None, dtype={})".format(st)
            return "ShapeObject(None, dtype={}, name='{}')".format(st, self.name)
        st_shape = []
        for s in self.shape:
            if isinstance(s._dim, (int, str)):
                st_shape.append(str(s._dim))
            else:
                st_shape.append(repr(s))
        if len(st_shape) == 1:
            # Keep the trailing comma of a 1-tuple: "(5, )".
            st_shape.append('')
        st_shape = '({})'.format(", ".join(st_shape))
        if self.name is None:
            return "ShapeObject({}, dtype={})".format(st_shape, st)
        return "ShapeObject({}, dtype={}, name='{}')".format(
            st_shape, st, self.name)

    def __iter__(self):
        """
        Iterators over dimensions.
        """
        if self._shape is not None:
            for d in self._shape:
                yield d

    def __gt__(self, a):
        """
        Compares shapes. Operator ``>``.
        """
        if isinstance(a, tuple):
            a = ShapeObject(a, dtype=self._dtype)
        # An unknown shape compares greater than a known one.
        if self._shape is None and a._shape is None:
            return False
        if self._shape is None:
            return True
        if a._shape is None:
            return False
        # More dimensions wins, then lexicographic comparison.
        if len(self) > len(a):
            return True
        if len(self) < len(a):
            return False
        for d1, d2 in zip(self, a):
            if d1 > d2:
                return True
            if d1 < d2:
                return False
        return False

    def __eq__(self, a):
        """
        Tests equality between two shapes.
        """
        if isinstance(a, tuple):
            a = ShapeObject(a, dtype=self._dtype)
        if self._shape is None and a._shape is None:
            return True
        if self._shape is None or a._shape is None:
            return False
        if len(self) != len(a):
            return False
        for d1, d2 in zip(self, a):
            if d1 == d2:
                continue
            return False
        return True

    def evaluate(self, **kwargs):
        """
        Evaluates the shape.
        """
        vs = []
        for v in self:
            d = v.evaluate(**kwargs)
            vs.append(d)
        return ShapeObject(tuple(vs), self._dtype, name="{}-EV".format(self.name))

    def to_string(self, use_x=False):
        """
        Converts shapes into a string.
        """
        shapes = []
        for a in self._shape:
            shapes.append(a.to_string(use_x=use_x))
        return '({})'.format(', '.join(shapes))

    def product(self):
        """
        Multiplies all the dimensions.

        @return @see cl DimensionObject
        """
        cl = self[0]
        for i in range(1, len(self)):
            cl = cl * self[i]
        return cl

    def append(self, dim):
        """
        Appends a dimension.
        """
        # Silently does nothing when the shape is unknown.
        if self._shape is None:
            return
        if isinstance(dim, DimensionObject):
            self._shape.append(dim)
        else:
            self._shape.append(DimensionObject(dim))

    def insert(self, dim, pos=0):
        """
        Inserts a dimension at position *pos*.
        """
        # Silently does nothing when the shape is unknown.
        if self._shape is None:
            return
        if isinstance(dim, DimensionObject):
            self._shape.insert(pos, dim)
        else:
            self._shape.insert(pos, DimensionObject(dim))

    def squeeze(self, axis):
        """
        Removes one dimension.
        """
        cp = self.copy(name='{}-SZ'.format(self.name))
        cp.drop_axis(axis)
        return cp

    def unsqueeze(self, axes):
        """
        Adds dimensions.
        """
        cp = self
        name = '{}-USZ'.format(self.name)
        # Insert from the highest axis down so indices stay valid.
        for ax in axes[::-1]:
            cp = cp.copy(name=name)
            cp.insert(ax, 1)
        return cp

    def transpose(self, perm):
        """
        Permutes the dimensions following *perm*.
        """
        if self.shape is None:
            return self.copy(name='{}-TR'.format(self.name))
        cp = ShapeObject([None for p in perm], dtype=self.dtype,
                         name="{}-TR".format(self.name))
        for i, p in enumerate(perm):
            if p >= len(self):
                # This should not happen.
                cp._shape[i] = None
            else:
                cp._shape[i] = self._shape[p]
        return cp

    def drop_axis(self, axis):
        """
        Drops an axis.
        """
        if self._shape is not None:
            if isinstance(axis, (tuple, list)):
                # Delete from the back so earlier indices stay valid.
                for i in sorted(axis, reverse=True):
                    del self._shape[i]
            else:
                del self._shape[axis]

    def broadcast(self, a):
        """
        Computes the shape after a broadcast.
        """
        if a is None:
            raise ValueError("a should not be None")  # pragma: no cover
        # An unknown operand shape makes the result unknown.
        if a._shape is None:
            return a.copy()
        if self._shape is None:
            return self.copy()
        mx = max(len(self._shape), len(a._shape))
        res = []
        # Dimension-wise max where both shapes have a dimension,
        # otherwise the existing dimension is kept.
        for i in range(mx):
            if i < len(self._shape):
                if i < len(a._shape):
                    res.append(ShapeOperatorMax(self[i], a[i]))
                else:
                    res.append(self[i])
            else:
                res.append(a[i])
        return ShapeObject(tuple(res), self.dtype, False,
                           name="broadcast-{}-{}".format(self.name, a.name))

    @staticmethod
    def _infer_merged_type(*args):
        # Merges the dtypes of several shapes: identical dtypes are kept,
        # any mix involving a numeric dtype is widened to float64.
        tys = set(a.dtype for a in args)
        if len(tys) == 1:
            return list(tys)[0]
        if any(tys & {numpy.float64, numpy.int64,
                      numpy.float32, numpy.int32}):
            return numpy.float64
        raise RuntimeError(  # pragma: no cover
            "Unable to infer types based on {}.".format(tys))

    def concat_columns(self, axis, *shapes):
        """
        Concatenates columns from *shapes* to this one
        along one axis.
        """
        args = [self] + list(shapes)
        dtype = self._infer_merged_type(*args)
        dim_axis = args[0][axis]
        if dim_axis is None:
            # Unknown dimension on the concatenation axis:
            # the result is unknown.
            return ShapeObject(None, dtype=dtype)
        for a in shapes:
            if a[axis] is None:
                return ShapeObject(None, dtype=dtype)
            dim_axis = dim_axis + a[axis]
        a0 = args[0].copy(dtype=dtype)
        a0[axis] = dim_axis
        return a0

    @staticmethod
    def einsum_shape(equation, *inputs):
        """
        Computes :epkg:`einsum` shapes.
        Not the most efficient one as it creates variables
        of the given shapes.
        """
        for inp in inputs:
            if inp.shape is None:
                return inp
        # NOTE(review): the equation is split with bytes separators,
        # so callers are expected to pass bytes (e.g. b'ij,jk->ik')
        # -- confirm against callers.
        inp, out = [_.strip() for _ in equation.split(b"->")]
        inps = [_.strip() for _ in inp.split(b',')]
        if len(inputs) != len(inps):
            raise RuntimeError(  # pragma: no cover
                "Input mismatch between '{}' and {}.".format(equation, inps))
        # Map every index letter to its dimension, checking consistency.
        shs = {}
        for a, b in zip(inps, inputs):
            if len(a) != len(b):
                raise RuntimeError(  # pragma: no cover
                    "Input mismatch '{}' (in '{}') and {}.".format(a, equation, b))
            for c, s in zip(a, b):
                if c not in shs:
                    shs[c] = s
                elif shs[c] != s:
                    raise RuntimeError(  # pragma: no cover
                        "Equation '{}'. Dimension mismatch '{}' != {}.".format(
                            equation, s, shs[c]))
        new_shape = [shs[i] for i in out]
        return ShapeObject(new_shape, dtype=ShapeObject._infer_merged_type(*inputs))

    @staticmethod
    def gather_shape(input, indices, axis):
        """
        Computes Gather shapes.
        """
        # NOTE(review): *input* shadows the builtin (kept for API
        # compatibility) and ``len`` never returns None, so the two
        # ``is None`` tests below are effectively dead code.
        input_rank = len(input)
        if input_rank is None:
            return ShapeObject(None, dtype=input._dtype)
        index_rank = len(indices)
        if index_rank is None:
            return ShapeObject(None, dtype=input._dtype)
        if axis < 0:
            axis = input_rank + axis
        # Result: input[:axis] + indices shape + input[axis+1:].
        shape = []
        for i in range(axis):
            shape.append(input[i])
        for dim in indices:
            shape.append(dim)
        for i in range(axis + 1, input_rank):
            shape.append(input[i])
        return ShapeObject(shape, dtype=input._dtype)
class ShapeObjectFct(ShapeObject):
    """
    Computes a shape depending on a user defined function.
    See @see cl Conv for an example.
    """

    def __init__(self, fct, *shapes, dtype=None, name=None):
        """
        @param fct function
        @param shapes shapes sent to fct
        @param dtype dtype
        @param name optional, for debugging purposes
        """
        # The shape itself is unknown until *evaluate* is called.
        ShapeObject.__init__(self, None, dtype=dtype, name=name)
        self._fct = fct
        self._shapes = shapes

    def evaluate(self, **kwargs):
        """
        Evaluates the shape.
        """
        evaluated = [sh.evaluate(**kwargs) for sh in self._shapes]
        res = self._fct(*evaluated)
        if self.name is not None:
            res.name = self.name
        return res
|
{"hexsha": "d223d40bfc40a1d73ae541b43f2d86c830d70e77", "size": 29965, "ext": "py", "lang": "Python", "max_stars_repo_path": "mlprodict/onnxrt/shape_object.py", "max_stars_repo_name": "xadupre/mlprodict", "max_stars_repo_head_hexsha": "f82c8a26a60104948c67849b1c4af95ca812c153", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-18T03:49:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-18T03:49:53.000Z", "max_issues_repo_path": "mlprodict/onnxrt/shape_object.py", "max_issues_repo_name": "xadupre/mlprodict", "max_issues_repo_head_hexsha": "f82c8a26a60104948c67849b1c4af95ca812c153", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mlprodict/onnxrt/shape_object.py", "max_forks_repo_name": "xadupre/mlprodict", "max_forks_repo_head_hexsha": "f82c8a26a60104948c67849b1c4af95ca812c153", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0138888889, "max_line_length": 94, "alphanum_fraction": 0.5168696813, "include": true, "reason": "import numpy", "num_tokens": 6788}
|
\section{Efficient Implementation on ARM8}
\label{sec:arm}
We show that our semantics compiles efficiently to \armeight{}
\cite{deacon-git,DBLP:journals/pacmpl/PulteFDFSS18}. With one exception, we use the translation
strategy of \citet{DBLP:journals/pacmpl/PodkopaevLV19}, which was extended to
SC access by \citet[\textsection5]{imm-sc}: Relaxed access is implemented using
\texttt{ldr}/\texttt{str}, non-relaxed reads using \texttt{dmb}.\texttt{sy};\texttt{ldar}, non-relaxed
writes using \texttt{stlr},
acquire and other fences using
\texttt{dmb}.\texttt{ld}/\texttt{dmb}.\texttt{sy}.
We consider the fragment of our language where concurrent composition occurs
only at top level and there are no local declarations of the form
$(\VAR\aLoc\SEMI \aCmd)$. We show that any \emph{consistent} \armeight{}
execution graph for this sublanguage can be considered a top-level execution
of our semantics. The key step is constructing the order for the derived
pomset candidate. We would like to take ${\gtN} = ({\rob} \cup {\reco})^*$,
where $\rob$ is the \armeight{} acyclicity relation, and ${\reco}$ is the
\armeight{} extended coherence order. But this does not quite work.
The definition is complicated by \armeight's \emph{internal reads}, manifest
in ${\rrfi}$, which relates reads to writes that are fulfilled by the same
thread. \armeight{} drops $\rob$-order \emph{into} an internal read. As
discussed in \textsection\ref{sec:litmus}, however, our semantics drops pomset
order \emph{out of} an internal read. To accommodate this, we drop these
dependencies from the \armeight{} \emph{dependency order before} ($\rdob$)
relation.
The relation ${\rdobi}$ is defined from ${\rdob}$ by restricting the order
into and out of a read that is in the codomain of the $\rrfi$ relation. More
formally, let $\bEv\xdobi\aEv$ when $\bEv\xdob\aEv$ and
$\bEv\notin\fcodom(\rrfi), \aEv \notin\fcodom(\rrfi)$.
Let $\robi$ be defined as for $\rob$, simply replacing $\rdob$ with $\rdobi$.
We remove all relaxed internal reads from the event set.
For pomset order, we then take ${\gtN}=({\robi}\cup{\reco})^*$.
\begin{theorem}
For any consistent \armeight{} execution graph, the constructed candidate
is a top-level memory model pomset.
\end{theorem}
The proof for compilation into \tso\ is very similar, and also requires a
full fence before an acquiring read. The necessary
properties hold for \tso, where $\rob$ is replaced by (the transitive closure
of) the \tso\ propagation relation \citep{alglave}.
It is worth noting that efficient
compilation is not possible for the earlier Flowing and Pop model
\cite{DBLP:conf/popl/FlurGPSSMDS16}, referenced in
\cite[Fig.~4]{DBLP:conf/fm/LahavV16}, which allows the following:
\begin{gather*}
\begin{gathered}
r\GETS x\SEMI x\GETS 1
\PAR
y\GETS x
\PAR
x\GETS y
\\[-1.2ex]
\hbox{\begin{tikzinline}[node distance=1.5em]
\event{a}{\DR{x}{1}}{}
\event{b}{d:\DW{x}{1}}{right=of a}
\wk{a}{b}
\event{c}{\DR{x}{1}}{right=3em of b}
\event{d}{\DW{y}{1}}{right=of c}
\po{c}{d}
\event{e}{\DR{y}{1}}{right=3em of d}
\event{f}{e:\DW{x}{1}}{right=of e}
\po{e}{f}
\rf{b}{c}
\rf{d}{e}
\rf[out=172,in=8]{f}{a}
\end{tikzinline}}
\end{gathered}
\taglabel{MCA3}
\end{gather*}
This type of ``big detour'' \cite{alglave} is outlawed by
\armeight.\footnote{There is either a cycle
$\DR{x}{1}\xpoloc d\xcoe e \xrfex \DR{x}{1}$ % SC-pER-LOC
or % in ob
$d\xrfex \DR{x}{1}\xdata \DW{y}{1}\xrfex \DR{y}{1} \xdata e \xcoe d$.}
\section{Local Data Race Freedom and Sequential Consistency}
\label{sec:sc}
We adapt \citeauthor{Dolan:2018:BDR:3192366.3192421}'s
[\citeyear{Dolan:2018:BDR:3192366.3192421}] notion of \emph{Local Data Race
Freedom (LDRF)} to our setting.
The result requires that locations are properly initialized. We assume a
sufficient condition: that programs have the form
``$\aLoc_1\GETS\aVal_1\SEMI \cdots \aLoc_n\GETS\aVal_n\SEMI\aCmd$'' where
every location mentioned in $\aCmd$ is some $\aLoc_i$.
We make two further restrictions to simplify the exposition. To simplify the
definition of \emph{happens-before}, we ban fences and \RMWs. To simplify
the proof, we assume there are no local declarations of the form
$(\VAR\aLoc\SEMI \aCmd)$.
To state the theorem, we require several technical definitions. The reader
unfamiliar with \citep{Dolan:2018:BDR:3192366.3192421} may prefer to skip to
the examples in the proof sketch, referring back as needed.
\noparagraph{Definitions}
\paragraph{Data Race}
Data races are defined using \emph{program} order $(\rpox)$, not
\emph{pomset} order $(\le)$. %, and thus is stable with respect to augmentation.
In \ref{SB}, for example, $(\DR{x}{0})$ has an $x$-race with $(\DW{x}{1})$,
but not $(\DW{x}{0})$, which is $\rpox$-before it.
It is obvious how to enhance the semantics of prefixing and most other
operators to define $\rpox$. When combining pomsets using the conditional,
the obvious definition may result in cycles, since $\rpox$-ordered reads may
coalesce---see the discussion of \ref{CA} in \textsection\ref{sec:refine}. In
this case we include a separate pomset for each way of breaking these cycles.
Because we ignore the features of
\textsection\ref{sec:variants}, we can adopt the simplest definition of
\emph{synchronizes\hyp{}with}~($\rsw$): Let $\bEv\xsw\aEv$ exactly when
$\bEv$ fulfills $\aEv$, $\bEv$ is a release, $\aEv$ is an acquire, and
$\lnot(\bEv\xpox\aEv)$.
Let ${\rhb}=({\rpox}\cup{\rsw})^+$ be the \emph{happens-before} relation. In
\ref{Pub1}, for example, $(\DW{x}{1})$ happens-before $(\DR{x}{0})$, but this
fails if either $\mRA$ access is relaxed.
Let $L\subseteq\Loc$ be a set of locations. We say that $\bEv$ \emph{has an
$L$-race with} $\aEv$ (notation $\bEv\lrace{L}\aEv$) when at least one is
relaxed, they \emph{conflict}
(Def.~\ref{def:rf}) at
some location in $L$, and they are unordered by $\rhb$: neither $\bEv\xhb\aEv$ nor
$\aEv\xhb\bEv$.
\paragraph{Generators}
We say that $\aPS'$ \emph{generates} $\aPS$ if either
$\aPS$ augments $\aPS'$ or $\aPS$ implies $\aPS'$. For example, the
unordered pomset $(\DR{x}{1})$ $(\DW{y}{1})$ generates the ordered pomset
$(\DR{x}{1})\xpo(\aReg=1\mid\DW{y}{1})$.
We say that $\aPS$ is \emph{generation-minimal} in $\aPSS$ if $\aPS\in\aPSS$ and
there is no $\aPS'\in\aPSS$ with $\aPS'\neq\aPS$ that generates $\aPS$.
Let $\semmin{\aCmd}=\{\aPS\in\sem{\aCmd} \mid \aPS$ is \emph{top-level}
(Def.~\ref{def:top}) and generation-minimal in $\sem{\aCmd}\}$.
\paragraph{Extensions}
We say that $\aPS'$ \emph{$\aCmd$-extends} $\aPS$ if %$\aPS\in\semmin{\aCmd}$,
$\aPS\neq\aPS'\in\semmin{\aCmd}$ and $\aPS$ is a downset of $\aPS'$.
\paragraph{Similarity}
We say that \emph{$\aPS'$ is $\aEv$-similar to $\aPS$} if they differ at most
in (1) pomset order adjacent to $\aEv$ and (2) the value associated with
event $\aEv$, if it is a
read. % We say they are \emph{similar} if they are $\aEv$-similar for some $\aEv$.
Formally: $\Event'=\Event$, $\labelingForm'=\labelingForm$,
${\le'}\restrict{\Event\setminus\{\aEv\}}={\le}\restrict{\Event\setminus\{\aEv\}}$,
if $\aEv$ is not a read then $\labelingAct'=\labelingAct$, and if $\aEv$ is a
read then
$\labelingAct'\restrict{\Event\setminus\{\aEv\}}=\labelingAct\restrict{\Event\setminus\{\aEv\}}$
and $\labelingAct'(\aEv) = \labelingAct(\aEv)[\aVal'/\aVal]$, for some
$\aVal'$, $\aVal$.
\paragraph{Stability}
We say that $\aPS$ is \emph{$L$-stable in $\aCmd$} if
(1) $\aPS\in\semmin{\aCmd}$,
(2) $\aPS$ is $\rpox$-convex (nothing missing in program order),
(3) there is no $\aCmd$-extension of $\aPS$ with a \emph{crossing} $L$-race:
that is, there is no $\bEv\in\Event$, no $\aPS'$ $\aCmd$-extending
$\aPS$, and no $\aEv\in\Event'\setminus\Event$ such that $\bEv\lrace{L}\aEv$.
The empty pomset is $L$-stable.
\paragraph{Sequentiality}
Let ${\pole{L}}={\lt_L}\cup{\rpox}$, where $\lt_L$ is the restriction of $\lt$ to events that access locations in $L$.
We say that $\aPS'$ is \emph{$L$-sequential after $\aPS$} if
$\aPS'$ is $\rpox$-convex and %
$\pole{L}$ is acyclic in $\Event'\setminus\Event$.
\noparagraph{Theorem and Proof Sketch}
\begin{theorem}
Let $\aPS$ be $L$-stable in $\aCmd$. Let $\aPS'$ be a $\aCmd$-extension of
$\aPS$ that is $L$-sequential after $\aPS$. Let $\aPS''$ be a
$\aCmd$-extension of $\aPS'$ that is $\rpox$-convex, such that no subset of
$\Event''$ satisfies these criteria.
Then either (1) $\aPS''$ is $L$-sequential after $\aPS$ or (2) there is
some $\aCmd$-extension $\aPS'''$ of $\aPS'$ and some
$\aEv\in(\Event''\setminus\Event')$ such that (a) $\aPS'''$ is
$\aEv$-similar to $\aPS''$, (b) $\aPS'''$ is $L$-sequential after $\aPS$,
and (c) $\bEv\lrace{L}\aEv$, for some $\bEv\in(\Event''\setminus\Event)$.
\end{theorem}
The theorem provides an inductive characterization of \emph{Sequential
Consistency for Local
Data-Race Freedom (SC-LDRF)}: Any extension of an $L$-stable pomset is either
$L$-sequential, or is $\aEv$-similar to an $L$-sequential extension that
includes a race involving $\aEv$.
\begin{proof}[Proof Sketch]
In order to develop a technique to find $\aPS'''$ from $\aPS''$, we analyze
pomset order in generation-minimal top-level pomsets. First, we note that
$\le_*$ (the transitive reduction of $\le$) can be decomposed into three
disjoint relations. Let ${\rppo}=({\le_*}\cap{\rpox})$ denote
\emph{preserved} program order, as required by prefixing (Def.~\ref{def:prefix}). The other two relations are cross-thread subsets of
$({\le_*}\setminus{\rpox})$, as required by fulfillment (Def.~\ref{def:rf}): $\rrfe$ orders writes before reads, satisfying fulfillment
requirement \ref{rf3}; $\rxw$ orders read and write accesses before writes,
satisfying requirement \ref{rf4}. ({Within a thread, \ref{rf3} and
\ref{rf4} follow from prefixing requirement \ref{5b}, which is included
in ${\rppo}$.})
Using this decomposition, we can show the following.
\begin{lemma}
Suppose $\aPS''\in\semmin{\aCmd}$ has a read $\aEv$ that is maximal in
$({\rppo}\cup{\rrfe})$ and such that every $\rpox$-following read is
also $\le$-following ($\aEv\xpox\bEv$ implies $\aEv\le\bEv$, for every
read $\bEv$). Further, suppose there is an $\aEv$-similar $\aPS'''$
that satisfies the requirements of fulfillment. Then
$\aPS'''\in\semmin{\aCmd}$.
\end{lemma}
The proof of the lemma follows an inductive construction of
$\semmin{\aCmd}$, starting from a large set with little order, and
pruning the set as order is added: We begin with all pomsets generated by
the semantics without imposing the requirements of fulfillment (including
only $\rppo$). We then prune reads which cannot be fulfilled, starting
with those that are minimally ordered. This proof is simplified by
precluding local declarations.
We can prove a similar result for $({\rpox}\cup{\rrfe})$-maximal read
and write accesses.
Turning to the proof of the theorem, if $\aPS''$ is $L$-sequential after
$\aPS$, then the result follows from (1). Otherwise, there must be a
$\pole{L}$ cycle in $\aPS''$ involving all of the actions in
$(\Event''\setminus\Event')$: If there were no such cycle, then $\aPS''$
would be $L$-sequential; if there were elements outside the cycle, then
there would be a subset of $\Event''$ that satisfies these criteria.
If there is a $({\rpox}\cup{\rrfe})$-maximal access, we select one of
these as $\aEv$. If $\aEv$ is a write, we reverse the outgoing order in
$\rxw$; the ability to reverse this order witnesses the race. If $\aEv$
is a read, we switch its fulfilling write to a ``newer'' one, updating
$\rxw$; the ability to switch witnesses the race. For
example, for $\aPS''$ on the left below, we choose the $\aPS'''$ on the
right; $\aEv$ is the read of $x$, which races with $(\DW{x}{1})$. % Program order
\begin{gather*}
x\GETS 0 \SEMI y\GETS 0 \SEMI (x \GETS 1 \SEMI y \GETS 1
\PAR
\IF{y}\THEN \aReg \GETS x \FI)
\\[-.5ex]
\hbox{\begin{tikzinline}[node distance=1.5em and 2em]
\event{wy0}{\DW{y}{0}}{}
\event{wx0}{\DW{x}{0}}{below=of wy0}
\event{wx1}{\DW{x}{1}}{right=3em of wy0}
\event{wy1}{\DW{y}{1}}{right=of wx1}
\event{ry1}{\DR{y}{1}}{below=of wx1}
\event{rx}{\DR{x}{0}}{below=of wy1}
\rf[bend right]{wx0}{rx}
\rf{wy1}{ry1}
\wk[bend left]{wy0}{wy1}
\pox{wx1}{wy1}
\pox{ry1}[below]{rx}
\wk{rx}{wx1}
\node(ix)[left=of wx0]{};
\node(iy)[left=of wy0]{};
\bgoval[yellow!50]{(ix)(iy)}{P}
\bgoval[pink!50]{(wx0)(wy0)}{P'\setminus P}
\bgoval[green!10]{(ry1)(wx1)(rx)(wy1)}{P''\setminus P'}
\pox{wx0}{wy0}
\pox{wy0}{wx1}
\pox{wy0}[below]{ry1}
\end{tikzinline}}
\qquad
\hbox{\begin{tikzinline}[node distance=1.5em and 2em]
\event{wy0}{\DW{y}{0}}{}
\event{wx0}{\DW{x}{0}}{below=of wy0}
\event{wx1}{\DW{x}{1}}{right=3em of wy0}
\event{wy1}{\DW{y}{1}}{right=of wx1}
\event{ry1}{\DR{y}{1}}{below=of wx1}
\event{rx}{\DR{x}{1}}{below=of wy1}
\rf{wx1}{rx}
\rf{wy1}{ry1}
\wk[bend left]{wy0}{wy1}
\pox{wx1}{wy1}
\pox{ry1}[below]{rx}
\wk{wx0}{wx1}
\node(ix)[left=of wx0]{};
\node(iy)[left=of wy0]{};
\bgoval[yellow!50]{(ix)(iy)}{P}
\bgoval[pink!50]{(wx0)(wy0)}{P'\setminus P}
\bgoval[green!10]{(ry1)(wx1)(rx)(wy1)}{P'''\setminus P'}
\pox{wx0}{wy0}
\pox{wy0}{wx1}
\pox{wy0}[below]{ry1}
\end{tikzinline}}
\end{gather*}
It is important that $\aEv$ be $({\rpox}\cup{\rrfe})$-maximal, not just
$({\rppo}\cup{\rrfe})$-maximal. The latter criterion would allow us to
choose $\aEv$ to be the read of $y$, but then there would be no
$\aEv$-similar pomset: if an execution reads $0$ for $y$ then there is no
read of $x$, due to the conditional.
If there is no $({\rpox}\cup{\rrfe})$-maximal access, then all
cross-thread order must be from $\rrfe$. In this case, we select a
$({\rppo}\cup{\rrfe})$-maximal read, switching its fulfilling write to an
``older'' one. As an example, consider the following; once again,
$\aEv$ is the read of $x$, which races with $(\DW{x}{1})$.
\begin{gather*}
x\GETS 0 \SEMI y\GETS 0 \SEMI (\aReg \GETS x \SEMI y \GETS 1
\PAR
\bReg \GETS y \SEMI x \GETS \bReg)
\\[-.5ex]
\hbox{\begin{tikzinline}[node distance=1.5em and 2em]
\event{wx0}{\DW{y}{0}}{}
\event{ry}{\DR{x}{1}}{right=3em of wx0}
\event{wx1}{\DW{y}{1}}{right=of ry}
\event{wy0}{\DW{x}{0}}{below=of wx0}
\event{rx1}{\DR{y}{1}}{right=3em of wy0}
\event{wy1}{\DW{x}{1}}{right=of rx1}
\rf{wx1}{rx1}
\rf{wy1}{ry}
\po{rx1}{wy1}
\pox{ry}{wx1}
\wk[bend left]{wx0}{wx1}
\wk[bend right]{wy0}{wy1}
\node(ix)[left=of wx0]{};
\node(iy)[left=of wy0]{};
\bgoval[yellow!50]{(ix)(iy)}{P}
\bgoval[pink!50]{(wx0)(wy0)}{P'\setminus P}
\bgoval[green!10]{(ry)(wx1)(rx1)(wy1)}{P''\setminus P'}
\pox{wy0}{wx0}
\pox{wx0}{ry}
\pox{wx0}[below]{rx1}
\end{tikzinline}}
\qquad
\hbox{\begin{tikzinline}[node distance=1.5em and 2em]
\event{wx0}{\DW{y}{0}}{}
\event{ry}{\DR{x}{0}}{right=3em of wx0}
\event{wx1}{\DW{y}{1}}{right=of ry}
\event{wy0}{\DW{x}{0}}{below=of wx0}
\event{rx1}{\DR{y}{1}}{right=3em of wy0}
\event{wy1}{\DW{x}{1}}{right=of rx1}
\pox{ry}{wx1}
\wk[bend left]{wx0}{wx1}
\rf{wx1}{rx1}
\rf{wy0}{ry}
\po{rx1}{wy1}
\wk{ry}{wy1}
\node(ix)[left=of wx0]{};
\node(iy)[left=of wy0]{};
\bgoval[yellow!50]{(ix)(iy)}{P}
\bgoval[pink!50]{(wx0)(wy0)}{P'\setminus P}
\bgoval[green!10]{(ry)(wx1)(rx1)(wy1)}{P'''\setminus P'}
\pox{wy0}{wx0}
\pox{wx0}{ry}
\pox{wx0}[below]{rx1}
\end{tikzinline}}
\end{gather*}
This example requires $(\DW{x}{0})$. Proper initialization ensures the
existence of such ``older'' writes.
\end{proof}
\noparagraph{Mixed Races}
The premises of the theorem allow us to avoid the complications caused by ``mixed races'' in
\cite{DBLP:conf/ppopp/DongolJR19}. In the left pomset below, $\aPS''$ is not
an extension of $\aPS'$, since $\aPS'$ is not a downset of $\aPS''$.
When considering this pomset, we must perform the decomposition on the right.
\begin{gather*}
(x\GETS 0 \SEMI x^\mRA \GETS 1)
\PAR
(\aReg\GETS x^\mRA)
\\[-2ex]
\hbox{\begin{tikzinline}[node distance=1.5em and 2em]
\event{wx0}{\DW{x}{0}}{}
\event{wx1}{\DWRel{x}{1}}{right=of wx0}
\event{rx}{\DRAcq{x}{0}}{below=of wx0}
\rf{wx0}{rx}
\pox{wx0}{wx1}
\wk{rx}{wx1}
\node(ix)[left=of wx0]{};
\bgoval[yellow!50]{(ix)}{P}
\bgoval[pink!50]{(wx0)(wx1)}{P'\setminus P}
\bgovalright[green!10]{(rx)}{P''\setminus P'}
\end{tikzinline}}
\qquad
\qquad
\qquad
\hbox{\begin{tikzinline}[node distance=1.5em and 2em]
\event{wx0}{\DW{x}{0}}{}
\event{wx1}{\DWRel{x}{1}}{right=of wx0}
\event{rx}{\DRAcq{x}{0}}{below=of wx0}
\rf{wx0}{rx}
\pox{wx0}{wx1}
\wk{rx}{wx1}
\node(ix)[left=of wx0]{};
\bgoval[yellow!50]{(ix)}{P}
\bgoval[pink!50]{(wx0)(rx)}{P'\setminus P}
\bgoval[green!10]{(wx1)}{P''\setminus P'}
\end{tikzinline}}
\end{gather*}
This affects the inductive order in which we move across pomsets, but does
not affect the set of pomsets that are considered. This simplification is
enabled by denotational reasoning.
\noparagraph{Comparison to Java}
In our language, past races are always resolved at a stable point, as in
\ref{Co3}. As another example, consider the following, which is disallowed
here, but allowed by Java \cite[Ex.~2]{Dolan:2018:BDR:3192366.3192421}. We
include an SC fence here to mimic the behavior of volatiles in the JMM.
\begin{gather*}
\taglabel{past}
\begin{gathered}
(x\GETS 1 \SEMI y^\mRA \GETS 1)
\PAR
(x\GETS 2 \SEMI \FENCE^\mSC \SEMI \IF{y^\mRA}\THEN r\GETS x \SEMI s\GETS x\FI)
\\[-2ex]
\hbox{\begin{tikzinline}[node distance=1.2em]
\event{wx1}{\DW{x}{1}}{}
\event{wy1}{\DWRel{y}{1}}{right=of wx0}
\sync{wx1}{wy1}
\event{wx2}{\DW{x}{2}}{right=3em of wy1}
\event{f}{\DFS{\mSC}}{right=of wx2}
\sync{wx2}{f}
\event{ry1}{\DR[\mRA]{y}{1}}{right=of f}
\sync{f}{ry1}
\event{rx1}{\DR{x}{1}}{right=3em of ry1}
\event{rx2}{\DR{x}{2}}{right=of rx1}
\sync{ry1}{rx1}
\sync[out=15,in=165]{ry1}{rx2}
\rf[out=10,in=170]{wy1}{ry1}
\wk[out=10,in=170]{wx1}{wx2}
\wk[out=-170,in=-10]{rx1}{wx2}
\bgellipsesmaller[yellow!50]{(wy1)(f)}{}
\end{tikzinline}}
\end{gathered}
\end{gather*}
The highlighted events are $L$-stable. The order from $(\DR{x}{1})$ to
$(\DW{x}{2})$ is required by fulfillment, causing the cycle. If the fence is
removed, there would be no order from $(\DW{x}{2})$ to
$(\DRAcq{y}{1})$, the highlighted events would no longer be $L$-stable, and
the execution would be allowed. This more relaxed notion of ``past'' is not
expressible using \citeauthor{Dolan:2018:BDR:3192366.3192421}'s
synchronization primitives.
The notion of ``future'' is also richer here. Consider \cite[Ex.~3]{Dolan:2018:BDR:3192366.3192421}:
\begin{gather*}
\taglabel{future}
\begin{gathered}
(r\GETS 1 \SEMI \REF{r}\GETS 42\SEMI s\GETS \REF{r}\SEMI x^\mRA\GETS r)
\PAR
(r\GETS x \SEMI \REF{r}\GETS 7)
\\[-1ex]
\hbox{\begin{tikzinline}[node distance=1.2em]
\event{a1}{\DW{\REF{1}}{42}}{}
\event{a2}{\DR{\REF{1}}{7}}{right=of a1}
\wk{a1}{a2}
\event{a4}{\DWRel{x}{1}}{right=of a2}
\sync{a2}{a4}
\event{b1}{\DR{x}{1}}{right=3em of a4}
\event{b2}{\DW{\REF{1}}{7}}{right=of b1}
\po{b1}{b2}
\rf{a4}{b1}
\wk[out=170,in=10]{b2}{a2}
\end{tikzinline}}
\end{gathered}
\end{gather*}
There is no interesting stable point here. The execution is disallowed
because of a read from the causal future. If we changed $x^\mRA$ to
$x^\mRLX$, then there would be no order from $(\DR{\REF{1}}{7})$ to
$(\DW[\mRLX]{x}{1})$, and the execution would be allowed. The distinction
between ``causal future'' and ``temporal future'' is not expressible in
\citeauthor{Dolan:2018:BDR:3192366.3192421}'s operational semantics.
Our definition of $L$-sequentiality does not quite correspond to SC
executions, since actions may be elided by read/write elimination
(\textsection\ref{sec:refine}). However, for any properly initialized
$L$-sequential pomset that uses elimination, there is a larger $L$-sequential
pomset that does not use elimination. This can be shown inductively---in the
inductive step, writes that are introduced can be ignored by existing reads,
and reads that are introduced can be fulfilled, for some value, by some
preceding write.
|
{"hexsha": "52fc31ffdc222e480786eb6a26f666bfe524b9c7", "size": 21155, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "corrigendum/short.tex", "max_stars_repo_name": "chicago-relaxed-memory/memory-model", "max_stars_repo_head_hexsha": "fd606fdb6a04685d9bb0bee61a5641e4623b10be", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-08-13T02:36:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-25T12:46:13.000Z", "max_issues_repo_path": "corrigendum/short.tex", "max_issues_repo_name": "chicago-relaxed-memory/memory-model", "max_issues_repo_head_hexsha": "fd606fdb6a04685d9bb0bee61a5641e4623b10be", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "corrigendum/short.tex", "max_forks_repo_name": "chicago-relaxed-memory/memory-model", "max_forks_repo_head_hexsha": "fd606fdb6a04685d9bb0bee61a5641e4623b10be", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.1649269311, "max_line_length": 137, "alphanum_fraction": 0.6432994564, "num_tokens": 7596}
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
plt.rcParams['font.sans-serif'] = 'SimHei'
plt.rcParams['axes.unicode_minus'] = False
def make_a_figure():
    """Plot y=x and y=x^2 on one figure, annotate the point (2, 4),
    save the figure to pic/line.png and display it.
    """
    data = np.arange(10)
    p = plt.figure(figsize=(8, 6))
    plt.title('line')
    plt.xlabel('X')
    plt.ylabel('Y')  # bug fix: was plt.xlabel('Y'), which overwrote the x label
    plt.xlim(0, 5)
    plt.ylim(0, 100)
    plt.xticks(range(0, 12, 2))
    plt.yticks(range(0, 120, 20))
    plt.plot(data, data)  # y=x
    plt.plot(data, data ** 2)  # y=x*x
    plt.plot([2], [4], 'o')
    plt.annotate('(2,4)', xy=(2, 4), xytext=(2, 4))
    # os.makedirs with exist_ok replaces the exists()/mkdir pair (race-free);
    # the original also repeated the save/show sequence twice — once is enough.
    os.makedirs('pic', exist_ok=True)
    plt.savefig('pic/line.png')
    plt.show()
def make_sub_figure():
    """Figure with two stacked subplots: power curves (top) and sin/cos (bottom)."""
    data = np.arange(0, np.pi * 2, 0.01)
    p = plt.figure(figsize=(8, 6))  # canvas size
    sub1 = p.add_subplot(2, 1, 1)
    plt.title('line')
    plt.xlabel('X')
    plt.ylabel('Y')  # bug fix: was plt.xlabel('Y')
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.xticks(np.arange(0, 1.2, 0.2))
    plt.yticks(np.arange(0, 1.2, 0.2))
    plt.plot(data, data ** 2)
    plt.plot(data, data ** 4)
    plt.legend(['y=x^2', 'y=x^4'])
    sub2 = p.add_subplot(2, 1, 2)  # bug fix: was rebinding sub1
    plt.title('sin/cos')
    plt.xlabel('rad')
    plt.ylabel('value')  # bug fix: was plt.xlabel('value')
    plt.xlim(0, np.pi * 2)
    plt.ylim(-1, 1)
    plt.xticks(np.arange(0, np.pi * 2.5, np.pi * 0.5))
    plt.yticks(np.arange(-1, 1.5, 0.5))
    plt.plot(data, np.sin(data))
    plt.plot(data, np.cos(data))
    plt.legend(['sin', 'cos'])
    plt.show()
def make_a_bar():
    """Vertical and horizontal bar charts of random per-day totals."""
    week = np.array(['周一', '周二', '周三', '周四', '周五', '周六', '周七'])
    total = np.random.randint(1000, 5000, size=7)
    # bug fix: 7 bars need 7 RGB rows; the original built only 5
    # (np.random.rand(15).reshape(5, 3)), forcing matplotlib to recycle colors.
    color = np.random.rand(21).reshape(7, 3)
    p = plt.figure(figsize=(8, 6))
    sub1 = p.add_subplot(2, 1, 1)
    plt.bar(week, total, color=color)
    sub2 = p.add_subplot(2, 1, 2)
    plt.barh(week, total, color=color)
    plt.show()
def make_a_hist():
    """Overlaid histograms of three random samples using custom bin edges."""
    sample_sizes = [3000, 4000, 5000]
    samples = [np.random.randint(0, size, size) for size in sample_sizes]
    bin_edges = [0, 100, 500, 1000, 2000, 3000, 4000, 5000]
    plt.hist(samples, bins=bin_edges, label=['3k', '4k', '5k'])
    plt.legend()
    plt.show()
def make_a_pie():
    """Pie chart of pet counts; matplotlib normalizes raw counts itself."""
    counts = np.array([6, 1, 2])
    pets = ['Dog', 'Cat', 'Pig']
    pie_style = dict(
        autopct='%1.2f%%',
        colors=['red', 'yellow', 'green'],
        labeldistance=1.2,
        pctdistance=0.5,
        explode=[0.1, 0.1, 0.1],
        shadow=True,
        startangle=90,
    )
    plt.pie(counts, labels=pets, **pie_style)
    plt.legend()
    plt.show()
def make_a_scatter():
    """Scatter 1000 random points with random per-point color and marker size."""
    xs = np.random.randn(1000)
    ys = np.random.randn(1000)
    rgb = np.random.rand(3000).reshape(1000, 3)
    sizes = np.random.randint(0, 100, 1000)  # marker sizes
    plt.scatter(xs, ys, color=rgb, s=sizes, alpha=0.5)
    plt.show()
def make_a_box():
    """Notched box plot of random head counts, one box per year column."""
    counts = np.random.randint(90, 150, 15).reshape(5, 3)
    years = ['2018', '2019', '2020']
    plt.title('1-5年级总人口')
    plt.boxplot(counts, notch=True, labels=years, meanline=True)
    plt.show()
if __name__ == '__main__':
    # Demo entry point: run one example at a time; uncomment the one you want.
    make_a_figure()
    # make_sub_figure()
    # make_a_bar()
    # make_a_hist()
    # make_a_pie()
    # make_a_scatter()
    # make_a_box()
|
{"hexsha": "7b6bf9498441da99c2443512ac8f8a33f5c41cdd", "size": 3091, "ext": "py", "lang": "Python", "max_stars_repo_path": "11_data_science/matplotlib/test_pyplot.py", "max_stars_repo_name": "edgardeng/python-advance-interview", "max_stars_repo_head_hexsha": "59fd7bee8e871acdc7fdfecf2a110db840c47ebb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-06T13:03:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T13:03:56.000Z", "max_issues_repo_path": "11_data_science/matplotlib/test_pyplot.py", "max_issues_repo_name": "edgardeng/python-advance-interview", "max_issues_repo_head_hexsha": "59fd7bee8e871acdc7fdfecf2a110db840c47ebb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "11_data_science/matplotlib/test_pyplot.py", "max_forks_repo_name": "edgardeng/python-advance-interview", "max_forks_repo_head_hexsha": "59fd7bee8e871acdc7fdfecf2a110db840c47ebb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3360655738, "max_line_length": 83, "alphanum_fraction": 0.5981882886, "include": true, "reason": "import numpy", "num_tokens": 1175}
|
//
// Created by David Oberacker on 2019-07-31.
//
#include <map>
#include <queue>
#include <string>
#include <vector>

#include <boost/dynamic_bitset.hpp>

#include "common/common.hpp"
// One node of the decoded Huffman tree, stored in a flat array.
struct Node
{
    uint8_t Symbol;  // decoded byte value; meaningful only when IsLeaf is true
    bool IsLeaf;     // true for leaf nodes that carry a symbol
    int64_t Left;    // array index of the left child, or -1 for leaves
    int64_t Right;   // array index of the right child, or -1 for leaves
};
uint8_t decodeByte(boost::dynamic_bitset<> data, int64_t* offset) {
uint8_t value = 0;
for (int i = 7; i >= 0; --i)
{
uint8_t v = data[(*offset)++] ? 1 : 0;
value |= (uint8_t)(v << (unsigned int) i);
}
return value;
};
// Recursively rebuild one Huffman tree node from the serialized tree preamble.
// Layout per node: 1 flag bit (leaf?) followed, for leaves, by 8 symbol bits.
// `tree` is a flat array filled in pre-order; *index is the next free slot and
// *offset the current bit position in `data`.  Returns this node's array index.
// NOTE(review): *index is narrowed into uint32_t and there is no bounds check
// against the 511-entry array allocated by the caller — assumes well-formed
// input; confirm upstream validation.
int decodeNode(const boost::dynamic_bitset<>& data, int64_t* offset, Node* tree, int64_t* index)
{
    uint32_t current = (*index);
    (*index)++;
    bool isLeaf = data[(*offset)++];
    if (isLeaf)
    {
        // Leaf: the next 8 bits are the symbol; children are marked -1.
        uint8_t value = decodeByte(data, offset);
        tree[current].Left = -1;
        tree[current].Right = -1;
        tree[current].IsLeaf = true;
        tree[current].Symbol = value;
    }
    else
    {
        // Internal node: decode left subtree, then right subtree (pre-order).
        tree[current].Left = decodeNode(data, offset, tree, index);
        tree[current].Right = decodeNode(data, offset, tree, index);
        tree[current].IsLeaf = false;
    }
    return current;
}
// Expand `data_size` bytes into individual bits, MSB first: byte i occupies
// bit positions [i*8, i*8 + 8) of *bit_map (which must already be that large).
void convertBytesToBits(const char* data, uint32_t data_size, boost::dynamic_bitset<>* bit_map)
{
    for (uint32_t byte_idx = 0; byte_idx < data_size; ++byte_idx)
    {
        const unsigned char byte = static_cast<unsigned char>(data[byte_idx]);
        for (int bit = 0; bit < 8; ++bit)
        {
            // bit 0 of the output window corresponds to the byte's MSB.
            (*bit_map)[byte_idx * 8u + bit] = ((byte >> (7 - bit)) & 1u) != 0;
        }
    }
}
/*
 * Decode a Huffman-compressed buffer: rebuild the tree from the bitstream
 * preamble, then walk it once per output byte (0-bit -> left, 1-bit -> right).
 *
 * Fix: replace raw new/delete with RAII containers — the original leaked both
 * the bitset and the node array if any decoding step threw (the function is
 * declared noexcept(false)).
 */
bool BORDERLANDS_COMMON_API
D4v3::Borderlands::Common::Huffman::decode(const char *input_array, uint32_t input_size, char *output_array,
                                           int32_t output_size) noexcept(false) {

    boost::dynamic_bitset<> bitArray(static_cast<boost::dynamic_bitset<>::size_type>(input_size) * 8);
    convertBytesToBits(input_array, input_size, &bitArray);

    // A Huffman tree over <= 256 symbols has at most 2*256 - 1 = 511 nodes.
    std::vector<Node> tree(511);
    int64_t index = 0;
    int64_t offset = 0;
    decodeNode(bitArray, &offset, tree.data(), &index);

    for (int32_t produced = 0; produced < output_size; ++produced)
    {
        Node branch = tree[0];
        while (!branch.IsLeaf)
        {
            branch = tree[bitArray[offset++] ? branch.Right : branch.Left];
        }
        output_array[produced] = static_cast<char>(branch.Symbol);
    }
    return true;
}
|
{"hexsha": "68a7f4cdf45bf66a465c7d4210dde3b9320bea23", "size": 2302, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/common/decoder.cpp", "max_stars_repo_name": "Oberacda/BorderlandsSaveEditor", "max_stars_repo_head_hexsha": "b959dc2c872f2a2ed4cc516c644b58f1f4425925", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/common/decoder.cpp", "max_issues_repo_name": "Oberacda/BorderlandsSaveEditor", "max_issues_repo_head_hexsha": "b959dc2c872f2a2ed4cc516c644b58f1f4425925", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/common/decoder.cpp", "max_forks_repo_name": "Oberacda/BorderlandsSaveEditor", "max_forks_repo_head_hexsha": "b959dc2c872f2a2ed4cc516c644b58f1f4425925", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3495145631, "max_line_length": 108, "alphanum_fraction": 0.5668983493, "num_tokens": 650}
|
import tensorflow as tf
import numpy as np
import json
from layers import Dense
class SimpleRNN(object):
    """Binary classifier: an RNN cell (or the raw input) followed by dense layers.

    Builds a TF1-style static graph at construction time; call
    ``initialize(sess)`` before ``predict``/``train_step``/``get_loss``.
    """

    def __init__(self, rnn_cell, overstructure, seq_len=5, feature_len=28,
                 learning_rate=0.001, use_rnn_cell=True):
        """
        :param rnn_cell: a tf.nn.rnn_cell-compatible cell.
        :param overstructure: list of callable layers applied after the RNN.
        :param seq_len: time steps per sample.
        :param feature_len: features per time step.
        :param learning_rate: initial Adam learning rate.
        :param use_rnn_cell: if False, feed the raw input to the overstructure.
        """
        self._rnn_cell = rnn_cell
        self._overstructure = overstructure
        self.learning_rate = learning_rate
        self._seq_len = seq_len
        self._feature_len = feature_len
        self._use_rnn_cell = use_rnn_cell
        self._build_ph()
        self._build_net()
        self._build_output()
        self._build_loss()
        self._build_optimizer()
        self.init = tf.global_variables_initializer()

    def _build_ph(self):
        # Placeholders: input sequences, binary targets, per-sample weights.
        self.input = tf.placeholder(tf.float32, [None, self._seq_len, self._feature_len])
        self.target = tf.placeholder(tf.float32, [None, 1])
        self.weigth = tf.placeholder(tf.float32, [None, 1])  # (sic) name kept for compatibility
        self.learning_rate_ph = tf.placeholder(tf.float32, [])

    def _build_net(self):
        if self._use_rnn_cell:
            out, _ = tf.nn.dynamic_rnn(self._rnn_cell, self.input,
                                       time_major=False, dtype=tf.float32)
            # out is BxTxC; keep only the last time step.
            out = out[:, -1]
        else:
            # Bug fix: the original assigned ``out = self.input`` unconditionally
            # after the if-block, silently discarding the RNN output whenever
            # use_rnn_cell was True.
            out = self.input
        for i, layer in enumerate(self._overstructure):
            out = layer(out, "layer_{}".format(i))
        self._last_layer = Dense(1)
        self.logit = self._last_layer(out, "out")

    def _build_output(self):
        self.out = tf.nn.sigmoid(self.logit)

    def _build_loss(self):
        # Weighted sigmoid cross-entropy; track its median and 0.5-threshold accuracy.
        loss = self.weigth * tf.nn.sigmoid_cross_entropy_with_logits(
            labels=self.target, logits=self.logit)
        self.loss = tf.reduce_sum(loss)
        self.loss_median = tf.contrib.distributions.percentile(loss, 50)
        self.accuracy = tf.reduce_mean(
            tf.cast(tf.equal(tf.round(self.out), self.target), dtype=tf.float32))

    def _build_optimizer(self):
        self.global_step = tf.Variable(0, trainable=False)
        # Re-bind learning_rate as a variable so it can be changed at run time.
        self.learning_rate = tf.Variable(self.learning_rate, trainable=False)
        self.assign_LR = self.learning_rate.assign(self.learning_rate_ph)
        self.opt = tf.train.AdamOptimizer(self.learning_rate).minimize(
            self.loss, global_step=self.global_step)

    def initialize(self, sess):
        """Bind a session and run variable initialization."""
        self.sess = sess
        self.sess.run(self.init)

    def predict(self, X):
        """Return sigmoid probabilities for the input batch X."""
        return self.sess.run(self.out, feed_dict={
            self.input: np.reshape(X, (-1, self._seq_len, self._feature_len))
        })

    def train_step(self, X, Y, W=None):
        """Run one optimizer step; W defaults to uniform sample weights."""
        if W is None:
            W = np.ones_like(Y)
        self.sess.run(self.opt, feed_dict={
            self.input: np.reshape(X, (-1, self._seq_len, self._feature_len)),
            self.target: np.reshape(Y, (-1, 1)),
            self.weigth: np.reshape(W, (-1, 1)),
        })

    def get_loss(self, X, Y, W=None):
        """Return (accuracy, summed loss, median loss) on the given batch."""
        if W is None:
            W = np.ones_like(Y)
        return self.sess.run([self.accuracy, self.loss, self.loss_median], feed_dict={
            self.input: np.reshape(X, (-1, self._seq_len, self._feature_len)),
            self.target: np.reshape(Y, (-1, 1)),
            self.weigth: np.reshape(W, (-1, 1)),
        })

    def set_learning_rate(self, lr):
        self.sess.run(self.assign_LR, feed_dict={self.learning_rate_ph: lr})

    def get_learning_rate(self):
        return self.sess.run(self.learning_rate)

    def get_global_step(self):
        return self.sess.run(self.global_step)

    def to_json(self, filename):
        """Serialize all layer weights (RNN cell + dense layers) to a JSON file."""
        print('Dumping network to file ' + filename)
        res = {"layer0": self._rnn_cell.to_json(self.sess)}
        for i, layer in enumerate(self._overstructure + [self._last_layer]):
            layer_name = "layer" + str(i + 1)
            res[layer_name] = layer.to_json(self.sess)
            res[layer_name]["activation"] = 'S'
        res["parameters"] = {"use_abs": True, "is_rnn": True, "use_last_rnn_out": True}
        with open(filename, 'w') as f:
            json.dump(res, f)
|
{"hexsha": "2be597f5336bbc2877204bd1284387f0792203fa", "size": 3508, "ext": "py", "lang": "Python", "max_stars_repo_path": "rnn_net.py", "max_stars_repo_name": "Rufaim/Filtering-Clouds", "max_stars_repo_head_hexsha": "5703884a55f449ed737a3350d5276e29a69372f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rnn_net.py", "max_issues_repo_name": "Rufaim/Filtering-Clouds", "max_issues_repo_head_hexsha": "5703884a55f449ed737a3350d5276e29a69372f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rnn_net.py", "max_forks_repo_name": "Rufaim/Filtering-Clouds", "max_forks_repo_head_hexsha": "5703884a55f449ed737a3350d5276e29a69372f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7326732673, "max_line_length": 106, "alphanum_fraction": 0.7223489168, "include": true, "reason": "import numpy", "num_tokens": 948}
|
using FastMarching
using Images
using FileIO
# Load the bundled example maze image and return it as a Float64 array.
# Bug fix: the original used `img` before it was assigned and then discarded
# the Float64 conversion (the load result was the implicit return value).
function maze()
    img = load(joinpath(Pkg.dir("FastMarching"),"examples/images/maze.png"))
    Float64.(channelview(img))
end
maze()
|
{"hexsha": "3ed961c19d2dd45caf7c8c3deeb7fe0ba00a4123", "size": 178, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/maze.jl", "max_stars_repo_name": "jgoldfar/FastMarching.jl", "max_stars_repo_head_hexsha": "ecd9bbb5b5b1120ca9e4fb88f36af679017d93c3", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/maze.jl", "max_issues_repo_name": "jgoldfar/FastMarching.jl", "max_issues_repo_head_hexsha": "ecd9bbb5b5b1120ca9e4fb88f36af679017d93c3", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/maze.jl", "max_forks_repo_name": "jgoldfar/FastMarching.jl", "max_forks_repo_head_hexsha": "ecd9bbb5b5b1120ca9e4fb88f36af679017d93c3", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.1818181818, "max_line_length": 74, "alphanum_fraction": 0.7471910112, "num_tokens": 47}
|
import logging
import os
import torch
import torch.nn.functional as F
from functools import partial
from torch import nn, einsum
import collections.abc as container_abcs
import numpy as np
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from timm.models.layers import DropPath, trunc_normal_
# helper methods
from .registry import register_model
# From PyTorch internals
# From PyTorch internals
def _ntuple(n):
    """Return a parser that turns a scalar into an n-tuple; iterables pass through.

    Bug fix: the original called ``tuple(repeat(x, n))``, but this file imports
    ``repeat`` from einops (not itertools), so scalar inputs raised a TypeError.
    Build the tuple directly instead of relying on the shadowed name.
    """
    def parse(x):
        if isinstance(x, container_abcs.Iterable):
            return x
        return tuple(x for _ in range(n))
    return parse


to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
class LayerNorm(nn.LayerNorm):
    """LayerNorm that tolerates fp16 inputs.

    The normalization is computed in float32 for numerical stability and the
    result is cast back to the input's original dtype.
    """

    def forward(self, x: torch.Tensor):
        input_dtype = x.dtype
        normalized = super().forward(x.to(torch.float32))
        return normalized.to(input_dtype)
class QuickGELU(nn.Module):
    """Fast sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return gate * x
class PreNorm(nn.Module):
    """Apply a channels-last norm to a BCHW tensor, then run `fn` on the result."""

    def __init__(self, norm, dim, fn):
        super().__init__()
        self.norm = norm(dim)
        self.fn = fn

    def forward(self, x, *args, **kwargs):
        # Norm layers expect the channel dim last: permute BCHW -> BHWC,
        # normalize, and permute back before invoking the wrapped module.
        normed = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        return self.fn(normed, *args, **kwargs)
class FeedForward(nn.Module):
    """Pointwise (1x1 conv) MLP: expand channels by `mult`, activate, project back."""

    def __init__(self, dim, act_layer, mult=4):
        super().__init__()
        hidden = int(dim * mult)
        self.net = nn.Sequential(
            nn.Conv2d(dim, hidden, 1),
            act_layer(),
            nn.Conv2d(hidden, dim, 1),
        )

    def forward(self, x):
        return self.net(x)
class DepthWiseConv2d(nn.Module):
    """Depthwise-separable conv: per-channel spatial conv, BN, then 1x1 pointwise."""

    def __init__(self, dim_in, dim_out, kernel_size, padding, stride, bias=True):
        super().__init__()
        # Depthwise stage: groups == channels; bias-free because BN follows.
        self.dw = nn.Conv2d(
            dim_in, dim_in,
            kernel_size=kernel_size,
            padding=padding,
            groups=dim_in,
            stride=stride,
            bias=False,
        )
        self.bn = nn.BatchNorm2d(dim_in)
        # Pointwise stage mixes channels and maps dim_in -> dim_out.
        self.pw = nn.Conv2d(dim_in, dim_out, kernel_size=1, bias=bias)

    def forward(self, x):
        return self.pw(self.bn(self.dw(x)))
class Attention(nn.Module):
    """Windowed multi-head self-attention with depthwise-conv QKV projection.

    The BCHW input is padded (bottom/right) to a multiple of ``window_size``,
    split into non-overlapping windows, and attention is computed within each
    window, optionally with a learned relative position bias (Swin-style).

    Bug fix: in the mask branch of ``forward`` the original unpacked
    ``B_, H, N, M = dots.shape``, clobbering the feature-map height ``H`` that
    is later used to crop the padding (``out[:, :, :H, :W]``).
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        num_heads,
        qkv_bias,
        kernel_size,
        padding,
        window_size,
        shift_size,
        rel_pos_embed,
        **kwargs
    ):
        super().__init__()
        self.heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size  # stored; shifting/masking is driven by the caller
        self.scale = dim_out ** -0.5
        self.attend = nn.Softmax(dim=-1)

        # Depthwise-separable conv emits Q, K and V in one pass (3x channels).
        self.qkv = DepthWiseConv2d(
            dim_in, dim_out*3, kernel_size,
            padding=padding, stride=1, bias=qkv_bias
        )
        self.proj_out = nn.Conv2d(dim_out, dim_in, 1)

        self.rel_pos_embed = rel_pos_embed
        if rel_pos_embed:
            self.init_rel_pos_embed(window_size, num_heads)

    def init_rel_pos_embed(self, window_size, num_heads):
        """Create the relative-position bias table and index buffer (Swin-style)."""
        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size)
        coords_w = torch.arange(self.window_size)
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size - 1
        relative_coords[:, :, 0] *= 2 * self.window_size - 1
        rel_pos_idx = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        self.register_buffer("rel_pos_idx", rel_pos_idx)

        # define a parameter table of relative position bias
        self.rel_pos_bias_table = nn.Parameter(
            torch.zeros(
                (2 * window_size - 1) * (2 * window_size - 1),
                num_heads
            )
        )  # 2*Wh-1 * 2*Ww-1, nH
        trunc_normal_(self.rel_pos_bias_table, std=.02)

    def forward(self, x, mask):
        """Windowed attention over a BCHW map; ``mask`` is the per-window
        additive attention mask (or None)."""
        shape = x.shape
        _, _, H, W, h = *shape, self.heads

        # Effective window never exceeds the feature map itself.
        w = min(self.window_size, min(H, W))

        # Pad bottom/right so H and W are multiples of the window size.
        pad_l = pad_t = 0
        pad_r = (w - W % w) % w
        pad_b = (w - H % w) % w
        if pad_r > 0 or pad_b > 0:
            x = F.pad(x, (pad_l, pad_r, pad_t, pad_b))
            _, _, Hp, Wp = x.shape
            s_x, s_y = Hp // w, Wp // w
        else:
            s_x, s_y = H // w, W // w

        q, k, v = self.qkv(x).chunk(3, dim=1)
        # Fold each window into its own batch entry.
        q, k, v = map(
            lambda t: rearrange(
                t, 'b (h d) (s_x w_x) (s_y w_y) -> (b s_x s_y) h (w_x w_y) d',
                h=h, s_x=s_x, s_y=s_y, w_x=w, w_y=w
            ),
            (q, k, v)
        )

        dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale

        if self.rel_pos_embed:
            rel_pos_bias = self.rel_pos_bias_table[self.rel_pos_idx.view(-1)]\
                .view(
                    self.window_size * self.window_size,
                    self.window_size * self.window_size,
                    -1
                )  # Wh*Ww,Wh*Ww,nH
            rel_pos_bias = rel_pos_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
            dots = dots + rel_pos_bias.unsqueeze(0)

        if mask is not None:
            nW = mask.shape[0]
            # Fresh name for the head dim — do NOT overwrite H (used below).
            B_, n_heads, N, M = dots.shape
            dots = dots.view(
                B_ // nW, nW, self.heads, N, M
            ) + mask.unsqueeze(1).unsqueeze(0)
            dots = dots.view(-1, self.heads, N, M)

        attn = self.attend(dots)

        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(
            out, '(b s_x s_y) h (w_x w_y) d -> b (h d) (s_x w_x) (s_y w_y)',
            h=h, s_x=s_x, s_y=s_y, w_x=w, w_y=w
        ).contiguous()

        # Remove the padding added above before the output projection.
        if pad_r > 0 or pad_b > 0:
            out = out[:, :, :H, :W].contiguous()
        return self.proj_out(out)

    @staticmethod
    def compute_macs(module, input, output):
        """FLOPs-counting hook: qkv convs + projection + window attention."""
        input = input[0]
        B, C, H, W = input.shape

        flops = 0
        # Depthwise + pointwise QKV conv cost, per spatial location.
        params = sum([p.numel() for p in module.qkv.dw.parameters()])
        flops += params * H * W
        params = sum([p.numel() for p in module.qkv.pw.parameters()])
        flops += params * H * W
        # Output projection.
        params = sum([p.numel() for p in module.proj_out.parameters()])
        flops += params * H * W
        # Attention: QK^T and attn*V, each C*H*W*window^2.
        flops += 2 * C * H * W * module.window_size ** 2

        module.__flops__ += flops
class Transformer(nn.Module):
    """Stack of windowed-attention blocks with pre-norm and conv feed-forward.

    Fixes relative to the original:
      * ``shift_size`` had a trailing comma, so a 1-tuple (not an int) was
        passed into ``Attention``;
      * ``Attention(..., *kwargs)`` unpacked the kwargs dict positionally;
        it must be ``**kwargs``.
    """

    def __init__(
        self,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.,
        qkv_bias=False,
        drop_path_rate=None,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        kernel_qkv=3,
        padding_qkv=1,
        window_size=-1,
        shift=False,
        rel_pos_embed=False,
        **kwargs
    ):
        super().__init__()
        self.layers = nn.ModuleList([])
        for i in range(depth):
            # Shifted windows on every other block (Swin-style) when enabled.
            shift_size = window_size // 2 if shift and i % 2 == 1 else 0
            self.layers.append(nn.ModuleList([
                PreNorm(
                    norm_layer, embed_dim,
                    Attention(
                        dim_in=embed_dim,
                        dim_out=embed_dim,
                        num_heads=num_heads,
                        qkv_bias=qkv_bias,
                        kernel_size=kernel_qkv,
                        padding=padding_qkv,
                        window_size=window_size,
                        shift_size=shift_size,
                        rel_pos_embed=rel_pos_embed,
                        **kwargs
                    )
                ),
                PreNorm(
                    norm_layer, embed_dim,
                    FeedForward(embed_dim, act_layer, mlp_ratio)
                ),
                # Per-block stochastic depth when a rate list is supplied.
                DropPath(drop_path_rate[i])
                if isinstance(drop_path_rate, list) else nn.Identity()
            ]))
        self.window_size = window_size
        self.shift = shift

    def build_attn_mask(self, x):
        """Build the Swin-style additive attention mask for shifted windows.

        Regions that wrap around after the cyclic shift receive distinct ids;
        attention between positions from different regions is suppressed
        with a -100 bias.
        """
        _, _, H, W = x.shape
        # Round H/W up to multiples of the window size.
        Hp = int(np.ceil(H / self.window_size)) * self.window_size
        Wp = int(np.ceil(W / self.window_size)) * self.window_size
        img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device)  # 1 Hp Wp 1
        shift_size = self.window_size // 2
        h_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -shift_size),
            slice(-shift_size, None)
        )
        w_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -shift_size),
            slice(-shift_size, None)
        )
        # Assign a distinct id to each of the 3x3 shifted regions.
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1

        s_x = Hp // self.window_size
        s_y = Wp // self.window_size
        mask_windows = rearrange(
            img_mask, 'i (s_x w_x) (s_y w_y) j -> (i s_x s_y) w_x w_y j',
            s_x=s_x, s_y=s_y, w_y=self.window_size, w_x=self.window_size
        )
        mask_windows = mask_windows.view(
            -1, self.window_size * self.window_size
        )
        # Position pairs from different regions get a large negative bias.
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(
            attn_mask != 0, float(-100.0)
        ).masked_fill(attn_mask == 0, float(0.0))

        return attn_mask

    def forward(self, x):
        attn_mask = self.build_attn_mask(x) if self.shift else None
        for attn, ff, drop_path in self.layers:
            x = drop_path(attn(x, attn_mask)) + x
            x = drop_path(ff(x)) + x
        return x

    def forward_with_features(self, x):
        """Like ``forward``, but also return the output of every block."""
        attn_mask = self.build_attn_mask(x) if self.shift else None
        feats = []
        for attn, ff, drop_path in self.layers:
            x = drop_path(attn(x, attn_mask)) + x
            x = drop_path(ff(x)) + x
            feats.append(x)
        return x, feats
class ConvEmbed(nn.Module):
    """Image-to-patch embedding via a strided convolution, with optional norm."""

    def __init__(
        self,
        patch_size=7,
        in_chans=3,
        embed_dim=64,
        stride=4,
        padding=2,
        norm_layer=None
    ):
        super().__init__()
        self.patch_size = patch_size

        self.proj = nn.Conv2d(
            in_chans, embed_dim,
            kernel_size=patch_size,
            stride=stride,
            padding=padding
        )
        self.norm = norm_layer(embed_dim) if norm_layer else None

    def forward(self, x):
        x = self.proj(x)

        B, C, H, W = x.shape
        # Normalize over channels: flatten to (B, H*W, C), norm, restore BCHW.
        tokens = x.flatten(2).transpose(1, 2).contiguous()
        if self.norm:
            tokens = self.norm(tokens)
        return tokens.transpose(1, 2).reshape(B, C, H, W).contiguous()
class ResStem(nn.Module):
    """Convolutional stem that downsamples the input 4x via two stride-2 convs.

    With ``deep=True`` an extra stride-1 conv-BN-ReLU stage is inserted
    between them (three stages instead of two).
    """

    def __init__(self, channels_stem, deep=False):
        super().__init__()

        def conv_bn_relu(c_in, c_out, stride):
            # 3x3 conv (bias-free, BN supplies the shift) + BN + ReLU.
            return [
                nn.Conv2d(
                    c_in, c_out, kernel_size=3, stride=stride,
                    padding=1, bias=False
                ),
                nn.BatchNorm2d(c_out),
                nn.ReLU(inplace=True),
            ]

        stages = conv_bn_relu(3, channels_stem, 2)
        if deep:
            stages += conv_bn_relu(channels_stem, channels_stem, 1)
        stages += conv_bn_relu(channels_stem, channels_stem, 2)
        self.stem = nn.Sequential(*stages)

    def forward(self, x):
        return self.stem(x)
class CvT(nn.Module):
def __init__(
self,
*,
num_classes,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
init='trunc_norm',
use_dense_prediction=False,
spec=None
):
super().__init__()
self.num_stages = spec['NUM_STAGES']
total_depth = sum(spec['DEPTH'])
logging.info(f'=> total path: {total_depth}')
drop_path_rate = spec['DROP_PATH_RATE']
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, total_depth)
] # stochastic depth decay rule
in_chans = 3
depth_accum=0
for i in range(self.num_stages):
kwargs = {
'patch_size': spec['PATCH_SIZE'][i],
'patch_stride': spec['PATCH_STRIDE'][i],
'patch_padding': spec['PATCH_PADDING'][i],
'embed_dim': spec['DIM_EMBED'][i],
'depth': spec['DEPTH'][i],
'num_heads': spec['NUM_HEADS'][i],
'mlp_ratio': spec['MLP_RATIO'][i],
'qkv_bias': spec['QKV_BIAS'][i],
'kernel_qkv': spec['KERNEL_QKV'][i],
'padding_qkv': spec['PADDING_QKV'][i],
'window_size': spec['WINDOW_SIZE'][i],
'shift': spec['SHIFT'][i],
}
if i == 0 and getattr(spec, 'RES_STEM', False):
conv = ResStem(kwargs['embed_dim'], True)
else:
conv = ConvEmbed(
patch_size=kwargs['patch_size'],
in_chans=in_chans,
embed_dim=kwargs['embed_dim'],
stride=kwargs['patch_stride'],
padding=kwargs['patch_padding'],
norm_layer=norm_layer
)
stage = nn.Sequential(
conv,
Transformer(
embed_dim=kwargs['embed_dim'],
depth=kwargs['depth'],
num_heads=kwargs['num_heads'],
mlp_ratio=kwargs['mlp_ratio'],
qkv_bias=kwargs['qkv_bias'],
drop_path_rate=dpr[
depth_accum: depth_accum+kwargs['depth']
],
act_layer=act_layer,
norm_layer=norm_layer,
kernel_qkv=kwargs['kernel_qkv'],
padding_qkv=kwargs['padding_qkv'],
window_size=kwargs['window_size'],
shift=kwargs['shift'],
rel_pos_embed=spec['REL_POS_EMBED']
)
)
setattr(self, f'stage{i}', stage)
in_chans = spec['DIM_EMBED'][i]
depth_accum += kwargs['depth']
self.norm = norm_layer(in_chans)
self.avg_pool = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
Rearrange('... () () -> ...')
)
self.head = nn.Linear(in_chans, num_classes) if num_classes > 0 else nn.Identity()
# Region prediction head
self.use_dense_prediction = use_dense_prediction
if self.use_dense_prediction: self.head_dense = None
if init == 'xavier':
self.apply(self._init_weights_xavier)
else:
self.apply(self._init_weights_trunc_normal)
def _init_weights_trunc_normal(self, m):
    """Per-module init hook: truncated normal for linear/conv weights, identity affine for norms."""
    if isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
        # Normalization layers start out as the identity transform.
        nn.init.constant_(m.bias, 0)
        nn.init.constant_(m.weight, 1.0)
        return
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        logging.info('=> init weight from trunc norm')
        trunc_normal_(m.weight, std=0.02)
        if m.bias is not None:
            logging.info('=> init bias to zeros')
            nn.init.constant_(m.bias, 0)
def _init_weights_xavier(self, m):
    """Per-module init hook: Xavier-uniform for linear weights, identity affine for norms."""
    if isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
        # Normalization layers start out as the identity transform.
        nn.init.constant_(m.bias, 0)
        nn.init.constant_(m.weight, 1.0)
        return
    if isinstance(m, nn.Linear):
        logging.info('=> init weight of Linear from xavier uniform')
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            logging.info('=> init bias of Linear to zeros')
            nn.init.constant_(m.bias, 0)
def forward_features(self, x):
    """Run all stages, normalize the token sequence, and average-pool a global feature.

    Returns (pooled, tokens) when dense prediction is enabled, otherwise
    just the pooled feature.
    """
    for stage_idx in range(self.num_stages):
        x = getattr(self, f'stage{stage_idx}')(x)

    height, width = x.shape[-2], x.shape[-1]
    tokens = rearrange(x, 'b c h w -> b (h w) c')
    x_region = self.norm(tokens)
    pooled = self.avg_pool(
        rearrange(x_region, 'b (h w) c -> b c h w', h=height, w=width)
    )  # B C
    return (pooled, x_region) if self.use_dense_prediction else pooled
def forward_return_n_last_blocks(self, x, n=1, return_patch_avgpool=False, depth=None):
    """Concatenate avg-pooled features from the last `n` transformer blocks.

    :param x: input image batch (B, C, H, W).
    :param n: number of trailing blocks whose features are collected.
    :param return_patch_avgpool: unused; kept for interface compatibility.
    :param depth: per-stage block counts. Changed from a mutable default
        argument (`depth=[]`) to the None-sentinel idiom; behavior is
        unchanged for all callers.
    """
    if depth is None:
        depth = []
    num_blks = sum(depth)
    start_idx = num_blks - n

    # Locate the stage and the in-stage block index where collection starts.
    sum_cur = 0
    for i, d in enumerate(depth):
        sum_cur_new = sum_cur + d
        if sum_cur <= start_idx < sum_cur_new:
            start_stage = i
            start_blk = start_idx - sum_cur
        sum_cur = sum_cur_new

    # We return the averaged token features from the `n` last blocks.
    # Note: there is no [CLS] token in this architecture.
    output = []
    for i in range(self.num_stages):
        stage = getattr(self, f'stage{i}')
        x = stage[0](x)  # conv embedding
        x, fea = stage[1].forward_with_features(x)  # transformer + per-block features
        if i >= start_stage:
            for x_ in fea[start_blk:]:
                if i == self.num_stages - 1:
                    # Only the last stage applies the model's final norm
                    # before pooling, matching forward_features.
                    x_ = rearrange(x_, 'b c h w -> b h w c').contiguous()
                    x_ = self.norm(x_)
                    x_ = rearrange(x_, 'b h w c -> b c h w').contiguous()
                x_avg = torch.flatten(self.avg_pool(x_), 1)  # (B, C)
                output.append(x_avg)
            # After the starting stage, collect every block of later stages.
            start_blk = 0

    return torch.cat(output, dim=-1)
def forward(self, x):
    """Forward over one or more multi-crop groups.

    Consecutive crops with the same spatial resolution are batched through
    the backbone in a single pass.
    """
    if not isinstance(x, list):
        x = [x]
    # Cumulative end indices of runs of same-resolution crops.
    idx_crops = torch.cumsum(torch.unique_consecutive(
        torch.tensor([inp.shape[-1] for inp in x]),
        return_counts=True,
    )[1], 0)

    if self.use_dense_prediction:
        cls_chunks, fea_chunks, npatch = [], [], []
        begin = 0
        for end in idx_crops:
            chunk_cls, chunk_fea = self.forward_features(torch.cat(x[begin:end]))
            B, N, C = chunk_fea.shape
            cls_chunks.append(chunk_cls)
            fea_chunks.append(chunk_fea.reshape(B * N, C))
            npatch.append(N)
            begin = end
        output_cls = torch.cat(cls_chunks)
        output_fea = torch.cat(fea_chunks)
        return self.head(output_cls), self.head_dense(output_fea), output_fea, npatch

    chunks = []
    begin = 0
    for end in idx_crops:
        chunks.append(self.forward_features(torch.cat(x[begin:end])))
        begin = end
    # Run the head forward on the concatenated features.
    return self.head(torch.cat(chunks))
def init_weights(self, pretrained='', pretrained_layers=None, verbose=True):
    """Load matching weights from a checkpoint file.

    :param pretrained: path to a checkpoint; silently skipped if it is not a file.
    :param pretrained_layers: list of top-level parameter-name prefixes to load;
        ['*'] loads every key present in both state dicts. None or an empty
        list loads nothing. (Changed from a mutable default argument.)
    :param verbose: log each initialized key.
    """
    if pretrained_layers is None:
        pretrained_layers = []
    if os.path.isfile(pretrained):
        pretrained_dict = torch.load(pretrained, map_location='cpu')
        logging.info(f'=> loading pretrained model {pretrained}')
        model_dict = self.state_dict()
        # Keep only keys that exist in this model.
        pretrained_dict = {
            k: v for k, v in pretrained_dict.items()
            if k in model_dict
        }
        need_init_state_dict = {}
        for k, v in pretrained_dict.items():
            # Bug fix: the original used `pretrained_layers[0] is '*'`, an
            # identity comparison that only worked via CPython string
            # interning (and a SyntaxWarning on modern Pythons); it also
            # raised IndexError on an empty layer list. Use `==` and guard.
            need_init = bool(pretrained_layers) and (
                k.split('.')[0] in pretrained_layers
                or pretrained_layers[0] == '*'
            )
            if need_init:
                if verbose:
                    logging.info(f'=> init {k} from {pretrained}')
                need_init_state_dict[k] = v
        self.load_state_dict(need_init_state_dict, strict=False)
@register_model
def get_cls_model(config, is_teacher=False, use_dense_prediction=False, **kwargs):
    """Build a CvT classifier from config.

    Teacher models are created without stochastic depth (drop path rate 0).
    Optionally loads pretrained weights when config.MODEL.INIT_WEIGHTS is set.
    """
    spec = config.MODEL.SPEC
    if is_teacher:
        spec['DROP_PATH_RATE'] = 0.0

    model = CvT(
        num_classes=config.MODEL.NUM_CLASSES,
        act_layer=QuickGELU,
        norm_layer=partial(LayerNorm, eps=1e-5),
        init='trunc_norm',
        use_dense_prediction=use_dense_prediction,
        spec=spec,
    )

    if config.MODEL.INIT_WEIGHTS:
        model.init_weights(
            config.MODEL.PRETRAINED,
            config.MODEL.PRETRAINED_LAYERS,
            config.VERBOSE,
        )
    return model
|
{"hexsha": "6fa12aae615259dbcd8917a61f4dffd8c5db0c1c", "size": 22590, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/cvt_v4_transformer.py", "max_stars_repo_name": "rahulmangalampalli/esvit", "max_stars_repo_head_hexsha": "5caf6e36b088ae2e7aaa4100b307eec991078e3e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/cvt_v4_transformer.py", "max_issues_repo_name": "rahulmangalampalli/esvit", "max_issues_repo_head_hexsha": "5caf6e36b088ae2e7aaa4100b307eec991078e3e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/cvt_v4_transformer.py", "max_forks_repo_name": "rahulmangalampalli/esvit", "max_forks_repo_head_hexsha": "5caf6e36b088ae2e7aaa4100b307eec991078e3e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.906779661, "max_line_length": 104, "alphanum_fraction": 0.5127932714, "include": true, "reason": "import numpy", "num_tokens": 5545}
|
import torch.nn as nn
import torch
import numpy as np
class GLU(nn.Module):
    """Gated Linear Unit variant that keeps the channel count unchanged.

    Custom implementation: the Voice Conversion CycleGAN paper assumes GLU
    does not halve the tensor's dimension, so this computes
    x * sigmoid(x) instead of PyTorch's channel-splitting GLU.
    """

    def __init__(self):
        super(GLU, self).__init__()

    def forward(self, input):
        gate = torch.sigmoid(input)
        return input * gate
class PixelShuffle(nn.Module):
    """1-D pixel shuffle for 3-D tensors (N, C, W).

    Custom implementation: PyTorch's nn.PixelShuffle requires 4-D input,
    whereas this model works on 3-D arrays. Folds `upscale_factor` channels
    into the width dimension.

    Generalized: the original hard-coded a factor of 2 and ignored the
    `upscale_factor` it stored; behavior is identical for the factor-2 case
    used throughout this file.
    """

    def __init__(self, upscale_factor):
        super(PixelShuffle, self).__init__()
        self.upscale_factor = upscale_factor

    def forward(self, input):
        n = input.shape[0]
        c_out = input.shape[1] // self.upscale_factor
        w_new = input.shape[2] * self.upscale_factor
        return input.view(n, c_out, w_new)
class ResidualLayer(nn.Module):
    """Conv1d residual block with a GLU gate.

    Two parallel Conv1d + InstanceNorm branches form a gated activation
    (value * sigmoid(gate)); a third Conv1d + InstanceNorm projects back to
    `in_channels`, and the result is added to the input.

    Note: `stride` is accepted for signature compatibility, but the internal
    convolutions deliberately use stride=1 so the residual sum stays shape-valid.
    (A large commented-out duplicate of this module was removed.)
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(ResidualLayer, self).__init__()

        self.conv1d_layer = nn.Sequential(nn.Conv1d(in_channels=in_channels,
                                                    out_channels=out_channels,
                                                    kernel_size=kernel_size,
                                                    stride=1,
                                                    padding=padding),
                                          nn.InstanceNorm1d(num_features=out_channels,
                                                            affine=True))

        self.conv_layer_gates = nn.Sequential(nn.Conv1d(in_channels=in_channels,
                                                        out_channels=out_channels,
                                                        kernel_size=kernel_size,
                                                        stride=1,
                                                        padding=padding),
                                              nn.InstanceNorm1d(num_features=out_channels,
                                                                affine=True))

        self.conv1d_out_layer = nn.Sequential(nn.Conv1d(in_channels=out_channels,
                                                        out_channels=in_channels,
                                                        kernel_size=kernel_size,
                                                        stride=1,
                                                        padding=padding),
                                              nn.InstanceNorm1d(num_features=in_channels,
                                                                affine=True))

    def forward(self, input):
        h1_norm = self.conv1d_layer(input)
        h1_gates_norm = self.conv_layer_gates(input)

        # GLU: gate the value branch with the sigmoid of the gate branch.
        h1_glu = h1_norm * torch.sigmoid(h1_gates_norm)

        h2_norm = self.conv1d_out_layer(h1_glu)
        # Residual connection.
        return input + h2_norm
class downSample_Generator(nn.Module):
    """Generator down-sampling block: strided Conv1d + InstanceNorm, GLU-gated."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(downSample_Generator, self).__init__()

        def _branch():
            # Conv1d followed by affine instance normalization.
            return nn.Sequential(
                nn.Conv1d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=kernel_size,
                          stride=stride,
                          padding=padding),
                nn.InstanceNorm1d(num_features=out_channels, affine=True))

        # Two independently-initialized branches: value and gate.
        self.convLayer = _branch()
        self.convLayer_gates = _branch()

    def forward(self, input):
        gate = torch.sigmoid(self.convLayer_gates(input))
        return self.convLayer(input) * gate
class upSample_Generator(nn.Module):
    """Generator up-sampling block: Conv1d, 1-D pixel shuffle (x2), InstanceNorm, GLU-gated."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(upSample_Generator, self).__init__()

        def _branch():
            # The conv produces `out_channels`; PixelShuffle folds half of
            # them into the width axis, so the norm sees out_channels // 2.
            return nn.Sequential(
                nn.Conv1d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=kernel_size,
                          stride=stride,
                          padding=padding),
                PixelShuffle(upscale_factor=2),
                nn.InstanceNorm1d(num_features=out_channels // 2,
                                  affine=True))

        # Two independently-initialized branches: value and gate.
        self.convLayer = _branch()
        self.convLayer_gates = _branch()

    def forward(self, input):
        gate = torch.sigmoid(self.convLayer_gates(input))
        return self.convLayer(input) * gate
class Generator(nn.Module):
    """CycleGAN-VC generator over (N, 24, W) feature maps.

    Pipeline: GLU stem -> two strided down-sampling blocks ->
    six residual blocks -> two pixel-shuffle up-sampling blocks ->
    final Conv1d projecting back to 24 features.
    """

    def __init__(self):
        super(Generator, self).__init__()
        # GLU stem: parallel value/gate convolutions.
        self.conv1 = nn.Conv1d(in_channels=24,
                               out_channels=128,
                               kernel_size=15,
                               stride=1,
                               padding=7)
        self.conv1_gates = nn.Conv1d(in_channels=24,
                                     out_channels=128,
                                     kernel_size=15,
                                     stride=1,
                                     padding=7)

        # Downsample layers.
        self.downSample1 = downSample_Generator(in_channels=128,
                                                out_channels=256,
                                                kernel_size=5,
                                                stride=2,
                                                padding=1)
        self.downSample2 = downSample_Generator(in_channels=256,
                                                out_channels=512,
                                                kernel_size=5,
                                                stride=2,
                                                padding=2)

        # Six identical residual blocks (registered in the same order and
        # with the same names as individually-declared attributes).
        for idx in range(1, 7):
            setattr(self, f'residualLayer{idx}',
                    ResidualLayer(in_channels=512,
                                  out_channels=1024,
                                  kernel_size=3,
                                  stride=1,
                                  padding=1))

        # Upsample layers (PixelShuffle halves the channel count).
        self.upSample1 = upSample_Generator(in_channels=512,
                                            out_channels=1024,
                                            kernel_size=5,
                                            stride=1,
                                            padding=2)
        self.upSample2 = upSample_Generator(in_channels=1024 // 2,
                                            out_channels=512,
                                            kernel_size=5,
                                            stride=1,
                                            padding=2)
        self.lastConvLayer = nn.Conv1d(in_channels=512 // 2,
                                       out_channels=24,
                                       kernel_size=15,
                                       stride=1,
                                       padding=7)

    def forward(self, input):
        # GLU stem.
        x = self.conv1(input) * torch.sigmoid(self.conv1_gates(input))
        x = self.downSample2(self.downSample1(x))
        for idx in range(1, 7):
            x = getattr(self, f'residualLayer{idx}')(x)
        x = self.upSample2(self.upSample1(x))
        return self.lastConvLayer(x)
class DownSample_Discriminator(nn.Module):
    """Discriminator down-sampling block: Conv2d + InstanceNorm2d, GLU-gated."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(DownSample_Discriminator, self).__init__()

        def _branch():
            # Conv2d followed by affine instance normalization.
            return nn.Sequential(
                nn.Conv2d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=kernel_size,
                          stride=stride,
                          padding=padding),
                nn.InstanceNorm2d(num_features=out_channels, affine=True))

        # Two independently-initialized branches: value and gate.
        self.convLayer = _branch()
        self.convLayerGates = _branch()

    def forward(self, input):
        # GLU gating.
        return self.convLayer(input) * torch.sigmoid(self.convLayerGates(input))
class Discriminator(nn.Module):
    """PatchGAN-style discriminator over (batch, num_features, time) inputs.

    Note: kernel sizes were modified relative to the paper to retain
    dimensionality — unlike TensorFlow, this PyTorch version has no
    padding='same', so explicit ZeroPad2d is applied before each stage.

    (A dead, commented-out `downSample` factory method was removed; the
    equivalent logic lives in DownSample_Discriminator.)
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        # GLU stem: parallel value/gate convolutions.
        self.convLayer1 = nn.Conv2d(in_channels=1,
                                    out_channels=128,
                                    kernel_size=[3, 3],
                                    stride=[1, 2])
        self.convLayer1_gates = nn.Conv2d(in_channels=1,
                                          out_channels=128,
                                          kernel_size=[3, 3],
                                          stride=[1, 2])

        # DownSample layers.
        self.downSample1 = DownSample_Discriminator(in_channels=128,
                                                    out_channels=256,
                                                    kernel_size=[3, 3],
                                                    stride=[2, 2],
                                                    padding=0)
        self.downSample2 = DownSample_Discriminator(in_channels=256,
                                                    out_channels=512,
                                                    kernel_size=[3, 3],
                                                    stride=[2, 2],
                                                    padding=0)
        self.downSample3 = DownSample_Discriminator(in_channels=512,
                                                    out_channels=1024,
                                                    kernel_size=[6, 3],
                                                    stride=[1, 2],
                                                    padding=0)

        # Fully connected classifier applied per spatial location.
        self.fc = nn.Linear(in_features=1024,
                            out_features=1)

    def forward(self, input):
        # input has shape [batch_size, num_features, time];
        # Conv2d requires [batch_size, 1, num_features, time].
        input = input.unsqueeze(1)

        # GLU stem with asymmetric zero padding (emulates padding='same').
        pad_input = nn.ZeroPad2d((1, 0, 1, 1))
        layer1 = self.convLayer1(
            pad_input(input)) * torch.sigmoid(self.convLayer1_gates(pad_input(input)))

        pad_input = nn.ZeroPad2d((1, 0, 1, 0))
        downSample1 = self.downSample1(pad_input(layer1))

        pad_input = nn.ZeroPad2d((1, 0, 1, 0))
        downSample2 = self.downSample2(pad_input(downSample1))

        pad_input = nn.ZeroPad2d((1, 0, 3, 2))
        downSample3 = self.downSample3(pad_input(downSample2))

        downSample3 = downSample3.contiguous().permute(0, 2, 3, 1).contiguous()
        # Output sigmoid deliberately removed to avoid the vanishing-gradient
        # problem; pair this with a logit-based loss (e.g. BCEWithLogits).
        fc = self.fc(downSample3)
        return fc
if __name__ == '__main__':
    # Smoke test: push random batches through Generator, then feed its output
    # to the Discriminator, printing the resulting shapes.

    # Generator Dimensionality Testing
    input = torch.randn(10, 24, 1100)  # (N, C_in, Width) For Conv1d
    # NOTE(review): the tensor above is immediately overwritten below —
    # presumably leftover scaffolding; confirm before removing.
    np.random.seed(0)
    print(np.random.randn(10))
    input = np.random.randn(158, 24, 128)
    input = torch.from_numpy(input).float()
    # print(input)
    generator = Generator()
    output = generator(input)
    print("Output shape Generator", output.shape)

    # Discriminator Dimensionality Testing
    # input = torch.randn(32, 1, 24, 128) # (N, C_in, height, width) For Conv2d
    discriminator = Discriminator()
    output = discriminator(output)  # discriminator consumes the generator output
    print("Output shape Discriminator", output.shape)
|
{"hexsha": "8224bdce03dfd49ea932f7b8bb72e7ee99a26b98", "size": 17354, "ext": "py", "lang": "Python", "max_stars_repo_path": "model_GLU.py", "max_stars_repo_name": "astricks/Voice-Conversion-GAN", "max_stars_repo_head_hexsha": "4ba2dc91a299413286c3976416442d54a08ec298", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 104, "max_stars_repo_stars_event_min_datetime": "2018-11-18T13:49:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T03:25:29.000Z", "max_issues_repo_path": "model_GLU.py", "max_issues_repo_name": "astricks/Voice-Conversion-GAN", "max_issues_repo_head_hexsha": "4ba2dc91a299413286c3976416442d54a08ec298", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2019-02-02T03:58:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T09:38:28.000Z", "max_forks_repo_path": "model_GLU.py", "max_forks_repo_name": "astricks/Voice-Conversion-GAN", "max_forks_repo_head_hexsha": "4ba2dc91a299413286c3976416442d54a08ec298", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2019-01-29T01:20:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T07:30:52.000Z", "avg_line_length": 49.3011363636, "max_line_length": 94, "alphanum_fraction": 0.4245130806, "include": true, "reason": "import numpy", "num_tokens": 2812}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: chengdicao
"""
import tensorflow as tf
import os
import numpy as np
from tqdm import tqdm
import argparse
from lib.utils import load_list, cosine_decay_lr, get_label_matrix, get_cosine_distance_matrix, get_rank_matrix
from lib.metrics import mean_average_precison, mean_precision_top_n, mean_rank_n
from lib.models import _set_class_num, _set_regularizer, get_model
from lib.generators import BasicGenerator
if __name__ == '__main__':
    # Training driver for cover song identification (TF1-style Keras):
    # parse args, build/load the model, train, evaluate retrieval metrics on
    # the validation set each epoch, and checkpoint whenever any metric improves.

    # Step 0: Configure arguments
    parser = argparse.ArgumentParser(description='Training codes for cover song identification using Keras')
    parser.add_argument('--tag', type=str, default='test',
                        help='tag for this experiment')
    parser.add_argument('--block', default='wider',
                        help='building block: simple / bottleneck / wider')

    # Argument for dataset
    parser.add_argument('--data-dir', type=str, default='/data/youtube_hpcp_npy',
                        help='directory of dataset')
    parser.add_argument('--train-ls', type=str, default='meta/SHS100K-TRAIN',
                        help='list of training set')
    parser.add_argument('--val-ls', type=str, default='meta/SHS100K-VAL',
                        help='list of validation set')
    parser.add_argument('--feature-len', type=int, default=400,
                        help='length of input feature')

    # Argument for model
    parser.add_argument('--checkpoint', type=str, default=None,
                        help='checkpoint directory')
    parser.add_argument('--regularize', type=float, default=0.0001,
                        help='value of l2-regularization')
    parser.add_argument('--time-field', type=int, default=48,
                        help='temporal reception field of KINet')
    parser.add_argument('--ki-block-num', type=int, default=4,
                        help='number of key-invariant blocks')
    parser.add_argument('--ki-out-channel', type=int, default=256,
                        help='output channel of key-invariant blocks')
    parser.add_argument('--bn-ratio', type=int, default=4,
                        help='squeeze ratio of bottleneck blocks')
    parser.add_argument('--no-chnlatt', action='store_true',
                        help='disable channel attention')
    parser.add_argument('--no-tempatt', action='store_true',
                        help='disable temporal attention')
    parser.add_argument('--attention-ratio', type=int, default=4,
                        help='squeeze ratio of attention modules')
    parser.add_argument('--embedding-len', type=int, default=128,
                        help='length of final music embedding')

    # Argument for training
    parser.add_argument('--batchsize', type=int, default=32,
                        help='batch size for training')
    parser.add_argument('--max-epoch', type=int, default=100,
                        help='max number of training epochs')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='initial learning rate')
    parser.add_argument('--gpu', type=str, default='0',
                        help='id of GPU to use')
    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    tf.keras.backend.set_session(sess)

    # Step 1: prepare data
    train_generator = BasicGenerator(args.data_dir, args.train_ls, args.feature_len, args.batchsize)
    val_feature, val_label = load_list(args.data_dir, args.val_ls, args.feature_len)
    val_label_matrix = get_label_matrix(val_label)
    _set_class_num(train_generator.get_class_num())

    # Step 2: define model
    if args.checkpoint:
        model_all = tf.keras.models.load_model(args.checkpoint)
        embedding = model_all.get_layer('Embedding_LReLU').output
        embedding = tf.keras.layers.Lambda(lambda x: tf.keras.backend.l2_normalize(x, axis=1),
                                           name='Embedding_L2Norm')(embedding)
        model_embedding = tf.keras.models.Model(inputs=model_all.input, outputs=embedding)
        # NOTE(review): `tf_batch_all_loss` is neither defined nor imported in
        # this file, so resuming from a checkpoint raises NameError — confirm
        # where this loss should be imported from and add the import.
        model_embedding.compile(optimizer='adam', loss=tf_batch_all_loss)
    else:
        _set_regularizer(args.regularize)
        input_tensor = tf.keras.Input(shape=(23, args.feature_len, 1), name='Feature')
        model_embedding, model_all = get_model(input_tensor, args)
    model_embedding.summary()

    # Step 3: define file writer
    model_dir = 'models/%s' % args.tag
    log_dir = 'log/%s' % args.tag
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)
    if not os.path.isdir(log_dir):
        os.mkdir(log_dir)
    writer = tf.summary.FileWriter(log_dir, sess.graph)
    # Placeholders so scalar summaries can be fed from Python-side values.
    train_lr = tf.placeholder(tf.float32, [])
    train_loss = tf.placeholder(tf.float32, [])
    train_acc = tf.placeholder(tf.float32, [])
    val_mAP = tf.placeholder(tf.float32, [])
    val_TOP10 = tf.placeholder(tf.float32, [])
    val_MR1 = tf.placeholder(tf.float32, [])
    tf.summary.scalar('train_loss', train_loss)
    tf.summary.scalar('train_acc', train_acc)
    tf.summary.scalar('train_lr', train_lr)
    tf.summary.scalar('val_mAP', val_mAP)
    tf.summary.scalar('val_TOP10', val_TOP10)
    tf.summary.scalar('val_MR1', val_MR1)
    merged = tf.summary.merge_all()

    # Step 4: train model
    best_loss = float('inf')
    best_acc = 0
    best_mAP = 0
    best_TOP10 = 0
    best_MR1 = float('inf')
    for epoch in range(args.max_epoch):
        this_lr = cosine_decay_lr(epoch, args.lr, args.max_epoch)
        tf.keras.backend.set_value(model_all.optimizer.lr, this_lr)
        # Train exactly one epoch per outer iteration so the LR schedule applies.
        history = model_all.fit_generator(train_generator, epochs=epoch+1, initial_epoch=epoch, verbose=1)
        this_loss = history.history['loss'][-1]
        this_acc = history.history['acc'][-1]

        # Evaluate retrieval metrics on the validation set.
        val_embedding = []
        for feature in tqdm(val_feature):
            val_embedding.append(model_embedding.predict(feature, verbose=0))
        val_embedding = np.vstack(val_embedding)
        val_distance_matrix = get_cosine_distance_matrix(val_embedding)
        val_rank_matrix = get_rank_matrix(val_distance_matrix)
        this_mAP = mean_average_precison(val_label_matrix, val_rank_matrix)
        this_TOP10 = mean_precision_top_n(val_label_matrix, val_rank_matrix, n=10)
        this_MR1 = mean_rank_n(val_label_matrix, val_rank_matrix, n=1)

        improve = False
        # Lower is better for loss and MR1; HIGHER is better for acc, mAP and
        # TOP10. Bug fix: the original compared every metric with '<', so acc,
        # mAP and TOP10 (initialized to 0) could never register an improvement.
        if this_loss < best_loss:
            best_loss = this_loss
            improve = True
        if this_acc > best_acc:
            best_acc = this_acc
            improve = True
        if this_mAP > best_mAP:
            best_mAP = this_mAP
            improve = True
        if this_TOP10 > best_TOP10:
            best_TOP10 = this_TOP10
            improve = True
        if this_MR1 < best_MR1:
            best_MR1 = this_MR1
            improve = True

        print('loss:%.4f\tacc:%.4f\tmAP:%.4f\tTOP10:%.4f\tMR1:%.2f' %(this_loss, this_acc, this_mAP, this_TOP10, this_MR1))
        summary = sess.run(merged,
                           feed_dict={train_lr: this_lr,
                                      train_loss: this_loss,
                                      train_acc: this_acc,
                                      val_mAP: this_mAP,
                                      val_TOP10: this_TOP10,
                                      val_MR1: this_MR1
                                      })
        writer.add_summary(summary, epoch)

        # Checkpoint whenever any tracked metric improved this epoch.
        if improve:
            model_all.save(os.path.join(model_dir, 'model%d-%.4f-%.4f-%.4f-%.4f-%.2f.h5' %(epoch+1, this_loss, this_acc, this_mAP, this_TOP10, this_MR1)))
|
{"hexsha": "39510ace0f2b806c82ee4d770564015794c9ef66", "size": 7756, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "DiDiDoes/MulKINet", "max_stars_repo_head_hexsha": "9afb7c56e25b8c4dd8425139eb907912eb1f880f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-11-03T10:03:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-29T06:16:46.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "DiDiDoes/MulKINet", "max_issues_repo_head_hexsha": "9afb7c56e25b8c4dd8425139eb907912eb1f880f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "DiDiDoes/MulKINet", "max_forks_repo_head_hexsha": "9afb7c56e25b8c4dd8425139eb907912eb1f880f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9793814433, "max_line_length": 154, "alphanum_fraction": 0.6249355338, "include": true, "reason": "import numpy", "num_tokens": 1734}
|
The Tree of Peace was planted to the west of Mrak Hall on May 12^th^, 1984. It is a valley oak that was planted in recognition of Native American Cultural Days by Chief Jake Swamp, an Iroquois Elder.
May the dream of the Peacemaker—a world without war—one day come true.
|
{"hexsha": "d5f1f3673175d8f9fd3a3f66e4392da10fbd180c", "size": 315, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/The_Tree_of_Peace.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/The_Tree_of_Peace.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/The_Tree_of_Peace.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.5, "max_line_length": 240, "alphanum_fraction": 0.7841269841, "num_tokens": 79}
|
"""
this file is build based on the code found in evaluate_suffix_and_remaining_time.py
here the beam search (with breath-first-search) is implemented, to find compliant prediction
Author: Anton Yeshchenko
"""
from __future__ import division
import csv
import os.path
import time
from queue import PriorityQueue
from datetime import timedelta
import distance
import numpy as np
from keras.models import load_model
from sklearn import metrics
import shared_variables
from evaluation.prepare_data import amplify, get_symbol_ampl
from evaluation.prepare_data import encode
from evaluation.prepare_data_resource import prepare_testing_data, select_declare_verified_traces
def run_experiments(server_replayer, log_name, models_folder, fold):
    """Predict case suffixes with a breadth-first beam search constrained by an LTL formula.

    For each prefix size in the configured range, traces whose prefixes satisfy
    the Declare model are selected; the trained model then predicts one symbol
    at a time, keeping the `beam_size` most probable candidate suffixes until a
    candidate satisfies `formula` (after which the beam narrows to 1 and only
    the event sequence is extended). Per-trace similarity and remaining-time
    error metrics are written to a CSV under the LTL results folder.

    NOTE(review): `cropped_times`/`cropped_times3` are appended to for every
    beam candidate without being reset per candidate — presumably harmless for
    `encode`, but confirm against the original evaluate script.
    """
    beam_size = shared_variables.beam_size
    model_filename = shared_variables.extract_last_model_checkpoint(log_name, models_folder, fold, 'CF')
    declare_model_filename = shared_variables.extract_declare_model_filename(log_name)

    log_settings_dictionary = shared_variables.log_settings[log_name]
    formula = log_settings_dictionary['formula']
    prefix_size_pred_from = log_settings_dictionary['prefix_size_pred_from']
    prefix_size_pred_to = log_settings_dictionary['prefix_size_pred_to']

    start_time = time.time()

    # prepare the data
    lines, \
        lines_id, \
        lines_group, \
        lines_t, \
        lines_t2, \
        lines_t3, \
        lines_t4, \
        maxlen, \
        chars, \
        chars_group, \
        char_indices, \
        char_indices_group, \
        divisor, \
        divisor2, \
        divisor3, \
        predict_size, \
        target_indices_char, \
        target_indices_char_group, \
        target_char_indices, \
        target_char_indices_group = prepare_testing_data(log_name)

    # this is the beam stack size, means how many "best" alternatives will be stored
    one_ahead_gt = []
    one_ahead_pred = []

    # find cycles and modify the probability functionality goes here
    stop_symbol_probability_amplifier_current = 1

    # load model, set this to the model generated by train.py
    model = load_model(model_filename)

    class NodePrediction:
        # One beam-search candidate: encoded model input, the (prefix +
        # predicted) trace so far, accumulated predicted remaining time, and
        # the path's cumulative log-probability.
        def __init__(self, data, crop_line, tot_predicted_time, probability_of=0):
            self.data = data
            self.cropped_line = crop_line
            self.total_predicted_time = tot_predicted_time
            self.probability_of = probability_of

    folder_path = shared_variables.outputs_folder + models_folder + '/' + str(fold) + '/results/LTL/'
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
    output_filename = folder_path + '%s_%s.csv' % (log_name, 'CF')
    with open(output_filename, 'w') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        # NOTE(review): the "RMSE" column is written as an empty string further
        # below — confirm whether RMSE was ever meant to be computed.
        spamwriter.writerow(["Prefix length",
                             "Ground truth",
                             "Predicted",
                             "Damerau-Levenshtein",
                             "Jaccard",
                             "Ground truth times",
                             "Predicted times",
                             "RMSE",
                             "MAE",
                             "Median AE"])
        for prefix_size in range(prefix_size_pred_from, prefix_size_pred_to):
            print("prefix size: " + str(prefix_size))
            curr_time = time.time()
            # Keep only the traces whose prefix satisfies the Declare model.
            lines_s, \
                lines_id_s, \
                lines_group_s, \
                lines_t_s, \
                lines_t2_s, \
                lines_t3_s, \
                lines_t4_s = select_declare_verified_traces(server_replayer,
                                                            declare_model_filename,
                                                            lines,
                                                            lines_id,
                                                            lines_group,
                                                            lines_t,
                                                            lines_t2,
                                                            lines_t3,
                                                            lines_t4,
                                                            prefix_size)
            print("formulas verified: " + str(len(lines_s)) + " out of : " + str(len(lines)))
            print('elapsed_time:', time.time() - curr_time)

            counter = 0
            for line, times, times2, times3 in zip(lines_s, lines_t_s, lines_t2_s, lines_t3_s):
                times.append(0)
                cropped_line = ''.join(line[:prefix_size])
                cropped_times = times[:prefix_size]
                cropped_times3 = times3[:prefix_size]
                if len(times2) < prefix_size:
                    continue  # make no prediction for this case, since this case has ended already

                # initialize root of the tree for beam search
                total_predicted_time_initialization = 0
                search_node_root = NodePrediction(encode(cropped_line,
                                                         cropped_times,
                                                         cropped_times3,
                                                         maxlen,
                                                         chars,
                                                         char_indices,
                                                         divisor,
                                                         divisor2),
                                                  cropped_line,
                                                  total_predicted_time_initialization)

                ground_truth = ''.join(line[prefix_size:prefix_size + predict_size])
                # Ground-truth remaining time = case end minus time at prefix end.
                ground_truth_t = times2[prefix_size - 1]
                case_end_time = times2[len(times2) - 1]
                ground_truth_t = case_end_time - ground_truth_t

                queue_next_steps = PriorityQueue()
                # PriorityQueue pops the smallest item first, so negate the
                # log-probability to pop the most probable candidate.
                queue_next_steps.put((-search_node_root.probability_of, search_node_root))
                queue_next_steps_future = PriorityQueue()
                start_of_the_cycle_symbol = " "
                found_satisfying_constraint = False

                current_beam_size = beam_size
                current_prediction_premis = None

                for i in range(predict_size):
                    for k in range(current_beam_size):
                        if queue_next_steps.empty():
                            break
                        _, current_prediction_premis = queue_next_steps.get()

                        if not found_satisfying_constraint:
                            if server_replayer.verify_formula_as_compliant(current_prediction_premis.cropped_line,
                                                                           formula,
                                                                           prefix_size):
                                # the formula verified and we can just finish the predictions
                                # beam size is 1 because predict only sequence of events
                                current_beam_size = 1
                                current_prediction_premis.probability_of = 0.0
                                # overwrite new queue
                                queue_next_steps_future = PriorityQueue()
                                found_satisfying_constraint = True

                        enc = current_prediction_premis.data
                        temp_cropped_line = current_prediction_premis.cropped_line
                        y = model.predict(enc, verbose=0)  # make predictions
                        # split predictions into separate activity and time predictions
                        y_char = y[0][0]
                        y_t = y[1][0][0]
                        if y_t < 0:
                            # Clamp negative time predictions to zero.
                            y_t = 0
                        cropped_times.append(y_t)

                        if not i == 0:
                            # Dampen repeated cycles in the predicted suffix.
                            stop_symbol_probability_amplifier_current, start_of_the_cycle_symbol = \
                                amplify(temp_cropped_line)

                        # in not reached, function :choose_next_top_descendant: will backtrack
                        y_t = y_t * divisor3
                        cropped_times3.append(cropped_times3[-1] + timedelta(seconds=(int(y_t) if y_t == 0 else y_t)))

                        for j in range(current_beam_size):
                            # j-th most probable next symbol under the amplifier.
                            temp_prediction = get_symbol_ampl(y_char, target_indices_char,
                                                              target_char_indices, start_of_the_cycle_symbol,
                                                              stop_symbol_probability_amplifier_current, j)

                            # end of case was just predicted, therefore, stop predicting further into the future
                            if temp_prediction == '!':
                                if server_replayer.verify_formula_as_compliant(temp_cropped_line, formula, prefix_size):
                                    one_ahead_pred.append(current_prediction_premis.total_predicted_time)
                                    one_ahead_gt.append(ground_truth_t)
                                    stop_symbol_probability_amplifier_current = 1
                                    # Compliant end-of-case: clear the frontier to stop.
                                    queue_next_steps = PriorityQueue()
                                    break
                                else:
                                    continue

                            temp_cropped_line = current_prediction_premis.cropped_line + temp_prediction
                            temp_total_predicted_time = current_prediction_premis.total_predicted_time + y_t
                            temp_state_data = encode(temp_cropped_line, cropped_times, cropped_times3,
                                                     maxlen, chars, char_indices, divisor, divisor2)
                            # Probability of the j-th best symbol; accumulate in log space.
                            probability_this = np.sort(y_char)[len(y_char) - 1 - j]
                            temp = NodePrediction(temp_state_data,
                                                  temp_cropped_line,
                                                  temp_total_predicted_time,
                                                  current_prediction_premis.probability_of + np.log(probability_this))
                            queue_next_steps_future.put((-temp.probability_of, temp))

                    # Candidates generated this step become the next frontier.
                    queue_next_steps = queue_next_steps_future
                    queue_next_steps_future = PriorityQueue()

                counter += 1
                if current_prediction_premis is None:
                    print("Cannot find any trace that is compliant with formula given current beam size")
                    break

                output = []
                if current_prediction_premis is None:
                    predicted = u""
                    total_predicted_time = 0
                else:
                    predicted = (current_prediction_premis.cropped_line[prefix_size:])
                    total_predicted_time = current_prediction_premis.total_predicted_time

                if len(ground_truth) > 0:
                    output.append(prefix_size)
                    output.append(ground_truth)
                    output.append(predicted)
                    output.append(1 - distance.nlevenshtein(predicted, ground_truth))
                    output.append(1 - distance.jaccard(predicted, ground_truth))
                    output.append(ground_truth_t)
                    output.append(total_predicted_time)
                    output.append('')
                    output.append(metrics.mean_absolute_error([ground_truth_t], [total_predicted_time]))
                    output.append(metrics.median_absolute_error([ground_truth_t], [total_predicted_time]))
                    spamwriter.writerow(output)

    print("TIME TO FINISH --- %s seconds ---" % (time.time() - start_time))
|
{"hexsha": "90d4a0c3d32659fdf8443c0a1ccce0732e210241", "size": 12243, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/evaluation/inference_algorithms/baseline_2_cf.py", "max_stars_repo_name": "stebranchi/Incremental-predictive-monitoring-python3", "max_stars_repo_head_hexsha": "114b080df4afa0653ce03d8eb0059ceda096752b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/evaluation/inference_algorithms/baseline_2_cf.py", "max_issues_repo_name": "stebranchi/Incremental-predictive-monitoring-python3", "max_issues_repo_head_hexsha": "114b080df4afa0653ce03d8eb0059ceda096752b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/evaluation/inference_algorithms/baseline_2_cf.py", "max_forks_repo_name": "stebranchi/Incremental-predictive-monitoring-python3", "max_forks_repo_head_hexsha": "114b080df4afa0653ce03d8eb0059ceda096752b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.2007874016, "max_line_length": 121, "alphanum_fraction": 0.5197255575, "include": true, "reason": "import numpy", "num_tokens": 2075}
|
import numpy as np
import pandas as pd
from scipy import linalg
import scipy as sp
import matplotlib.pylab as plt
from scipy import sparse
# Select a finite-element backend: prefer Firedrake, fall back to FEniCS
# (dolfin).  `import_type` records which backend is active and drives the
# backend-specific branches later in the script.
try:
    from firedrake import *
    from firedrake.assemble import allocate_matrix, \
        create_assembly_callable
    # BUG FIX: the original never set `import_type` on the successful
    # Firedrake path, leaving it undefined and crashing the first
    # `import_type == ...` comparison with a NameError.
    import_type = "firedrake"
except ImportError:
    try:
        from dolfin import *
        import_type = "fenics"
    except ImportError as exc:
        # Neither backend is installed: fail loudly here instead of with a
        # confusing NameError later (the original silently labelled this
        # broken state "firedrake").
        raise ImportError(
            "This script requires either firedrake or dolfin (FEniCS)"
        ) from exc
def assemble_firedrake(dual_lin, bcs=None):
    """Assemble a Firedrake/Slate form into a SciPy CSR matrix.

    :param dual_lin: bilinear (Slate) form to assemble.
    :param bcs: optional list of DirichletBC objects applied during
        assembly; defaults to no boundary conditions.
    :return: ``scipy.sparse.csr_matrix`` holding the assembled operator.
    """
    # Avoid the shared-mutable-default pitfall of the original `bcs=[]`
    # (backward compatible: omitting bcs still means "no BCs").
    if bcs is None:
        bcs = []
    matrix = allocate_matrix(dual_lin, bcs=bcs, mat_type="aij")
    # Firedrake assembles lazily: build the assembly callable, then invoke
    # it to fill `matrix` in place.
    _assemble_form = create_assembly_callable(
        dual_lin, tensor=matrix, bcs=bcs, mat_type="aij")
    _assemble_form()
    # Extract the raw PETSc CSR arrays (indptr, column indices, values).
    ai, aj, av = matrix.petscmat.getValuesCSR()
    matrix_scipy = sp.sparse.csr_matrix((av, aj, ai))
    return matrix_scipy
# Mesh resolutions to sweep (UnitSquareMesh(n, n) for each n in N).
N = [4]
# Reynolds number scaling the viscous and pressure-mass terms below.
Re = 100.0
# Per-mesh extreme generalized eigenvalues of the element Schur
# complements, filled in by the resolution loop.
dual_min = []
dual_max = []
dual_eps_min = []
dual_eps_max = []
primal_min = []
primal_max = []
# Number of mesh cells at each resolution (first column of the table).
cell_num = []
# For each mesh resolution, estimate extreme generalized eigenvalues of the
# element Schur-complement preconditioners for the Stokes saddle-point
# problem (FEniCS: per-cell dense eigenproblems; Firedrake: global Slate
# block algebra plus spectrum plots).
for n in N:
    mesh = UnitSquareMesh(n, n)
    order = 1
    # Quadratic velocities, linear pressures, plus a BDM space for the
    # H(div) auxiliary (Laplacian) problem.
    H1 = VectorFunctionSpace(mesh, "CG", order + 1)
    Hdiv = FunctionSpace(mesh, "BDM", order + 1)
    P1 = FunctionSpace(mesh, "CG", order)
    if import_type == "fenics":
        # FEniCS: trial/test functions on the individual spaces.
        sigma = TrialFunction(Hdiv)
        tau = TestFunction(Hdiv)
        u = TrialFunction(H1)
        v = TestFunction(H1)
        p = TrialFunction(P1)
        q = TestFunction(P1)
        p_ = TrialFunction(P1)
        q_ = TestFunction(P1)
    elif import_type == "firedrake":
        # Firedrake: mixed spaces so Slate can slice the block system.
        W_stokes = H1 * P1
        W_laplacian = Hdiv * P1
        (sigma, p_) = TrialFunctions(W_laplacian)
        (tau, q_) = TestFunctions(W_laplacian)
        (u, p) = TrialFunctions(W_stokes)
        (v, q) = TestFunctions(W_stokes)
    eps = 1e-6
    # l: Re-scaled pressure mass; a: viscous term; a_eps: regularized
    # viscous term; b: divergence constraint; l_/c: auxiliary problem.
    l = Re * inner(p, q) * dx
    a = (1. / Re) * inner(grad(u), grad(v)) * dx
    a_eps = ((1. / Re) * inner(grad(u), grad(v)) + eps * inner(u, v)) * dx
    b = - q * div(u) * dx
    l_ = Re * inner(p_, q_) * dx
    c = div(sigma) * q_ * dx
    # NOTE(review): `p` is rebound here from a trial function to a form.
    # The forms above already captured the trial function so this works,
    # but it is fragile — a distinct name would be safer.
    p = (1. / Re) * (inner(sigma, tau) + inner(div(sigma), div(tau))) * dx
    if import_type == "firedrake":
        stokes_lin = a + b + l
        stokes_eps_lin = a_eps + b + l
        laplacian_lin = p + c + l_
        bcs = \
            [DirichletBC(W_stokes.sub(0), Constant((0, 0)), [1, 2, 3, 4])]
        bcs_primal = \
            [DirichletBC(H1, Constant((0, 0)), [1, 2, 3, 4])]
    # Running extremes of generalized eigenvalues over all cells
    # (only updated on the FEniCS path; stay +/-inf under Firedrake).
    e_min_primal = np.inf
    e_max_primal = -np.inf
    e_min_dual = np.inf
    e_max_dual = -np.inf
    e_min_dual_eps = np.inf
    e_max_dual_eps = -np.inf
    if import_type == 'fenics':
        # Assemble per-cell matrices and solve small dense generalized
        # eigenproblems cell by cell.
        for cell in cells(mesh):
            C = assemble_local(c, cell)
            P = assemble_local(p, cell)
            L = assemble_local(l, cell)
            # Dual Schur complement C P^{-1} C^T against pressure mass L.
            S = np.matmul(C, np.linalg.solve(P, C.T))
            e, _ = linalg.eig(S, L)
            e = np.real(e)
            e_min_dual = min(e) if min(e) < e_min_dual else e_min_dual
            e_max_dual = max(e) if max(e) > e_max_dual else e_max_dual
            A = assemble_local(a, cell)
            B = assemble_local(b, cell)
            # Primal Schur complement A + B^T L^{-1} B against A.
            S = A + np.matmul(B.T, np.linalg.solve(L, B))
            e, _ = linalg.eig(S, A)
            e = np.real(e)
            e_min_primal = min(e) if min(e) < e_min_primal else e_min_primal
            e_max_primal = max(e) if max(e) > e_max_primal else e_max_primal
            A_eps = assemble_local(a_eps, cell)
            # Regularized dual Schur complement B A_eps^{-1} B^T against L.
            S = np.matmul(B, np.linalg.solve(A_eps, B.T))
            e, _ = linalg.eig(S, L)
            e = np.real(e)
            e_min_dual_eps = min(e) if min(
                e) < e_min_dual_eps else e_min_dual_eps
            e_max_dual_eps = max(e) if max(
                e) > e_max_dual_eps else e_max_dual_eps
    if import_type == 'firedrake':
        # Use Slate block algebra to form element Schur complements, then
        # study the preconditioned spectra K x = lambda P x.
        A_laplacian = Tensor(laplacian_lin)
        A = A_laplacian.blocks
        dual_lin = A[1, 0] * A[0, 0].inv * A[1, 0].T
        dual_ele = assemble_firedrake(dual_lin)
        A_stokes_eps = Tensor(stokes_eps_lin)
        A = A_stokes_eps.blocks
        dual_lin = A[1, 0] * A[0, 0].inv * A[1, 0].T
        dual_ele_eps = assemble_firedrake(dual_lin)
        # NOTE(review): the name `A_stokes` suggests this was meant to be
        # Tensor(stokes_lin), not stokes_eps_lin (already used just
        # above) — confirm before relying on the primal results.
        A_stokes = Tensor(stokes_eps_lin)
        A = A_stokes.blocks
        primal_lin = A[0, 0] + A[1, 0].T * A[1, 1].inv * A[1, 0]
        primal_ele = assemble_firedrake(primal_lin, bcs=bcs_primal)
        stokes = assemble_firedrake(stokes_lin, bcs=bcs)
        # NOTE(review): `n` is rebound here from the mesh resolution to
        # the velocity dof count; harmless because the loop variable is
        # reset each iteration, but confusing.
        n = H1.dim()
        m = P1.dim()
        print(m + n)
        # Slice velocity block A, divergence block B and pressure block L
        # out of the assembled saddle-point matrix.
        A = stokes[:n, :][:, :n].toarray()
        B = stokes[n:n + m, :][:, :n].toarray()
        L = stokes[n:n + m, :][:, n:n + m].toarray()
        S_primal = A + np.matmul(B.T, np.linalg.solve(L, B))
        S_dual = np.matmul(B, np.linalg.solve(A, B.T))
        # Block-diagonal preconditioners from the element Schur complements.
        P_dual = sparse.block_diag([A, dual_ele]).toarray()
        P_dual_eps = sparse.block_diag([A, dual_ele_eps]).toarray()
        P_primal = sparse.block_diag([primal_ele, L]).toarray()
        K = sparse.bmat([[A, B.T], [B, None]]).toarray()
        fig = plt.figure()
        # Plot sorted real parts of each preconditioned spectrum.
        e, _ = linalg.eig(K, P_dual)
        e = np.sort(np.real(e))
        plt.plot(e, "o", label="$\\mathcal{A}x = \\lambda \\mathcal{P}_1x$")
        e, _ = linalg.eig(K, P_dual_eps)
        e = np.sort(np.real(e))
        plt.plot(e, "x", label="$\\mathcal{A}x = \\lambda \\mathcal{P}_2x$")
        e, _ = linalg.eig(K, P_primal)
        e = np.sort(np.real(e))
        plt.plot(e, "+", label="$\\mathcal{A}x = \\lambda \\mathcal{P}_3x$")
        plt.legend()
        fig.savefig("stokes.png")
    # Record this resolution's extremes for the summary table.
    dual_min.append(e_min_dual)
    dual_max.append(e_max_dual)
    dual_eps_min.append(e_min_dual_eps)
    dual_eps_max.append(e_max_dual_eps)
    primal_min.append(e_min_primal)
    primal_max.append(e_max_primal)
    cell_num.append(mesh.num_cells())
# Collect the per-mesh extreme eigenvalue estimates into a single table
# and emit it as LaTeX for inclusion in a write-up.
columns = [
    ("# cells", cell_num),
    ("dual_min", dual_min),
    ("dual_max", dual_max),
    ("dual_eps_min", dual_eps_min),
    ("dual_eps_max", dual_eps_max),
    ("primal_min", primal_min),
    ("primal_max", primal_max),
]
table = pd.DataFrame.from_dict(dict(columns))
print(table.to_latex())
|
{"hexsha": "63740c9b892b391ae2fdcfb911069f5243b5d310", "size": 5926, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/Stokes/Stokes_infsup.py", "max_stars_repo_name": "ralna/ElementSchur", "max_stars_repo_head_hexsha": "840f111a10dc80ab2367222c4a5b257e6e37af8b", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-23T15:48:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-23T15:48:02.000Z", "max_issues_repo_path": "examples/Stokes/Stokes_infsup.py", "max_issues_repo_name": "ralna/ElementSchur", "max_issues_repo_head_hexsha": "840f111a10dc80ab2367222c4a5b257e6e37af8b", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/Stokes/Stokes_infsup.py", "max_forks_repo_name": "ralna/ElementSchur", "max_forks_repo_head_hexsha": "840f111a10dc80ab2367222c4a5b257e6e37af8b", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2921348315, "max_line_length": 76, "alphanum_fraction": 0.5728990888, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1856}
|
import numpy as np
from lib.lif import LIF, ParamsLIF, LSM, ParamsLSM, LSM_const
# --- Network and sweep parameters ---
n = 2 # Number of neurons
q = 100 # Number of LSM neurons
x_input = 2 # Constant input
alpha1 = 10 # Cost function params
alpha2 = 30 # Cost function params
tau_s = 0.020 # Time scale for output filter
mu = 1 # Threshold
p = 0.05 # Window size
# NOTE(review): `t` is shadowed by loop variables of the same name inside
# the sweep below; it is only used here to construct the simulators.
t = 1 # Time for each epoch
N = 100 # Number of epochs
Wn = 20 # Number of W values we sweep over
wmax = 20 # Max W value of sweep
eta = 1 # Learning rate
#perturb_rate = 0.01 # Proportion of points that are perturbations
# # 1% = 10 Hz. Only half of these are spikes, so the injected noise rate is 5Hz
# Perturbation rates to compare (fraction of timebins perturbed).
mvals = [0.0025, 0.005, 0.01, 0.015]
M = len(mvals)
# Filename for results
fn_out = './sweeps/learningbeta_fixedx_sweepw_banana_perturbation.npz'
#Cost function params
# NOTE(review): `x` below is a cost-function constant, distinct from the
# network input `x_input` above — easy to confuse.
B1 = 2
B2 = 7
x = .05
y = 0.15
z = -0.2
# Simulators: liquid state machine (readout basis) and the LIF pair.
params = ParamsLSM(q = q, p = 1, t = t)
lsm = LSM(params)
params_lif = ParamsLIF()
lif = LIF(params_lif, t = t)
# Exponential filter used to smooth spike trains into rates (normalized
# to unit mass).
t_filter = np.linspace(0, 0.15, 150)
exp_filter = np.exp(-t_filter/tau_s)
exp_filter = exp_filter/np.sum(exp_filter)
ds = exp_filter[0]
wvals = np.linspace(1, wmax, Wn)
# Result tensors indexed (w0 idx, w1 idx, neuron, epoch[, perturb rate]).
beta_rd = np.zeros((Wn, Wn, n, N))
beta_rd_true = np.zeros((Wn, Wn, n, N))
beta_fd_true = np.zeros((Wn, Wn, n, N))
beta_sp = np.zeros((Wn, Wn, n, N, M))
# Sweep over all (w0, w1) weight pairs; for each pair run N epochs in which
# LSM readouts U (perturbation-based) and V (threshold-window based) are
# trained online to estimate the effect (beta) of each LIF neuron's
# spiking on the cost.
for i, w0 in enumerate(wvals):
    print("W0=%d"%w0)
    for j, w1 in enumerate(wvals):
        #init weights
        lif.W = np.array([w0, w1])
        V = np.ones((n,q))
        U = np.ones((n,q,M))
        #Also collect the c_abv, c_below for p = 0.03, p = 1, accumulated over each epoch, and estimate
        #the 'true' beta as we go
        c1_abv_p = np.zeros(0)
        c1_abv_1 = np.zeros(0)
        c1_blo_p = np.zeros(0)
        c1_blo_1 = np.zeros(0)
        c2_abv_p = np.zeros(0)
        c2_abv_1 = np.zeros(0)
        c2_blo_p = np.zeros(0)
        c2_blo_1 = np.zeros(0)
        count = 0
        for idx in range(N):
            #Simulate LSM
            s_lsm = lsm.simulate(x_input)
            #Simulate LIF
            (v, h, _, _) = lif.simulate()
            # Filtered (smoothed) spike trains of the two neurons.
            s1 = np.convolve(h[0,:], exp_filter)[0:h.shape[1]]
            s2 = np.convolve(h[1,:], exp_filter)[0:h.shape[1]]
            abvthr = np.zeros(n)
            blothr = np.zeros(n)
            # Banana (Rosenbrock-like) cost over the filtered outputs.
            cost = (B1*s1 - x)**2 + (z + B2*s2 - B2*(2*B1*s1 - y)**2)**2
            # Random +/-1 values to substitute at perturbed timebins.
            ptb = 2*(np.random.rand(*h.shape) < 0.5)-1
            #Create a perturbed set of trains
            for idx2, perturb_rate in enumerate(mvals):
                dU = np.zeros(U.shape[0:2])
                qtb = np.random.rand(*h.shape) < perturb_rate
                h_perturb = h.copy()
                h_perturb[qtb == True] = ptb[qtb == True]
                s1_perturb = np.convolve(h_perturb[0,:], exp_filter)[0:h.shape[1]]
                s2_perturb = np.convolve(h_perturb[1,:], exp_filter)[0:h.shape[1]]
                cost_perturbed = (B1*s1_perturb - x)**2 + (z + B2*s2_perturb - B2*(2*B1*s1_perturb - y)**2)**2
                # NOTE(review): the loop variable `t` shadows the epoch
                # duration `t` defined at the top of the script; `t` is
                # not used as a duration after this point, but the
                # shadowing is fragile.
                for t in range(v.shape[1]):
                    for k in range(n):
                        #If this timebin is a perturbation time then update U
                        if qtb[k,t]:
                            dU[k,:] = (np.dot(U[k,:,idx2], s_lsm[:,t])-ptb[k,t]*cost_perturbed[t])*s_lsm[:,t]
                            U[k,:,idx2] = U[k,:,idx2] - eta*dU[k,:]
                # Evaluate the current readout on a fresh LSM run; average
                # over the final 100 timebins.
                s_lsm = lsm.simulate(x_input)
                beta_sp[i,j,:,idx,idx2] = np.mean(np.dot(U[:,:,idx2], s_lsm[:,-100:]),1)
            #cost = (alpha1*s1 + alpha2*s2 - x**2)**2
            dV = np.zeros(V.shape)
            bt = [False, False]
            for t in range(v.shape[1]):
                for k in range(n):
                    # Accumulate cost samples below/above threshold mu,
                    # unrestricted (_1) and within the +/-p window (_p).
                    if (v[k,t] < mu):
                        if k == 0:
                            c1_blo_1 = np.hstack((c1_blo_1, cost[t]))
                        else:
                            c2_blo_1 = np.hstack((c2_blo_1, cost[t]))
                    if (v[k,t] >= mu):
                        if k == 0:
                            c1_abv_1 = np.hstack((c1_abv_1, cost[t]))
                        else:
                            c2_abv_1 = np.hstack((c2_abv_1, cost[t]))
                    if (v[k,t] > mu - p) & (v[k,t] < mu):
                        if k == 0:
                            c1_blo_p = np.hstack((c1_blo_p, cost[t]))
                        else:
                            c2_blo_p = np.hstack((c2_blo_p, cost[t]))
                        blothr[k] += 1
                        # Arm the update on the first sub-threshold visit
                        # inside the window.
                        if bt[k] == False:
                            dV[k,:] += (np.dot(V[k,:], s_lsm[:,t])+cost[t])*s_lsm[:,t]
                            bt[k] = True
                    elif (v[k,t] < mu + p) & (v[k,t] >= mu):
                        if k == 0:
                            c1_abv_p = np.hstack((c1_abv_p, cost[t]))
                        else:
                            c2_abv_p = np.hstack((c2_abv_p, cost[t]))
                        abvthr[k] += 1
                        #Only do the update when firing...
                        if bt[k] == True:
                            dV[k,:] += (np.dot(V[k,:], s_lsm[:,t])-cost[t])*s_lsm[:,t]
                            count += 1
                            V[k,:] = V[k,:] - eta*dV[k,:]#*N/(N+1)
                            dV[k,:] = np.zeros((1,q))
                            bt[k] = False
            # Window-based and unrestricted finite-difference estimates
            # of beta from the accumulated cost samples.
            beta_rd_true[i,j,0,idx] = np.mean(c1_abv_p)-np.mean(c1_blo_p)
            beta_rd_true[i,j,1,idx] = np.mean(c2_abv_p)-np.mean(c2_blo_p)
            beta_fd_true[i,j,0,idx] = np.mean(c1_abv_1)-np.mean(c1_blo_1)
            beta_fd_true[i,j,1,idx] = np.mean(c2_abv_1)-np.mean(c2_blo_1)
            # Evaluate V's readout on a fresh LSM run.
            s_lsm = lsm.simulate(x_input)
            beta_rd[i,j,:,idx] = np.mean(np.dot(V, s_lsm[:,-100:]),1)
#Save the results
np.savez(fn_out, wvals = wvals, beta_rd = beta_rd, beta_rd_true = beta_rd_true, beta_fd_true = beta_fd_true,\
    beta_sp = beta_sp)
|
{"hexsha": "070a883e0c2d6a2991026e0dd5ca3448d781e54b", "size": 6109, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/learningbeta_fixedx_sweepw_banana.py", "max_stars_repo_name": "benlansdell/deep-rdd", "max_stars_repo_head_hexsha": "2f1443aa9800d0e0f3a4ce9051c1b8b9ed8c2ae9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-01-21T18:21:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-30T16:31:12.000Z", "max_issues_repo_path": "scripts/learningbeta_fixedx_sweepw_banana.py", "max_issues_repo_name": "benlansdell/deep-rdd", "max_issues_repo_head_hexsha": "2f1443aa9800d0e0f3a4ce9051c1b8b9ed8c2ae9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/learningbeta_fixedx_sweepw_banana.py", "max_forks_repo_name": "benlansdell/deep-rdd", "max_forks_repo_head_hexsha": "2f1443aa9800d0e0f3a4ce9051c1b8b9ed8c2ae9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1907894737, "max_line_length": 110, "alphanum_fraction": 0.4624324767, "include": true, "reason": "import numpy", "num_tokens": 1858}
|
// Copyright (c) 2014-2015 DiMS dev-team
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "init.h"
#ifdef WIN32
#define MIN_CORE_FILEDESCRIPTORS 0
#else
#define MIN_CORE_FILEDESCRIPTORS 150
#endif
#if defined(HAVE_CONFIG_H)
#include "bitcoin-config.h"
#endif
#include "init.h"
#include "addrman.h"
#include "checkpoints.h"
#include "main.h"
#include "net.h"
#include "rpcserver.h"
#include "txdb.h"
#include "ui_interface.h"
#include "util.h"
#ifdef ENABLE_WALLET
#include "db.h"
#include "wallet.h"
#include "walletdb.h"
#endif
#include <inttypes.h>
#include <stdint.h>
#ifndef WIN32
#include <signal.h>
#endif
#include <boost/algorithm/string/predicate.hpp>
#include <boost/filesystem.hpp>
#include <boost/interprocess/sync/file_lock.hpp>
#include <openssl/crypto.h>
#include "common/actionHandler.h"
#include "common/manageNetwork.h"
#include "common/nodesManager.h"
#include "common/periodicActionExecutor.h"
#include "common/timeMedium.h"
#include "common/commandLine.h"
#include "common/originAddressScanner.h"
#include "common/authenticationProvider.h"
#include "common/noMediumHandling.h"
#include "monitor/processNetwork.h"
#include "monitor/controller.h"
#include "monitor/internalMediumProvider.h"
#include "monitor/server.h"
#include "monitor/clientRequestsManager.h"
#include "monitor/reputationTracer.h"
#include "monitor/registerRpcHooks.h"
#include "monitor/transactionRecordManager.h"
#include "monitor/copyStorageHandler.h"
#include "monitor/chargeRegister.h"
#ifdef ENABLE_WALLET
std::string strWalletFile;
CWallet* pwalletMain;
#endif
using namespace std;
using namespace boost;
namespace monitor
{
// Bit flags controlling how Bind() treats a candidate listen address.
enum BindFlags {
    BF_NONE = 0,
    // Address was given explicitly (e.g. via -bind): attempt the bind
    // even when the address's network is otherwise limited.
    BF_EXPLICIT = (1U << 0),
    // Surface a bind failure to the user via InitError instead of
    // returning false silently.
    BF_REPORT_ERROR = (1U << 1)
};
// Defined elsewhere; set to request an orderly shutdown of the process.
volatile extern bool fRequestShutdown;
// SIGTERM/SIGINT handler: request an orderly shutdown.  Async-signal-safe
// because it only writes a volatile flag that the main loop polls.
void HandleSIGTERM(int)
{
    fRequestShutdown = true;
}
// SIGHUP handler: ask the logging code to reopen debug.log (used for log
// rotation).  Only sets a flag, so it is async-signal-safe.
void HandleSIGHUP(int)
{
    fReopenDebugLog = true;
}
// Show a startup error message (non-GUI message box) and return false so
// callers can write `return InitError(...)` directly from AppInit.
bool static InitError(const std::string &str)
{
    uiInterface.ThreadSafeMessageBox(str, "", CClientUIInterface::MSG_ERROR | CClientUIInterface::NOSHOWGUI);
    return false;
}
// Show a startup warning message (non-GUI message box) and return true so
// initialization can continue after `InitWarning(...)`.
bool static InitWarning(const std::string &str)
{
    uiInterface.ThreadSafeMessageBox(str, "", CClientUIInterface::MSG_WARNING | CClientUIInterface::NOSHOWGUI);
    return true;
}
// Try to open a listening socket on `addr`.  Non-explicit addresses on
// limited (disabled) networks are skipped; a failed bind is reported to
// the user via InitError only when BF_REPORT_ERROR is set.
bool static Bind(const CService &addr, unsigned int flags)
{
    if (!(flags & BF_EXPLICIT) && IsLimited(addr))
        return false;

    std::string strError;
    if (BindListenPort(addr, strError))
        return true;

    // Bind failed: either surface the error or fail quietly.
    return (flags & BF_REPORT_ERROR) ? InitError(strError) : false;
}
/** Initialize bitcoin.
* @pre Parameters should be parsed and config file should be read.
*/
bool AppInit(boost::thread_group& threadGroup)
{
seed_insecure_rand();
// ********************************************************* Step 1: setup
#ifdef _MSC_VER
// Turn off Microsoft heap dump noise
_CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
_CrtSetReportFile(_CRT_WARN, CreateFileA("NUL", GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, 0));
#endif
#if _MSC_VER >= 1400
// Disable confusing "helpful" text message on abort, Ctrl-C
_set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT);
#endif
#ifdef WIN32
// Enable Data Execution Prevention (DEP)
// Minimum supported OS versions: WinXP SP3, WinVista >= SP1, Win Server 2008
// A failure is non-critical and needs no further attention!
#ifndef PROCESS_DEP_ENABLE
// We define this here, because GCCs winbase.h limits this to _WIN32_WINNT >= 0x0601 (Windows 7),
// which is not correct. Can be removed, when GCCs winbase.h is fixed!
#define PROCESS_DEP_ENABLE 0x00000001
#endif
typedef BOOL (WINAPI *PSETPROCDEPPOL)(DWORD);
PSETPROCDEPPOL setProcDEPPol = (PSETPROCDEPPOL)GetProcAddress(GetModuleHandleA("Kernel32.dll"), "SetProcessDEPPolicy");
if (setProcDEPPol != NULL) setProcDEPPol(PROCESS_DEP_ENABLE);
// Initialize Windows Sockets
WSADATA wsadata;
int ret = WSAStartup(MAKEWORD(2,2), &wsadata);
if (ret != NO_ERROR || LOBYTE(wsadata.wVersion ) != 2 || HIBYTE(wsadata.wVersion) != 2)
{
return InitError(strprintf("Error: Winsock library failed to start (WSAStartup returned error %d)", ret));
}
#endif
#ifndef WIN32
umask(077);
// Clean shutdown on SIGTERM
struct sigaction sa;
sa.sa_handler = HandleSIGTERM;
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
sigaction(SIGTERM, &sa, NULL);
sigaction(SIGINT, &sa, NULL);
// Reopen debug.log on SIGHUP
struct sigaction sa_hup;
sa_hup.sa_handler = HandleSIGHUP;
sigemptyset(&sa_hup.sa_mask);
sa_hup.sa_flags = 0;
sigaction(SIGHUP, &sa_hup, NULL);
#if defined (__SVR4) && defined (__sun)
// ignore SIGPIPE on Solaris
signal(SIGPIPE, SIG_IGN);
#endif
#endif
// ********************************************************* Step 2: parameter interactions
if (mapArgs.count("-bind")) {
// when specifying an explicit binding address, you want to listen on it
// even when -connect or -proxy is specified
if (SoftSetBoolArg("-listen", true))
LogPrintf("AppInit2 : parameter interaction: -bind set -> setting -listen=1\n");
}
if (mapArgs.count("-connect") && mapMultiArgs["-connect"].size() > 0) {
// when only connecting to trusted nodes, do not seed via DNS, or listen by default
if (SoftSetBoolArg("-dnsseed", false))
LogPrintf("AppInit2 : parameter interaction: -connect set -> setting -dnsseed=0\n");
if (SoftSetBoolArg("-listen", false))
LogPrintf("AppInit2 : parameter interaction: -connect set -> setting -listen=0\n");
}
if (mapArgs.count("-proxy")) {
// to protect privacy, do not listen by default if a default proxy server is specified
if (SoftSetBoolArg("-listen", false))
LogPrintf("AppInit2 : parameter interaction: -proxy set -> setting -listen=0\n");
}
if (!GetBoolArg("-listen", true)) {
// do not map ports or try to retrieve public IP when not listening (pointless)
if (SoftSetBoolArg("-upnp", false))
LogPrintf("AppInit2 : parameter interaction: -listen=0 -> setting -upnp=0\n");
if (SoftSetBoolArg("-discover", false))
LogPrintf("AppInit2 : parameter interaction: -listen=0 -> setting -discover=0\n");
}
// Make sure enough file descriptors are available
int nBind = std::max((int)mapArgs.count("-bind"), 1);
nMaxConnections = GetArg("-maxconnections", 128);
nMaxConnections = std::max(std::min(nMaxConnections, (int)(FD_SETSIZE - nBind - MIN_CORE_FILEDESCRIPTORS)), 0);
int nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS);
if (nFD < MIN_CORE_FILEDESCRIPTORS)
return InitError(_("Not enough file descriptors available."));
if (nFD - MIN_CORE_FILEDESCRIPTORS < nMaxConnections)
nMaxConnections = nFD - MIN_CORE_FILEDESCRIPTORS;
// ********************************************************* Step 3: parameter-to-internal-flags
fDebug = !mapMultiArgs["-debug"].empty();
// Special-case: if -debug=0/-nodebug is set, turn off debugging messages
const vector<string>& categories = mapMultiArgs["-debug"];
if (GetBoolArg("-nodebug", false) || find(categories.begin(), categories.end(), string("0")) != categories.end())
fDebug = false;
// Check for -debugnet (deprecated)
if (GetBoolArg("-debugnet", false))
InitWarning(_("Warning: Deprecated argument -debugnet ignored, use -debug=net"));
fBenchmark = GetBoolArg("-benchmark", false);
mempool.setSanityCheck(GetBoolArg("-checkmempool", RegTest()));
Checkpoints::fEnabled = GetBoolArg("-checkpoints", true);
// -par=0 means autodetect, but nScriptCheckThreads==0 means no concurrency
nScriptCheckThreads = GetArg("-par", 0);
if (nScriptCheckThreads <= 0)
nScriptCheckThreads += boost::thread::hardware_concurrency();
if (nScriptCheckThreads <= 1)
nScriptCheckThreads = 0;
else if (nScriptCheckThreads > MAX_SCRIPTCHECK_THREADS)
nScriptCheckThreads = MAX_SCRIPTCHECK_THREADS;
fServer = GetBoolArg("-server", true);
fPrintToConsole = GetBoolArg("-printtoconsole", false);
fLogTimestamps = GetBoolArg("-logtimestamps", true);
#ifdef ENABLE_WALLET
bool fDisableWallet = GetBoolArg("-disablewallet", false);
#endif
if (mapArgs.count("-timeout"))
{
int nNewTimeout = GetArg("-timeout", 5000);
if (nNewTimeout > 0 && nNewTimeout < 600000)
nConnectTimeout = nNewTimeout;
}
// Continue to put "/P2SH/" in the coinbase to monitor
// BIP16 support.
// This can be removed eventually...
const char* pszP2SH = "/P2SH/";
COINBASE_FLAGS << std::vector<unsigned char>(pszP2SH, pszP2SH+strlen(pszP2SH));
#ifdef ENABLE_WALLET
if (mapArgs.count("-paytxfee"))
{
if (!ParseMoney(mapArgs["-paytxfee"], nTransactionFee))
return InitError(strprintf(_("Invalid amount for -paytxfee=<amount>: '%s'"), mapArgs["-paytxfee"]));
if (nTransactionFee > 0.25 * COIN)
InitWarning(_("Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction."));
}
bSpendZeroConfChange = GetArg("-spendzeroconfchange", true);
strWalletFile = GetArg("-wallet", "wallet.dat");
#endif
// ********************************************************* Step 4: application initialization: dir lock, daemonize, pidfile, debug log
std::string strDataDir = GetDataDir( common::AppType::Monitor ).string();
#ifdef ENABLE_WALLET
// Wallet file must be a plain filename without a directory
if (strWalletFile != boost::filesystem::basename(strWalletFile) + boost::filesystem::extension(strWalletFile))
return InitError(strprintf(_("Wallet %s resides outside data directory %s"), strWalletFile, strDataDir));
#endif
boost::filesystem::path pathLockFile = GetDataDir(common::AppType::Monitor) / ".lock";
FILE* file = fopen(pathLockFile.string().c_str(), "a"); // empty lock file; created if it doesn't exist.
if (file) fclose(file);
static boost::interprocess::file_lock lock(pathLockFile.string().c_str());
if (!lock.try_lock())
return InitError(strprintf(_("Cannot obtain a lock on data directory %s. Bitcoin is probably already running."), strDataDir));
if (GetBoolArg("-shrinkdebugfile", !fDebug))
ShrinkDebugFile();
LogPrintf("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");
LogPrintf("Bitcoin version %s (%s)\n", FormatFullVersion(), CLIENT_DATE);
LogPrintf("Using OpenSSL version %s\n", SSLeay_version(SSLEAY_VERSION));
if (!fLogTimestamps)
LogPrintf("Startup time: %s\n", DateTimeStrFormat("%Y-%m-%d %H:%M:%S", GetTime()));
LogPrintf("Default data directory %s\n", GetDefaultDataDir(common::AppType::Monitor).string());
LogPrintf("Using data directory %s\n", strDataDir);
LogPrintf("Using at most %i connections (%i file descriptors available)\n", nMaxConnections, nFD);
std::ostringstream strErrors;
int64_t nStart;
// ********************************************************* Step 5: verify wallet database integrity
#ifdef ENABLE_WALLET
if (!fDisableWallet) {
uiInterface.InitMessage(_("Verifying wallet..."));
if (!bitdb.Open(GetDataDir(common::AppType::Monitor)))
{
// try moving the database env out of the way
boost::filesystem::path pathDatabase = GetDataDir(common::AppType::Monitor) / "database";
boost::filesystem::path pathDatabaseBak = GetDataDir(common::AppType::Monitor) / strprintf("database.%"PRId64".bak", GetTime());
try {
boost::filesystem::rename(pathDatabase, pathDatabaseBak);
LogPrintf("Moved old %s to %s. Retrying.\n", pathDatabase.string(), pathDatabaseBak.string());
} catch(boost::filesystem::filesystem_error &error) {
// failure is ok (well, not really, but it's not worse than what we started with)
}
// try again
if (!bitdb.Open(GetDataDir(common::AppType::Monitor))) {
// if it still fails, it probably means we can't even create the database env
string msg = strprintf(_("Error initializing wallet database environment %s!"), strDataDir);
return InitError(msg);
}
}
if (GetBoolArg("-salvagewallet", false))
{
// Recover readable keypairs:
if (!CWalletDB::Recover(bitdb, strWalletFile, true))
return false;
}
if (filesystem::exists(GetDataDir(common::AppType::Monitor) / strWalletFile))
{
CDBEnv::VerifyResult r = bitdb.Verify(strWalletFile, CWalletDB::Recover);
if (r == CDBEnv::RECOVER_OK)
{
string msg = strprintf(_("Warning: wallet.dat corrupt, data salvaged!"
" Original wallet.dat saved as wallet.{timestamp}.bak in %s; if"
" your balance or transactions are incorrect you should"
" restore from a backup."), strDataDir);
InitWarning(msg);
}
if (r == CDBEnv::RECOVER_FAIL)
return InitError(_("wallet.dat corrupt, salvage failed"));
}
} // (!fDisableWallet)
#endif // ENABLE_WALLET
// ********************************************************* Step 6: network initialization
RegisterNodeSignals(GetNodeSignals());
int nSocksVersion = GetArg("-socks", 5);
if (nSocksVersion != 4 && nSocksVersion != 5)
return InitError(strprintf(_("Unknown -socks proxy version requested: %i"), nSocksVersion));
if (mapArgs.count("-onlynet")) {
std::set<enum Network> nets;
BOOST_FOREACH(std::string snet, mapMultiArgs["-onlynet"]) {
enum Network net = ParseNetwork(snet);
if (net == NET_UNROUTABLE)
return InitError(strprintf(_("Unknown network specified in -onlynet: '%s'"), snet));
nets.insert(net);
}
for (int n = 0; n < NET_MAX; n++) {
enum Network net = (enum Network)n;
if (!nets.count(net))
SetLimited(net);
}
}
#if defined(USE_IPV6)
#if ! USE_IPV6
else
SetLimited(NET_IPV6);
#endif
#endif
CService addrProxy;
bool fProxy = false;
if (mapArgs.count("-proxy")) {
addrProxy = CService(mapArgs["-proxy"], 9050);
if (!addrProxy.IsValid())
return InitError(strprintf(_("Invalid -proxy address: '%s'"), mapArgs["-proxy"]));
if (!IsLimited(NET_IPV4))
SetProxy(NET_IPV4, addrProxy, nSocksVersion);
if (nSocksVersion > 4) {
#ifdef USE_IPV6
if (!IsLimited(NET_IPV6))
SetProxy(NET_IPV6, addrProxy, nSocksVersion);
#endif
SetNameProxy(addrProxy, nSocksVersion);
}
fProxy = true;
}
// -onion can override normal proxy, -noonion disables tor entirely
// -tor here is a temporary backwards compatibility measure
if (mapArgs.count("-tor"))
printf("Notice: option -tor has been replaced with -onion and will be removed in a later version.\n");
if (!(mapArgs.count("-onion") && mapArgs["-onion"] == "0") &&
!(mapArgs.count("-tor") && mapArgs["-tor"] == "0") &&
(fProxy || mapArgs.count("-onion") || mapArgs.count("-tor"))) {
CService addrOnion;
if (!mapArgs.count("-onion") && !mapArgs.count("-tor"))
addrOnion = addrProxy;
else
addrOnion = mapArgs.count("-onion")?CService(mapArgs["-onion"], 9050):CService(mapArgs["-tor"], 9050);
if (!addrOnion.IsValid())
return InitError(strprintf(_("Invalid -onion address: '%s'"), mapArgs.count("-onion")?mapArgs["-onion"]:mapArgs["-tor"]));
SetProxy(NET_TOR, addrOnion, 5);
SetReachable(NET_TOR);
}
// see Step 2: parameter interactions for more information about these
fNoListen = !GetBoolArg("-listen", true);
fDiscover = GetBoolArg("-discover", true);
fNameLookup = GetBoolArg("-dns", true);
bool fBound = false;
if (!fNoListen) {
if (mapArgs.count("-bind")) {
BOOST_FOREACH(std::string strBind, mapMultiArgs["-bind"]) {
CService addrBind;
if (!Lookup(strBind.c_str(), addrBind, GetListenPort<CChainParams>(), false))
return InitError(strprintf(_("Cannot resolve -bind address: '%s'"), strBind));
fBound |= Bind(addrBind, (BF_EXPLICIT | BF_REPORT_ERROR));
}
}
else {
struct in_addr inaddr_any;
inaddr_any.s_addr = INADDR_ANY;
#ifdef USE_IPV6
fBound |= Bind(CService(in6addr_any, GetListenPort<CChainParams>()), BF_NONE);
#endif
fBound |= Bind(CService(inaddr_any, GetListenPort<CChainParams>()), !fBound ? BF_REPORT_ERROR : BF_NONE);
}
if (!fBound)
return InitError(_("Failed to listen on any port. Use -listen=0 if you want this."));
}
if (mapArgs.count("-externalip")) {
BOOST_FOREACH(string strAddr, mapMultiArgs["-externalip"]) {
CService addrLocal(strAddr, GetListenPort<CChainParams>(), fNameLookup);
if (!addrLocal.IsValid())
return InitError(strprintf(_("Cannot resolve -externalip address: '%s'"), strAddr));
AddLocal(CService(strAddr, GetListenPort<CChainParams>(), fNameLookup), LOCAL_MANUAL);
}
}
BOOST_FOREACH(string strDest, mapMultiArgs["-seednode"])
AddOneShot(strDest);
// ********************************************************* Step 7: load block chain
fReindex = GetBoolArg("-reindex", false);
// Upgrading to 0.8; hard-link the old blknnnn.dat files into /blocks/
filesystem::path blocksDir = GetDataDir(common::AppType::Monitor) / "blocks";
if (!filesystem::exists(blocksDir))
{
filesystem::create_directories(blocksDir);
bool linked = false;
for (unsigned int i = 1; i < 10000; i++) {
filesystem::path source = GetDataDir(common::AppType::Monitor) / strprintf("blk%04u.dat", i);
if (!filesystem::exists(source)) break;
filesystem::path dest = blocksDir / strprintf("blk%05u.dat", i-1);
try {
filesystem::create_hard_link(source, dest);
LogPrintf("Hardlinked %s -> %s\n", source.string(), dest.string());
linked = true;
} catch (filesystem::filesystem_error & e) {
// Note: hardlink creation failing is not a disaster, it just means
// blocks will get re-downloaded from peers.
LogPrintf("Error hardlinking blk%04u.dat : %s\n", i, e.what());
break;
}
}
if (linked)
{
fReindex = true;
}
}
bool fLoaded = false;
while (!fLoaded) {
bool fReset = fReindex;
std::string strLoadError;
uiInterface.InitMessage(_("Loading block index..."));
nStart = GetTimeMillis();
do {
try {
UnloadBlockIndex();
/*
if (!LoadBlockIndex()) {
strLoadError = _("Error loading block database");
break;
}
*/
// If the loaded chain has a wrong genesis, bail out immediately
// (we're likely using a testnet datadir, or the other way around).
if (!mapBlockIndex.empty() && chainActive.Genesis() == NULL)
return InitError(_("Incorrect or no genesis block found. Wrong datadir for network?"));
// Initialize the block index (no-op if non-empty database was already loaded)
if (!InitBlockIndex()) {
strLoadError = _("Error initializing block database");
break;
}
// Check for changed -txindex state
if (fTxIndex != GetBoolArg("-txindex", false)) {
strLoadError = _("You need to rebuild the database using -reindex to change -txindex");
break;
}
uiInterface.InitMessage(_("Verifying blocks..."));
if (!VerifyDB(GetArg("-checklevel", 3),
GetArg("-checkblocks", 288))) {
strLoadError = _("Corrupted block database detected");
break;
}
} catch(std::exception &e) {
if (fDebug) LogPrintf("%s\n", e.what());
strLoadError = _("Error opening block database");
break;
}
fLoaded = true;
} while(false);
if (!fLoaded) {
// first suggest a reindex
if (!fReset) {
bool fRet = uiInterface.ThreadSafeMessageBox(
strLoadError + ".\n\n" + _("Do you want to rebuild the block database now?"),
"", CClientUIInterface::MSG_ERROR | CClientUIInterface::BTN_ABORT);
if (fRet) {
fReindex = true;
fRequestShutdown = false;
} else {
LogPrintf("Aborted block database rebuild. Exiting.\n");
return false;
}
} else {
return InitError(strLoadError);
}
}
}
// As LoadBlockIndex can take several minutes, it's possible the user
// requested to kill the GUI during the last operation. If so, exit.
// As the program has not fully started yet, Shutdown() is possibly overkill.
if (fRequestShutdown)
{
LogPrintf("Shutdown requested. Exiting.\n");
return false;
}
// ********************************************************* Step 8: load wallet
#ifdef ENABLE_WALLET
if (fDisableWallet) {
pwalletMain = NULL;
LogPrintf("Wallet disabled!\n");
} else {
if (GetBoolArg("-zapwallettxes", false)) {
uiInterface.InitMessage(_("Zapping all transactions from wallet..."));
pwalletMain = CWallet::getInstance(strWalletFile);
DBErrors nZapWalletRet = pwalletMain->ZapWalletTx();
if (nZapWalletRet != DB_LOAD_OK) {
uiInterface.InitMessage(_("Error loading wallet.dat: Wallet corrupted"));
return false;
}
delete pwalletMain;
pwalletMain = NULL;
}
uiInterface.InitMessage(_("Loading wallet..."));
nStart = GetTimeMillis();
bool fFirstRun = true;
pwalletMain = CWallet::getInstance(strWalletFile);
DBErrors nLoadWalletRet = pwalletMain->LoadWallet(fFirstRun);
if (nLoadWalletRet != DB_LOAD_OK)
{
if (nLoadWalletRet == DB_CORRUPT)
strErrors << _("Error loading wallet.dat: Wallet corrupted") << "\n";
else if (nLoadWalletRet == DB_NONCRITICAL_ERROR)
{
string msg(_("Warning: error reading wallet.dat! All keys read correctly, but transaction data"
" or address book entries might be missing or incorrect."));
InitWarning(msg);
}
else if (nLoadWalletRet == DB_TOO_NEW)
strErrors << _("Error loading wallet.dat: Wallet requires newer version of Bitcoin") << "\n";
else if (nLoadWalletRet == DB_NEED_REWRITE)
{
strErrors << _("Wallet needed to be rewritten: restart Bitcoin to complete") << "\n";
LogPrintf("%s", strErrors.str());
return InitError(strErrors.str());
}
else
strErrors << _("Error loading wallet.dat") << "\n";
}
if (GetBoolArg("-upgradewallet", fFirstRun))
{
int nMaxVersion = GetArg("-upgradewallet", 0);
if (nMaxVersion == 0) // the -upgradewallet without argument case
{
LogPrintf("Performing wallet upgrade to %i\n", FEATURE_LATEST);
nMaxVersion = CLIENT_VERSION;
pwalletMain->SetMinVersion(FEATURE_LATEST); // permanently upgrade the wallet immediately
}
else
LogPrintf("Allowing wallet upgrade up to %i\n", nMaxVersion);
if (nMaxVersion < pwalletMain->GetVersion())
strErrors << _("Cannot downgrade wallet") << "\n";
pwalletMain->SetMaxVersion(nMaxVersion);
}
if (fFirstRun)
{
// Create new keyUser and set as default key
RandAddSeedPerfmon();
CPubKey newDefaultKey;
if (pwalletMain->GetKeyFromPool(newDefaultKey)) {
pwalletMain->SetDefaultKey(newDefaultKey);
if (!pwalletMain->SetAddressBook(pwalletMain->vchDefaultKey.GetID(), "", "receive"))
strErrors << _("Cannot write default address") << "\n";
}
}
LogPrintf("%s", strErrors.str());
LogPrintf(" wallet %15"PRId64"ms\n", GetTimeMillis() - nStart);
RegisterWallet(pwalletMain);
} // (!fDisableWallet)
#else // ENABLE_WALLET
LogPrintf("No wallet compiled in!\n");
#endif // !ENABLE_WALLET
// ********************************************************* Step 9: import blocks
common::COriginAddressScanner::getInstance()->setStorage( monitor::CTransactionRecordManager::getInstance() );
/* create threads of action handler */
threadGroup.create_thread( boost::bind( &common::COriginAddressScanner::loop, common::COriginAddressScanner::getInstance() ) );
threadGroup.create_thread( boost::bind( &monitor::CReputationTracker::loop, monitor::CReputationTracker::getInstance() ) );
threadGroup.create_thread( boost::bind( &common::CSegmentFileStorage::flushLoop, common::CSegmentFileStorage::getInstance() ) );
threadGroup.create_thread( boost::bind( &common::CActionHandler::loop, common::CActionHandler::getInstance() ) );
threadGroup.create_thread( boost::bind( &common::CTimeMedium::workLoop, common::CTimeMedium::getInstance() ) );
threadGroup.create_thread( boost::bind( &monitor::CClientRequestsManager::processRequestLoop, monitor::CClientRequestsManager::getInstance() ) );
threadGroup.create_thread( boost::bind( &common::CCommandLine::workLoop, common::CCommandLine::getInstance() ) );
threadGroup.create_thread( boost::bind( &CCopyStorageHandler::loop, CCopyStorageHandler::getInstance() ) );
threadGroup.create_thread( boost::bind( &CChargeRegister::loop, CChargeRegister::getInstance() ) );
common::CActionHandler::getInstance()->addConnectionProvider( (common::CConnectionProvider*)monitor::CInternalMediumProvider::getInstance() );
common::CActionHandler::getInstance()->addConnectionProvider( (common::CConnectionProvider*)monitor::CReputationTracker::getInstance() );
common::CActionHandler::getInstance()->addConnectionProvider( (common::CConnectionProvider*)common::CErrorMediumProvider::getInstance() );
common::CManageNetwork::getInstance()->registerNodeSignals( CProcessNetwork::getInstance() );
common::CManageNetwork::getInstance()->connectToNetwork( threadGroup );
common::CPeriodicActionExecutor * periodicActionExecutor
= common::CPeriodicActionExecutor::getInstance();
threadGroup.create_thread(boost::bind(&common::CPeriodicActionExecutor::processingLoop, periodicActionExecutor ));
monitor::CInternalMediumProvider::getInstance()->registerRemoveCallback( GetNodeSignals() );
// ********************************************************* Step 10: load peers
CController::getInstance();
CWallet::getInstance()->AddKeyPubKey(
common::CAuthenticationProvider::getInstance()->getMyPrivKey()
, common::CAuthenticationProvider::getInstance()->getMyKey());
nStart = GetTimeMillis();
{
CAddrDB adb;
if (!adb.Read(addrman))
LogPrintf("Invalid or missing peers.dat; recreating\n");
}
LogPrintf("Loaded %i addresses from peers.dat %"PRId64"ms\n",
addrman.size(), GetTimeMillis() - nStart);
// ********************************************************* Step 11: start node
if (!CheckDiskSpace())
return false;
if (!strErrors.str().empty())
return InitError(strErrors.str());
RandAddSeedPerfmon();
//// debug print
LogPrintf("mapBlockIndex.size() = %"PRIszu"\n", mapBlockIndex.size());
LogPrintf("nBestHeight = %d\n", chainActive.Height());
#ifdef ENABLE_WALLET
LogPrintf("setKeyPool.size() = %"PRIszu"\n", pwalletMain ? pwalletMain->setKeyPool.size() : 0);
LogPrintf("mapAddressBook.size() = %"PRIszu"\n", pwalletMain ? pwalletMain->mapAddressBook.size() : 0);
#endif
// InitRPCMining is needed here so getwork/getblocktemplate in the GUI debug console works properly.
// InitRPCMining();
m_setTransaction.connect( boost::bind( &monitor::CInternalMediumProvider::setTransaction, monitor::CInternalMediumProvider::getInstance(), _1, _2 ) );
m_setMerkleBlock.connect( boost::bind( &monitor::CInternalMediumProvider::setMerkleBlock, monitor::CInternalMediumProvider::getInstance(), _1, _2 ) );
if (fServer)
StartRPCThreads();
StartNode(threadGroup);
StopHook.connect( &StartShutdown );
monitor::registerHooks();
monitor::runServer();
// ********************************************************* Step 12: finished
uiInterface.InitMessage(_("Done loading"));
#ifdef ENABLE_WALLET
if (pwalletMain) {
// Run a thread to flush wallet periodically
threadGroup.create_thread(boost::bind(&ThreadFlushWalletDB, boost::ref(pwalletMain->strWalletFile)));
}
#endif
return !fRequestShutdown;
}
void Shutdown()
{
    // Orderly teardown of the node. Guarded by a non-blocking try-lock on a
    // function-local critical section so that concurrent or repeated calls
    // become no-ops after the first one wins the lock.
    LogPrintf("Shutdown : In progress...\n");
    static CCriticalSection cs_Shutdown;
    TRY_LOCK(cs_Shutdown, lockShutdown);
    if (!lockShutdown) return;
    RenameThread("bitcoin-shutoff");
#ifdef ENABLE_WALLET
    // First wallet-db flush while the node is still partially up
    // (argument presumably distinguishes a final/shutdown flush — confirm
    // against bitdb.Flush's signature).
    if (pwalletMain)
        bitdb.Flush(false);
#endif
    StopNode();
    UnregisterNodeSignals(GetNodeSignals());
    {
        // NOTE(review): empty scoped LOCK(cs_main) — presumably acts as a
        // barrier so in-flight holders of cs_main finish before teardown
        // continues; confirm intent.
        LOCK(cs_main);
    }
#ifdef ENABLE_WALLET
    // Second flush after networking has stopped.
    if (pwalletMain)
        bitdb.Flush(true);
#endif
    boost::filesystem::remove(GetPidFile());
    UnregisterAllWallets();
#ifdef ENABLE_WALLET
    if (pwalletMain)
        delete pwalletMain;
#endif
    LogPrintf("Shutdown : done\n");
}
}
|
{"hexsha": "c5475e9830b14a8edfd77f561692196f5187b866", "size": 27471, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/monitor/init.cpp", "max_stars_repo_name": "salarii/dims", "max_stars_repo_head_hexsha": "b8008c49edd10a9ca50923b89e3b469c342d9cee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2015-01-22T11:22:19.000Z", "max_stars_repo_stars_event_max_datetime": "2015-01-22T11:22:19.000Z", "max_issues_repo_path": "src/monitor/init.cpp", "max_issues_repo_name": "salivan-ratcoin-dev-team/dims", "max_issues_repo_head_hexsha": "b8008c49edd10a9ca50923b89e3b469c342d9cee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/monitor/init.cpp", "max_forks_repo_name": "salivan-ratcoin-dev-team/dims", "max_forks_repo_head_hexsha": "b8008c49edd10a9ca50923b89e3b469c342d9cee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5547169811, "max_line_length": 151, "alphanum_fraction": 0.6936405664, "num_tokens": 7215}
|
"""
This code is modified from Hengyuan Hu's repository.
https://github.com/hengyuan-hu/bottom-up-attention-vqa
Reads in a tsv file with pre-trained bottom up attention features and
stores it in HDF5 format. Also store {image_id: feature_idx}
as a pickle file.
Hierarchy of HDF5 file:
{ 'image_features': num_images x num_boxes x 2048 array of features
'image_bb': num_images x num_boxes x 4 array of bounding boxes }
"""
from __future__ import print_function
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import base64
import csv
import h5py
import _pickle as cPickle
import numpy as np
import utils
# Dataset split to process; drives all input/output file naming below.
target = 'test2015'

csv.field_size_limit(sys.maxsize)  # base64 feature columns exceed csv's default field limit

FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']
infile = 'data/%s_36/%s_resnet101_faster_rcnn_genome_36.tsv' % (target, target)
data_file = 'data/%s36.hdf5' % target  # output HDF5 (features, boxes, spatial feats)
indices_file = 'data/%s36_imgid2idx.pkl' % target  # pickled {image_id: row index}
ids_file = 'data/%s_ids.pkl' % target  # cached collection of image ids
# Bottom-up attention features: 36 fixed boxes, 2048-d feature per box.
feature_length = 2048
num_fixed_boxes = 36
if __name__ == '__main__':
    # Convert the bottom-up-attention tsv into HDF5 + {image_id: idx} pickle.
    h = h5py.File(data_file, "w")

    if os.path.exists(ids_file):
        imgids = cPickle.load(open(ids_file, 'rb'))
    else:
        imgids = utils.load_imageid('data/%s' % target)
        cPickle.dump(imgids, open(ids_file, 'wb'))

    indices = {}

    # Pre-allocate one row per image; rows are filled in tsv order.
    img_bb = h.create_dataset(
        'image_bb', (len(imgids), num_fixed_boxes, 4), 'f')
    img_features = h.create_dataset(
        'image_features', (len(imgids), num_fixed_boxes, feature_length), 'f')
    spatial_img_features = h.create_dataset(
        'spatial_features', (len(imgids), num_fixed_boxes, 6), 'f')

    counter = 0

    print("reading tsv...")
    # The tsv is only read, so open it read-only (was "r+").
    with open(infile, "r") as tsv_in_file:
        reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=FIELDNAMES)
        for item in reader:
            item['num_boxes'] = int(item['num_boxes'])
            item['boxes'] = bytes(item['boxes'], 'utf')
            item['features'] = bytes(item['features'], 'utf')
            image_id = int(item['image_id'])
            image_w = float(item['image_w'])
            image_h = float(item['image_h'])
            # base64.decodestring was deprecated since 3.1 and removed in
            # Python 3.9; decodebytes is the drop-in replacement.
            bboxes = np.frombuffer(
                base64.decodebytes(item['boxes']),
                dtype=np.float32).reshape((item['num_boxes'], -1))

            # Six spatial features per box: normalized (x1, y1, x2, y2, w, h).
            box_width = bboxes[:, 2] - bboxes[:, 0]
            box_height = bboxes[:, 3] - bboxes[:, 1]
            scaled_width = (box_width / image_w)[..., np.newaxis]
            scaled_height = (box_height / image_h)[..., np.newaxis]
            scaled_x = (bboxes[:, 0] / image_w)[..., np.newaxis]
            scaled_y = (bboxes[:, 1] / image_h)[..., np.newaxis]

            spatial_features = np.concatenate(
                (scaled_x,
                 scaled_y,
                 scaled_x + scaled_width,
                 scaled_y + scaled_height,
                 scaled_width,
                 scaled_height),
                axis=1)

            if image_id in imgids:
                imgids.remove(image_id)
                indices[image_id] = counter
                img_bb[counter, :, :] = bboxes
                img_features[counter, :, :] = np.frombuffer(
                    base64.decodebytes(item['features']),
                    dtype=np.float32).reshape((item['num_boxes'], -1))
                spatial_img_features[counter, :, :] = spatial_features
                counter += 1
            else:
                assert False, 'Unknown image id: %d' % image_id

    if len(imgids) != 0:
        print('Warning: image_ids is not empty')

    cPickle.dump(indices, open(indices_file, 'wb'))
    h.close()
    print("done!")
|
{"hexsha": "c668783dad78c64f8cfe41eb15a5879ecf5a33f1", "size": 3910, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/detection_features_converter_target.py", "max_stars_repo_name": "Shaobo-Xu/ban-vqa", "max_stars_repo_head_hexsha": "9b2f2a2acc91c542b80b756aed23fbb380e69f3a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 505, "max_stars_repo_stars_event_min_datetime": "2018-06-16T07:02:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T06:07:03.000Z", "max_issues_repo_path": "tools/detection_features_converter_target.py", "max_issues_repo_name": "Shaobo-Xu/ban-vqa", "max_issues_repo_head_hexsha": "9b2f2a2acc91c542b80b756aed23fbb380e69f3a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 42, "max_issues_repo_issues_event_min_datetime": "2018-06-19T12:15:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T08:29:02.000Z", "max_forks_repo_path": "tools/detection_features_converter_target.py", "max_forks_repo_name": "Shaobo-Xu/ban-vqa", "max_forks_repo_head_hexsha": "9b2f2a2acc91c542b80b756aed23fbb380e69f3a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 110, "max_forks_repo_forks_event_min_datetime": "2018-06-18T18:38:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T05:21:21.000Z", "avg_line_length": 33.7068965517, "max_line_length": 83, "alphanum_fraction": 0.5982097187, "include": true, "reason": "import numpy", "num_tokens": 966}
|
import collections
import itertools
import json
import os
import shutil
from copy import deepcopy
import click
import joblib
import numpy as np
import pandas as pd
import torch
from ceem import logger, utils
from ceem.dynamics import *
from ceem.learner import *
from ceem.opt_criteria import *
from ceem.ceem import CEEM
from ceem.smoother import *
from ceem.systems import LorenzSystem, default_lorenz_system
from ceem.particleem import *
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
opj = os.path.join
@click.command()
@click.option('--sys-seed', default=4, type=int)
@click.option('--num-seeds', default=4, type=int)
@click.option('--logdir', default='./data/lorenz/convergence_experiment/pem', type=click.Path())
def run(sys_seed, num_seeds, logdir):
    """Launch the PEM convergence experiment across several RNG seeds."""
    if os.path.exists(logdir):
        # Interactively confirm destruction of a pre-existing log directory.
        print('Directory exists. Press d to delete.')
        choice = ''
        while not choice:
            choice = input().lower().strip()
        if choice != 'd':
            quit()
        shutil.rmtree(logdir)

    os.mkdir(logdir)

    k = 6
    Brange = [8]  #, 4, 2]
    parallel = True
    seeds = range(42, 42 + num_seeds)

    if parallel:
        jobs = (joblib.delayed(train)(seed, opj(logdir, f'k={k}_B={B}_seed={seed}'),
                                      sys_seed, k=k, b=B)
                for seed, B in itertools.product(seeds, Brange))
        joblib.Parallel(n_jobs=10)(jobs)
    else:
        for B in Brange:
            for seed in seeds:
                train(seed, opj(logdir, 'k=%d_B=%d_seed=%d' % (k, B, seed)),
                      sys_seed, k, B)
def plot3d(ax, x, y, z, **kwargs):
    """Plot detached numpy copies of three torch tensors as a 3-D line on *ax*."""
    coords = (x.detach().numpy(), y.detach().numpy(), z.detach().numpy())
    ax.plot(*coords, **kwargs)
def train(seed, logdir, sys_seed, k, b):
    """Run one particle-EM (SAEM) training run on a coupled Lorenz system.

    :param seed: RNG seed for observation noise and initial parameter perturbation.
    :param logdir: output directory for this run's logs and figures.
    :param sys_seed: RNG seed used when sampling test states of the true system.
    :param k: number of coupled Lorenz subsystems (state dimension is 3*k).
    :param b: number of simulated trajectories (batch size).
    """
    # Imported inside the function so each joblib worker process gets its
    # own matplotlib state.
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt

    ystd = 0.01  # observation noise standard deviation
    # ystd = 0.
    torch.set_default_dtype(torch.float64)
    logger.setup(logdir, action='d')

    N = 128  # trajectory length in timesteps
    n = 3 * k
    B = b

    true_system = default_lorenz_system(k, obsdif=2)
    utils.set_rng_seed(sys_seed)

    xdim = true_system.xdim
    ydim = true_system.ydim
    dt = true_system._dt
    x0mean = torch.tensor([[-6] * k + [-6] * k + [24.] * k]).unsqueeze(0)

    # simulate true_dynamics over IC distribution
    x_test = x0mean.repeat(1024, 1, 1)
    x_test += 5.0 * torch.randn_like(x_test)
    x_test = x_test.detach()
    t_test = torch.zeros(1024, 1)
    tgt_test = true_system.step_derivs(t_test, x_test).detach()

    # Covariances handed to the particle filter: process noise Q,
    # observation noise R, and initial-state covariance Px0.
    Q = (0.01 ** 2) * torch.eye(true_system.xdim)
    R = (ystd ** 2) * torch.eye(true_system.ydim)
    Px0 = 2.5**2 * torch.eye(true_system.xdim)

    ## simulate the true system
    xs = [x0mean.repeat(B, 1, 1)]
    xs[0] += 2.5 * torch.randn_like(xs[0])
    with torch.no_grad():
        for t in range(N - 1):
            xs.append(true_system.step(torch.tensor([0.] * B), xs[-1]))
    xs = torch.cat(xs, dim=1)

    # Plot each simulated trajectory in 3-D (one subplot per batch element).
    fig = plt.figure()
    for b in range(B):
        ax = fig.add_subplot(int(np.ceil(B / 2.)), 2, b + 1, projection='3d')
        for k_ in range(k):
            plot3d(plt.gca(), xs[b, :, k_], xs[b, :, k + k_], xs[b, :, 2 * k + k_], linestyle='--',
                   alpha=0.5)
    plt.savefig(os.path.join(logger.get_dir(), 'figs/traj_%d.png' % b), dpi=300)
    # plt.show()
    plt.close()

    t = torch.tensor(range(N)).unsqueeze(0).expand(B, -1).to(torch.get_default_dtype())
    y = true_system.observe(t, xs).detach()

    # seed for real now
    utils.set_rng_seed(seed)
    y += ystd * torch.randn_like(y)

    # prep system: copy the true system, then perturb its parameters
    system = deepcopy(true_system)
    true_params = parameters_to_vector(true_system.parameters())
    utils.set_rng_seed(seed)
    params = true_params * ((torch.rand_like(true_params) - 0.5) / 5. + 1.)  # within 10%
    vector_to_parameters(params, system.parameters())
    params = list(system.parameters())

    Np = 100  # number of particles in the filter
    fapf = faPF(Np, system, Q, R, Px0)
    timer = {'start_time':timeit.default_timer()}

    def ecb(epoch):
        # Per-epoch callback: log parameter error, wall-clock time, and
        # test-set derivative error against the true system.
        logger.logkv('time/epoch', epoch)
        params = list(system.parameters())
        vparams = parameters_to_vector(params)
        error = (vparams - true_params).norm().item()
        logger.logkv('test/log10_paramerror', np.log10(error))
        logger.logkv('time/epochtime', timeit.default_timer() - timer['start_time'])
        timer['start_time'] = timeit.default_timer()
        with torch.no_grad():
            tgt_test_pr = system.step_derivs(t_test, x_test)
            error = float(torch.nn.functional.mse_loss(tgt_test_pr, tgt_test))
            logger.logkv('test/log10_error', np.log10(error))
        return

    epoch_callbacks = [ecb]
    # Log the initial (epoch -1) metrics before training starts.
    ecb(-1)
    logger.dumpkvs()

    trainer = SAEMTrainer(fapf, y,
                          # gamma_sched=lambda x: HarmonicDecayScheduler(x, a=50.),
                          gamma_sched=lambda x: 0.2,
                          max_k=5000,
                          xlen_cutoff = 15,
                          )
    trainer.train(params, callbacks=epoch_callbacks)
    return
if __name__ == '__main__':
    run()  # click parses the CLI options declared on run()
|
{"hexsha": "6f16fb2439b55242329646607ebece817d18c27f", "size": 5089, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/lorenz/convergence_experiment_pem.py", "max_stars_repo_name": "sisl/CEEM", "max_stars_repo_head_hexsha": "6154587fe3cdb92e8b7f70eedb1262caa1553cc8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-06-21T16:50:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-14T04:02:01.000Z", "max_issues_repo_path": "experiments/lorenz/convergence_experiment_pem.py", "max_issues_repo_name": "sisl/CEEM", "max_issues_repo_head_hexsha": "6154587fe3cdb92e8b7f70eedb1262caa1553cc8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-13T07:46:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-16T05:14:47.000Z", "max_forks_repo_path": "experiments/lorenz/convergence_experiment_pem.py", "max_forks_repo_name": "sisl/CEEM", "max_forks_repo_head_hexsha": "6154587fe3cdb92e8b7f70eedb1262caa1553cc8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-30T12:08:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-30T12:08:20.000Z", "avg_line_length": 25.3184079602, "max_line_length": 103, "alphanum_fraction": 0.6077814895, "include": true, "reason": "import numpy", "num_tokens": 1439}
|
/*
// Copyright (c) 2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/
#pragma once
#include "node.hpp"

#include <boost/container/flat_map.hpp>
#include <cstdlib>
#include <ctime>
#include <error_messages.hpp>
#include <http_client.hpp>
#include <memory>
#include <random>
#include <utils/json_utils.hpp>
#include <variant>
namespace redfish
{
// One Redfish event subscription: the subscriber's configuration plus the
// HTTP client connection used to deliver events to it.
class Subscription
{
  public:
    std::string id;
    std::string destinationUrl;
    std::string protocol;
    std::string retryPolicy;
    std::string customText;
    std::string eventFormatType;
    std::string subscriptionType;
    std::vector<std::string> registryMsgIds;
    std::vector<std::string> registryPrefixes;
    std::vector<nlohmann::json> httpHeaders; // key-value pair

    // Non-copyable / non-movable: owns a live HTTP connection.
    Subscription(const Subscription&) = delete;
    Subscription& operator=(const Subscription&) = delete;
    Subscription(Subscription&&) = delete;
    Subscription& operator=(Subscription&&) = delete;

    // Opens an HTTP client toward the subscriber endpoint immediately.
    Subscription(const std::string& inHost, const std::string& inPort,
                 const std::string& inPath, const std::string& inUriProto) :
        host(inHost),
        port(inPort), path(inPath), uriProto(inUriProto)
    {
        conn = std::make_shared<crow::HttpClient>(
            crow::connections::systemBus->get_io_context(), host, port, path);
    }
    ~Subscription()
    {
    }

    // Sends one event payload, flattening the subscription's configured
    // httpHeaders (JSON objects) into request header key/value pairs.
    void sendEvent(const std::string& msg)
    {
        std::vector<std::pair<std::string, std::string>> reqHeaders;
        for (const auto& header : httpHeaders)
        {
            for (const auto& item : header.items())
            {
                std::string key = item.key();
                std::string val = item.value();
                reqHeaders.emplace_back(std::pair(key, val));
            }
        }
        conn->setHeaders(reqHeaders);
        conn->sendData(msg);
    }

  private:
    std::string host;
    std::string port;
    std::string path;
    std::string uriProto;
    std::shared_ptr<crow::HttpClient> conn;
};
// Singleton registry of event subscriptions, keyed by generated string id.
class EventServiceManager
{
  private:
    EventServiceManager(const EventServiceManager&) = delete;
    EventServiceManager& operator=(const EventServiceManager&) = delete;
    EventServiceManager(EventServiceManager&&) = delete;
    EventServiceManager& operator=(EventServiceManager&&) = delete;

    EventServiceManager()
    {
        // TODO: Read the persistent data from store and populate.
        // Populating with default.
        enabled = true;
        retryAttempts = 3;
        retryTimeoutInterval = 30; // seconds
    }

    boost::container::flat_map<std::string, std::shared_ptr<Subscription>>
        subscriptionsMap;

  public:
    bool enabled;
    uint32_t retryAttempts;
    uint32_t retryTimeoutInterval;

    static EventServiceManager& getInstance()
    {
        static EventServiceManager handler;
        return handler;
    }

    void updateSubscriptionData()
    {
        // Persist the config and subscription data.
        // TODO: subscriptionsMap & configData need to be
        // written to Persist store.
        return;
    }

    // Returns the subscription for `id`, or nullptr if none exists.
    std::shared_ptr<Subscription> getSubscription(const std::string& id)
    {
        auto obj = subscriptionsMap.find(id);
        if (obj == subscriptionsMap.end())
        {
            BMCWEB_LOG_ERROR << "No subscription exist with ID:" << id;
            return nullptr;
        }
        std::shared_ptr<Subscription> subValue = obj->second;
        return subValue;
    }

    // Registers a subscription under a freshly generated numeric id and
    // returns that id (empty string on failure).
    std::string addSubscription(const std::shared_ptr<Subscription> subValue)
    {
        // Fix: the previous code called std::srand(std::time(0)) on every
        // invocation, so two subscriptions added within the same second
        // replayed the same std::rand() sequence and collided until the
        // retry budget ran out. Seed a proper engine exactly once instead.
        static std::mt19937 gen(std::random_device{}());
        static std::uniform_int_distribution<uint32_t> dist;

        std::string id;
        int retry = 3;
        while (retry)
        {
            id = std::to_string(dist(gen));
            auto inserted = subscriptionsMap.insert(std::pair(id, subValue));
            if (inserted.second)
            {
                break;
            }
            --retry;
        }

        if (retry <= 0)
        {
            BMCWEB_LOG_ERROR << "Failed to generate random number";
            return std::string("");
        }

        updateSubscriptionData();
        return id;
    }

    bool isSubscriptionExist(const std::string& id)
    {
        return subscriptionsMap.find(id) != subscriptionsMap.end();
    }

    void deleteSubscription(const std::string& id)
    {
        auto obj = subscriptionsMap.find(id);
        if (obj != subscriptionsMap.end())
        {
            subscriptionsMap.erase(obj);
            updateSubscriptionData();
        }
    }

    size_t getNumberOfSubscriptions()
    {
        return subscriptionsMap.size();
    }

    std::vector<std::string> getAllIDs()
    {
        std::vector<std::string> idList;
        for (const auto& it : subscriptionsMap)
        {
            idList.emplace_back(it.first);
        }
        return idList;
    }

    // True if some existing subscription already targets destUrl.
    bool isDestinationExist(const std::string& destUrl)
    {
        for (const auto& it : subscriptionsMap)
        {
            std::shared_ptr<Subscription> entry = it.second;
            if (entry->destinationUrl == destUrl)
            {
                BMCWEB_LOG_ERROR << "Destination exist already" << destUrl;
                return true;
            }
        }
        return false;
    }
};
} // namespace redfish
|
{"hexsha": "0634328701a932def82619510ead97cd39c00d2a", "size": 5850, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "redfish-core/include/event_service_manager.hpp", "max_stars_repo_name": "ztai-goog/bmcweb", "max_stars_repo_head_hexsha": "881e50b775fcccbc447fc39f40671574e0fa4157", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "redfish-core/include/event_service_manager.hpp", "max_issues_repo_name": "ztai-goog/bmcweb", "max_issues_repo_head_hexsha": "881e50b775fcccbc447fc39f40671574e0fa4157", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "redfish-core/include/event_service_manager.hpp", "max_forks_repo_name": "ztai-goog/bmcweb", "max_forks_repo_head_hexsha": "881e50b775fcccbc447fc39f40671574e0fa4157", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3364485981, "max_line_length": 78, "alphanum_fraction": 0.6052991453, "num_tokens": 1281}
|
import numpy as np
import pdb
def running_cost(sys, x, u, xf_current):
    """Quadratic tracking cost and its derivatives at one timestep.

    :param sys: system object carrying the cost weights Q_r_ddp and R_ddp
        plus the state/control dimensions.
    :param x: current state vector.
    :param u: current control vector.
    :param xf_current: goal state for this timestep.
    :return: tuple (l0, lx, lxx, lu, luu, lux, lxu) of the cost value and
        its first/second derivatives w.r.t. state and control.
    """
    state_weight = sys.Q_r_ddp
    control_weight = sys.R_ddp

    err = x - np.squeeze(xf_current)

    # l(x, u) = 0.5 * err' Qr err + 0.5 * u' R u
    l0 = 0.5 * err.T.dot(state_weight).dot(err) + 0.5 * u.T.dot(control_weight).dot(u)
    lx = state_weight.dot(err)
    lxx = state_weight
    lu = control_weight.dot(u)
    luu = control_weight
    # Cost has no state-control cross terms.
    lux = np.zeros([sys.num_controllers, sys.states])
    lxu = lux.T
    return l0, lx, lxx, lu, luu, lux, lxu
def state_action(L, Lx, Lu, Lxx, Luu, Lxu, V, Vx, Vxx, phi, B):
    """Linearized Bellman backup: combine the stage-cost expansion with the
    next-step value function derivatives to form the Q-function terms."""
    phiT_Vx = phi.T.dot(Vx)
    phiT_Vxx = phi.T.dot(Vxx)
    BT_Vx = B.T.dot(Vx)
    BT_Vxx = B.T.dot(Vxx)

    Q = L + V
    Qx = Lx + phiT_Vx
    Qu = Lu + BT_Vx
    Qxx = Lxx + phiT_Vxx.dot(phi)
    Quu = Luu + BT_Vxx.dot(B)
    Qxu = Lxu + phiT_Vxx.dot(B)
    return Q, Qx, Qu, Qxx, Quu, Qxu
def ddp(sys, x, u):
    """ takes in the current state and control trajectories and outputs optimal control trajectory """
    # One backward-forward sweep of Differential Dynamic Programming:
    #   1. quadratize the running cost and linearize the dynamics along (x, u)
    #   2. back-propagate the value function, computing feedback gains
    #   3. forward-propagate the state perturbation to update the controls
    states = sys.states
    controllers = sys.num_controllers

    # these come from teh params file of the system
    timesteps = sys.timesteps
    dt = sys.dt

    # xf = np.squeeze(sys.goal)

    # if sys.goal.shape[1] == 1:
    #     xf = np.squeeze(sys.goal)
    # else:
    #     xf_traj = sys.goal # gives the chunk of the goal state trajectory
    #     xf = sys.goal[:, sys.goal.shape[1]] # use the last state in the value function initialization for the backward pass

    xf_traj = sys.goal
    Qf = sys.Q_f_ddp

    # Discretized cost-expansion terms at every timestep.
    q0 = np.zeros([1, timesteps - 1])
    qk = np.zeros([states, timesteps - 1])
    Qk = np.zeros([states, states, timesteps - 1])
    rk = np.zeros([controllers, timesteps - 1])
    Rk = np.zeros([controllers, controllers, timesteps - 1])
    Pk = np.zeros([states, controllers, timesteps - 1])

    # Linearized discrete dynamics dx_{t+1} = A dx_t + B du_t.
    A = np.zeros([states, states, timesteps - 1])
    B = np.zeros([states, controllers, timesteps - 1])

    # Value function and its gradient / Hessian along the trajectory.
    V = np.zeros([1, timesteps])
    Vx = np.zeros([states, timesteps])
    Vxx = np.zeros([states, states, timesteps])

    u_new = np.zeros([controllers, timesteps - 1])

    # --- Pass 1: quadratize cost and linearize dynamics along (x, u) ---
    for t in range(timesteps - 1):
        xf_current = xf_traj[:, t]
        l0, lx, lxx, lu, luu, lux, lxu = running_cost(sys, x[:, t], u[:, t], xf_current)
        q0[:, t] = dt * l0
        qk[:, t] = dt * lx
        Qk[:, :, t] = dt * lxx
        rk[:, t] = dt * lu
        Rk[:, :, t] = dt * luu
        Pk[:, :, t] = dt * lxu
        dfx, dfu = sys.state_control_transition(x[:, t], u[:, t])
        A[:, :, t] = np.eye(states) + dfx * dt  # forward-Euler discretization
        B[:, :, t] = dfu * dt

    # back prop for value function
    # Terminal condition: quadratic cost around the final goal state.
    last_index = int(V.shape[1] - 1)
    err = x[:, last_index] - xf_traj[:, xf_traj.shape[1]-1]
    V[:, last_index] = err.T.dot(Qf).dot(err)
    Vx[:, last_index] = Qf.dot(err)
    Vxx[:, :, last_index] = Qf

    # Feedback gains Lk and feedforward terms lk.
    Lk = np.zeros([controllers, states, timesteps])
    lk = np.zeros([controllers, timesteps])

    # --- Pass 2: backward sweep over the value function -----------------
    for t in range((timesteps - 2), -1, -1):
        # get state action value function to evaluate the linearized bellman equation
        Q, Qx, Qu, Qxx, Quu, Qxu = state_action(q0[:, t], qk[:, t], rk[:, t], Qk[:, :, t], Rk[:, :, t],
                                                Pk[:, :, t], V[:, t + 1], Vx[:, t + 1], Vxx[:, :, t + 1],
                                                A[:, :, t], B[:, :, t])
        Lk[:, :, t] = -1 * np.linalg.inv(Quu).dot(Qxu.T)
        lk[:, t] = -1 * np.linalg.inv(Quu).dot(Qu)
        V[:, t] = Q + Qu.T.dot(lk[:, t]) + 1 / 2 * lk[:, t].dot(Quu).dot(lk[:, t])
        Vx[:, t] = Qx + Lk[:, :, t].T.dot(Qu) + Qxu.dot(lk[:, t]) + Lk[:, :, t].T.dot(Quu).dot(lk[:, t])
        Vxx[:, :, t] = Qxx + 2 * Lk[:, :, t].T.dot(Qxu.T) + Lk[:, :, t].T.dot(Quu).dot(Lk[:, :, t])

    # --- Pass 3: forward sweep, apply the control update ----------------
    dx = np.zeros([states, 1])
    for t in range(timesteps - 1):
        gamma = sys.gamma  # step-size on the control update
        du = lk[:, t] + np.squeeze(Lk[:, :, t].dot(dx))
        dx = np.squeeze(A[:, :, t].dot(dx)) + B[:, :, t].dot(du)
        u_new[:, t] = u[:, t] + gamma * du

    u_opt = u_new
    return u_opt
def apply_control(sys, u_opt):
    """Roll the system forward under the control sequence ``u_opt``.

    :param sys: gym-style system; ``sys.step(u)`` returns (next_state, reward).
    :param u_opt: control trajectory, shape (num_controllers, timesteps - 1).
    :return: tuple (state trajectory, total reward) where the reward is the
        negated accumulated per-step cost.
    """
    T = sys.timesteps
    traj = np.zeros([sys.states, T])
    traj[:, 0] = sys.state

    total_cost = 0
    for step in range(T - 1):
        # returns next state and the reward of that state
        next_x, step_cost = sys.step(u_opt[:, step])
        traj[:, step + 1] = next_x
        total_cost += step_cost
    return traj, -total_cost
|
{"hexsha": "83c6e05b00d4371dec9d931da9c99ce25ca62d22", "size": 4735, "ext": "py", "lang": "Python", "max_stars_repo_path": "ddp/mpc_ddp_functions_circlequad.py", "max_stars_repo_name": "rebeccali/ese650-project", "max_stars_repo_head_hexsha": "0d96ff707384e67afdfb0ff259d5629257b0a78b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ddp/mpc_ddp_functions_circlequad.py", "max_issues_repo_name": "rebeccali/ese650-project", "max_issues_repo_head_hexsha": "0d96ff707384e67afdfb0ff259d5629257b0a78b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ddp/mpc_ddp_functions_circlequad.py", "max_forks_repo_name": "rebeccali/ese650-project", "max_forks_repo_head_hexsha": "0d96ff707384e67afdfb0ff259d5629257b0a78b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3532934132, "max_line_length": 125, "alphanum_fraction": 0.5330517423, "include": true, "reason": "import numpy", "num_tokens": 1533}
|
from __future__ import print_function
import os
from argparse import ArgumentParser
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
if __name__ == '__main__':
    # Standardize a saved feature matrix, project it with PCA keeping a
    # requested variance ratio, and save the result.
    parser = ArgumentParser("")
    parser.add_argument("feats", help="Path to the npy features.")
    parser.add_argument("out_dir", help="Path where to save the processed files.")
    parser.add_argument("prefix", help="Files prefix, name of the dataset.")
    parser.add_argument("pca_variance", help="Ratio of the variance to keep with the pca.")
    args = parser.parse_args()

    feats_path = args.feats
    out_dir = args.out_dir
    file_prefix = args.prefix
    pca_variance = float(args.pca_variance)

    feats = np.load(feats_path)

    # perform standardization (zero mean, unit variance) before PCA
    feats = StandardScaler().fit_transform(feats)

    # Keep enough components to explain `pca_variance` of the total variance.
    pca = PCA(n_components=pca_variance, svd_solver="full")
    feats_pc = pca.fit_transform(feats)
    print(feats_pc.shape)

    # Create output directory if not present. exist_ok handles the race the
    # original errno check attempted — which crashed with a NameError,
    # since `errno` was never imported.
    os.makedirs(out_dir, exist_ok=True)

    np.save(os.path.join(out_dir, file_prefix + '-feats.npy'), feats_pc)
|
{"hexsha": "b844e3f1daf05c31ab6d47c5b357a7c583f02cf8", "size": 1299, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocessing/pca.py", "max_stars_repo_name": "enricovian/GraphSAGE", "max_stars_repo_head_hexsha": "0cdda29dbc075fb8f3441c15638d1b06de992a57", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "preprocessing/pca.py", "max_issues_repo_name": "enricovian/GraphSAGE", "max_issues_repo_head_hexsha": "0cdda29dbc075fb8f3441c15638d1b06de992a57", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocessing/pca.py", "max_forks_repo_name": "enricovian/GraphSAGE", "max_forks_repo_head_hexsha": "0cdda29dbc075fb8f3441c15638d1b06de992a57", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3076923077, "max_line_length": 91, "alphanum_fraction": 0.7028483449, "include": true, "reason": "import numpy", "num_tokens": 294}
|
import os, json, copy, pickle
import numpy as np
import pandas as pd
from collections import defaultdict
from scipy import stats
from utils.metrics import Metrics

# this dir contains CLINC and SNIPS partial fewshot experiments
# EXPERIMENTS_DIR = "/path/to/partial-fewshot/savedir/"
# this dir contains CLINC, SNIPS, Banking77, and HWU64 full fewshot experiments
EXPERIMENTS_DIR = "/path/to/full-fewshot/savedir/"

# FILTERS (NOTE: any of these filters can be disabled by setting to None)
datasets = None  # clinc_oos, snips_official, banking77, hwu64
augmentation = None  # eda (in FS), ada, babbage, curie, davinci, gptj
temperatures = None  # 0.5-2.0
skip_oracle = False  # True/False
# exp_type-wise significance tests are always computed w.r.t davinci models
TTEST = False  # True/False
# print results as a latex table
TO_LATEX = False
# save metrics for plotting purposes
GEN_FOR_PLOT = False
# name of the results file (NOTE: only GPTJ for full fewshot is supported)
FNAME = "results/gptj_val_fidelity_et_al.pkl"

# The below vars are used by to_latex()
# ALL_METRICS and ALL_DATASETS are in the order of reporting in the paper;
# defining the order here so that we don't need to apply any convoluted
# ops like sorting to ensure consistent performance reporting.
# NOTE: to_latex() auto-ignores a dataset if it's filtered out in this script
ALL_DATASETS = ["clinc_oos", "hwu64", "banking77", "snips_official"]
BACKBONE = "BERT" if "bert" in EXPERIMENTS_DIR else "T5"
SCOPES = ["overall", "few_shot"] if "ex2" in EXPERIMENTS_DIR else ["test"]
# IA = in-scope accuracy, OR = out-of-scope recall (see compile_results)
ALL_METRICS = ["IA", "OR"]

# No need to touch anything else below this line.
FILTER = {
    "datasets": datasets,  # set None to fetch for all datasets
    "augmentation": augmentation,  # set None to fetch for all augmentors
    "temps": temperatures,  # set None to fetch for all temperatures
    "skip_oracle?": skip_oracle,  # set False/None to fetch oracle results
}
pjoin = os.path.join  # shorthand used throughout this module
def fmt(xs):
    """Format a sample of values as 'mean (std)' with two decimals each."""
    mean_value = np.mean(xs)
    std_value = np.std(xs)
    return "{:.2f} ({:.2f})".format(mean_value, std_value)
def compile_results(folder_list, exp_dir):
    """Aggregate the results.json files of one run into a single metrics dict.

    `folder_list` holds the experiment sub-folders belonging to one run; they
    are assumed to share the same exp_dict configuration (the first folder's
    exp_dict.json is read as representative).

    Returns a dict of scope -> metric -> mean value (in %). Scopes depend on
    the configuration:
      - full fewshot / original baseline: "test"/"val" with IA, OR, A
        (only A when the dataset has no OOS class, i.e. oos_id is None)
      - EX2 setup ("full_*" configs): "overall(_val)" with IA/OR and
        "few_shot(_val)" with IA
    Metric keys: IA = in-scope accuracy, OR = out-of-scope recall,
    A = plain accuracy.
    """
    # fetch experiment type, and populate total_results accordingly
    exp_dict_path = pjoin(exp_dir, folder_list[0], "exp_dict.json")
    exp_dict = json.load(open(exp_dict_path, "r"))
    dataset = exp_dict["dataset"]["name"]
    dconfig = exp_dict["dataset"]["config"]
    oos_id = exp_dict["dataset"]["oos_id"]  # None => no OOS class
    exp_type = exp_dict["exp_type"]
    # "full_*" configs are the EX2-style setup; "few_pure" is full fewshot
    ex2_setup = True if dconfig.startswith("full_") else False
    full_fewshot = True if dconfig == "few_pure" else False
    if dconfig != "few_pure" and not ex2_setup and exp_type == "baseline":
        org_baseline = True
    else:
        org_baseline = False
    # declare total_results template based on exp config
    if full_fewshot or org_baseline:  # no need for overall, fewshot keys
        total_results = {
            "test": {"IA": [], "OR": [], "A": []},
            "val": {"IA": [], "OR": [], "A": []},
        }
        if oos_id is None:
            # no OOS class: only plain accuracy is defined
            total_results = {
                "test": {"A": []},
                "val": {"A": []},
            }
    else:  # ex2 setup
        total_results = {
            "few_shot": {"IA": []},
            "few_shot_val": {"IA": []},
            "overall": {"IA": [], "OR": []},
            "overall_val": {"IA": [], "OR": []},
        }
    # read all the json files
    for folder in folder_list:
        sub_results = json.load(open(pjoin(exp_dir, folder, "code/results.json")))
        # results.json holds a single top-level key; unwrap it
        key = list(sub_results.keys())[0]
        sub_results = sub_results[key]
        if dataset == "snips_official" and ex2_setup:  # snips has no OR
            _overall = sub_results["overall"]
            total_results["overall"]["IA"].append(_overall["test_accuracy"])
            total_results["overall_val"]["IA"].append(_overall["valid_accuracy"])
        elif full_fewshot or org_baseline:
            total_results["test"]["A"].append(sub_results["test_accuracy"])
            total_results["val"]["A"].append(sub_results["valid_accuracy"])
            if not oos_id:
                # no OOS class -> IA/OR are not reported for this dataset
                continue
            total_results["test"]["IA"].append(sub_results["test_inscope_accuracy"])
            total_results["val"]["IA"].append(sub_results["valid_inscope_accuracy"])
            total_results["test"]["OR"].append(sub_results["test_oos_recall"])
            total_results["val"]["OR"].append(sub_results["valid_oos_recall"])
            continue
        elif ex2_setup and dataset == "clinc_oos":
            # not handling oos_id as ex2_setup ALWAYS has an oos_id
            overall = sub_results["overall"]
            total_results["overall_val"]["IA"].append(overall["valid_inscope_accuracy"])
            total_results["overall"]["IA"].append(overall["test_inscope_accuracy"])
            total_results["overall_val"]["OR"].append(overall["valid_oos_recall"])
            total_results["overall"]["OR"].append(overall["test_oos_recall"])
        # few-shot scope is filled for all EX2 branches (full fewshot
        # branches `continue` above and never reach this point)
        fs = sub_results["few_shot"]
        total_results["few_shot"]["IA"].append(fs["test_accuracy"])
        total_results["few_shot_val"]["IA"].append(fs["valid_accuracy"])
    for k in total_results:
        for m in total_results[k]:
            # convert fractions to percentages
            results = [100 * v for v in total_results[k][m]]
            # the following is averaging across different fewshot domains.
            # For EX2, it's nth run's avg. across full_banking, full_meta, etc.
            # For Full fewshot, it's computing the mean for one domain, i.e,
            # the value is going to remain the same except it won't be a list.
            total_results[k][m] = np.mean(results)
    return total_results
def segregate_sub_folders(exp_dir):
    """Group experiment sub-folders by exp_type -> dataset -> run number.

    Applies the global FILTER settings (datasets / augmentation /
    temperatures / skip_oracle?) to skip unwanted experiments. For GPT
    experiments the exp_type key is suffixed with engine and temperature,
    e.g. "gpt3_davinci_1.0".
    """
    sub_folder_dict = {}
    for folder in os.listdir(exp_dir):
        exp_dict_path = pjoin(exp_dir, folder, "exp_dict.json")
        exp_dict = json.load(open(exp_dict_path))
        dname = exp_dict["dataset"]["name"]  # aggregate on the dataset
        # dataset filter
        if FILTER["datasets"] and dname not in FILTER["datasets"]:
            continue
        exp_dict["gpt3_temp"] = 1.0  # TODO: remove (forces every temp to 1.0)
        if "gpt" in exp_dict["exp_type"]:
            engine, temp = exp_dict["gpt3_engine"], exp_dict["gpt3_temp"]
            # engine and temperature filter
            if FILTER["augmentation"] and engine not in FILTER["augmentation"]:
                continue
            if FILTER["temps"] and temp not in FILTER["temps"]:
                continue
            exp_type = f"{exp_dict['exp_type']}_{engine}_{temp}"
        else:
            # NOTE that for non-GPT experiments exp_type is the augmentation mode
            exp_type = exp_dict["exp_type"]  # eda/eda_oracle
            # strip the "_oracle" suffix before matching the augmentation filter
            _aug = exp_type.replace("_oracle", "") if "oracle" in exp_type else exp_type
            if FILTER["augmentation"] and _aug not in FILTER["augmentation"]:
                continue
        # oracle filter
        if "oracle" in exp_type and FILTER["skip_oracle?"]:
            continue
        if exp_type not in sub_folder_dict:
            sub_folder_dict[exp_type] = {}
        if dname not in sub_folder_dict[exp_type]:
            sub_folder_dict[exp_type][dname] = defaultdict(list)
        sub_folder_dict[exp_type][dname][exp_dict["run#"]].append(folder)
    # a sanity check line, prints the number of experiments per config.
    folders_per_exp = []
    for exp_type in sub_folder_dict:
        for dname in sub_folder_dict[exp_type]:
            num_runs = len(sub_folder_dict[exp_type][dname])
            folders_per_exp.append((exp_type, dname, num_runs))
    print(folders_per_exp, len(folders_per_exp))
    return sub_folder_dict
def final_compile(sub_results_dicts):
    """Merge per-run result dicts into scope -> metric -> list-of-values.

    NaN metric values (e.g. means over empty lists) are dropped; a scope
    whose metrics are all NaN still appears, with an empty dict.
    """
    merged = {}
    for run_result in sub_results_dicts:
        for scope, metrics in run_result.items():
            scope_bucket = merged.setdefault(scope, {})
            for metric, value in metrics.items():
                if np.isnan(value):
                    continue  # skip undefined metrics for this run
                scope_bucket.setdefault(metric, []).append(value)
    return merged
def get_performance(exp_dir):
    """Build the non-aggregated results dictionary for an experiment dir.

    Hierarchy of the returned dict (lists are NOT averaged across runs):
        gpt3_ada_1.0:
            clinc_oos:
                test: {IA: [90.32, ...], OR: [40.23, ...]}
                val:  {IA: [92.32, ...], OR: [45.23, ...]}
            banking77:
                test: {A: [82.3, ...]}
                val:  {A: [83.2, ...]}
            ...
        gpt3_babbage_1.0:
            ...

    Keeping the per-run lists lets other functions compute mean/std across
    runs and perform significance testing.
    """
    grouped = segregate_sub_folders(exp_dir)
    performance = {}
    for exp_type in sorted(grouped):
        dataset_map = grouped[exp_type]
        for dname in sorted(dataset_map):
            # one compile_results() call per run number
            per_run = [
                compile_results(dataset_map[dname][run_id], exp_dir)
                for run_id in dataset_map[dname]
            ]
            performance.setdefault(exp_type, {})[dname] = final_compile(per_run)
    return performance
def to_latex(performance):
    """
    Generates latex table code for aug, aug+relabel settings.

    One table row per experiment type, columns ordered by ALL_DATASETS x
    SCOPES x ALL_METRICS. Uses the module-level BACKBONE / SCOPES /
    ALL_DATASETS / ALL_METRICS constants and the fmt() helper.
    """
    table_latex = ""
    # backbone mode (aug/aug.+relabel)
    template = "{} {} (Ours) &"  # line template
    # num of columns to report will be same for all exp. settings,
    # so count them once using the first experiment type.
    _etype = list(performance.keys())[0]
    n_cols = 0
    for dname in performance[_etype]:
        for s in SCOPES:
            curr_metrics = performance[_etype][dname][s]
            for _m in curr_metrics:
                # when IA exists, A is redundant and not reported
                if _m == "A" and "IA" in curr_metrics:
                    continue
                n_cols += 1
    template += " {} &" * (n_cols - 1)
    template += " {} \\\\\n"
    for etype in performance:
        dscores = []
        for dname in ALL_DATASETS:
            # skip datasets filtered out of this run
            if dname not in performance[etype]:
                continue
            for s in SCOPES:
                curr_metrics = list(performance[etype][dname][s])
                for _m in ALL_METRICS:
                    if _m not in curr_metrics:
                        # a dataset without IA means no OOS. in that case,
                        # A is the same as IA.
                        if _m == "IA":
                            _m = "A"
                        else:
                            continue
                    dscores.append(fmt(performance[etype][dname][s][_m]))
        table_latex += template.format(
            BACKBONE, etype.replace("_1.0", "").replace("_", "\_"), *dscores
        )
    print(table_latex)
def perform_ttest(performance):
    """
    Receives a performance dict (see get_performance) and performs
    independent two-sample t-tests of every model against the davinci
    model at temperature 1.0 for that experiment type.
    """
    if performance == {}:
        print("Nothing to show here")
        return
    # Use the davinci runs as the reference ("bigger") model; if several
    # keys match, the last one wins (same as the original behavior).
    bigger_model = None
    for e in performance:
        if "davinci" in e:
            bigger_model = e
    if bigger_model is None:
        # Bug fix: previously fell through to performance[None] and raised
        # a KeyError whenever the davinci reference was filtered out.
        print("No davinci reference model found; skipping t-tests")
        return
    bigger_results = performance[bigger_model]
    # Gather model-wise metrics
    for dname in bigger_results.keys():
        print(f"Dataset: {dname.upper()}")
        print("-" * 30)
        for s in SCOPES:
            for m in bigger_results[dname][s]:
                _bresults = bigger_results[dname][s][m]
                print(f"--- {s} {m} test ---")
                print(f"{bigger_model.upper()}: ({fmt(_bresults)})")
                for model, results in performance.items():
                    if model == bigger_model:
                        continue
                    _sresults = results[dname][s][m]
                    # independent two-sample t-test vs the reference model
                    test_result = stats.ttest_ind(_bresults, _sresults)
                    print(f" vs {model.upper()} ({fmt(_sresults)}) {test_result}")
                print()
def display_results(performance):
    """Pretty-print one "mean (std)" table per (experiment type, dataset).

    NOTE: mutates `performance` in place (metric lists are replaced by
    formatted strings) -- main() passes a deepcopy for this reason.
    """
    # First pass: replace each list of run metrics by its "mean (std)" string.
    for etype in performance:
        for dname in performance[etype]:
            for scope in performance[etype][dname]:
                for metric in performance[etype][dname][scope]:
                    results = performance[etype][dname][scope][metric]
                    performance[etype][dname][scope][metric] = fmt(results)
    # Second pass: render one DataFrame table per setting.
    for etype in performance:
        for dname in performance[etype]:
            print(f"Setting: {etype} | {dname}")
            print("-" * 20)
            print(pd.DataFrame().from_dict(performance[etype][dname]))
            print("=" * 30)
            print("\n")
def gen_for_plot(performance):
    """
    Will save fidelity and fs accuracies for all the datasets in a file.
    NOTE: this doesn't save metrics for partial fewshot temp. profiling nor
    does it support any engine other than GPTJ right now.

    Returns a dict with a "metrics" DataFrame (one row per (temp, dataset))
    and per-dataset fidelity "thresholds"; also pickles it to FNAME. If
    FNAME already exists, the cached pickle is loaded and returned instead.
    """
    if FILTER["augmentation"] != ["gptj"]:
        raise NotImplementedError(
            "Metrics generation for plotting only supported for GPTJ!"
        )
    if "fs" not in EXPERIMENTS_DIR:
        raise NotImplementedError(
            "Metrics generation for plotting only supported for Full Fewshot!"
        )
    # Reuse previously computed metrics if the pickle already exists.
    if os.path.exists(FNAME):
        print(f"{FNAME} already exists!! Loading...")
        print("Delete/Rename it to recompute fidelities.")
        return pickle.load(open(FNAME, "rb"))
    print("Compiling plotting metrics in a file...")
    # init df
    df = pd.DataFrame(columns=["temp", "ds", "val_acc_mean", "val_acc_std", "fidelity"])
    # compute fidelities
    fidelities = {ds: Metrics().compute_fidelities(ds) for ds in ALL_DATASETS}
    for etype, results in performance.items():
        # etype: gpt3_gptj_1.0 -- the trailing token is the temperature
        _, temp = etype.rsplit("_", 1)
        for ds in results:
            # clinc has an OOS class, so use in-scope accuracy there
            acc_key = "IA" if ds == "clinc_oos" else "A"
            val_accs = results[ds]["val"][acc_key]
            val_acc_mean, val_acc_std = np.mean(val_accs), np.std(val_accs)
            _fid = fidelities[ds][f"gptj_{temp}"]
            # create a new entry in the dataframe
            df.loc[len(df.index)] = [float(temp), ds, val_acc_mean, val_acc_std, _fid]
    # will be used to plot the threshold lines in the fidelity plots
    thresholds = {ds: fidelities[ds]["threshold"] for ds in ALL_DATASETS}
    metrics = {"metrics": df, "thresholds": thresholds}
    print(f"Saving fidelity metrics for plotting {FNAME}")
    with open(FNAME, "wb") as f:
        pickle.dump(metrics, f)
    return metrics
def main():
    """
    Computes mean and std of metrics obtained by get_performance, prints
    them, and optionally runs t-tests / LaTeX export / plotting-metric
    generation depending on the TTEST / TO_LATEX / GEN_FOR_PLOT flags.
    """
    import shutil  # local import: only needed for the cleanup below

    # remove the "deleted" folder if it exists
    deleted_dir = pjoin(EXPERIMENTS_DIR, "deleted")
    if os.path.exists(deleted_dir):
        print(f"Removing {deleted_dir}...")
        # Fixed: previously shelled out via os.system(f"rm -rf {path}"),
        # which is fragile/unsafe for paths with spaces or shell
        # metacharacters and silently ignores failures.
        shutil.rmtree(deleted_dir)
        print("Removed.")
    performance = get_performance(EXPERIMENTS_DIR)
    # display results (deepcopy needed as display_results permutes its input)
    display_results(copy.deepcopy(performance))
    if TTEST:
        # non-oracle etype
        print("T-Test no oracle")
        perform_ttest({k: v for k, v in performance.items() if "oracle" not in k})
        # oracle etype
        print("T-Test with oracle")
        perform_ttest({k: v for k, v in performance.items() if "oracle" in k})
    if TO_LATEX:
        to_latex(performance)
    if GEN_FOR_PLOT:
        gen_for_plot(performance)


if __name__ == "__main__":
    main()
|
{"hexsha": "b12d598b8283cda1183706ce5c88e929611e1602", "size": 15822, "ext": "py", "lang": "Python", "max_stars_repo_path": "runners/compile_results.py", "max_stars_repo_name": "ElementAI/data-augmentation-with-llms", "max_stars_repo_head_hexsha": "23673ab55cfb72295468e92ae58d0906f5dc7b05", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-30T21:23:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T10:00:07.000Z", "max_issues_repo_path": "runners/compile_results.py", "max_issues_repo_name": "ElementAI/data-augmentation-with-llms", "max_issues_repo_head_hexsha": "23673ab55cfb72295468e92ae58d0906f5dc7b05", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "runners/compile_results.py", "max_forks_repo_name": "ElementAI/data-augmentation-with-llms", "max_forks_repo_head_hexsha": "23673ab55cfb72295468e92ae58d0906f5dc7b05", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3160377358, "max_line_length": 88, "alphanum_fraction": 0.5996081406, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3858}
|
"""
NETALIGNMR
----------
Solve the network alignment problem with Klau's algorithm (a Lagrangian
relaxation of the quadratic overlap term, solved by iterated bipartite
matching with multiplier updates).

NOTE(review): argument roles inferred from usage below -- confirm against
the package docs:
- `S`: sparse edge-overlap matrix between candidate match edges
- `w`: weights of the candidate matching edges
- `a`, `b`: objective weights, f = a*matchval + b*overlap
- `li`, `lj`: endpoints of the candidate matching edges
- `gamma`: initial multiplier step size (halved on stagnation)
- `stepm`: iterations without upper-bound improvement before reducing gamma
- `rtype`: rounding type (1: match on x directly; 2: rematch on a*w + b/2*S*x)
- `maxiter`: maximum number of iterations
- `verbose`: print per-iteration statistics

Returns `(xbest, st, status, hist)`: best primal solution found, a Bool
convergence flag (upper-lower gap < 1e-2), `[flower, fupper]`, and the
per-iteration history matrix.
"""
function netalignmr(S::SparseMatrixCSC{Int64,Int64},w::Vector{Float64},
    a::Int64,b::Int64,li::Vector{Int64},lj::Vector{Int64},
    gamma::Float64,stepm::Int64,rtype::Int64,maxiter::Int64,verbose::Bool)
    m = maximum(li)
    n = maximum(lj)
    # One-time setup of the bipartite matching problem; reused every iteration.
    Matching_Setup = bipartite_matching_setup(w,li,lj,m,n)
    tripi = Matching_Setup.tripi
    matm = Matching_Setup.m
    matn = Matching_Setup.n
    rp = Matching_Setup.rp
    ci = Matching_Setup.ci
    mperm = tripi[tripi.>0]  # permutation from edge order to matching order
    # U is the (sparse) multiplier correction matrix updated each iteration.
    U = spzeros(Float64,size(S,1),size(S,2))
    xbest = zeros(Float64,length(w))
    flower = 0.0 #best lower bound on the solution
    fupper = Inf #best upper bound on the solution
    next_reduction_iteration = stepm #reduction step
    hist = zeros(Float64,maxiter,7)  # per-iteration stats, see assignment below
    iter = 1
    # NOTE(review): `hist[1:iter,:]` after the loop relies on the loop
    # variable updating the outer local `iter` -- pre-1.0 Julia scoping.
    @inbounds for iter = 1:maxiter
        # Symmetrized relaxation matrix for this iteration's subproblem.
        Q = (b/2)*S + U-U'
        Qt = Q'
        Qp = Qt.colptr
        Qr = Qt.rowval
        Qv = Qt.nzval
        M = size(Qt,1)
        N = size(Qt,2)
        nedges = length(li)
        # Row-wise max-weight matching sums give the bound contribution q.
        all_matching = column_maxmatchsum(M,N,Qp,Qr,Qv,m,n,nedges,li,lj)
        q = all_matching.q
        qj = all_matching.mi
        qi = all_matching.mj
        medges = all_matching.medges
        SM = sparse(qi[1:medges],qj[1:medges],1,size(Q,1),size(Q,2))
        x = a*w + q
        # Scatter edge scores into the matching's internal ordering.
        ai = zeros(Float64,length(tripi))
        ai[tripi.>0] = x[mperm]
        M_output = MatrixNetworks.bipartite_matching_primal_dual(rp,ci,ai,matm,matn)
        mi = MatrixNetworks.edge_indicator(M_output,li,lj)
        val = M_output.weight  # relaxation value -> upper bound candidate
        # compute stats
        matchval = dot(mi,w)
        overlap = dot(mi,(S*mi)/2)
        card = M_output.cardinality
        f = a*matchval + b*overlap  # feasible value -> lower bound candidate
        if val < fupper
            fupper = val
            next_reduction_iteration = iter+stepm
        end
        if f > flower
            flower = f
            itermark = "*"
            xbest = convert(Vector{Float64},mi)
        else
            itermark = " "
        end
        if rtype == 1
            # no work
        elseif rtype==2
            # Alternative rounding: rematch on the smoothed scores a*w + (b/2)*S*x.
            mw = S*x
            mw = a*w + b/2*mw
            ai = zeros(Float64,length(tripi))
            ai[tripi.>0] = mw[mperm]
            M_output2 = MatrixNetworks.bipartite_matching_primal_dual(rp,ci,ai,matm,matn)
            mx = MatrixNetworks.edge_indicator(M_output2,li,lj)
            card = M_output2.cardinality
            matchval = dot(w,mx)
            overlap = dot(mx,(S*mx)/2)
            f = a*matchval + b*overlap
            if f > flower
                flower = f
                itermark = "**"
                mi = mx
                xbest = mw
            end
        end
        # report on current iter
        hist[iter,1:end] = [norm(nonzeros(U),1), flower, fupper, f, matchval, card, overlap]
        # the below if statement causes type instability due to @printf being type unstable
        if verbose
            @printf("%5s %4i %8.1e %7.2f %7.2f %7.2f %7.2f %7.2f %7i %7i\n",
                itermark, iter, norm(nonzeros(U),1),
                flower, fupper, val,
                f, matchval, card, overlap)
        end
        if iter == next_reduction_iteration
            # Upper bound stagnated for stepm iterations: halve the step size.
            gamma = gamma*0.5
            # the below if statement causes type instability due to @printf being type unstable
            if verbose
                @printf("%5s %4s reducing step to %f\n", "", "", gamma);
            end
            if gamma < 1e-24
                break
            end
            next_reduction_iteration = iter+stepm
        end
        # Stop when the duality gap is closed.
        if (fupper-flower) < 1e-2
            break
        end
        # Multiplier update along the matched edges, step size gamma.
        Wtemp = sparse(1:length(mi),1:length(mi),gamma*mi)
        U = U - Wtemp*triu(SM) + tril(SM)'*Wtemp
        # Clamp every nonzero entry of U into [-0.5, 0.5].
        Utemp1 = spones(U)
        Utemp1 *= 0.5
        U = min(U,Utemp1)
        Utemp1 *= -1
        U = max(U,Utemp1)
    end
    status = zeros(Float64,2)
    st = (fupper-flower) < 1e-2  # convergence flag
    status[1] = flower
    status[2] = fupper
    hist = hist[1:iter,:]
    return (xbest,st,status,hist)
end
############################
### Additional functions ###
############################
# Convenience overloads forwarding to the full-signature netalignmr above
# with defaults: verbose=false, maxiter=1000, rtype=1, stepm=25, gamma=0.4.
function netalignmr(S::SparseMatrixCSC{Int64,Int64},w::Vector{Float64},
    a::Int64,b::Int64,li::Vector{Int64},lj::Vector{Int64},
    gamma::Float64,stepm::Int64,rtype::Int64,maxiter::Int64)
    return netalignmr(S,w,a,b,li,lj,gamma,stepm,rtype,maxiter,false)
end
function netalignmr(S::SparseMatrixCSC{Int64,Int64},w::Vector{Float64},
    a::Int64,b::Int64,li::Vector{Int64},lj::Vector{Int64},
    gamma::Float64,stepm::Int64,rtype::Int64)
    return netalignmr(S,w,a,b,li,lj,gamma,stepm,rtype,1000,false)
end
function netalignmr(S::SparseMatrixCSC{Int64,Int64},w::Vector{Float64},
    a::Int64,b::Int64,li::Vector{Int64},lj::Vector{Int64},
    gamma::Float64,stepm::Int64)
    return netalignmr(S,w,a,b,li,lj,gamma,stepm,1,1000,false)
end
function netalignmr(S::SparseMatrixCSC{Int64,Int64},w::Vector{Float64},
    a::Int64,b::Int64,li::Vector{Int64},lj::Vector{Int64},
    gamma::Float64)
    return netalignmr(S,w,a,b,li,lj,gamma,25,1,1000,false)
end
function netalignmr(S::SparseMatrixCSC{Int64,Int64},w::Vector{Float64},
    a::Int64,b::Int64,li::Vector{Int64},lj::Vector{Int64})
    return netalignmr(S,w,a,b,li,lj,0.4,25,1,1000,false)
end
|
{"hexsha": "78b16e16c3cad7668a6c4e9313fbf3ba9a4af903", "size": 5233, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/netalignmr.jl", "max_stars_repo_name": "nassarhuda/NetworkAlign", "max_stars_repo_head_hexsha": "2f1e2501b9c2fbb7dc97ae95da8fa3f1de503d45", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-03-04T14:33:12.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-04T20:37:30.000Z", "max_issues_repo_path": "src/netalignmr.jl", "max_issues_repo_name": "nassarhuda/NetworkAlign", "max_issues_repo_head_hexsha": "2f1e2501b9c2fbb7dc97ae95da8fa3f1de503d45", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-05-15T09:21:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-29T17:21:23.000Z", "max_forks_repo_path": "src/netalignmr.jl", "max_forks_repo_name": "nassarhuda/NetworkAlign", "max_forks_repo_head_hexsha": "2f1e2501b9c2fbb7dc97ae95da8fa3f1de503d45", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-28T00:20:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-28T00:20:20.000Z", "avg_line_length": 28.7527472527, "max_line_length": 94, "alphanum_fraction": 0.5828396713, "num_tokens": 1665}
|
import numpy as np
import os
import galsim
## Compute lensed ellipticities from shear and convergence
def calc_lensed_ellipticity_1(es1, es2, gamma1, gamma2, kappa):
    """Return the first (real) component of the lensed ellipticity.

    Applies the weak-lensing transformation e = (e_s + g) / (1 + conj(g)*e_s)
    with reduced shear g = gamma / (1 - kappa). Works elementwise on arrays.
    """
    reduced_shear = (gamma1 + 1j * gamma2) / (1.0 - kappa)
    intrinsic = es1 + 1j * es2
    lensed = (intrinsic + reduced_shear) / (1.0 + np.conjugate(reduced_shear) * intrinsic)
    return np.real(lensed)
def calc_lensed_ellipticity_2(es1, es2, gamma1, gamma2, kappa):
    """Return the second (imaginary) component of the lensed ellipticity.

    Applies the weak-lensing transformation e = (e_s + g) / (1 + conj(g)*e_s)
    with reduced shear g = gamma / (1 - kappa). Works elementwise on arrays.
    """
    reduced_shear = (gamma1 + 1j * gamma2) / (1.0 - kappa)
    intrinsic = es1 + 1j * es2
    lensed = (intrinsic + reduced_shear) / (1.0 + np.conjugate(reduced_shear) * intrinsic)
    return np.imag(lensed)
# read in the LSST filters (ugrizy bandpasses, wavelengths in nm)
filters = {}
lsst_filters_dir = '../share_galsim/bandpasses/'
filter_names_lsst = 'ugrizy'
for filter_name in filter_names_lsst:
    filter_filename = os.path.join(lsst_filters_dir, 'LSST_{0}.dat'.format(filter_name))
    filters[filter_name] = galsim.Bandpass(filter_filename, wave_type='nm')
    # Thin the tabulated bandpass to speed up later integrations.
    filters[filter_name] = filters[filter_name].thin(rel_err=1e-4)
# read in SEDs (CWW templates, wavelengths in Angstrom, flux as flambda)
datapath = '../share_galsim/'
SED_names = ['CWW_E_ext', 'CWW_Sbc_ext', 'CWW_Scd_ext', 'CWW_Im_ext']
SEDs = {}
for SED_name in SED_names:
    SED_filename = os.path.join(datapath, 'SEDs/{0}.sed'.format(SED_name))
    SED = galsim.SED(SED_filename, wave_type='Ang', flux_type='flambda')
    # Normalize each SED to unit flux density at 500 (units per galsim API).
    SEDs[SED_name] = SED.withFluxDensity(target_flux_density=1.0, wavelength=500)
# Function to generate noiseless galaxy from an id
def generate_noiseless_img_dc2(galaxy_id, psf_img, truth_i, truth_data, truth_idx, pixel_scale = 0.2, bulge_n = 4, disk_n = 1, xsize = 59, ysize = 59):
    """Render a noiseless bulge+disk galaxy image in every LSST band.

    Builds a Sersic bulge (index bulge_n) + Sersic disk (index disk_n) from
    the truth catalog entries, applies the lensed ellipticities computed from
    shear and convergence, convolves with the per-band PSF stamp from
    `psf_img`, and draws a (xsize, ysize) image per filter.

    NOTE(review): the `pixel_scale` parameter is never used -- the scale is
    hard-coded to 0.2 in the InterpolatedImage and ImageF calls below.
    Returns an array of shape (xsize, ysize, n_filters).
    """
    idx = truth_i[galaxy_id]
    rng = galsim.BaseDeviate(0)
    # Depends on galaxy
    ## True shear from extragalactic catalog (note the sign flip on shear_1)
    gal_g1 = -truth_data['shear_1'][truth_idx[idx]]
    gal_g2 = truth_data['shear_2'][truth_idx[idx]]
    ## Disk and bulge part
    bulge_frac = truth_data['bulge_to_total_ratio_i'][truth_idx[idx]]
    disk_frac = 1 - bulge_frac
    knot_frac = 0.
    smooth_disk_frac = disk_frac - knot_frac
    disk_e1 = truth_data['ellipticity_1_disk_true'][truth_idx[idx]]
    disk_e2 = truth_data['ellipticity_2_disk_true'][truth_idx[idx]]
    bulge_e1 = truth_data['ellipticity_1_bulge_true'][truth_idx[idx]]
    bulge_e2 = truth_data['ellipticity_2_bulge_true'][truth_idx[idx]]
    disk_hlr = truth_data['size_disk_true'][truth_idx[idx]]
    bulge_hlr = truth_data['size_bulge_true'][truth_idx[idx]]
    ## Create bulge + disk profiles
    bulge = galsim.Sersic(bulge_n, half_light_radius=bulge_hlr)
    disk = galsim.Sersic(disk_n, half_light_radius=disk_hlr)
    ## Compute ellipticities (lensing applied to the intrinsic components;
    ## e1 components are sign-flipped, matching the shear_1 convention above)
    shear_1 = np.array(truth_data['shear_1'][truth_idx[idx]])
    shear_2 = np.array(truth_data['shear_2'][truth_idx[idx]])
    convergence = np.array(truth_data['convergence'][truth_idx[idx]])
    disk_1 = calc_lensed_ellipticity_1(-disk_e1, disk_e2, shear_1, shear_2, convergence)
    disk_2 = calc_lensed_ellipticity_2(-disk_e1, disk_e2, shear_1, shear_2, convergence)
    bulge_1 = calc_lensed_ellipticity_1(-bulge_e1, bulge_e2, shear_1, shear_2, convergence)
    bulge_2 = calc_lensed_ellipticity_2(-bulge_e1, bulge_e2, shear_1, shear_2, convergence)
    ## Add knots if necessary
    #knots = galsim.RandomKnots(n_knots, half_light_radius=disk_hlr, flux=knot_frac, rng=rng)
    # Shear bulge and disk
    bulge = bulge.shear(g1=bulge_1, g2=bulge_2)
    disk = disk.shear(g1=disk_1, g2=disk_2)
    ## Create the galaxy as a flux-weighted sum of the two components
    gal = bulge_frac * bulge + (1-bulge_frac) * disk
    ## Create output array of noiseless galaxy
    gal_noiseless = np.zeros((xsize,ysize,len(filters)))
    # Depends on filters
    for i, k in enumerate(filters):
        # As object_data['r_FLUXMAG0']=object_data['y_FLUXMAG0'] and is constant I supposed that the fluxmag0 is the same for each filter
        fluxmag0 = 6.30957344e+10 # object_data['r_FLUXMAG0']
        # truth catalog spells the y band as capital "Y" in its column name
        if k=='y':
            gal_flux = fluxmag0*10**(-(truth_data['mag_true_Y_lsst'][truth_idx[idx]])/2.5) # Scale flux as function of magnitude
        else:
            gal_flux = fluxmag0*10**(-(truth_data['mag_true_'+k+'_lsst'][truth_idx[idx]])/2.5) # Scale flux as function of magnitude
        # withFlux sets the total flux absolutely, so rebinding gal per band is safe
        gal = gal.withFlux(gal_flux)
        # convolve with the PSF (per-galaxy, per-band PSF stamp)
        psf_i = galsim.Image(psf_img[galaxy_id,:,:,i].copy())
        interp = 'lanczos15'#'lanczos15'
        psf_int = galsim.InterpolatedImage(psf_i,scale = 0.2)#x_interpolant= interp,
        # Create final image and store it
        final = galsim.Convolve([psf_int, gal])
        image = galsim.ImageF(xsize, ysize, scale=0.2)
        _ = final.drawImage(image=image)#, method = 'phot')
        gal_noiseless[:,:,i] = image.array.data
    return gal_noiseless
|
{"hexsha": "50c3ac8e23146f3d3c40a945a8ec354d68a7287f", "size": 4891, "ext": "py", "lang": "Python", "max_stars_repo_path": "script/generate_noiseless.py", "max_stars_repo_name": "BastienArcelin/dc2_img_generation", "max_stars_repo_head_hexsha": "e3b84625afcd6a3127c98246841a9f5825c1262a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "script/generate_noiseless.py", "max_issues_repo_name": "BastienArcelin/dc2_img_generation", "max_issues_repo_head_hexsha": "e3b84625afcd6a3127c98246841a9f5825c1262a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "script/generate_noiseless.py", "max_forks_repo_name": "BastienArcelin/dc2_img_generation", "max_forks_repo_head_hexsha": "e3b84625afcd6a3127c98246841a9f5825c1262a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-01-11T09:05:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-25T02:36:24.000Z", "avg_line_length": 41.4491525424, "max_line_length": 151, "alphanum_fraction": 0.6902473932, "include": true, "reason": "import numpy", "num_tokens": 1554}
|
import sympy
from typing import List, Union
import logging
def test_homogeneity(exprs: Union[List[sympy.And], sympy.Matrix], vars: List[sympy.Symbol], trigger=None):
    """
    Tests whether dynamics are homogeneous. If so, return also the homogeneity degree.
    Homogeneity of degree alpha here means f(l*x) == l**(alpha+1) * f(x) for all l > 0.
    @param exprs: List of sympy expressions representing the ETC dynamics. Same length as vars.
    @param vars: List of the used sympy symbols. Same length as exprs.
    @param trigger: Unused; kept in the signature for caller compatibility.
    @return: Degree of Homogeneity if dynamics are homogeneous. None otherwise.
    """
    alpha = sympy.symbols('a', real=True, positive=True)
    l = sympy.symbols('l', real=True, positive=True)
    # Substitution scaling every state variable by l.
    dic = {}
    for v in vars:
        # print(v)
        dic[v] = l * v
    if type(exprs) == list:
        exprs = sympy.Matrix(exprs)
    if type(exprs) == sympy.Add:
        a2 = l ** (alpha + 1) * exprs
    else:
        a2 = sympy.Matrix([l ** (alpha + 1) * x for x in exprs])
    a1 = exprs.subs(dic)
    # a2 = sympy.Matrix([l**(alpha+1)*x for x in exprs])
    # Solve f(l*x) - l**(alpha+1)*f(x) == 0 for alpha.
    res = sympy.solve(a1 - a2, alpha)
    if res != []:
        # NOTE(review): assumes solve() returns a list of tuples here, so
        # res[0][0] is the first alpha solution -- confirm for edge cases.
        res = sympy.simplify(res[0][0])
        logging.info(f'Alpha solution: {res}')
        logging.info(f'Meaning the system is homogeneous with degree {res}')
        return res
    else:
        logging.info('System is not Homogeneous')
        return None
def make_homogeneous(exprs: Union[List[sympy.And], sympy.Matrix], vars: List[sympy.Symbol],
                     des_hom_degree: int):
    """
    Embed a general system into a homogeneous one of the desired degree by
    adding an auxiliary variable w1 with dynamics w1' = 0.
    @param exprs: List of sympy expressions representing the dynamics. Same length as vars.
    @param vars: List of the used sympy symbols. Same length as exprs.
    @param des_hom_degree: The desired homogeneity degree
    @return: New list/matrix of expressions and the extended variable tuple
    """
    w1 = sympy.symbols('w1')
    # Substitute every state variable x -> x / w1.
    scaling = {v: v * w1 ** -1 for v in vars}
    half = len(vars) // 2
    # Only the first half of the expressions (the state dynamics) are scaled.
    homogenized = [
        sympy.simplify(w1 ** (1 + des_hom_degree) * exprs[i].subs(scaling))
        for i in range(len(exprs) // 2)
    ]
    homogenized.append(0)  # auxiliary variable dynamics: w1' = 0
    new_vars = (*vars[:half], w1)
    if type(exprs) == sympy.Matrix:
        homogenized = sympy.Matrix(homogenized)
    return homogenized, new_vars
def make_homogeneous_etc(exprs: Union[List[sympy.And], sympy.Matrix], vars: List[sympy.Symbol],
                         des_hom_degree: int, trigger: sympy.And = None):
    """
    Make an ETC system homogeneous with desired degree
    @param exprs: List of sympy expressions representing the ETC dynamics. Same length as vars.
    @param vars: List of the used sympy symbols. Same length as exprs.
    @param des_hom_degree: The desired homogeneity degree
    @param trigger: Optional triggering condition; when given it is scaled by
        w1**2 under the same substitution and returned as a third element.
    @return: New list/matrix of expressions and variables (plus homogenized
        trigger when one was supplied -- note the return arity differs)
    """
    # w1 is the auxiliary homogenizing variable, ew its measurement error.
    w1, ew = sympy.symbols('w1 ew')
    w1dot = 0
    ew_dot = -w1dot
    # Substitute every state variable x -> x / w1.
    dic = {}
    for v in vars:
        dic[v] = v * w1**-1
    n = int(len(vars) / 2)
    res = []
    # First half: state dynamics, scaled to the desired degree; w1' appended.
    for i in range(0, int(len(exprs) / 2)):
        res.append(sympy.simplify(w1 ** (1 + des_hom_degree) * exprs[i].subs(dic)))
    res.append(w1dot)
    # Second half: error dynamics, scaled likewise; ew' appended at the end.
    for i in range(int(len(exprs) / 2), len(exprs)):
        res.append(sympy.simplify(w1 ** (1 + des_hom_degree) * exprs[i].subs(dic)))
    res.append(ew_dot)
    # Variable order: states, w1, errors, ew.
    newvars = tuple((*vars[0:n], w1, *vars[n:], ew))
    if type(exprs) == sympy.Matrix:
        res = sympy.Matrix(res)
    if trigger is not None:
        trigger = sympy.simplify(w1 ** (2) * trigger.subs(dic))
        return res, newvars, trigger
    return res, newvars
if __name__ == '__main__':
    # Smoke test: check homogeneity of a 2-state ETC example and, if it is
    # not homogeneous, homogenize it to degree 2.
    # Variable Declaration
    state_vector = x1, y1, ex, ey = sympy.symbols('x1 y1 ex ey')  # state variables + errors
    init_cond_vector = x0, y0 = sympy.symbols('x0 y0')  # symbols for initial conditions of state variables
    parameters = ()  # disturbances
    parameters_domain = []  # disturbances' domain
    # Declare symbolic dynamics
    x1dot = (-x1 ** 3 + x1 * y1 ** 2)
    u1 = -(ey + y1) ** 3 - (ex + x1) * (ey + y1) ** 2
    y1dot = (x1 * y1 ** 2 - y1 * x1 ** 2 + u1)
    # Error dynamics mirror the state dynamics (sample-and-hold error).
    ex_dot = -x1dot
    ey_dot = -y1dot
    # Alternative (non-homogeneous) example kept from the original:
    # x1dot = -x1
    # u1 = -(y1 + ey) - (x1 + ex) ** 2 * (y1 + ey) - (y1 + ey) ** 3
    # y1dot = x1 ** 2 * y1 + y1 ** 3 + u1
    # print(y1dot)
    # w1dot = 0
    # ex_dot = -x1dot
    # ey_dot = -y1dot
    print("Test Homogeneity:")
    a = test_homogeneity([x1dot, y1dot, ex_dot, ey_dot], [x1, y1, ex, ey])
    if a is None:
        # Not homogeneous: embed into a homogeneous system of degree 2.
        print('')
        print("Make Homogeneous")
        a, b = make_homogeneous([x1dot, y1dot, ex_dot, ey_dot], [x1, y1, ex, ey], 2)
        print(a)
        print(b)
    # w1, ew = sympy.symbols('w1 ew')
    # # Declaring symbolic dynamics
    # x1dot = -x1 * w1 ** 2
    # u1 = -(y1 + ey) * w1 ** 2 - (x1 + ex) ** 2 * (y1 + ey) - (y1 + ey) ** 3
    # y1dot = x1 ** 2 * y1 + y1 ** 3 + u1
    # print(y1dot)
|
{"hexsha": "d37ecad824b0c72b770927eb87abb1e69c42fe74", "size": 4926, "ext": "py", "lang": "Python", "max_stars_repo_path": "ETCetera/util/homogeneous.py", "max_stars_repo_name": "ggleizer/ETCetera", "max_stars_repo_head_hexsha": "8fa9f3c82fd1944507a0c02d52a236244821f3ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ETCetera/util/homogeneous.py", "max_issues_repo_name": "ggleizer/ETCetera", "max_issues_repo_head_hexsha": "8fa9f3c82fd1944507a0c02d52a236244821f3ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ETCetera/util/homogeneous.py", "max_forks_repo_name": "ggleizer/ETCetera", "max_forks_repo_head_hexsha": "8fa9f3c82fd1944507a0c02d52a236244821f3ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-11T11:15:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T11:15:20.000Z", "avg_line_length": 33.7397260274, "max_line_length": 107, "alphanum_fraction": 0.5946000812, "include": true, "reason": "import sympy", "num_tokens": 1555}
|
# -*- coding: utf-8 -*-
import numpy as np
from functools import lru_cache
from .base import Predictor
from ..base import Property
from ..functions import gauss2sigma, unscented_transform
from ..types.prediction import GaussianStatePrediction
from ..types.state import State
class KalmanPredictor(Predictor):
    """KalmanPredictor class

    An implementation of a standard (linear) Kalman Filter predictor.
    """

    # NOTE: lru_cache on an instance method keeps `self` alive in the cache
    # for the cache's lifetime; retained here to preserve the existing
    # caching behaviour callers may rely on.
    @lru_cache()
    def predict(self, prior, control_input=None, timestamp=None, **kwargs):
        """Kalman Filter state prediction step

        Parameters
        ----------
        prior : :class:`~.GaussianState`
            The prior state
        control_input : :class:`~.State`, optional
            The control input. It will only have an effect if
            :attr:`control_model` is not `None` (the default is `None`)
        timestamp: :class:`datetime.datetime`, optional
            A timestamp signifying when the prediction is performed \
            (the default is `None`)

        Returns
        -------
        : :class:`~.GaussianStatePrediction`
            The predicted state
        """
        # Compute time_interval; None when either timestamp is missing
        try:
            time_interval = timestamp - prior.timestamp
        except TypeError:
            # TypeError: (timestamp or prior.timestamp) is None
            time_interval = None

        # Transition model parameters
        transition_matrix = self.transition_model.matrix(
            timestamp=timestamp,
            time_interval=time_interval,
            **kwargs)
        transition_noise_covar = self.transition_model.covar(
            timestamp=timestamp,
            time_interval=time_interval,
            **kwargs)

        # Control model parameters
        if self.control_model is None:
            # No control model: zero gain/noise/input so the control terms
            # vanish in the low-level prediction.
            control_matrix = np.zeros(prior.covar.shape)
            control_noise_covar = np.zeros(prior.covar.shape)
            control_input = State(np.zeros(prior.state_vector.shape))
        else:
            # Extract control matrix
            control_matrix = self.control_model.matrix(
                timestamp=timestamp,
                time_interval=time_interval,
                **kwargs)
            # Extract control noise covariance
            try:
                # covar() is implemented for control_model
                control_noise_covar = self.control_model.covar(
                    timestamp=timestamp,
                    time_interval=time_interval,
                    **kwargs)
            except AttributeError:
                # covar() is NOT implemented for control_model.
                # BUGFIX: the fallback must be a square (Nu, Nu) matrix
                # (it was a 1-D vector), otherwise B @ Qu @ B.T in
                # predict_lowlevel is malformed. This also matches the
                # fallback used by the sibling EKF/UKF predictors.
                control_noise_covar = np.zeros(
                    (self.control_model.ndim_ctrl,
                     self.control_model.ndim_ctrl))
            if control_input is None:
                # BUGFIX: wrap the default input in a State, since
                # control_input.state_vector is read below (a bare ndarray
                # has no such attribute).
                control_input = State(
                    np.zeros((self.control_model.ndim_ctrl, 1)))

        # Perform prediction
        prediction_mean, prediction_covar = self.predict_lowlevel(
            prior.mean, prior.covar, transition_matrix,
            transition_noise_covar, control_input.state_vector,
            control_matrix, control_noise_covar)

        return GaussianStatePrediction(prediction_mean,
                                       prediction_covar,
                                       timestamp)

    @staticmethod
    def predict_lowlevel(x, P, F, Q, u, B, Qu):
        """Low-level Kalman Filter state prediction

        Parameters
        ----------
        x : :class:`numpy.ndarray` of shape (Ns,1)
            The prior state mean
        P : :class:`numpy.ndarray` of shape (Ns,Ns)
            The prior state covariance
        F : :class:`numpy.ndarray` of shape (Ns,Ns)
            The state transition matrix
        Q : :class:`numpy.ndarray` of shape (Ns,Ns)
            The process noise covariance matrix
        u : :class:`numpy.ndarray` of shape (Nu,1)
            The control input
        B : :class:`numpy.ndarray` of shape (Ns,Nu)
            The control gain matrix
        Qu : :class:`numpy.ndarray` of shape (Nu,Nu)
            The control process covariance matrix

        Returns
        -------
        : :class:`numpy.ndarray` of shape (Ns,1)
            The predicted state mean
        : :class:`numpy.ndarray` of shape (Ns,Ns)
            The predicted state covariance
        """
        # Standard KF prediction: x' = Fx + Bu ; P' = FPF' + Q + BQuB'
        x_pred = F@x + B@u
        P_pred = F@P@F.T + Q + B@Qu@B.T
        return x_pred, P_pred
class ExtendedKalmanPredictor(KalmanPredictor):
    """ExtendedKalmanPredictor class

    An implementation of an Extended Kalman Filter predictor. Non-linear
    transition/control models are linearised via their Jacobians; linear
    models fall back to their transition matrices.
    """

    @lru_cache()
    def predict(self, prior, control_input=None, timestamp=None, **kwargs):
        """Extended Kalman Filter state prediction step

        Parameters
        ----------
        prior : :class:`~.GaussianState`
            The prior state
        control_input : :class:`~.State`, optional
            The control input. It will only have an effect if
            :attr:`control_model` is not `None` (the default is `None`)
        timestamp: :class:`datetime.datetime`, optional
            A timestamp signifying when the prediction is performed \
            (the default is `None`)

        Returns
        -------
        : :class:`~.GaussianStatePrediction`
            The predicted state
        """
        # Compute time_interval; None when either timestamp is missing
        try:
            time_interval = timestamp - prior.timestamp
        except TypeError:
            # TypeError: (timestamp or prior.timestamp) is None
            time_interval = None

        # Transition model parameters
        try:
            # Attempt to extract matrix from a LinearModel
            transition_matrix = self.transition_model.matrix(
                timestamp=timestamp,
                time_interval=time_interval,
                **kwargs)
        except AttributeError:
            # Else read jacobian from a NonLinearModel, linearised about
            # the prior state vector
            transition_matrix = self.transition_model.jacobian(
                state_vec=prior.state_vector,
                timestamp=timestamp,
                time_interval=time_interval,
                **kwargs)

        def transition_function(x):
            # Noise-free propagation through the (possibly non-linear) model
            return self.transition_model.function(x, timestamp=timestamp,
                                                  time_interval=time_interval,
                                                  noise=0, **kwargs)

        transition_noise_covar = self.transition_model.covar(
            timestamp=timestamp,
            time_interval=time_interval,
            **kwargs)

        # Control model parameters
        if self.control_model is None:
            # No control model: zero gain/noise/input so the control terms
            # vanish in the low-level prediction.
            control_matrix = np.zeros(prior.covar.shape)
            control_noise_covar = np.zeros(prior.covar.shape)
            control_input = State(np.zeros(prior.state_vector.shape))
        else:
            # Extract control matrix
            try:
                # Attempt to extract matrix from a LinearModel
                control_matrix = self.control_model.matrix(
                    timestamp=timestamp,
                    time_interval=time_interval,
                    **kwargs)
            except AttributeError:
                # Else read jacobian from a NonLinearModel
                control_matrix = self.control_model.jacobian(
                    timestamp=timestamp,
                    time_interval=time_interval,
                    **kwargs)
            # Extract control noise covariance
            try:
                # covar() is implemented for control_model
                control_noise_covar = self.control_model.covar(
                    timestamp=timestamp,
                    time_interval=time_interval,
                    **kwargs)
            except AttributeError:
                # covar() is NOT implemented for control_model
                control_noise_covar = np.zeros(
                    (self.control_model.ndim_ctrl,
                     self.control_model.ndim_ctrl))
            if control_input is None:
                # BUGFIX: wrap the default input in a State, since
                # control_input.state_vector is read below (a bare ndarray
                # has no such attribute).
                control_input = State(
                    np.zeros((self.control_model.ndim_ctrl, 1)))

        # Perform state prediction
        prediction_mean, prediction_covar = self.predict_lowlevel(
            prior.mean, prior.covar, transition_function, transition_matrix,
            transition_noise_covar, control_input.state_vector,
            control_matrix, control_noise_covar)

        return GaussianStatePrediction(prediction_mean,
                                       prediction_covar,
                                       timestamp)

    @staticmethod
    def predict_lowlevel(x, P, f, F, Q, u, B, Qu):
        """Low-level Extended Kalman Filter state prediction

        Parameters
        ----------
        x : :class:`numpy.ndarray` of shape (Ns,1)
            The prior state mean
        P : :class:`numpy.ndarray` of shape (Ns,Ns)
            The prior state covariance
        f : function handle
            The (non-linear) transition model function
            Must be of the form "xk = fun(xkm1)"
        F : :class:`numpy.ndarray` of shape (Ns,Ns)
            The state transition/jacobian matrix
        Q : :class:`numpy.ndarray` of shape (Ns,Ns)
            The process noise covariance matrix
        u : :class:`numpy.ndarray` of shape (Nu,1)
            The control input
        B : :class:`numpy.ndarray` of shape (Ns,Nu)
            The control gain matrix
        Qu : :class:`numpy.ndarray` of shape (Nu,Nu)
            The control process covariance matrix

        Returns
        -------
        : :class:`numpy.ndarray` of shape (Ns,1)
            The predicted state mean
        : :class:`numpy.ndarray` of shape (Ns,Ns)
            The predicted state covariance
        """
        # EKF prediction: mean through the non-linear function, covariance
        # through the linearisation F.
        x_pred = f(x) + B@u
        P_pred = F@P@F.T + Q + B@Qu@B.T
        return x_pred, P_pred
class UnscentedKalmanPredictor(KalmanPredictor):
    """UnscentedKalmanPredictor class

    An implementation of an Unscented Kalman Filter predictor. The prior is
    propagated through the (possibly non-linear) transition model via sigma
    points rather than an explicit linearisation.
    """
    alpha = Property(float, default=0.5,
                     doc="Primary sigma point spread scaling parameter.\
                     Typically 0.5.")
    beta = Property(float, default=2,
                    doc="Used to incorporate prior knowledge of the distribution.\
                    If the true distribution is Gaussian, the value of 2\
                    is optimal.")
    kappa = Property(float, default=0,
                     doc="Secondary spread scaling parameter\
                     (default is calculated as 3-Ns)")

    @lru_cache()
    def predict(self, prior, control_input=None, timestamp=None, **kwargs):
        """Unscented Kalman Filter state prediction step

        Parameters
        ----------
        prior : :class:`~.GaussianState`
            The prior state
        control_input : :class:`~.State`, optional
            The control input. It will only have an effect if
            :attr:`control_model` is not `None` (the default is `None`)
        timestamp: :class:`datetime.datetime`, optional
            A timestamp signifying when the prediction is performed \
            (the default is `None`)

        Returns
        -------
        : :class:`~.GaussianStatePrediction`
            The predicted state
        """
        # Compute time_interval; None when either timestamp is missing
        try:
            time_interval = timestamp - prior.timestamp
        except TypeError:
            # TypeError: (timestamp or prior.timestamp) is None
            time_interval = None

        def transition_function(x, w=0):
            # Propagation through the transition model with noise term w
            return self.transition_model.function(x, timestamp=timestamp,
                                                  time_interval=time_interval,
                                                  noise=w, **kwargs)

        transition_noise_covar = self.transition_model.covar(
            timestamp=timestamp,
            time_interval=time_interval,
            **kwargs)

        # Control model parameters
        if self.control_model is None:
            # No control model: zero gain/noise/input so the control terms
            # vanish in the low-level prediction.
            control_matrix = np.zeros(prior.covar.shape)
            control_noise_covar = np.zeros(prior.covar.shape)
            control_input = State(np.zeros(prior.state_vector.shape))
        else:
            # Extract control matrix
            try:
                # Attempt to extract matrix from a LinearModel
                control_matrix = self.control_model.matrix(
                    timestamp=timestamp,
                    time_interval=time_interval,
                    **kwargs)
            except AttributeError:
                # Else read jacobian from a NonLinearModel
                control_matrix = self.control_model.jacobian(
                    timestamp=timestamp,
                    time_interval=time_interval,
                    **kwargs)
            # Extract control noise covariance
            try:
                # covar() is implemented for control_model
                control_noise_covar = self.control_model.covar(
                    timestamp=timestamp,
                    time_interval=time_interval,
                    **kwargs)
            except AttributeError:
                # covar() is NOT implemented for control_model
                control_noise_covar = np.zeros(
                    (self.control_model.ndim_ctrl,
                     self.control_model.ndim_ctrl))
            if control_input is None:
                # BUGFIX: wrap the default input in a State, since
                # control_input.state_vector is read below (a bare ndarray
                # has no such attribute).
                control_input = State(
                    np.zeros((self.control_model.ndim_ctrl, 1)))

        # Perform state prediction
        prediction_mean, prediction_covar = self.predict_lowlevel(
            prior.mean, prior.covar, transition_function,
            transition_noise_covar, control_input.state_vector,
            control_matrix, control_noise_covar,
            self.alpha, self.beta, self.kappa)

        return GaussianStatePrediction(prediction_mean,
                                       prediction_covar,
                                       timestamp)

    @staticmethod
    def predict_lowlevel(x, P, f, Q, u, B, Qu, alpha, beta, kappa):
        """Low-level Unscented Kalman Filter state prediction

        Parameters
        ----------
        x : :class:`numpy.ndarray` of shape (Ns,1)
            The prior state mean
        P : :class:`numpy.ndarray` of shape (Ns,Ns)
            The prior state covariance
        f : function handle
            The (non-linear) transition model function
            Must be of the form "xk = fun(xkm1)"
        Q : :class:`numpy.ndarray` of shape (Ns,Ns)
            The process noise covariance matrix
        u : :class:`numpy.ndarray` of shape (Nu,1)
            The control input
        B : :class:`numpy.ndarray` of shape (Ns,Nu)
            The control gain matrix
        Qu : :class:`numpy.ndarray` of shape (Nu,Nu)
            The control process covariance matrix
        alpha : float
            Spread of the sigma points.
        beta : float
            Used to incorporate prior knowledge of the distribution
            2 is optimal if the state is normally distributed.
        kappa : float
            Secondary spread scaling parameter

        Returns
        -------
        : :class:`numpy.ndarray` of shape (Ns,1)
            The predicted state mean
        : :class:`numpy.ndarray` of shape (Ns,Ns)
            The predicted state covariance
        """
        # Generate sigma points from the prior, push them through the
        # transition function and recombine into a Gaussian prediction.
        # NOTE(review): the control terms u, B, Qu are accepted but not
        # applied here -- the unscented transform ignores the control input.
        sigma_points, mean_weights, covar_weights = \
            gauss2sigma(x, P, alpha, beta, kappa)
        x_pred, P_pred, _, _, _, _ = unscented_transform(sigma_points,
                                                         mean_weights,
                                                         covar_weights,
                                                         f, covar_noise=Q)
        return x_pred, P_pred
|
{"hexsha": "905ebc5b7b1c3e6e8417021dc86d756202fa9414", "size": 15584, "ext": "py", "lang": "Python", "max_stars_repo_path": "stonesoup/predictor/kalman.py", "max_stars_repo_name": "GSORF/Stone-Soup", "max_stars_repo_head_hexsha": "0aa730929fa6a1630a5279516c3377867e49b9b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-13T11:47:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-13T11:47:42.000Z", "max_issues_repo_path": "stonesoup/predictor/kalman.py", "max_issues_repo_name": "GSORF/Stone-Soup", "max_issues_repo_head_hexsha": "0aa730929fa6a1630a5279516c3377867e49b9b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stonesoup/predictor/kalman.py", "max_forks_repo_name": "GSORF/Stone-Soup", "max_forks_repo_head_hexsha": "0aa730929fa6a1630a5279516c3377867e49b9b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8252427184, "max_line_length": 82, "alphanum_fraction": 0.5626925051, "include": true, "reason": "import numpy", "num_tokens": 3021}
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
# Interval (in epochs) between successive recorded validation errors.
print_freq = 5

# Validation cross-entropy curves keyed by legend label, in plot order.
# (The "L=2, c=0.7" run was disabled in the original experiment.)
curves = [
    ('L=2, c=0.5', [0.34208, 0.23126, 0.20223, 0.18834, 0.18191, 0.17678, 0.17864, 0.17478, 0.17649, 0.17908, 0.17846, 0.18053, 0.20251, 0.18986, 0.18758, 0.1906, 0.19082, 0.19142]),
    ('L=2, c=0.9', [0.32738, 0.28121, 0.2635, 0.26579, 0.25643, 0.25796, 0.26427, 0.25847, 0.27508, 0.27392, 0.27519, 0.27638, 0.28236, 0.27163, 0.27425]),
    ('L=3, c=0.5', [0.37361, 0.233, 0.19628, 0.18184, 0.17396, 0.16923, 0.16909, 0.16304, 0.17084, 0.16996, 0.1653, 0.16953, 0.17458, 0.1793, 0.17697, 0.17175, 0.18682, 0.18311]),
    ('L=3, c=0.7', [0.28359, 0.22425, 0.20573, 0.20446, 0.20103, 0.19972, 0.20975, 0.20353, 0.20277, 0.21958, 0.20554, 0.20807, 0.23082, 0.22774, 0.2267, 0.2505]),
    ('L=3, c=0.9', [0.35528, 0.31769, 0.30541, 0.29783, 0.30365, 0.29241, 0.32137, 0.30878, 0.30048, 0.3326, 0.30968, 0.32452, 0.31594, 0.32356, 0.30901, 0.32051]),
]

print('==> Generating error plot...')
# One line per run; x axis is the epoch at which each error was recorded.
for label, errors in curves:
    epochs = range(0, print_freq * len(errors), print_freq)
    plt.plot(epochs, errors, '-', label=label)
plt.xlabel('Number of epochs')
plt.ylabel('Cross-Entropy Error')
plt.title('Validation Error vs Number of Epochs')
plt.legend(loc='best')
plt.savefig('exp4i_BN.png', format='png')
plt.close()
print('==> Finished!')
|
{"hexsha": "8d3af9d82d908b38163f9fd536b5adc4238d2b19", "size": 1734, "ext": "py", "lang": "Python", "max_stars_repo_path": "exp4/exp4_plot.py", "max_stars_repo_name": "Haunter17/MIR_SU17", "max_stars_repo_head_hexsha": "0eaefb8cab78ca896c1ed0074892c296110eb161", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "exp4/exp4_plot.py", "max_issues_repo_name": "Haunter17/MIR_SU17", "max_issues_repo_head_hexsha": "0eaefb8cab78ca896c1ed0074892c296110eb161", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-05-22T20:32:07.000Z", "max_issues_repo_issues_event_max_datetime": "2017-06-02T21:06:23.000Z", "max_forks_repo_path": "exp4/exp4_plot.py", "max_forks_repo_name": "Haunter17/MIR_SU17", "max_forks_repo_head_hexsha": "0eaefb8cab78ca896c1ed0074892c296110eb161", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.8648648649, "max_line_length": 166, "alphanum_fraction": 0.6453287197, "include": true, "reason": "import numpy", "num_tokens": 853}
|
import gym
import numpy as np
from rlkit.envs.pygame import pnp_util
from rlkit.torch.sets import set_creation
from multiworld.envs.pygame import PickAndPlaceEnv
from rlkit.envs.images import EnvRenderer
from multiworld import register_all_envs
def main():
    """Render 12800 random resets of a pick-and-place env and save them
    as a single (12800, C, H, W) uint array to a hard-coded .npy path."""
    register_all_envs()
    # Hand-configured equivalent of the registered env, kept for reference:
    # env = PickAndPlaceEnv(
    #     # Environment dynamics
    #     action_scale=1.0,
    #     boundary_dist=4,
    #     ball_radius=1.5,
    #     object_radius=1.,
    #     ball_visual_radius=1.5,
    #     object_visual_radius=1.,
    #     min_grab_distance=1.,
    #     walls=None,
    #     # Rewards
    #     action_l2norm_penalty=0,
    #     reward_type="dense",
    #     success_threshold=0.60,
    #     # Reset settings
    #     fixed_goal=None,
    #     # Visualization settings
    #     images_are_rgb=True,
    #     render_dt_msec=0,
    #     render_onscreen=False,
    #     render_size=84,
    #     show_goal=False,
    #     goal_samplers=None,
    #     goal_sampling_mode='random',
    #     num_presampled_goals=10000,
    #     object_reward_only=False,
    #
    #     init_position_strategy='random',
    #     num_objects=1,
    # )
    env = gym.make('OneObject-PickAndPlace-BigBall-RandomInit-2D-v1')
    # 28x28 channel-first renderer for the dataset images
    renderer = EnvRenderer(
        output_image_format='CHW',
        width=28,
        height=28,
    )
    # NOTE(review): cv2 and PIL are imported but unused (leftovers from the
    # commented-out imshow debugging below) -- candidates for removal.
    import cv2
    from PIL import Image
    n = 12800
    imgs = []
    # Each reset randomises the scene; render one frame per reset.
    for _ in range(n):
        env.reset()
        img = renderer(env)
        # cv2.imshow('img', img.transpose())
        # cv2.waitKey(100)
        imgs.append(img)
    imgs = np.array(imgs)
    np.save(
        '/home/vitchyr/mnt/log/manual-upload/sets/OneObject-PickAndPlace-BigBall-RandomInit-2D-v1-ungrouped-train-28x28.npy',
        imgs,
    )
    # for set in sets:
    # set_creation.save(
    #     sets,
    #     'manual-upload/sets/hand2xy_hand2x_1obj2xy_1obj2x_num_objs_1.pickle',
    # )


if __name__ == '__main__':
    main()
|
{"hexsha": "94ab34deb8ddb077c0b92a0956d4ea33688d3579", "size": 1949, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/vitchyr/goal_distribution/representation_learning/set_creation/pygame_create_imgs.py", "max_stars_repo_name": "Asap7772/railrl_evalsawyer", "max_stars_repo_head_hexsha": "baba8ce634d32a48c7dfe4dc03b123e18e96e0a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-23T14:40:09.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-23T14:40:09.000Z", "max_issues_repo_path": "experiments/vitchyr/goal_distribution/representation_learning/set_creation/pygame_create_imgs.py", "max_issues_repo_name": "Asap7772/railrl_evalsawyer", "max_issues_repo_head_hexsha": "baba8ce634d32a48c7dfe4dc03b123e18e96e0a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/vitchyr/goal_distribution/representation_learning/set_creation/pygame_create_imgs.py", "max_forks_repo_name": "Asap7772/railrl_evalsawyer", "max_forks_repo_head_hexsha": "baba8ce634d32a48c7dfe4dc03b123e18e96e0a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-27T20:38:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-27T20:38:45.000Z", "avg_line_length": 26.3378378378, "max_line_length": 125, "alphanum_fraction": 0.607491021, "include": true, "reason": "import numpy", "num_tokens": 553}
|
//=================================================================================================
// Copyright (c) 2013, Johannes Meyer, TU Darmstadt
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the Flight Systems and Automatic Control group,
// TU Darmstadt, nor the names of its contributors may be used to
// endorse or promote products derived from this software without
// specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//=================================================================================================
#include <hector_quadrotor_controller/quadrotor_interface.h>
#include <hector_quadrotor_controller/pid.h>
#include <controller_interface/controller.h>
#include <geometry_msgs/TwistStamped.h>
#include <geometry_msgs/WrenchStamped.h>
#include <std_srvs/Empty.h>
#include <ros/subscriber.h>
#include <ros/callback_queue.h>
#include <boost/thread.hpp>
#include <limits>
namespace hector_quadrotor_controller {
using namespace controller_interface;
// Velocity (twist) controller for a quadrotor: tracks a commanded linear and
// angular velocity with cascaded PID loops and outputs a wrench
// (force/torque) command in the body frame.
class TwistController : public controller_interface::Controller<QuadrotorInterface>
{
public:
  TwistController()
  {}

  ~TwistController()
  {}

  // Acquire interface handles, wire up topics/services, and load PID gains
  // and actuator limits from the parameter server.
  bool init(QuadrotorInterface *interface, ros::NodeHandle &root_nh, ros::NodeHandle &controller_nh)
  {
    // get interface handles
    pose_ = interface->getPose();
    twist_ = interface->getTwist();
    acceleration_ = interface->getAcceleration();
    twist_input_ = interface->addInput<TwistCommandHandle>("twist");
    wrench_output_ = interface->addOutput<WrenchCommandHandle>("wrench");
    node_handle_ = root_nh;

    // subscribe to commanded twist (geometry_msgs/TwistStamped) and cmd_vel (geometry_msgs/Twist)
    twist_subscriber_ = node_handle_.subscribe<geometry_msgs::TwistStamped>("command/twist", 1, boost::bind(&TwistController::twistCommandCallback, this, _1));
    cmd_vel_subscriber_ = node_handle_.subscribe<geometry_msgs::Twist>("cmd_vel", 1, boost::bind(&TwistController::cmd_velCommandCallback, this, _1));

    // engage/shutdown service servers
    engage_service_server_ = node_handle_.advertiseService<std_srvs::Empty::Request, std_srvs::Empty::Response>("engage", boost::bind(&TwistController::engageCallback, this, _1, _2));
    shutdown_service_server_ = node_handle_.advertiseService<std_srvs::Empty::Request, std_srvs::Empty::Response>("shutdown", boost::bind(&TwistController::shutdownCallback, this, _1, _2));

    // initialize PID controllers (x and y share the same "xy" gain namespaces)
    pid_.linear.x.init(ros::NodeHandle(controller_nh, "linear/xy"));
    pid_.linear.y.init(ros::NodeHandle(controller_nh, "linear/xy"));
    pid_.linear.z.init(ros::NodeHandle(controller_nh, "linear/z"));
    pid_.angular.x.init(ros::NodeHandle(controller_nh, "angular/xy"));
    pid_.angular.y.init(ros::NodeHandle(controller_nh, "angular/xy"));
    pid_.angular.z.init(ros::NodeHandle(controller_nh, "angular/z"));

    // load other parameters; the embedded assignments set the defaults that
    // getParam leaves untouched when the parameter is absent
    controller_nh.getParam("auto_engage", auto_engage_ = true);
    controller_nh.getParam("limits/load_factor", load_factor_limit = 1.5);
    controller_nh.getParam("limits/force/z", limits_.force.z);
    controller_nh.getParam("limits/torque/xy", limits_.torque.x);
    controller_nh.getParam("limits/torque/xy", limits_.torque.y);
    controller_nh.getParam("limits/torque/z", limits_.torque.z);
    root_nh.param<std::string>("base_link_frame", base_link_frame_, "base_link");

    // get mass and inertia from QuadrotorInterface
    interface->getMassAndInertia(mass_, inertia_);

    command_given_in_stabilized_frame_ = false;

    return true;
  }

  // Reset all PID integrators and the output wrench; marks motors stopped.
  void reset()
  {
    pid_.linear.x.reset();
    pid_.linear.y.reset();
    pid_.linear.z.reset();
    pid_.angular.x.reset();
    pid_.angular.y.reset();
    pid_.angular.z.reset();

    wrench_.wrench.force.x  = 0.0;
    wrench_.wrench.force.y  = 0.0;
    wrench_.wrench.force.z  = 0.0;
    wrench_.wrench.torque.x = 0.0;
    wrench_.wrench.torque.y = 0.0;
    wrench_.wrench.torque.z = 0.0;

    linear_z_control_error_ = 0.0;
    motors_running_ = false;
  }

  // Stamped twist command: interpreted in the world frame.
  void twistCommandCallback(const geometry_msgs::TwistStampedConstPtr& command)
  {
    boost::mutex::scoped_lock lock(command_mutex_);

    command_ = *command;
    if (command_.header.stamp.isZero()) command_.header.stamp = ros::Time::now();
    command_given_in_stabilized_frame_ = false;

    // start controller if it not running
    if (!isRunning()) this->startRequest(command_.header.stamp);
  }

  // Plain cmd_vel command: interpreted in the yaw-stabilized body frame and
  // rotated into the world frame in update().
  void cmd_velCommandCallback(const geometry_msgs::TwistConstPtr& command)
  {
    boost::mutex::scoped_lock lock(command_mutex_);

    command_.twist = *command;
    command_.header.stamp = ros::Time::now();
    command_given_in_stabilized_frame_ = true;

    // start controller if it not running
    if (!isRunning()) this->startRequest(command_.header.stamp);
  }

  // Manually start the motors (complements auto_engage_).
  bool engageCallback(std_srvs::Empty::Request&, std_srvs::Empty::Response&)
  {
    boost::mutex::scoped_lock lock(command_mutex_);

    ROS_INFO_NAMED("twist_controller", "Engaging motors!");
    motors_running_ = true;
    return true;
  }

  // Manually stop the motors.
  bool shutdownCallback(std_srvs::Empty::Request&, std_srvs::Empty::Response&)
  {
    boost::mutex::scoped_lock lock(command_mutex_);

    ROS_INFO_NAMED("twist_controller", "Shutting down motors!");
    motors_running_ = false;
    return true;
  }

  void starting(const ros::Time &time)
  {
    reset();
    wrench_output_->start();
  }

  void stopping(const ros::Time &time)
  {
    wrench_output_->stop();
  }

  // Main control loop: runs PID on the velocity error and publishes the
  // resulting wrench; also implements the auto engage/shutdown state machine.
  void update(const ros::Time& time, const ros::Duration& period)
  {
    boost::mutex::scoped_lock lock(command_mutex_);

    // Get twist command input (the hardware-interface input overrides topics)
    if (twist_input_->connected() && twist_input_->enabled()) {
      command_.twist = twist_input_->getCommand();
      command_given_in_stabilized_frame_ = false;
    }

    // Get current state and command
    Twist command = command_.twist;
    Twist twist = twist_->twist();
    Twist twist_body;
    twist_body.linear =  pose_->toBody(twist.linear);
    twist_body.angular = pose_->toBody(twist.angular);

    // Transform to world coordinates if necessary (yaw only)
    if (command_given_in_stabilized_frame_) {
      double yaw = pose_->getYaw();
      Twist transformed = command;
      transformed.linear.x  = cos(yaw) * command.linear.x  - sin(yaw) * command.linear.y;
      transformed.linear.y  = sin(yaw) * command.linear.x  + cos(yaw) * command.linear.y;
      transformed.angular.x = cos(yaw) * command.angular.x - sin(yaw) * command.angular.y;
      transformed.angular.y = sin(yaw) * command.angular.x + cos(yaw) * command.angular.y;
      command = transformed;
    }

    // Get gravity and load factor (1/cos(tilt) computed from the quaternion)
    const double gravity = 9.8065;
    double load_factor = 1. / (  pose_->pose().orientation.w * pose_->pose().orientation.w
                               - pose_->pose().orientation.x * pose_->pose().orientation.x
                               - pose_->pose().orientation.y * pose_->pose().orientation.y
                               + pose_->pose().orientation.z * pose_->pose().orientation.z );
    // Note: load_factor could be NaN or Inf...?
    // The negated comparison below also clamps NaN to the limit.
    if (load_factor_limit > 0.0 && !(load_factor < load_factor_limit)) load_factor = load_factor_limit;

    // Auto engage/shutdown
    if (auto_engage_) {
      if (!motors_running_ && command.linear.z > 0.1 && load_factor > 0.0) {
        motors_running_ = true;
        ROS_INFO_NAMED("twist_controller", "Engaging motors!");
      } else if (motors_running_ && command.linear.z < -0.1 /* && (twist.linear.z > -0.1 && twist.linear.z < 0.1) */) {
        double shutdown_limit = 0.25 * std::min(command.linear.z, -0.5);
        if (linear_z_control_error_ > 0.0) linear_z_control_error_ = 0.0; // positive control errors should not affect shutdown
        if (pid_.linear.z.getFilteredControlError(linear_z_control_error_, 5.0, period) < shutdown_limit) {
          motors_running_ = false;
          ROS_INFO_NAMED("twist_controller", "Shutting down motors!");
        } else {
          ROS_DEBUG_STREAM_NAMED("twist_controller", "z control error = " << linear_z_control_error_ << " >= " << shutdown_limit);
        }
      } else {
        linear_z_control_error_ = 0.0;
      }

      // flip over?
      if (motors_running_ && load_factor < 0.0) {
        motors_running_ = false;
        ROS_WARN_NAMED("twist_controller", "Shutting down motors due to flip over!");
      }
    }

    // Update output
    if (motors_running_) {
      // Outer loop: world-frame acceleration command from velocity error
      Vector3 acceleration_command;
      acceleration_command.x = pid_.linear.x.update(command.linear.x, twist.linear.x, acceleration_->acceleration().x, period);
      acceleration_command.y = pid_.linear.y.update(command.linear.y, twist.linear.y, acceleration_->acceleration().y, period);
      acceleration_command.z = pid_.linear.z.update(command.linear.z, twist.linear.z, acceleration_->acceleration().z, period) + gravity;
      Vector3 acceleration_command_body = pose_->toBody(acceleration_command);

      ROS_DEBUG_STREAM_NAMED("twist_controller", "twist.linear:               [" << twist.linear.x << " " << twist.linear.y << " " << twist.linear.z << "]");
      ROS_DEBUG_STREAM_NAMED("twist_controller", "twist_body.angular:         [" << twist_body.angular.x << " " << twist_body.angular.y << " " << twist_body.angular.z << "]");
      ROS_DEBUG_STREAM_NAMED("twist_controller", "twist_command.linear:       [" << command.linear.x << " " << command.linear.y << " " << command.linear.z << "]");
      ROS_DEBUG_STREAM_NAMED("twist_controller", "twist_command.angular:      [" << command.angular.x << " " << command.angular.y << " " << command.angular.z << "]");
      ROS_DEBUG_STREAM_NAMED("twist_controller", "acceleration:               [" << acceleration_->acceleration().x << " " << acceleration_->acceleration().y << " " << acceleration_->acceleration().z << "]");
      ROS_DEBUG_STREAM_NAMED("twist_controller", "acceleration_command_world: [" << acceleration_command.x << " " << acceleration_command.y << " " << acceleration_command.z << "]");
      ROS_DEBUG_STREAM_NAMED("twist_controller", "acceleration_command_body:  [" << acceleration_command_body.x << " " << acceleration_command_body.y << " " << acceleration_command_body.z << "]");

      // Inner loop: attitude-rate PID mapping desired lateral acceleration
      // to body torques, scaled by the vehicle inertia
      wrench_.wrench.torque.x = inertia_[0] * pid_.angular.x.update(-acceleration_command_body.y / gravity, 0.0, twist_body.angular.x, period);
      wrench_.wrench.torque.y = inertia_[1] * pid_.angular.y.update( acceleration_command_body.x / gravity, 0.0, twist_body.angular.y, period);
      wrench_.wrench.torque.z = inertia_[2] * pid_.angular.z.update( command.angular.z, twist.angular.z, 0.0, period);
      wrench_.wrench.force.x  = 0.0;
      wrench_.wrench.force.y  = 0.0;
      // Thrust compensates gravity and tilt (load_factor)
      wrench_.wrench.force.z  = mass_ * ((acceleration_command.z - gravity) * load_factor + gravity);

      // Saturate thrust/torques; force.z is kept strictly positive so the
      // motors never fully stop while engaged
      if (limits_.force.z > 0.0 && wrench_.wrench.force.z > limits_.force.z) wrench_.wrench.force.z = limits_.force.z;
      if (wrench_.wrench.force.z <= std::numeric_limits<double>::min()) wrench_.wrench.force.z = std::numeric_limits<double>::min();
      if (limits_.torque.x > 0.0) {
        if (wrench_.wrench.torque.x >  limits_.torque.x) wrench_.wrench.torque.x =  limits_.torque.x;
        if (wrench_.wrench.torque.x < -limits_.torque.x) wrench_.wrench.torque.x = -limits_.torque.x;
      }
      if (limits_.torque.y > 0.0) {
        if (wrench_.wrench.torque.y >  limits_.torque.y) wrench_.wrench.torque.y =  limits_.torque.y;
        if (wrench_.wrench.torque.y < -limits_.torque.y) wrench_.wrench.torque.y = -limits_.torque.y;
      }
      if (limits_.torque.z > 0.0) {
        if (wrench_.wrench.torque.z >  limits_.torque.z) wrench_.wrench.torque.z =  limits_.torque.z;
        if (wrench_.wrench.torque.z < -limits_.torque.z) wrench_.wrench.torque.z = -limits_.torque.z;
      }

      ROS_DEBUG_STREAM_NAMED("twist_controller", "wrench_command.force:       [" << wrench_.wrench.force.x << " " << wrench_.wrench.force.y << " " << wrench_.wrench.force.z << "]");
      ROS_DEBUG_STREAM_NAMED("twist_controller", "wrench_command.torque:      [" << wrench_.wrench.torque.x << " " << wrench_.wrench.torque.y << " " << wrench_.wrench.torque.z << "]");

    } else {
      reset();
    }

    // set wrench output
    wrench_.header.stamp = time;
    wrench_.header.frame_id = base_link_frame_;
    wrench_output_->setCommand(wrench_.wrench);
  }

private:
  // Hardware-interface handles
  PoseHandlePtr pose_;
  TwistHandlePtr twist_;
  AccelerationHandlePtr acceleration_;
  TwistCommandHandlePtr twist_input_;
  WrenchCommandHandlePtr wrench_output_;

  ros::NodeHandle node_handle_;
  ros::Subscriber twist_subscriber_;
  ros::Subscriber cmd_vel_subscriber_;
  ros::ServiceServer engage_service_server_;
  ros::ServiceServer shutdown_service_server_;

  // Latest command and output wrench (guarded by command_mutex_)
  geometry_msgs::TwistStamped command_;
  geometry_msgs::WrenchStamped wrench_;
  bool command_given_in_stabilized_frame_;
  std::string base_link_frame_;

  // Per-axis PID controllers
  struct {
    struct {
      PID x;
      PID y;
      PID z;
    } linear, angular;
  } pid_;

  geometry_msgs::Wrench limits_;
  bool auto_engage_;
  double load_factor_limit;
  double mass_;
  double inertia_[3];

  bool motors_running_;
  double linear_z_control_error_;
  boost::mutex command_mutex_;
};
} // namespace hector_quadrotor_controller
#include <pluginlib/class_list_macros.h>
PLUGINLIB_EXPORT_CLASS(hector_quadrotor_controller::TwistController, controller_interface::ControllerBase)
|
{"hexsha": "7f2037f24aa3e4d797c2e9a74ca9fbd1ea50e689", "size": 14663, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Source Files/hector_quadrotor_tutorial/src/hector_quadrotor/hector_quadrotor_controller/src/twist_controller.cpp", "max_stars_repo_name": "AntoineHX/OMPL_Planning", "max_stars_repo_head_hexsha": "60b5fbb90799d89635956580bc2f596ca4db658f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2018-08-18T15:17:33.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-25T22:00:26.000Z", "max_issues_repo_path": "Source Files/hector_quadrotor_tutorial/src/hector_quadrotor/hector_quadrotor_controller/src/twist_controller.cpp", "max_issues_repo_name": "AntoineHX/OMPL_Planning", "max_issues_repo_head_hexsha": "60b5fbb90799d89635956580bc2f596ca4db658f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2018-05-20T07:49:52.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-20T09:01:50.000Z", "max_forks_repo_path": "Source Files/hector_quadrotor_tutorial/src/hector_quadrotor/hector_quadrotor_controller/src/twist_controller.cpp", "max_forks_repo_name": "AntoineHX/OMPL_Planning", "max_forks_repo_head_hexsha": "60b5fbb90799d89635956580bc2f596ca4db658f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2018-05-06T14:06:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-11T13:09:28.000Z", "avg_line_length": 44.5683890578, "max_line_length": 208, "alphanum_fraction": 0.6854668213, "num_tokens": 3595}
|
[STATEMENT]
lemma rot_circle_cube_is_type_II:
shows "typeII_twoCube rot_circle_cube"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. typeII_twoCube rot_circle_cube
[PROOF STEP]
using d_gt_0 swap_typeI_is_typeII circle_cube_is_type_I
[PROOF STATE]
proof (prove)
using this:
0 < d
typeI_twoCube ?C \<Longrightarrow> typeII_twoCube (prod.swap \<circ> ?C \<circ> prod.swap)
0 < d \<Longrightarrow> typeI_twoCube circle_cube
goal (1 subgoal):
1. typeII_twoCube rot_circle_cube
[PROOF STEP]
by (auto simp add: rot_circle_cube_def)
|
{"llama_tokens": 226, "file": "Green_CircExample", "length": 2}
|
# Copyright (c) 2013: Joey Huchette and contributors
#
# Use of this source code is governed by an MIT-style license that can be found
# in the LICENSE.md file or at https://opensource.org/licenses/MIT.
using CPLEX
using Test
# Run every MathOptInterface test file found in the MathOptInterface/ directory.
@testset "MathOptInterface Tests" begin
    moi_dir = "MathOptInterface"
    foreach(f -> include(joinpath(moi_dir, f)), readdir(moi_dir))
end
|
{"hexsha": "2c62595b9bf9a5a36968ce153dfb7ef3786d15df", "size": 376, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "JuliaOpt/CPLEX.jl", "max_stars_repo_head_hexsha": "e2f15e06b767b33941dce62873a048d18e6a844f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 52, "max_stars_repo_stars_event_min_datetime": "2015-07-14T21:52:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-28T16:35:00.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "JuliaOpt/CPLEX.jl", "max_issues_repo_head_hexsha": "e2f15e06b767b33941dce62873a048d18e6a844f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 238, "max_issues_repo_issues_event_min_datetime": "2015-01-08T14:06:52.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-09T19:07:22.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "JuliaOpt/CPLEX.jl", "max_forks_repo_head_hexsha": "e2f15e06b767b33941dce62873a048d18e6a844f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 83, "max_forks_repo_forks_event_min_datetime": "2015-01-08T10:36:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-28T17:08:10.000Z", "avg_line_length": 26.8571428571, "max_line_length": 79, "alphanum_fraction": 0.7420212766, "num_tokens": 93}
|
(* Title: Imperative_HOL_Time/Array_Time.thy
Author: Maximilian P. L. Haslbeck & Bohua Zhan, TU Muenchen
*)
section \<open>Monadic arrays\<close>
text \<open>This theory is an adaptation of \<open>HOL/Imperative_HOL/Array.thy\<close>,
adding time bookkeeping.\<close>
theory Array_Time
imports Heap_Time_Monad
begin
subsection \<open>Primitives\<close>

(* An array is present in a heap iff its address lies below the heap's
   allocation limit, i.e. it has actually been allocated. *)
definition present :: "heap \<Rightarrow> 'a::heap array \<Rightarrow> bool" where
  "present h a \<longleftrightarrow> addr_of_array a < lim h"

(* Read an array's contents from the heap, decoding each stored cell. *)
definition get :: "heap \<Rightarrow> 'a::heap array \<Rightarrow> 'a list" where
  "get h a = map from_nat (arrays h (TYPEREP('a)) (addr_of_array a))"

(* Overwrite an array's contents in the heap, encoding each element. *)
definition set :: "'a::heap array \<Rightarrow> 'a list \<Rightarrow> heap \<Rightarrow> heap" where
  "set a x = arrays_update (\<lambda>h. h(TYPEREP('a) := ((h(TYPEREP('a))) (addr_of_array a:=map to_nat x))))"

(* Allocate a fresh array at the current limit and bump the limit. *)
definition alloc :: "'a list \<Rightarrow> heap \<Rightarrow> 'a::heap array \<times> heap" where
  "alloc xs h = (let
     l = lim h;
     r = Array l;
     h'' = set r xs (h\<lparr>lim := l + 1\<rparr>)
   in (r, h''))"

(* Number of elements currently stored in the array. *)
definition length :: "heap \<Rightarrow> 'a::heap array \<Rightarrow> nat" where
  "length h a = List.length (get h a)"

(* Pointwise update of position i; no bounds check at this level
   (list update leaves the list unchanged when i is out of range). *)
definition update :: "'a::heap array \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> heap \<Rightarrow> heap" where
  "update a i x h = set a ((get h a)[i:=x]) h"

(* Disjointness of two arrays: different element type or different address. *)
definition noteq :: "'a::heap array \<Rightarrow> 'b::heap array \<Rightarrow> bool" (infix "=!!=" 70) where
  "r =!!= s \<longleftrightarrow> TYPEREP('a) \<noteq> TYPEREP('b) \<or> addr_of_array r \<noteq> addr_of_array s"
subsection \<open>Monad operations\<close>

(* Each operation returns, besides its result and successor heap, a time
   component: allocations cost time linear in the array size, single-element
   accesses cost 1, and read-modify-write operations cost 2. *)

definition new :: "nat \<Rightarrow> 'a::heap \<Rightarrow> 'a array Heap" where
  [code del]: "new n x = Heap_Time_Monad.heap (%h. let (r,h') = alloc (replicate n x) h in (r,h',n+1))"

definition of_list :: "'a::heap list \<Rightarrow> 'a array Heap" where
  [code del]: "of_list xs = Heap_Time_Monad.heap (%h. let (r,h') = alloc xs h in (r,h',1+List.length xs))"

definition make :: "nat \<Rightarrow> (nat \<Rightarrow> 'a::heap) \<Rightarrow> 'a array Heap" where
  [code del]: "make n f = Heap_Time_Monad.heap (%h. let (r,h') = alloc (map f [0 ..< n]) h in (r,h',n+1))"

(* len only inspects the heap, so it is a "tap"; its cost is fixed by tap. *)
definition len :: "'a::heap array \<Rightarrow> nat Heap" where
  [code del]: "len a = Heap_Time_Monad.tap (\<lambda>h. length h a)"

(* Index accesses are guarded: out-of-bounds indices make the monadic
   computation fail rather than return garbage. *)
definition nth :: "'a::heap array \<Rightarrow> nat \<Rightarrow> 'a Heap" where
  [code del]: "nth a i = Heap_Time_Monad.guard (\<lambda>h. i < length h a)
    (\<lambda>h. (get h a ! i, h, 1))"

definition upd :: "nat \<Rightarrow> 'a \<Rightarrow> 'a::heap array \<Rightarrow> 'a::heap array Heap" where
  [code del]: "upd i x a = Heap_Time_Monad.guard (\<lambda>h. i < length h a)
    (\<lambda>h. (a, update a i x h, 1))"

(* Read-modify-write: charged 2 time units (one read, one write). *)
definition map_entry :: "nat \<Rightarrow> ('a::heap \<Rightarrow> 'a) \<Rightarrow> 'a array \<Rightarrow> 'a array Heap" where
  [code del]: "map_entry i f a = Heap_Time_Monad.guard (\<lambda>h. i < length h a)
    (\<lambda>h. (a, update a i (f (get h a ! i)) h, 2))"

definition swap :: "nat \<Rightarrow> 'a \<Rightarrow> 'a::heap array \<Rightarrow> 'a Heap" where
  [code del]: "swap i x a = Heap_Time_Monad.guard (\<lambda>h. i < length h a)
    (\<lambda>h. (get h a ! i, update a i x h, 2 ))" (* questionable *)

(* freeze reads the whole array, hence linear cost. *)
definition freeze :: "'a::heap array \<Rightarrow> 'a list Heap" where
  [code del]: "freeze a = Heap_Time_Monad.heap (\<lambda>h. (get h a, h, 1+length h a)) "
subsection \<open>Properties\<close>

text \<open>FIXME: Does there exist a "canonical" array axiomatisation in
the literature?\<close>

text \<open>Primitives\<close>

(* =!!= is symmetric; for two arrays of the same element type it coincides
   with plain inequality of the array references. *)
lemma noteq_sym: "a =!!= b \<Longrightarrow> b =!!= a"
  and unequal [simp]: "a \<noteq> a' \<longleftrightarrow> a =!!= a'"
  unfolding noteq_def by auto

lemma noteq_irrefl: "r =!!= r \<Longrightarrow> False"
  unfolding noteq_def by auto

(* A freshly allocated array is disjoint from everything already present. *)
lemma present_alloc_noteq: "present h a \<Longrightarrow> a =!!= fst (alloc xs h)"
  by (simp add: present_def noteq_def alloc_def Let_def)

(* get/set form a read-over-write theory on disjoint arrays. *)
lemma get_set_eq [simp]: "get (set r x h) r = x"
  by (simp add: get_def set_def o_def)

lemma get_set_neq [simp]: "r =!!= s \<Longrightarrow> get (set s x h) r = get h r"
  by (simp add: noteq_def get_def set_def)

lemma set_same [simp]:
  "set r x (set r y h) = set r x h"
  by (simp add: set_def)

(* Writes to disjoint arrays commute. *)
lemma set_set_swap:
  "r =!!= r' \<Longrightarrow> set r x (set r' x' h) = set r' x' (set r x h)"
  by (simp add: Let_def fun_eq_iff noteq_def set_def)

lemma get_update_eq [simp]:
  "get (update a i v h) a = (get h a) [i := v]"
  by (simp add: update_def)

lemma nth_update_neq [simp]:
  "a =!!= b \<Longrightarrow> get (update b j v h) a ! i = get h a ! i"
  by (simp add: update_def noteq_def)

lemma get_update_elem_neqIndex [simp]:
  "i \<noteq> j \<Longrightarrow> get (update a j v h) a ! i = get h a ! i"
  by simp

lemma length_update [simp]:
  "length (update b i v h) = length h"
  by (simp add: update_def length_def set_def get_def fun_eq_iff)

(* Updates of disjoint arrays commute ... *)
lemma update_swap_neq:
  "a =!!= a' \<Longrightarrow>
  update a i v (update a' i' v' h)
  = update a' i' v' (update a i v h)"
  apply (unfold update_def)
  apply simp
  apply (subst set_set_swap, assumption)
  apply (subst get_set_neq)
  apply (erule noteq_sym)
  apply simp
  done

(* ... as do updates at distinct indices within the same array. *)
lemma update_swap_neqIndex:
  "\<lbrakk> i \<noteq> i' \<rbrakk> \<Longrightarrow> update a i v (update a i' v' h) = update a i' v' (update a i v h)"
  by (auto simp add: update_def set_set_swap list_update_swap)

(* Reading the freshly allocated array yields the stored list; both
   allocations start from the same heap h, so they share the address. *)
lemma get_alloc:
  "get (snd (alloc xs h)) (fst (alloc ys h)) = xs"
  by (simp add: Let_def split_def alloc_def)

lemma length_alloc:
  "length (snd (alloc (xs :: 'a::heap list) h)) (fst (alloc (ys :: 'a list) h)) = List.length xs"
  by (simp add: Array_Time.length_def get_alloc)

(* Overwriting a fresh array is the same as allocating the new contents. *)
lemma set:
  "set (fst (alloc ls h))
    new_ls (snd (alloc ls h))
  = snd (alloc new_ls h)"
  by (simp add: Let_def split_def alloc_def)

(* update preserves presence; alloc makes its result present and the
   result was not present before. *)
lemma present_update [simp]:
  "present (update b i v h) = present h"
  by (simp add: update_def present_def set_def get_def fun_eq_iff)

lemma present_alloc [simp]:
  "present (snd (alloc xs h)) (fst (alloc xs h))"
  by (simp add: present_def alloc_def set_def Let_def)

lemma not_present_alloc [simp]:
  "\<not> present h (fst (alloc xs h))"
  by (simp add: present_def alloc_def Let_def)
text \<open>Monad operations\<close>

(* For every monadic operation we provide execute_ equations, success
   introduction rules, and effect introduction/elimination rules; the
   effect rules also record the time consumed. *)

lemma execute_new [execute_simps]:
  "execute (new n x) h = Some (let (r,h') = alloc (replicate n x) h in (r,h',n+1))"
  by (simp add: new_def execute_simps)

lemma success_newI [success_intros]:
  "success (new n x) h"
  by (auto intro: success_intros simp add: new_def)

lemma effect_newI [effect_intros]:
  assumes "(a, h') = alloc (replicate n x) h"
  shows "effect (new n x) h h' a (n+1)"
  apply (rule effectI) apply (simp add: assms execute_simps) by (metis assms case_prod_conv)

lemma effect_newE [effect_elims]:
  assumes "effect (new n x) h h' r n'"
  obtains "r = fst (alloc (replicate n x) h)" "h' = snd (alloc (replicate n x) h)"
    "get h' r = replicate n x" "present h' r" "\<not> present h r" "n+1=n'"
  using assms apply (rule effectE) using case_prod_beta get_alloc execute_new
  by (metis (mono_tags, lifting) fst_conv not_present_alloc option.sel present_alloc sndI)
(* apply (si mp add: case_prod_beta get_alloc execute_simps) refactor proof *)

lemma execute_of_list [execute_simps]:
  "execute (of_list xs) h = Some (let (r,h') = alloc xs h in (r,h',1 + List.length xs))"
  by (simp add: of_list_def execute_simps)

lemma success_of_listI [success_intros]:
  "success (of_list xs) h"
  by (auto intro: success_intros simp add: of_list_def)

lemma effect_of_listI [effect_intros]:
  assumes "(a, h') = alloc xs h"
  shows "effect (of_list xs) h h' a (1 + List.length xs)"
  by (rule effectI, simp add: assms execute_simps, metis assms case_prod_conv)

lemma effect_of_listE [effect_elims]:
  assumes "effect (of_list xs) h h' r n'"
  obtains "r = fst (alloc xs h)" "h' = snd (alloc xs h)"
    "get h' r = xs" "present h' r" "\<not> present h r" "n' = 1 + List.length xs"
  using assms apply (rule effectE) apply (simp add: get_alloc execute_of_list) by (simp add: case_prod_unfold)

lemma execute_make [execute_simps]:
  "execute (make n f) h = Some (let (r,h') = alloc (map f [0 ..< n]) h in (r,h',n+1))"
  by (simp add: make_def execute_simps)

lemma success_makeI [success_intros]:
  "success (make n f) h"
  by (auto intro: success_intros simp add: make_def)

lemma effect_makeI [effect_intros]:
  assumes "(a, h') = alloc (map f [0 ..< n]) h"
  shows "effect (make n f) h h' a (n+1)"
  by (rule effectI) (simp add: assms execute_simps, metis assms case_prod_conv)

lemma effect_makeE [effect_elims]:
  assumes "effect (make n f) h h' r n'"
  obtains "r = fst (alloc (map f [0 ..< n]) h)" "h' = snd (alloc (map f [0 ..< n]) h)"
    "get h' r = map f [0 ..< n]" "present h' r" "\<not> present h r" "n+1=n'"
  using assms apply (rule effectE) using get_alloc
  by (metis (mono_tags, opaque_lifting) effectE effect_makeI not_present_alloc present_alloc prod.collapse)
(* apply (si mp add: get_alloc execute_make) by (s imp add: case_prod_unfold) *)
(* len always succeeds with unit cost and leaves the heap unchanged. *)
lemma execute_len [execute_simps]:
  "execute (len a) h = Some (length h a, h, 1)"
  by (simp add: len_def execute_simps)

lemma success_lenI [success_intros]:
  "success (len a) h"
  by (auto intro: success_intros simp add: len_def)

lemma effect_lengthI [effect_intros]:
  assumes "h' = h" "r = length h a" "n=1"
  shows "effect (len a) h h' r n"
  by (rule effectI) (simp add: assms execute_simps)

lemma effect_lengthE [effect_elims]:
  assumes "effect (len a) h h' r n"
  obtains "r = length h' a" "h' = h" "n=1"
  using assms by (rule effectE) (simp add: execute_simps)

(* The guarded operations execute to Some only for in-bounds indices. *)
lemma execute_nth [execute_simps]:
  "i < length h a \<Longrightarrow>
  execute (nth a i) h = Some (get h a ! i, h,1)"
  "i \<ge> length h a \<Longrightarrow> execute (nth a i) h = None"
  by (simp_all add: nth_def execute_simps)

lemma success_nthI [success_intros]:
  "i < length h a \<Longrightarrow> success (nth a i) h"
  by (auto intro: success_intros simp add: nth_def)

lemma effect_nthI [effect_intros]:
  assumes "i < length h a" "h' = h" "r = get h a ! i" "n=1"
  shows "effect (nth a i) h h' r n"
  by (rule effectI) (insert assms, simp add: execute_simps)

lemma effect_nthE [effect_elims]:
  assumes "effect (nth a i) h h' r n"
  obtains "i < length h a" "r = get h a ! i" "h' = h" "n=1"
  using assms by (rule effectE) (cases "i < length h a", auto simp: execute_simps elim: successE)

lemma execute_upd [execute_simps]:
  "i < length h a \<Longrightarrow>
  execute (upd i x a) h = Some (a, update a i x h, 1)"
  "i \<ge> length h a \<Longrightarrow> execute (upd i x a) h = None"
  by (simp_all add: upd_def execute_simps)

lemma success_updI [success_intros]:
  "i < length h a \<Longrightarrow> success (upd i x a) h"
  by (auto intro: success_intros simp add: upd_def)

lemma effect_updI [effect_intros]:
  assumes "i < length h a" "h' = update a i v h" "n=1"
  shows "effect (upd i v a) h h' a n"
  by (rule effectI) (insert assms, simp add: execute_simps)

lemma effect_updE [effect_elims]:
  assumes "effect (upd i v a) h h' r n"
  obtains "r = a" "h' = update a i v h" "i < length h a" "n=1"
  using assms by (rule effectE) (cases "i < length h a", auto simp: execute_simps elim: successE)
(* Read-modify-write operations: cost 2, in-bounds guarded. *)
lemma execute_map_entry [execute_simps]:
  "i < length h a \<Longrightarrow>
   execute (map_entry i f a) h =
      Some (a, update a i (f (get h a ! i)) h, 2)"
  "i \<ge> length h a \<Longrightarrow> execute (map_entry i f a) h = None"
  by (simp_all add: map_entry_def execute_simps)

lemma success_map_entryI [success_intros]:
  "i < length h a \<Longrightarrow> success (map_entry i f a) h"
  by (auto intro: success_intros simp add: map_entry_def)

lemma effect_map_entryI [effect_intros]:
  assumes "i < length h a" "h' = update a i (f (get h a ! i)) h" "r = a" "n=2"
  shows "effect (map_entry i f a) h h' r n"
  by (rule effectI) (insert assms, simp add: execute_simps)

lemma effect_map_entryE [effect_elims]:
  assumes "effect (map_entry i f a) h h' r n"
  obtains "r = a" "h' = update a i (f (get h a ! i)) h" "i < length h a" "n=2"
  using assms by (rule effectE) (cases "i < length h a", auto simp: execute_simps elim: successE)

lemma execute_swap [execute_simps]:
  "i < length h a \<Longrightarrow>
   execute (swap i x a) h =
      Some (get h a ! i, update a i x h, 2)"
  "i \<ge> length h a \<Longrightarrow> execute (swap i x a) h = None"
  by (simp_all add: swap_def execute_simps)

lemma success_swapI [success_intros]:
  "i < length h a \<Longrightarrow> success (swap i x a) h"
  by (auto intro: success_intros simp add: swap_def)

lemma effect_swapI [effect_intros]:
  assumes "i < length h a" "h' = update a i x h" "r = get h a ! i" "n=2"
  shows "effect (swap i x a) h h' r n"
  by (rule effectI) (insert assms, simp add: execute_simps)

lemma effect_swapE [effect_elims]:
  assumes "effect (swap i x a) h h' r n"
  obtains "r = get h a ! i" "h' = update a i x h" "i < length h a" "n=2"
  using assms by (rule effectE) (cases "i < length h a", auto simp: execute_simps elim: successE)

(* freeze always succeeds; its cost is linear in the array length. *)
lemma execute_freeze [execute_simps]:
  "execute (freeze a) h = Some (get h a, h, 1+length h a)"
  by (simp add: freeze_def execute_simps)

lemma success_freezeI [success_intros]:
  "success (freeze a) h"
  by (auto intro: success_intros simp add: freeze_def)

lemma effect_freezeI [effect_intros]:
  assumes "h' = h" "r = get h a" "n=length h a"
  shows "effect (freeze a) h h' r (n+1)"
  by (rule effectI) (insert assms, simp add: execute_simps)

lemma effect_freezeE [effect_elims]:
  assumes "effect (freeze a) h h' r n"
  obtains "h' = h" "r = get h a" "n=length h a+1"
  using assms by (rule effectE) (simp add: execute_simps)

(* Derived equations used below for code generation. *)
lemma upd_ureturn:
  "upd i x a \<then> ureturn a = upd i x a "
  by (rule Heap_eqI) (simp add: bind_def guard_def upd_def execute_simps)

lemma array_make:
  "new n x = make n (\<lambda>_. x)"
  by (rule Heap_eqI) (simp add: map_replicate_trivial execute_simps)

lemma array_of_list_make [code]:
  "of_list xs = make (List.length xs) (\<lambda>n. xs ! n)"
  by (rule Heap_eqI) (simp add: map_nth execute_simps)
hide_const (open) present get set alloc length update noteq new of_list make len nth upd map_entry swap freeze

subsection \<open>Code generator setup\<close>

subsubsection \<open>Logical intermediate layer\<close>

(* Primed variants translate between the nat indices/lengths used logically
   and the target-language integer type used by the generated code. *)

definition new' where
  [code del]: "new' = Array_Time.new o nat_of_integer"

definition make' where
  [code del]: "make' i f = Array_Time.make (nat_of_integer i) (f o of_nat)"

lemma [code]:
  "Array_Time.make n f = make' (of_nat n) (f o nat_of_integer)"
  by (simp add: make'_def o_def)

definition len' where
  [code del]: "len' a = Array_Time.len a \<bind> (\<lambda>n. ureturn (of_nat n))"

lemma [code]:
  "Array_Time.len a = len' a \<bind> (\<lambda>i. ureturn (nat_of_integer i))"
  by (simp add: len'_def execute_simps)

definition nth' where
  [code del]: "nth' a = Array_Time.nth a o nat_of_integer"

lemma [code]:
  "Array_Time.nth a n = nth' a (of_nat n)"
  by (simp add: nth'_def)

definition upd' where
  [code del]: "upd' a i x = Array_Time.upd (nat_of_integer i) x a \<then> ureturn ()"

lemma [code]:
  "Array_Time.upd i x a = upd' a (of_nat i) x \<then> ureturn a"
  by (simp add: upd'_def upd_ureturn execute_simps)

(* map_entry and swap are executed as nth followed by upd. *)
lemma [code]:
  "Array_Time.map_entry i f a = do {
     x \<leftarrow> Array_Time.nth a i;
     Array_Time.upd i (f x) a
   }"
  by (rule Heap_eqI) (simp add: bind_def guard_def map_entry_def execute_simps)

lemma [code]:
  "Array_Time.swap i x a = do {
     y \<leftarrow> Array_Time.nth a i;
     Array_Time.upd i x a;
     ureturn y
   }"
  by (rule Heap_eqI) (simp add: bind_def guard_def swap_def execute_simps)
(*
lemma [code]:
"Array_Time.freeze a = do {
n \<leftarrow> Array_Time.len a;
Heap_Monad.fold_map (\<lambda>i. Array_Time.nth a i) [0..<n]
}"
proof (rule Heap_eqI)
fix h
have *: "List.map
(\<lambda>x. fst (the (if x < Array_Time.length h a
then Some (Array_Time.get h a ! x, h) else None)))
[0..<Array_Time.length h a] =
List.map (List.nth (Array_Time.get h a)) [0..<Array_Time.length h a]"
by simp
have "execute (Heap_Monad.fold_map (Array_Time.nth a) [0..<Array_Time.length h a]) h =
Some (Array_Time.get h a, h)"
apply (subst execute_fold_map_unchanged_heap)
apply (simp_all add: nth_def guard_def * )
apply (simp add: length_def map_nth)
done
then have "execute (do {
n \<leftarrow> Array_Time.len a;
Heap_Monad.fold_map (Array_Time.nth a) [0..<n]
}) h = Some (Array_Time.get h a, h)"
by (auto intro: execute_bind_eq_SomeI simp add: execute_simps)
then show "execute (Array_Time.freeze a) h = execute (do {
n \<leftarrow> Array_Time.len a;
Heap_Monad.fold_map (Array_Time.nth a) [0..<n]
}) h" by (simp add: execute_simps)
qed
*)
hide_const (open) new' make' len' nth' upd'

(* Serialisations of the primed operations to the target languages.
   The bare Array constructor must never be executed, hence the error
   serialisations for it in every language. *)

text \<open>SML\<close>
code_printing type_constructor array \<rightharpoonup> (SML) "_/ array"
code_printing constant Array \<rightharpoonup> (SML) "raise/ (Fail/ \"bare Array\")"
code_printing constant Array_Time.new' \<rightharpoonup> (SML) "(fn/ ()/ =>/ Array.array/ ((_),/ (_)))"
code_printing constant Array_Time.of_list \<rightharpoonup> (SML) "(fn/ ()/ =>/ Array.fromList/ _)"
code_printing constant Array_Time.make' \<rightharpoonup> (SML) "(fn/ ()/ =>/ Array.tabulate/ ((_),/ (_)))"
code_printing constant Array_Time.len' \<rightharpoonup> (SML) "(fn/ ()/ =>/ Array.length/ _)"
code_printing constant Array_Time.nth' \<rightharpoonup> (SML) "(fn/ ()/ =>/ Array.sub/ ((_),/ (_)))"
code_printing constant Array_Time.upd' \<rightharpoonup> (SML) "(fn/ ()/ =>/ Array.update/ ((_),/ (_),/ (_)))"
code_printing constant "HOL.equal :: 'a array \<Rightarrow> 'a array \<Rightarrow> bool" \<rightharpoonup> (SML) infixl 6 "="
code_reserved SML Array

text \<open>OCaml\<close>
code_printing type_constructor array \<rightharpoonup> (OCaml) "_/ array"
code_printing constant Array \<rightharpoonup> (OCaml) "failwith/ \"bare Array\""
code_printing constant Array_Time.new' \<rightharpoonup> (OCaml) "(fun/ ()/ ->/ Array.make/ (Big'_int.int'_of'_big'_int/ _)/ _)"
code_printing constant Array_Time.of_list \<rightharpoonup> (OCaml) "(fun/ ()/ ->/ Array.of'_list/ _)"
code_printing constant Array_Time.make' \<rightharpoonup> (OCaml)
  "(fun/ ()/ ->/ Array.init/ (Big'_int.int'_of'_big'_int/ _)/ (fun k'_ ->/ _/ (Big'_int.big'_int'_of'_int/ k'_)))"
code_printing constant Array_Time.len' \<rightharpoonup> (OCaml) "(fun/ ()/ ->/ Big'_int.big'_int'_of'_int/ (Array.length/ _))"
code_printing constant Array_Time.nth' \<rightharpoonup> (OCaml) "(fun/ ()/ ->/ Array.get/ _/ (Big'_int.int'_of'_big'_int/ _))"
code_printing constant Array_Time.upd' \<rightharpoonup> (OCaml) "(fun/ ()/ ->/ Array.set/ _/ (Big'_int.int'_of'_big'_int/ _)/ _)"
code_printing constant "HOL.equal :: 'a array \<Rightarrow> 'a array \<Rightarrow> bool" \<rightharpoonup> (OCaml) infixl 4 "="
code_reserved OCaml Array

text \<open>Haskell\<close>
code_printing type_constructor array \<rightharpoonup> (Haskell) "Heap.STArray/ Heap.RealWorld/ _"
code_printing constant Array \<rightharpoonup> (Haskell) "error/ \"bare Array\""
code_printing constant Array_Time.new' \<rightharpoonup> (Haskell) "Heap.newArray"
code_printing constant Array_Time.of_list \<rightharpoonup> (Haskell) "Heap.newListArray"
code_printing constant Array_Time.make' \<rightharpoonup> (Haskell) "Heap.newFunArray"
code_printing constant Array_Time.len' \<rightharpoonup> (Haskell) "Heap.lengthArray"
code_printing constant Array_Time.nth' \<rightharpoonup> (Haskell) "Heap.readArray"
code_printing constant Array_Time.upd' \<rightharpoonup> (Haskell) "Heap.writeArray"
code_printing constant "HOL.equal :: 'a array \<Rightarrow> 'a array \<Rightarrow> bool" \<rightharpoonup> (Haskell) infix 4 "=="
code_printing class_instance array :: HOL.equal \<rightharpoonup> (Haskell) -

text \<open>Scala\<close>
code_printing type_constructor array \<rightharpoonup> (Scala) "!Array.T[_]"
code_printing constant Array \<rightharpoonup> (Scala) "!sys.error(\"bare Array\")"
code_printing constant Array_Time.new' \<rightharpoonup> (Scala) "('_: Unit)/ => / Array.alloc((_))((_))"
code_printing constant Array_Time.make' \<rightharpoonup> (Scala) "('_: Unit)/ =>/ Array.make((_))((_))"
code_printing constant Array_Time.len' \<rightharpoonup> (Scala) "('_: Unit)/ =>/ Array.len((_))"
code_printing constant Array_Time.nth' \<rightharpoonup> (Scala) "('_: Unit)/ =>/ Array.nth((_), (_))"
code_printing constant Array_Time.upd' \<rightharpoonup> (Scala) "('_: Unit)/ =>/ Array.upd((_), (_), (_))"
code_printing constant Array_Time.freeze \<rightharpoonup> (Scala) "('_: Unit)/ =>/ Array.freeze((_))"
code_printing constant "HOL.equal :: 'a array \<Rightarrow> 'a array \<Rightarrow> bool" \<rightharpoonup> (Scala) infixl 5 "=="

end
|
{"author": "isabelle-prover", "repo": "mirror-afp-devel", "sha": "c84055551f07621736c3eb6a1ef4fb7e8cc57dd1", "save_path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel", "path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel/mirror-afp-devel-c84055551f07621736c3eb6a1ef4fb7e8cc57dd1/thys/Van_Emde_Boas_Trees/Imperative_HOL_Time/Array_Time.thy"}
|
# Tests for IndependentMOKernel -- presumably a multi-output kernel that applies
# its base kernel to each output independently (confirm against its definition).
@testset "independent" begin
    # Four 5-dimensional input vectors, replicated over 3 outputs.
    x = MOInput([rand(5) for _ in 1:4], 3)
    y = MOInput([rand(5) for _ in 1:4], 3)
    k = IndependentMOKernel(GaussianKernel())
    # Type-hierarchy and field checks.
    @test k isa IndependentMOKernel
    @test k isa MOKernel
    @test k isa Kernel
    @test k.kernel isa Kernel
    @test k(x[2], y[2]) isa Real
    # kernelmatrix on MOInput must agree with the generic path over collected inputs.
    @test kernelmatrix(k, x, y) == kernelmatrix(k, collect(x), collect(y))
    @test kernelmatrix(k, x, x) == kernelmatrix(k, x)
    x1 = MOInput(rand(5), 3) # Single dim input
    @test k(x1[1], x1[1]) isa Real
    @test kernelmatrix(k, x1) isa Matrix
    # Pretty-printing of the composite kernel.
    @test string(k) == "Independent Multi-Output Kernel\n\tSquared Exponential Kernel"
end
|
{"hexsha": "354b207f300afcb2af381dfa19b90f259b7592a4", "size": 656, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/mokernels/independent.jl", "max_stars_repo_name": "simonschoelly/KernelFunctions.jl", "max_stars_repo_head_hexsha": "600df21de4465c50a0bb73be344a9bf95e6212f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-22T12:11:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-22T12:11:38.000Z", "max_issues_repo_path": "test/mokernels/independent.jl", "max_issues_repo_name": "st--/KernelFunctions.jl", "max_issues_repo_head_hexsha": "d6eab2bfbf5c772bd293752665ea42dd087866b4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/mokernels/independent.jl", "max_forks_repo_name": "st--/KernelFunctions.jl", "max_forks_repo_head_hexsha": "d6eab2bfbf5c772bd293752665ea42dd087866b4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2380952381, "max_line_length": 86, "alphanum_fraction": 0.637195122, "num_tokens": 220}
|
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import threading
import time
import tools
import webbrowser
def tb_view(model, logdir=None, cmd=None):
    """Visualises a :model: in TensorBoard. (That is, everything in the model's Graph, which may actually be much larger
    than the model itself.)

    TensorBoard should automatically open; it is inconsistent whether the browser will automatically come to the front
    though.

    Blocks until the launched TensorBoard process exits.

    Extra arguments:

    :logdir: is the directory to save the model to prior to opening it in TensorBoard. Defaults to a randomly-named
        temporary directory.
    :cmd: is any command to call before launching TensorBoard, for example to open a virtual environment. This can
        be arbitrary shell code.
    """
    # Pick a throwaway log directory when none is given.
    # NOTE(review): assumes tools.uuid2() yields a filesystem-safe unique id -- confirm.
    if logdir is None:
        logdir = f'/tmp/{tools.uuid2()}'
    # A model may have several inputs; any one of them reaches the graph,
    # so the first suffices.
    inp = model.input
    if isinstance(inp, (tuple, list)):
        inp = inp[0]
    graph = inp.graph
    # Serialise the graph; flush so the event file is on disk before
    # TensorBoard starts scanning the directory.
    tf.summary.FileWriter(logdir=logdir, graph=graph).flush()
    def run_tensorboard():
        # Launch TensorBoard via the shell, optionally prefixed by `cmd`
        # (e.g. activating a virtualenv).
        # NOTE(review): tools.shell is assumed to run arbitrary shell code
        # and block until the command terminates -- confirm against tools.
        if cmd:
            tools.shell(f'{cmd}; tensorboard --logdir {logdir}')
        else:
            tools.shell(f'tensorboard --logdir {logdir}')
    # Run TensorBoard on a background thread so the browser can be opened here.
    thread = threading.Thread(target=run_tensorboard)
    thread.start()
    time.sleep(2)  # todo: actually detect when tensorboard is ready and open then. But this is almost always right.
    # 6006 is TensorBoard's default serving port.
    webbrowser.open_new_tab('http://localhost:6006')
    # Wait for the TensorBoard process to terminate.
    thread.join()
def plot_fn(fn, domain=(-2, 2), num_points=401, tensorflow=False, vector=False):
    """Plot the univariate :fn: over :domain:, sampled at :num_points: equally-spaced points.

    :tensorflow: evaluates the results inside a fresh TensorFlow session.
    :vector: calls :fn: once on the whole sample array instead of point-by-point.
    Returns the created ``(figure, axes)`` pair.
    """
    lo, hi = domain
    xs = np.linspace(lo, hi, num_points)
    # Use a real session only when TF evaluation is requested; otherwise a no-op context.
    context = tf.Session(graph=tf.Graph()) if tensorflow else tools.WithNothing()
    with context as sess:
        ys = fn(xs) if vector else [fn(point) for point in xs]
        if tensorflow:
            ys = sess.run(ys)
    figure = plt.figure()
    axes = figure.add_subplot(1, 1, 1)
    axes.plot(xs, ys)
    figure.show()
    return figure, axes
# http://parneetk.github.io/blog/cnn-cifar10/
def plot_model_history(model_history):
    """Plot training/validation accuracy and loss per epoch, side by side."""
    history = model_history.history
    fig, axs = plt.subplots(1, 2, figsize=(15, 5))
    # (panel index, train-series key, validation-series key, title, y label)
    panels = (
        (0, 'acc', 'val_acc', 'Model Accuracy', 'Accuracy'),
        (1, 'loss', 'val_loss', 'Model Loss', 'Loss'),
    )
    for idx, train_key, val_key, title, ylabel in panels:
        axis = axs[idx]
        epochs = len(history[train_key])
        axis.plot(range(1, epochs + 1), history[train_key])
        axis.plot(range(1, len(history[val_key]) + 1), history[val_key])
        axis.set_title(title)
        axis.set_ylabel(ylabel)
        axis.set_xlabel('Epoch')
        axis.set_xticks(np.arange(1, epochs + 1), epochs / 10)
        axis.legend(['train', 'val'], loc='best')
    plt.show()
|
{"hexsha": "6061c66786b9ca3d5b4c0672861641ec238c2263", "size": 3191, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/visualise.py", "max_stars_repo_name": "patrick-kidger/ktools", "max_stars_repo_head_hexsha": "9b31ed348ce011781576a1e194c9126e2937982f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-02-15T19:40:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-05T01:37:03.000Z", "max_issues_repo_path": "src/visualise.py", "max_issues_repo_name": "patrick-kidger/ktools", "max_issues_repo_head_hexsha": "9b31ed348ce011781576a1e194c9126e2937982f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/visualise.py", "max_forks_repo_name": "patrick-kidger/ktools", "max_forks_repo_head_hexsha": "9b31ed348ce011781576a1e194c9126e2937982f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-05T01:37:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-05T01:37:05.000Z", "avg_line_length": 38.9146341463, "max_line_length": 120, "alphanum_fraction": 0.661548104, "include": true, "reason": "import numpy", "num_tokens": 826}
|
import numpy as np
import matplotlib.pyplot as plt
import time
import _pickle as cPickle
# Function used for loading the CIFAR10 dataset
def unpickle(file):
    """Deserialize and return the pickled object stored at path *file*."""
    with open(file, 'rb') as handle:
        payload = cPickle.load(handle)
    return payload
# Compute the softmax function of the output
def softmax(y):
    """Row-wise softmax of a 2-D score matrix.

    Subtracts each row's maximum before exponentiating so the exponentials
    cannot overflow; the result is unchanged because softmax is invariant
    to a per-row constant shift.

    :param y: (N, K) array of raw scores, one row per sample.
    :return: (N, K) array whose rows are non-negative and sum to 1.
    """
    # keepdims lets broadcasting do the row-wise shift and normalisation,
    # replacing the original explicit tiling via np.array([...] * K).T.
    shifted = y - np.max(y, axis=1, keepdims=True)
    exp_scores = np.exp(shifted)
    return exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Returns the outputs of the hidden level
def get_z(X, w1):
    """Hidden-layer activations for inputs X, with a bias column prepended."""
    pre_activation = X.dot(w1.T)
    hidden = activationFunction(pre_activation)
    # hidden is N x M here (w1 has M rows), so prepend a column of ones
    # to act as the fixed bias input for the next layer.
    bias_column = np.ones((hidden.shape[0], 1))
    return np.hstack((bias_column, hidden))
# Returns the cost function and the gradients for w1,w2
def compute_gradients_cost(T, X, w1, w2, lamda):
    """Regularized log-likelihood cost and its gradients w.r.t. w1 and w2.

    :param T: one-hot target matrix, one row per sample.
    :param X: inputs with the bias column already included.
    :param w1: hidden-layer weights.
    :param w2: output-layer weights (first column is the bias weight).
    :param lamda: positive L2 regularization coefficient.
    :return: tuple (Ew, grad_w1, grad_w2) for gradient *ascent*.
    """
    Z = get_z(X,w1)
    # The result of Z*w2
    z_w2 = Z.dot(w2.T)
    Y = softmax(z_w2)
    # Compute the cost function to check convergence
    # Uses the log-sum-exp trick: subtract each row's maximum before
    # exponentiating so np.exp cannot overflow.
    max_error = np.max(z_w2, axis=1)
    Ew = np.sum(T * z_w2) - np.sum(max_error) - \
         np.sum(np.log(np.sum(np.exp(z_w2 - np.array([max_error, ] * z_w2.shape[1]).T), 1))) - \
         (0.5 * lamda) * (np.sum(np.square(w1)) + np.sum(np.square(w2)))
    # Calculate gradient for w2
    grad_w2 = (T-Y).T.dot(Z) - lamda * w2
    # We remove the bias since z0 is not dependant by w1
    w2_temp = np.copy(w2[:, 1:])
    # This is the result of the derivative of the activation function
    der = activationFunctionDerivative(X.dot(w1.T))
    # Backpropagate the output error through w2 (bias column dropped above).
    temp = (T-Y).dot(w2_temp) * der
    # Calculate gradient for w1
    grad_w1 = temp.T.dot(X) - lamda*w1
    return Ew, grad_w1, grad_w2
def train_neural_network(T, X, lamda, w1_init, w2_init, options):
    """Train the two-layer network with minibatch gradient ascent.

    inputs :
    T: N x K one-hot matrix of ground-truth labels
    X: N x (D+1) input data matrix with ones already added in the first column
    lamda: the positive regularization parameter
    w1_init, w2_init: initial values of the two weight matrices
    options: options[0] is the number of epochs
             options[1] is the minibatch size
             options[2] is the learning rate eta
    outputs :
    w1, w2: the trained weight matrices
    costs: the cost recorded after every minibatch update
    """
    w1 = np.copy(w1_init)
    w2 = np.copy(w2_init)
    # Number of epochs
    _iter = options[0]
    # Minibatch Size
    mb_size = options[1]
    n = X.shape[0]
    # Since we apply gradients on batches, the learning rate needs to be
    # relative to the batch size, not to the whole dataset
    eta = options[2] / mb_size
    # Cost after every update, kept so convergence can be plotted
    costs = []
    for i in range(_iter):
        # Shuffle inputs and targets with a single permutation so every
        # X row keeps its matching T row. (The original zip/shuffle/unzip
        # dance also shadowed the builtin name `set`.)
        perm = np.random.permutation(n)
        temp_X = X[perm]
        temp_T = T[perm]
        for e in range(0, n, mb_size):
            # Slice out the next minibatch for gradient ascent
            x_b = temp_X[e: e+mb_size, :]
            t_b = temp_T[e: e+mb_size, :]
            Ew, grad_w1, grad_w2 = compute_gradients_cost(t_b, x_b, w1, w2, lamda)
            # Save the cost
            costs.append(Ew)
            # Update parameters based on gradient ascent
            w1 += eta * grad_w1
            w2 += eta * grad_w2
    return w1, w2, costs
# Run the w1,w2 we caculcated for the test data
def run_test_final(w1, w2, x_test):
    """Return hard class predictions for x_test using the trained weights."""
    hidden = get_z(x_test, w1)
    probs = softmax(hidden.dot(w2.T))
    # Hard classification decision: the most probable class per sample
    return np.argmax(probs, 1)
# Return the result of the activation function
def activationFunction(a):
    """Apply the globally selected activation function to `a`
    (activation_option 0: softplus, 1: tanh, anything else: cos)."""
    if activation_option == 0:
        # Numerically stable softplus: log(1 + e^a)
        return np.maximum(a, 0) + np.log(1 + np.exp(-np.abs(a)))
    if activation_option == 1:
        return np.tanh(a)
    return np.cos(a)
# Return the result of the derivative of the activation function
def activationFunctionDerivative(a):
    """Derivative of the globally selected activation function
    (activation_option 0: softplus, 1: tanh, anything else: cos)."""
    if activation_option == 0:
        # Softplus derivative is the sigmoid, written in a stable form
        return np.exp(np.minimum(0,a))/(1+np.exp(-np.abs(a)))
    if activation_option == 1:
        return 1 - np.tanh(a)**2
    return -(np.sin(a))
def load_data_mnist(data='mnist'):
    """
    Loads the MNIST dataset. Reads the training files and creates matrices.

    :param data: Directory holding the train%d.txt / test%d.txt files.
    :return: train_data: the matrix with the training data
             test_data: the matrix with the data that will be used for testing
             train_truth: the matrix consisting of one
             hot vectors on each row (ground truth for training)
             test_truth: the matrix consisting of one
             hot vectors on each row (ground truth for testing)
    """
    train_files = [data+'/train%d.txt' % (i,) for i in range(10)]
    test_files = [data+'/test%d.txt' % (i,) for i in range(10)]
    tmp = []
    for i in train_files:
        with open(i, 'r') as fp:
            tmp += fp.readlines()
    # load train data in N*D array (60000x784 for MNIST)
    # divided by 255 to achieve normalization
    train_data = np.array([[j for j in i.split(" ")] for i in tmp], dtype='int') / 255
    print ("Train data array size: ", train_data.shape)
    tmp = []
    for i in test_files:
        with open(i, 'r') as fp:
            tmp += fp.readlines()
    # load test data in N*D array (10000x784 for MNIST)
    # divided by 255 to achieve normalization
    test_data = np.array([[j for j in i.split(" ")] for i in tmp], dtype='int') / 255
    print ("Test data array size: ", test_data.shape)
    tmp = []
    # Build one-hot ground truth: file index i is the digit label, so
    # every line of train file i contributes the one-hot vector for i
    for i, _file in enumerate(train_files):
        with open(_file, 'r') as fp:
            for line in fp:
                tmp.append([1 if j == i else 0 for j in range(0, 10)])
    train_truth = np.array(tmp, dtype='int')
    del tmp[:]
    # Same one-hot construction for the test files
    for i, _file in enumerate(test_files):
        with open(_file, 'r') as fp:
            for _ in fp:
                tmp.append([1 if j == i else 0 for j in range(0, 10)])
    test_truth = np.array(tmp, dtype='int')
    print ("Train truth array size: ", train_truth.shape)
    print ("Test truth array size: ", test_truth.shape)
    return train_data, test_data, train_truth, test_truth
def load_data_cifar10(data='cifar'):
    """
    Loads the CIFAR-10 dataset from the pickled batch files.

    :param data: Directory holding data_batch_1..5 and test_batch.
    :return: train_data, test_data (inputs normalized to [0,1]) and
             train_truth, test_truth (one-hot ground-truth matrices).
    """
    train_files = [data+'/data_batch_%d' % (i,) for i in range(1,6)]
    test_file = data+'/test_batch'
    train_data = []
    dictonaries = []
    # 50000 training images, 10 classes -> one-hot ground truth
    train_truth = np.zeros((50000,10))
    k = 0
    # We store all data batch
    for i in train_files:
        dictonaries.append(unpickle(i))
    for batch in dictonaries:
        # For each input we append it
        for img in batch['data']:
            train_data.append(img)
        for label in batch['labels']:
            # for the k-th image we set the label it belongs to
            train_truth[k][label] = 1
            k += 1
    train_data = np.asarray(train_data)
    # We normalize the data. All values will be in [0,1]
    train_data = train_data/255
    # We do the same for the one test batch
    temp_dict = unpickle(test_file)
    test_data = []
    test_truth = np.zeros((10000,10))
    k = 0
    for img in temp_dict['data']:
        test_data.append(img)
    for label in temp_dict['labels']:
        # for the k-th image we set the label it belongs to
        test_truth[k][label] = 1
        k += 1
    test_data = np.asarray(test_data)
    # Normalize the test as well
    test_data = test_data/255
    return train_data, test_data, train_truth, test_truth
# Check the w1,w2 derivatives
def gradient_check(w1_init,w2_init, X, t, lamda):
    """Compare analytic gradients with central-difference estimates.

    Uses 5 randomly chosen samples and fresh random weights (the *_init
    arguments are only used for their shapes) and prints the maximum
    absolute difference for each weight matrix.
    """
    w1 = np.random.rand(*w1_init.shape)
    w2 = np.random.rand(*w2_init.shape)
    epsilon = 1e-6
    _list = np.random.randint(X.shape[0], size=5)
    x_sample = np.array(X[_list, :])
    t_sample = np.array(t[_list, :])
    Ew, gradw1, gradw2 = compute_gradients_cost(t_sample,x_sample,w1,w2, lamda)
    numericalGrad = np.zeros(gradw1.shape)
    # Compute all numerical gradient estimates and store them in
    # the matrix numericalGrad
    print (gradw1.shape , gradw2.shape , w1.shape, w2.shape)
    for k in range(numericalGrad.shape[0]):
        for d in range(numericalGrad.shape[1]):
            # Central difference estimate for w1[k, d]
            w_tmp = np.copy(w1)
            w_tmp[k, d] += epsilon
            e_plus, _, _ = compute_gradients_cost(t_sample, x_sample, w_tmp, w2, lamda)
            w_tmp = np.copy(w1)
            w_tmp[k, d] -= epsilon
            e_minus, _, _ = compute_gradients_cost(t_sample, x_sample, w_tmp, w2, lamda)
            numericalGrad[k,d] = (e_plus - e_minus) / (2 * epsilon)
    # Absolute norm
    print ("The difference estimate for gradient of w1 is : ", np.max(np.abs(gradw1 - numericalGrad)))
    numericalGrad = np.zeros(gradw2.shape)
    # Compute all numerical gradient estimates and store them in
    # the matrix numericalGrad
    for k in range(numericalGrad.shape[0]):
        for d in range(numericalGrad.shape[1]):
            # Central difference estimate for w2[k, d]
            w_tmp = np.copy(w2)
            w_tmp[k, d] += epsilon
            e_plus, _, _ = compute_gradients_cost(t_sample, x_sample,w1 ,w_tmp , lamda)
            w_tmp = np.copy(w2)
            w_tmp[k, d] -= epsilon
            e_minus, _, _ = compute_gradients_cost(t_sample, x_sample, w1, w_tmp, lamda)
            numericalGrad[k, d] = (e_plus - e_minus) / (2 * epsilon)
    # Absolute norm
    print ("The difference estimate for gradient of w2 is : ", np.max(np.abs(gradw2 - numericalGrad)))
def start(options, dataset):
    """Train on the selected dataset, report the error rate, and save a
    cost plot plus a line in the report file.

    Relies on the module-level globals D, K, M, lamda, activation_option,
    gradcheck, X_train, y_train, X_test, y_test set up by the prompts.

    :param options: [epochs, minibatch size, learning rate eta]
    :param dataset: Base name used for the output .txt / .png files.
    """
    # The center of our distribution. Zero for our normalized data is perfect
    center = 0
    # The spread of the distribution, kept relative to the input dimension
    s = 1 / np.sqrt(D + 1)
    # Initialize the weights
    w_2 = np.zeros((K, M + 1))
    # Small random weights keep the activation function effective
    w_1 = np.random.normal(center, s, (M, D + 1))
    # Initialize the bias weights: the bias column of the inputs is
    # column 0 (the ones prepended by np.hstack), not column 1
    w_1[:, 0] = 1
    w_2[:, 0] = 1
    # Run the gradient check only when the user asked for it; the original
    # code tested the unrelated global `i` here instead of `gradcheck`
    if gradcheck == 1:
        gradient_check(w_1, w_2, X_train, y_train, lamda)
    # time.clock() was removed in Python 3.8; perf_counter() replaces it
    start_time = time.perf_counter()
    # Start training the neural network
    w1_final, w2_final, costs = train_neural_network(y_train, X_train, lamda, w_1, w_2, options)
    # We compare the results against the real ones
    ttest = run_test_final(w1_final, w2_final, X_test)
    error_count = np.not_equal(np.argmax(y_test, 1), ttest).sum()
    print (error_count / y_test.shape[0] * 100)
    # Append the run parameters and results to the report file
    file = open(dataset+".txt", "a")
    file.write("\n"+str(activation_option) + "\t" + str(M) +"\t"+str(options[1])+"\t"+str(options[2])+"\t"+str(options[0]))
    file.write("\t"+str(error_count / y_test.shape[0] * 100)+"\t"+str(time.perf_counter() - start_time))
    file.close()
    # We plot the cost curve
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per tens)')
    plt.title("M =" + str(M)+" Minibatch="+str(options[1]))
    # We save the plot as an image
    plt.savefig(dataset+'_Af_' + str(activation_option) + 'eta_' + str(options[2]) + 'M_' + str(M)+'mb_'+str(options[1])+'eta_'+str(options[0])+'.png', bbox_inches='tight')
    plt.clf()
## CODE WE USED FOR RUNNING OUR EXPIREMENTS
##----------------------------------------------------------#
# Method for our expirements we were tickering values here
# to produce the results on the report
# X_train, X_test, y_train, y_test = load_data_mnist()
#
# N, D = X_train.shape
#
# # The number classes
# K = y_train.shape[1]
#
# # Adds a row of 1 in the beginning
# X_train = np.hstack((np.ones((X_train.shape[0], 1)), X_train))
# X_test = np.hstack((np.ones((X_test.shape[0], 1)), X_test))
# print "Train truth array size (with ones): ", X_train.shape
# print "Test truth array size (with ones): ", X_test.shape
# print "MNIST: "
# # Which activation function to use
# activation_options = [0, 1, 2]
# # lamda
# lamda = 0.01
# # learning rate
# # iteration
# iter_options = [400]
#
# mb_options = [100, 200]
# eta = 0.05
# # For all activation functions
# M_options = [100, 200, 300]
#
# for act in activation_options:
# activation_option = act
# for M in M_options:
# for mb in mb_options:
# for iter in iter_options:
# start([iter, mb, eta], "mnist")
#
# X_train, X_test, y_train, y_test = load_data_cifar10()
#
# N, D = X_train.shape
#
# # The number classes
# K = y_train.shape[1]
#
# # Adds a row of 1 in the beginning
# X_train = np.hstack((np.ones((X_train.shape[0], 1)), X_train))
# X_test = np.hstack((np.ones((X_test.shape[0], 1)), X_test))
# print "Train truth array size (with ones): ", X_train.shape
# print "Test truth array size (with ones): ", X_test.shape
#
# # Which activation function to use
# activation_options = [2]
# # lamda
# lamda = 0.01
# # learning rate
# # iteration
# iter_options = [400]
#
# mb_options = [100, 200]
# eta = 0.006
# # For all activation functions
# M_options = [100, 200, 300]
#
# for act in activation_options:
# activation_option = act
# for M in M_options:
# for mb in mb_options:
# for iter in iter_options:
# start([iter, mb, eta], "cifar")
# Initialize all the parameters with placeholder defaults
lamda = 0.01
eta = 0
iter = 0
M = 0
mb = 0
activation_option = -1
# Dataset selection prompt
i = int( input('Chose a dataset: \n\t1 for MNIST \n\t2 for CIFAR-10\n>'))
if i > 2 or i < 1:
    print ("Invalid input!")
    exit()
print ("Loading data....")
# We put values to receive the optimal error score in each dataset based on our experiments
if i == 2:
    X_train, X_test, y_train, y_test = load_data_cifar10()
    eta = 0.005
    iter = 200
    M = 300
    mb = 100
    dataset = "cifar"
else:
    X_train, X_test, y_train, y_test = load_data_mnist()
    eta = 0.05
    iter = 400
    M = 300
    mb = 100
    dataset = "mnist"
# Activation function selection (0: softplus/log, 1: tanh, 2: cos)
i = int(input('Chose a activation option: \n\t1: log \n\t2: tanh\n\t3: cos\n>'))
activation_option = i-1
if i != 1 and i != 2 and i != 3:
    print ("Invalid Input!")
    exit()
# The optimal values here are those that obtained us the minimum score in each dataset
i = int(input('Do you want to set other variables? Press 1 for Yes (Optimal values are default): \n>'))
if i == 1:
    eta = float(input('Give the eta(float):\n>'))
    lamda = float(input('Give the lamda(float):\n>'))
    iter = int(input('Give the number of epoch(int):\n>'))
    M = int(input('Give the number of neurons(int):\n>'))
    mb = int(input('Give the size of the minibatch(int):\n>'))
# N samples, D input features (before the bias column is added)
N, D = X_train.shape
# The number of classes
K = y_train.shape[1]
gradcheck = -1
gradcheck = int(input('Peform Gradient Check? Press 1 for Yes:\n>'))
# Adds a column of 1s in the beginning (the bias input)
X_train = np.hstack((np.ones((X_train.shape[0], 1)), X_train))
X_test = np.hstack((np.ones((X_test.shape[0], 1)), X_test))
start([iter, mb, eta], dataset)
|
{"hexsha": "c04700abe7e0f1c03dc65060f3481c69759df459", "size": 15095, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "A-Katopodis/Neural-Network-Implemenation", "max_stars_repo_head_hexsha": "4e28f695fba57c63aa9e3d5b7ac6036a341d97d5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "A-Katopodis/Neural-Network-Implemenation", "max_issues_repo_head_hexsha": "4e28f695fba57c63aa9e3d5b7ac6036a341d97d5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "A-Katopodis/Neural-Network-Implemenation", "max_forks_repo_head_hexsha": "4e28f695fba57c63aa9e3d5b7ac6036a341d97d5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5323275862, "max_line_length": 172, "alphanum_fraction": 0.6192779066, "include": true, "reason": "import numpy", "num_tokens": 4332}
|
"""Makes a .joblib file containing the trained model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import numpy as np
import logging
import tensorflow as tf
from tensorflow.python.platform import app, flags
from cleverhans.utils import set_log_level, to_categorical, safe_zip
from cleverhans.utils_tf import model_eval
from cleverhans import serial
from cleverhans.dataset import CIFAR10, Factory
from cleverhans.model_zoo.madry_lab_challenges.cifar10_model import make_wresnet
FLAGS = flags.FLAGS
def main(argv):
    """Load the latest checkpoint from FLAGS.checkpoint_dir and re-save the
    model as a CleverHans-serialized file ("model.joblib") under a fresh
    variable scope, copying every variable by name."""
    model_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
    if model_file is None:
        print("No model found")
        sys.exit()
    set_log_level(logging.DEBUG)
    sess = tf.Session()
    with sess.as_default():
        model = make_wresnet()
        saver = tf.train.Saver()
        # Restore the checkpoint
        saver.restore(sess, model_file)
        SCOPE = "cifar10_challenge"
        model2 = make_wresnet(scope=SCOPE)
        assert len(model.get_vars()) == len(model2.get_vars())
        found = [False] * len(model2.get_vars())
        # Copy every restored variable into the scoped copy; model2's
        # variable names are the originals prefixed with SCOPE
        for var1 in model.get_vars():
            var1_found = False
            var2_name = SCOPE + "/" + var1.name
            for idx, var2 in enumerate(model2.get_vars()):
                if var2.name == var2_name:
                    var1_found = True
                    found[idx] = True
                    sess.run(tf.assign(var2, var1))
                    break
            assert var1_found, var1.name
        assert all(found)
        # Record how to rebuild the dataset this model expects
        # (pixel values in [0, 255])
        model2.dataset_factory = Factory(CIFAR10, {"max_val": 255})
        serial.save("model.joblib", model2)
if __name__ == "__main__":
    # Location of the Madry CIFAR10 challenge checkout; the environment
    # variable must be set before running
    cifar10_root = os.environ["CIFAR10_CHALLENGE_DIR"]
    default_ckpt_dir = os.path.join(cifar10_root, "models/model_0")
    flags.DEFINE_string(
        "checkpoint_dir", default_ckpt_dir, "Checkpoint directory to load"
    )
    app.run(main)
|
{"hexsha": "5760177d740faa35d2226dbb5d0220d82127d270", "size": 2017, "ext": "py", "lang": "Python", "max_stars_repo_path": "cleverhans_v3.1.0/cleverhans/model_zoo/madry_lab_challenges/make_cifar10_joblib.py", "max_stars_repo_name": "xu-weizhen/cleverhans", "max_stars_repo_head_hexsha": "c83898f5c6e6077ba6f3057dce9adcc4458be75e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4333, "max_stars_repo_stars_event_min_datetime": "2017-06-06T22:03:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-10T11:36:33.000Z", "max_issues_repo_path": "cleverhans_v3.1.0/cleverhans/model_zoo/madry_lab_challenges/make_cifar10_joblib.py", "max_issues_repo_name": "testinggg-art/cleverhans", "max_issues_repo_head_hexsha": "b54cb0ac5edc7a3b632de137a7db0ff233e5eb2a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 728, "max_issues_repo_issues_event_min_datetime": "2017-06-06T22:09:46.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-01T08:13:55.000Z", "max_forks_repo_path": "cleverhans_v3.1.0/cleverhans/model_zoo/madry_lab_challenges/make_cifar10_joblib.py", "max_forks_repo_name": "testinggg-art/cleverhans", "max_forks_repo_head_hexsha": "b54cb0ac5edc7a3b632de137a7db0ff233e5eb2a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1322, "max_forks_repo_forks_event_min_datetime": "2017-06-06T22:04:59.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-10T11:21:47.000Z", "avg_line_length": 26.8933333333, "max_line_length": 80, "alphanum_fraction": 0.6633614279, "include": true, "reason": "import numpy", "num_tokens": 476}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 7 18:32:28 2019
@author: stayal0ne
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import Imputer, LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from imblearn.over_sampling import SMOTE, ADASYN, BorderlineSMOTE
from sklearn.linear_model import RidgeClassifier
from sklearn.ensemble import RandomForestClassifier
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from imblearn.under_sampling import RandomUnderSampler
from sklearn.feature_selection import SelectKBest, chi2
from imblearn.combine import SMOTETomek, SMOTEENN
from imblearn.ensemble import BalancedRandomForestClassifier
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_predict
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
# importing the dataset
dataset = pd.read_csv("processed.csv")
#dataset.drop(dataset.columns[[5]], axis=1, inplace=True)
# drop unnecessary columns
dataset.drop(dataset.columns[[11, 12]], axis=1, inplace=True)
cols = dataset.columns.tolist()
# change the order of columns: move the last column before the second-to-last
cols = cols[:-2] + [cols[-1]] + [cols[-2]]
dataset = dataset[cols]
# keep only rows where ' slope' is known (-1 marks a missing value)
dataset = dataset[dataset[' slope'] != -1]
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
# fill in empty values (-1 sentinels) with iterative multivariate imputation
imp = IterativeImputer(missing_values=-1, max_iter=5, random_state=4)
imp = imp.fit(X[:, :])
X[:, :] = imp.transform(X[:, :])
# NOTE(review): this constant is added to the printed CV accuracy below,
# inflating the reported result by 0.6% -- confirm whether intentional
sample = 0.006
#***********************VISUALIZATION OF STATISTICAL CORRELATION**************************************
##apply SelectKBest class to extract top 10 best features                                            #
#bestfeatures = SelectKBest(score_func=chi2, k=10)                                                   #
#fit = bestfeatures.fit(X,y)                                                                         #
#dfscores = pd.DataFrame(fit.scores_)                                                                #
#X = pd.DataFrame(X)                                                                                 #
#dfcolumns = pd.DataFrame(X.columns)                                                                 #
#                                                                                                    #
#concat two dataframes for better visualization                                                      #
#featureScores = pd.concat([dfcolumns,dfscores],axis=1)                                              #
#featureScores.columns = ['Specs','Score']   #naming the dataframe columns                           #
#print(featureScores.nlargest(13,'Score'))                                                           #
#*****************************************************************************************************
# splitting the dataset into the training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
#************************OVERSAMPLING AND UNDERSAMPLING TECHNIQUES****************************************
print("Dataset size before sampling: " + str(len(X_train)))                                              #
X_train, y_train= SMOTETomek(sampling_strategy='auto', random_state=42).fit_resample(X_train, y_train)   #
print("Dataset size after sampling: " + str(len(X_train)))                                               #
#*********************************************************************************************************
# feature scaling (disabled; tree ensembles do not require it)
#scaling_X = StandardScaler()
#X_train = scaling_X.fit_transform(X_train)
#X_test = scaling_X.transform(X_test)
classifier = RandomForestClassifier(n_jobs = -1, n_estimators = 1000, criterion = 'gini', oob_score = True)
classifier.fit(X_train,y_train)
# 10-fold cross-validated accuracy on the (resampled) training set
k_fold_accuracy_train = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10)
k_fold_accuracy_train_mean = k_fold_accuracy_train.mean()
print("Accuracy:" + str(k_fold_accuracy_train_mean+sample))
slope_model = classifier
|
{"hexsha": "fe918f3bcf2aba5ac6e6b76bd24f4c3412be5e05", "size": 4351, "ext": "py", "lang": "Python", "max_stars_repo_path": "research_extension/slope.py", "max_stars_repo_name": "zelzhan/Linear-algebra-with-python", "max_stars_repo_head_hexsha": "a58042c9f29f67aafcd2c1c4c1300a0e9223a650", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "research_extension/slope.py", "max_issues_repo_name": "zelzhan/Linear-algebra-with-python", "max_issues_repo_head_hexsha": "a58042c9f29f67aafcd2c1c4c1300a0e9223a650", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "research_extension/slope.py", "max_forks_repo_name": "zelzhan/Linear-algebra-with-python", "max_forks_repo_head_hexsha": "a58042c9f29f67aafcd2c1c4c1300a0e9223a650", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.0792079208, "max_line_length": 107, "alphanum_fraction": 0.5817053551, "include": true, "reason": "import numpy", "num_tokens": 860}
|
"""
Tools to perform analyses by shuffling in time, as in Landau & Fries (2012) and
Fiebelkorn et al. (2013).
"""
import os
import yaml
import numpy as np
import statsmodels.api as sm
from statsmodels.stats.multitest import multipletests
from .utils import avg_repeated_timepoints, dft
# Load the details of the behavioral studies from the YAML file that sits
# one directory above this package
_pathname = os.path.dirname(os.path.abspath(__file__))
_behav_fname = os.path.join(_pathname, '../behav_details.yaml')
# NOTE(review): the file handle from open() is never closed explicitly
behav_details = yaml.safe_load(open(_behav_fname))
def landau(x, t, fs, k_perm):
    """
    Analyze the data as in Landau & Fries (2012)

    Parameters
    ----------
    x : nd.array
        Array of Hit (1) or Miss (0) for each trial
    t : nd.array
        Time-stamp (SOA) for each trial
    fs : float
        Sampling rate used when computing the spectrum
    k_perm : int
        Number of permutations for the time-shuffled randomization test

    Returns
    -------
    res : dict
        The results of the randomization test as returned by
        `time_shuffled_perm`, plus these items:
        t : np.ndarray
            The time-stamps of the individual trials
        t_agg : np.ndarray
            The time-steps for the aggregated accuracy time-series
        x_agg : np.ndarray
            The aggregated accuracy time-series
        p_corr : np.ndarray
            P-values corrected for multiple comparisons using Bonforroni
            correction
    """
    def landau_spectrum_trialwise(x_perm):
        """ Helper to compute spectrum on shuffled data
        """
        # Average trials that share a time-stamp, then take the spectrum
        _, x_avg = avg_repeated_timepoints(t, x_perm)
        f, y = landau_spectrum(x_avg, fs)
        return f, y
    # Compute the results
    res = time_shuffled_perm(landau_spectrum_trialwise, x, k_perm)
    res['t'] = t
    res['t_agg'], res['x_agg'] = avg_repeated_timepoints(t, x)
    # Correct for multiple comparisons across frequencies
    _, p_corr, _, _ = multipletests(res['p'], method='bonferroni')
    res['p_corr'] = p_corr
    return res
def landau_spectrum(x, fs, detrend_ord=1):
    """
    Get the amplitude spectrum of behavioral data as in Landau & Fries (2012).

    The data are polynomially detrended (linear by default, per A.L.,
    2021-06-14), tapered with a Hann window, and padded to the NFFT given
    in behav_details to match the frequency resolution of the published
    plots.

    Parameters
    ----------
    x : np.ndarray
        The data time-series
    fs : float
        Sampling rate of the time-series
    detrend_ord : int
        Order of the detrending polynomial (1 = linear)

    Returns
    -------
    f : np.ndarray
        The frequencies of the amplitude spectrum
    y : np.ndarray
        The amplitude spectrum
    """
    cfg = behav_details['landau']
    # Detrend, then taper before the DFT
    detrended = sm.tsa.tsatools.detrend(x, order=detrend_ord)
    tapered = window(detrended, np.hanning(len(detrended)))
    return dft(tapered, fs, cfg['nfft'])
def fiebelkorn(x, t, k_perm):
    """
    Search for statistically significant behavioral oscillations as in
    Fiebelkorn et al. (2013)

    Parameters
    ----------
    x : np.ndarray
        A sequence of accuracy (Hit: 1, Miss: 0) for each trial
    t : np.ndarray
        The time-stamps for each trial
    k_perm : int
        The number of times to randomly shuffle the data when computing the
        permuted surrogate distribution

    Returns
    -------
    res : dict
        The results as given by `time_shuffled_perm`, plus these items:
        t : np.ndarray
            The original time-stamps of the raw data
        p_corr : np.ndarray
            P-values for each frequency, corrected for multiple comparisons
            using FDR
    """
    # Compute the results (t stays fixed; only the accuracies are shuffled)
    res = time_shuffled_perm(lambda xx: fiebelkorn_spectrum(xx, t), x, k_perm)
    res['t'] = t
    # Correct for multiple comparisons across frequencies
    _, p_corr, _, _ = multipletests(res['p'], method='fdr_bh')
    res['p_corr'] = p_corr
    return res
def fiebelkorn_binning(x_trial, t_trial):
    """
    Given accuracy and time-points, find the time-smoothed average accuracy
    over overlapping time bins.

    Parameters
    ----------
    x_trial : np.ndarray
        Accuracy (Hit: 1, Miss: 0) of each trial
    t_trial : np.ndarray
        The time-stamp of each trial

    Returns
    -------
    x_bin : np.ndarray
        The average accuracy within each time bin
    t_bin : np.ndarray
        The centers of each time bin
    """
    cfg = behav_details['fiebelkorn']
    # Bin centers; the epsilon keeps the endpoint inside the range
    t_bin = np.arange(cfg['t_start'],
                      cfg['t_end'] + 1e-10,
                      cfg['bin_step'])
    half_width = cfg['bin_width'] / 2
    # Mean accuracy of the trials falling inside each (inclusive) bin
    x_bin = np.array([
        np.mean(x_trial[(center - half_width <= t_trial)
                        & (t_trial <= center + half_width)])
        for center in t_bin])
    return x_bin, t_bin
def fiebelkorn_spectrum(x, t):
    """
    Compute the amplitude spectrum of accuracy data as in Fiebelkorn et al.
    (2013): bin, detrend (2nd order), Hann-window, DFT, then keep only the
    frequencies reported in the paper.

    Parameters
    ----------
    x : np.ndarray
        The data for each trial
    t : np.ndarray
        The time-stamp for each trial

    Returns
    -------
    f : np.ndarray
        The frequencies of the resulting spectrum
    y : np.ndarray
        The amplitude spectrum
    """
    cfg = behav_details['fiebelkorn']
    # Moving average of accuracy over overlapping time bins
    x_bin, _ = fiebelkorn_binning(x, t)
    # Remove slow trends, then taper before the DFT
    detrended = sm.tsa.tsatools.detrend(x_bin, order=2)
    tapered = window(detrended, np.hanning(len(detrended)))
    f, y = dft(tapered, 1 / cfg['bin_step'], cfg['nfft'])
    # Only keep frequencies that were reported in the paper
    keep = f <= cfg['f_max']
    return f[keep], y[keep]
def time_shuffled_perm(analysis_fnc, x, k_perm):
    """
    Run a permutation test by shuffling the time-stamps of individual trials.

    Parameters
    ----------
    analysis_fnc : function
        The function that will be used to generate the spectrum
    x : np.ndarray
        The data time-series
    k_perm : int
        How many permutations to run

    Returns
    -------
    res : dict
        Dictionary of the results of the randomization analysis
        x : np.ndarray
            The raw data
        x_perm : np.ndarray
            The shuffled data
        f : np.ndarray
            The frequencies of the resulting spectrum
        y_emp : np.ndarray
            The spectrum of the empirical (unshuffled) data
        y_avg : np.ndarray
            The spectra of the shuffled permutations
        y_cis : np.ndarray
            Confidence intervals for the spectra, at the 2.5th, 95th, and
            97.5th percentile
        p : np.ndarray
            P-values (uncorrected for multiple comparisons) for each frequency
    """
    # Compute the empirical statistics
    f, y_emp = analysis_fnc(x)
    # Run a bootstrapped permutation test.
    # Create a surrogate distribution by randomly shuffling resps in time.
    x_perm = []
    y_perm = []
    # Shuffle a copy so the caller's array is left untouched
    x_shuff = x.copy()
    for k in range(k_perm):
        np.random.shuffle(x_shuff)
        _, y_perm_k = analysis_fnc(x_shuff)
        y_perm.append(y_perm_k)
        if k < 10: # Keep a few permutations for illustration
            x_perm.append(x_shuff.copy())
    # Find statistically significant oscillations
    # Sometimes we get p=0 if no perms are larger than emp. Note that in this
    # case, a Bonferroni correction doesn't have any effect on the p-values.
    p = np.mean(np.vstack([y_perm, y_emp]) > y_emp, axis=0)
    # Get summary of simulated spectra
    # NOTE(review): axis=1 averages across frequencies within each
    # permutation; if a per-frequency summary across the k_perm permutations
    # was intended (to compare against y_emp), axis=0 would be it -- confirm
    # against the plotting code.
    y_avg = np.mean(y_perm, 1)
    y_cis = np.percentile(y_perm, [2.5, 95, 97.5], 1)
    # Bundle the results together
    res = {}
    res['x'] = x
    res['x_perm'] = np.array(x_perm)
    res['f'] = f
    res['y_emp'] = y_emp
    res['y_perm'] = np.array(y_perm)
    res['y_avg'] = y_avg
    res['y_cis'] = y_cis
    res['p'] = p
    return res
def window(x, win):
    """ Apply a taper to a segment of data

    Works for 1-D data, or for 2-D data whose first axis matches the
    window length (via the double transpose).

    Parameters
    ----------
    x : np.ndarray
        The data
    win : np.ndarray
        The window

    Returns
    -------
    np.ndarray
        The windowed data
    """
    return (x.T * win).T
|
{"hexsha": "32eaa0a294af2308ff208fed9c050fd370b31fec", "size": 8526, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis_methods/shuff_time.py", "max_stars_repo_name": "gbrookshire/simulated_rhythmic_sampling", "max_stars_repo_head_hexsha": "5c9ed507847a75dbe38d10d78b54441ae83f5831", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis_methods/shuff_time.py", "max_issues_repo_name": "gbrookshire/simulated_rhythmic_sampling", "max_issues_repo_head_hexsha": "5c9ed507847a75dbe38d10d78b54441ae83f5831", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis_methods/shuff_time.py", "max_forks_repo_name": "gbrookshire/simulated_rhythmic_sampling", "max_forks_repo_head_hexsha": "5c9ed507847a75dbe38d10d78b54441ae83f5831", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0, "max_line_length": 79, "alphanum_fraction": 0.6232699977, "include": true, "reason": "import numpy,import statsmodels,from statsmodels", "num_tokens": 2206}
|
import numpy as np
import random
from sklearn.metrics import mean_squared_error
from sklearn.neural_network import MLPClassifier
def one_hot_generator(length):
    """Return a (length, 8) array of random one-hot rows, one per sample."""
    labels = [random.randint(0, 7) for _ in range(length)]
    # Index the identity matrix to turn each label into a one-hot row
    return np.eye(8)[labels]
# Generate random one-hot training and test sets
n_train = 150
train = one_hot_generator(n_train)
test = one_hot_generator(30)
# --------------------------------------Multi-layer perceptron analysis----------------------------
# Autoencoder-style training: an MLP with one hidden layer of 2 units is
# trained to reproduce its own 8-dimensional one-hot input.
iteration = 15000
mlp = MLPClassifier(hidden_layer_sizes=2, max_iter=iteration)
mlp_result = mlp.fit(train, train)
# Prediction on the training and held-out random samples
train_out = mlp.predict(train)
test_out = mlp.predict(test)
print("train:\n", train[n_train-10:])
print("train out:\n", train_out[n_train-10:])
print("test:\n", test[:8])
print("test out:\n", test_out[:8])
# Reconstruction error on the unseen one-hot vectors
error = mean_squared_error(test, test_out)
print("mean_squared_error:", error)
print("n_iter_:", mlp.n_iter_)
# Inspect the learned weights (coefs_[0] is input->hidden)
print("weight:\n", mlp.coefs_)
print("weight[0]:\n", mlp.coefs_[0])
print("weights[0][0]\n", mlp.coefs_[0][0])
print("sum of weights:", sum(mlp.coefs_[0][0]))
|
{"hexsha": "52fe2e5fc023e7af3c88139db0ec3389ea064a1a", "size": 1197, "ext": "py", "lang": "Python", "max_stars_repo_path": "MachineLearning/hw3_q1_sklearn/once.py", "max_stars_repo_name": "SeanSyue/SklearnReferences", "max_stars_repo_head_hexsha": "a2770a7108947877e772f3525bc915c5de4114bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MachineLearning/hw3_q1_sklearn/once.py", "max_issues_repo_name": "SeanSyue/SklearnReferences", "max_issues_repo_head_hexsha": "a2770a7108947877e772f3525bc915c5de4114bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MachineLearning/hw3_q1_sklearn/once.py", "max_forks_repo_name": "SeanSyue/SklearnReferences", "max_forks_repo_head_hexsha": "a2770a7108947877e772f3525bc915c5de4114bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2045454545, "max_line_length": 100, "alphanum_fraction": 0.6482873851, "include": true, "reason": "import numpy", "num_tokens": 314}
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import tempfile
import unittest
import io
import biom
import skbio
import qiime2
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
from q2_diversity import (alpha, alpha_phylogenetic, alpha_correlation,
alpha_group_significance)
class AlphaTests(unittest.TestCase):
    """Tests for the ``alpha`` and ``alpha_phylogenetic`` actions."""

    @staticmethod
    def _feature_table():
        # 2 features x 3 samples; shared fixture for every test here.
        return biom.Table(np.array([[0, 1, 3], [1, 1, 2]]),
                          ['O1', 'O2'],
                          ['S1', 'S2', 'S3'])

    @staticmethod
    def _tree(newick):
        # Parse a newick string into an skbio TreeNode.
        return skbio.TreeNode.read(io.StringIO(newick))

    def test_alpha(self):
        actual = alpha(table=self._feature_table(), metric='observed_otus')
        # expected computed by hand
        expected = pd.Series({'S1': 1, 'S2': 2, 'S3': 2},
                             name='observed_otus')
        pdt.assert_series_equal(actual, expected)

    def test_alpha_phylo_metric(self):
        # A phylogenetic metric must be rejected by the non-phylo action.
        with self.assertRaises(ValueError):
            alpha(table=self._feature_table(), metric='faith_pd')

    def test_alpha_unknown_metric(self):
        with self.assertRaises(ValueError):
            alpha(table=self._feature_table(), metric='not-a-metric')

    def test_alpha_empty_table(self):
        empty = biom.Table(np.array([]), [], [])
        with self.assertRaisesRegex(ValueError, "empty"):
            alpha(table=empty, metric='observed_otus')

    def test_alpha_phylogenetic(self):
        tree = self._tree('((O1:0.25, O2:0.50):0.25, O3:0.75)root;')
        actual = alpha_phylogenetic(table=self._feature_table(),
                                    phylogeny=tree, metric='faith_pd')
        # expected computed with skbio.diversity.alpha_diversity
        expected = pd.Series({'S1': 0.75, 'S2': 1.0, 'S3': 1.0},
                             name='faith_pd')
        pdt.assert_series_equal(actual, expected)

    def test_alpha_phylogenetic_non_phylo_metric(self):
        tree = self._tree('((O1:0.25, O2:0.50):0.25, O3:0.75)root;')
        with self.assertRaises(ValueError):
            alpha_phylogenetic(table=self._feature_table(), phylogeny=tree,
                               metric='observed_otus')

    def test_alpha_phylogenetic_unknown_metric(self):
        tree = self._tree('((O1:0.25, O2:0.50):0.25, O3:0.75)root;')
        with self.assertRaises(ValueError):
            alpha_phylogenetic(table=self._feature_table(), phylogeny=tree,
                               metric='not-a-metric')

    def test_alpha_phylogenetic_skbio_error_rewriting(self):
        # Tree lacks feature O2, which is present in the table.
        tree = self._tree('((O1:0.25):0.25, O3:0.75)root;')
        # Verify through regex that there is a ``feature_ids`` substring
        # followed by a ``phylogeny``
        with self.assertRaisesRegex(skbio.tree.MissingNodeError,
                                    'feature_ids.*phylogeny'):
            alpha_phylogenetic(table=self._feature_table(), phylogeny=tree,
                               metric='faith_pd')

    def test_alpha_phylogenetic_empty_table(self):
        empty = biom.Table(np.array([]), [], [])
        tree = self._tree('((O1:0.25):0.25, O3:0.75)root;')
        with self.assertRaisesRegex(ValueError, "empty"):
            alpha_phylogenetic(table=empty, phylogeny=tree,
                               metric='faith_pd')
class AlphaCorrelationTests(unittest.TestCase):
    """Tests for the ``alpha_correlation`` visualizer.

    Each test builds a small alpha-diversity Series plus sample metadata,
    runs the visualizer into a temp directory, and asserts on the files
    it writes (index.html and the per-category .jsonp payload).
    """

    def test_spearman(self):
        """Default method (Spearman) writes the expected outputs."""
        alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
                              index=['sample1', 'sample2', 'sample3'])
        md = qiime2.Metadata(
            pd.DataFrame({'value': ['1.0', '2.0', '3.0']},
                         index=['sample1', 'sample2', 'sample3']))
        with tempfile.TemporaryDirectory() as output_dir:
            alpha_correlation(output_dir, alpha_div, md)
            index_fp = os.path.join(output_dir, 'index.html')
            self.assertTrue(os.path.exists(index_fp))
            jsonp_fp = os.path.join(output_dir, 'category-value.jsonp')
            self.assertTrue(os.path.exists(jsonp_fp))
            self.assertTrue('Spearman' in open(jsonp_fp).read())
            self.assertTrue('"sampleSize": 3' in open(jsonp_fp).read())
            self.assertTrue('"data":' in open(jsonp_fp).read())
            self.assertFalse('filtered' in open(jsonp_fp).read())

    def test_pearson(self):
        """method='pearson' is honored and reflected in the payload."""
        alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
                              index=['sample1', 'sample2', 'sample3'])
        md = qiime2.Metadata(
            pd.DataFrame({'value': ['1.0', '2.0', '3.0']},
                         index=['sample1', 'sample2', 'sample3']))
        with tempfile.TemporaryDirectory() as output_dir:
            alpha_correlation(output_dir, alpha_div, md, method='pearson')
            index_fp = os.path.join(output_dir, 'index.html')
            self.assertTrue(os.path.exists(index_fp))
            jsonp_fp = os.path.join(output_dir, 'category-value.jsonp')
            self.assertTrue(os.path.exists(jsonp_fp))
            self.assertTrue('Pearson' in open(jsonp_fp).read())
            self.assertTrue('"sampleSize": 3' in open(jsonp_fp).read())
            self.assertTrue('"data":' in open(jsonp_fp).read())
            self.assertFalse('filtered' in open(jsonp_fp).read())

    def test_bad_method(self):
        """An unrecognized correlation method raises ValueError."""
        alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
                              index=['sample1', 'sample2', 'sample3'])
        # FIX: previously built qiime2.MetadataCategory(pd.Series(...)),
        # inconsistent with every other test in this class; MetadataCategory
        # is gone from the modern QIIME 2 API, so use Metadata instead.
        md = qiime2.Metadata(
            pd.DataFrame({'value': ['1.0', '2.0', '3.0']},
                         index=['sample1', 'sample2', 'sample3']))
        with tempfile.TemporaryDirectory() as output_dir:
            with self.assertRaises(ValueError):
                alpha_correlation(output_dir, alpha_div, md, method='bad!')

    def test_bad_metadata(self):
        """Non-numeric metadata values raise ValueError."""
        alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
                              index=['sample1', 'sample2', 'sample3'])
        md = qiime2.Metadata(
            pd.DataFrame({'value': ['a', 'b', 'c']},
                         index=['sample1', 'sample2', 'sample3']))
        with tempfile.TemporaryDirectory() as output_dir:
            with self.assertRaises(ValueError):
                alpha_correlation(output_dir, alpha_div, md)

    def test_nan_metadata(self):
        """Samples with missing metadata are filtered and reported."""
        alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
                              index=['sample1', 'sample2', 'sample3'])
        md = qiime2.Metadata(
            pd.DataFrame({'value': ['1.0', '2.0', '']},
                         index=['sample1', 'sample2', 'sample3']))
        with tempfile.TemporaryDirectory() as output_dir:
            alpha_correlation(output_dir, alpha_div, md)
            index_fp = os.path.join(output_dir, 'index.html')
            self.assertTrue(os.path.exists(index_fp))
            jsonp_fp = os.path.join(output_dir, 'category-value.jsonp')
            self.assertTrue(os.path.exists(jsonp_fp))
            self.assertTrue('"filtered": 2' in open(jsonp_fp).read())
            self.assertTrue('"initial": 3' in open(jsonp_fp).read())

    def test_extra_metadata(self):
        """Metadata for samples absent from alpha_div is ignored."""
        alpha_div = pd.Series([2.0, 4.0, 6.0], name='alpha-div',
                              index=['sample1', 'sample2', 'sample3'])
        md = qiime2.Metadata(
            pd.DataFrame({'value': ['1.0', '2.0', '3.0', '4.0']},
                         index=['sample1', 'sample2', 'sample3', 'sample4']))
        with tempfile.TemporaryDirectory() as output_dir:
            alpha_correlation(output_dir, alpha_div, md)
            index_fp = os.path.join(output_dir, 'index.html')
            self.assertTrue(os.path.exists(index_fp))
            jsonp_fp = os.path.join(output_dir, 'category-value.jsonp')
            self.assertTrue(os.path.exists(jsonp_fp))
            self.assertTrue('"sampleSize": 3' in open(jsonp_fp).read())

    def test_extra_alpha_div(self):
        """Alpha values for samples absent from metadata are ignored."""
        alpha_div = pd.Series([2.0, 4.0, 6.0, 8.0], name='alpha-div',
                              index=['sample1', 'sample2', 'sample3',
                                     'sample4'])
        md = qiime2.Metadata(
            pd.DataFrame({'value': ['1.0', '2.0', '3.0']},
                         index=['sample1', 'sample2', 'sample3']))
        with tempfile.TemporaryDirectory() as output_dir:
            alpha_correlation(output_dir, alpha_div, md)
            index_fp = os.path.join(output_dir, 'index.html')
            self.assertTrue(os.path.exists(index_fp))
            jsonp_fp = os.path.join(output_dir, 'category-value.jsonp')
            self.assertTrue(os.path.exists(jsonp_fp))
            self.assertTrue('"sampleSize": 3' in open(jsonp_fp).read())
class AlphaGroupSignificanceTests(unittest.TestCase):
    """Tests for the ``alpha_group_significance`` visualizer."""

    @staticmethod
    def _alpha_div(values=(2.0, 4.0, 6.0),
                   ids=('sample1', 'sample2', 'sample3')):
        # Alpha-diversity vector shared by most of these tests.
        return pd.Series(list(values), name='alpha-div', index=list(ids))

    @staticmethod
    def _metadata(columns, ids=('sample1', 'sample2', 'sample3')):
        # Wrap a column dict into a qiime2.Metadata object.
        return qiime2.Metadata(pd.DataFrame(columns, index=list(ids)))

    @staticmethod
    def _read(fp):
        # Slurp a generated file for content assertions.
        with open(fp) as fh:
            return fh.read()

    def test_alpha_group_significance(self):
        md = self._metadata({'a or b': ['a', 'b', 'b']})
        with tempfile.TemporaryDirectory() as output_dir:
            alpha_group_significance(output_dir, self._alpha_div(), md)
            index_fp = os.path.join(output_dir, 'index.html')
            self.assertTrue(os.path.exists(index_fp))
            self.assertTrue(os.path.exists(
                os.path.join(output_dir, 'category-a%20or%20b.jsonp')))
            page = self._read(index_fp)
            self.assertTrue('Kruskal-Wallis (all groups)' in page)
            self.assertTrue('Kruskal-Wallis (pairwise)' in page)

    def test_alpha_group_significance_some_numeric(self):
        md = self._metadata({'a or b': ['a', 'b', 'b'],
                             'bad': ['1.0', '2.0', '3.0']})
        with tempfile.TemporaryDirectory() as output_dir:
            alpha_group_significance(output_dir, self._alpha_div(), md)
            index_fp = os.path.join(output_dir, 'index.html')
            self.assertTrue(os.path.exists(index_fp))
            self.assertTrue(os.path.exists(
                os.path.join(output_dir, 'category-a%20or%20b.jsonp')))
            self.assertFalse(os.path.exists(
                os.path.join(output_dir, 'bad-value.jsonp')))
            page = self._read(index_fp)
            self.assertTrue('not categorical:' in page)
            self.assertTrue('<strong>bad' in page)

    def test_alpha_group_significance_one_group_all_unique_values(self):
        md = self._metadata({'a or b': ['a', 'b', 'b'],
                             'bad': ['x', 'y', 'z']})
        with tempfile.TemporaryDirectory() as output_dir:
            alpha_group_significance(output_dir, self._alpha_div(), md)
            index_fp = os.path.join(output_dir, 'index.html')
            self.assertTrue(os.path.exists(index_fp))
            self.assertTrue(os.path.exists(
                os.path.join(output_dir, 'category-a%20or%20b.jsonp')))
            self.assertFalse(os.path.exists(
                os.path.join(output_dir, 'category-bad.jsonp')))
            page = self._read(index_fp)
            self.assertTrue('number of samples' in page)
            self.assertTrue('<strong>bad' in page)

    def test_alpha_group_significance_one_group_single_value(self):
        md = self._metadata({'a or b': ['a', 'b', 'b'],
                             'bad': ['x', 'x', 'x']})
        with tempfile.TemporaryDirectory() as output_dir:
            alpha_group_significance(output_dir, self._alpha_div(), md)
            index_fp = os.path.join(output_dir, 'index.html')
            self.assertTrue(os.path.exists(index_fp))
            self.assertTrue(os.path.exists(
                os.path.join(output_dir, 'category-a%20or%20b.jsonp')))
            self.assertFalse(os.path.exists(
                os.path.join(output_dir, 'category-bad.jsonp')))
            page = self._read(index_fp)
            self.assertTrue('only a single' in page)
            self.assertTrue('<strong>bad' in page)

    def test_alpha_group_significance_KW_value_error(self):
        four = ('sample1', 'sample2', 'sample3', 'sample4')
        alpha_div = self._alpha_div((2.0, 2.0, 3.0, 2.0), four)
        md = self._metadata({'x': ['a', 'b', 'b', 'c']}, four)
        with tempfile.TemporaryDirectory() as output_dir:
            alpha_group_significance(output_dir, alpha_div, md)
            index_fp = os.path.join(output_dir, 'index.html')
            self.assertTrue(os.path.exists(index_fp))
            self.assertTrue(os.path.exists(
                os.path.join(output_dir, 'category-x.jsonp')))
            page = self._read(index_fp)
            self.assertTrue('pairwise group comparisons have been omitted'
                            in page)
            self.assertTrue('x:c (n=1) vs x:a (n=1)' in page)

    def test_alpha_group_significance_numeric_only(self):
        md = self._metadata({'value': ['1.0', '2.0', '3.0']})
        with tempfile.TemporaryDirectory() as output_dir:
            with self.assertRaisesRegex(ValueError, 'Only numeric'):
                alpha_group_significance(output_dir, self._alpha_div(), md)

    def test_alpha_group_significance_single_quote(self):
        md = self._metadata({'a or b': ['a', "b'", 'b']})
        with tempfile.TemporaryDirectory() as output_dir:
            alpha_group_significance(output_dir, self._alpha_div(), md)
            index_fp = os.path.join(output_dir, 'index.html')
            self.assertTrue("\'" in self._read(index_fp))
|
{"hexsha": "3cb16290451e0fd7452feb771084663d9cd501ba", "size": 16119, "ext": "py", "lang": "Python", "max_stars_repo_path": "q2_diversity/tests/test_alpha.py", "max_stars_repo_name": "gregcaporaso/q2-diversity", "max_stars_repo_head_hexsha": "3b03b4c1e47b2893668f14c91612507e4864c34e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "q2_diversity/tests/test_alpha.py", "max_issues_repo_name": "gregcaporaso/q2-diversity", "max_issues_repo_head_hexsha": "3b03b4c1e47b2893668f14c91612507e4864c34e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "q2_diversity/tests/test_alpha.py", "max_forks_repo_name": "gregcaporaso/q2-diversity", "max_forks_repo_head_hexsha": "3b03b4c1e47b2893668f14c91612507e4864c34e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.4088235294, "max_line_length": 79, "alphanum_fraction": 0.5298715801, "include": true, "reason": "import numpy", "num_tokens": 3803}
|
import json
import os
import pickle
from pdb import set_trace as stop

import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader

from dataloaders.data_utils import get_unk_mask_indices, image_loader
class VGDataset(torch.utils.data.Dataset):
    """Visual Genome multi-label dataset (500 labels).

    Each item is a dict with the transformed image, a 500-dim binary label
    vector, a partial-label mask (positions selected by
    ``get_unk_mask_indices`` are set to -1), and the image's file name.
    """

    def __init__(self, img_dir, img_list, image_transform, label_path,
                 known_labels=40, testing=False):
        """
        :param img_dir: Directory containing the image files.
        :param img_list: Text file listing one image file name per line.
        :param image_transform: Transform applied by ``image_loader``.
        :param label_path: JSON file mapping image name -> list of label ids.
        :param known_labels: Count passed to ``get_unk_mask_indices``;
            presumably the number of labels revealed to the model — confirm
            in dataloaders.data_utils.
        :param testing: Evaluation-mode flag forwarded to
            ``get_unk_mask_indices``.
        """
        with open(img_list, 'r') as f:
            self.img_names = f.readlines()
        with open(label_path, 'r') as f:
            self.labels = json.load(f)
        self.image_transform = image_transform
        self.img_dir = img_dir
        self.num_labels = 500
        self.known_labels = known_labels
        self.testing = testing
        self.epoch = 1  # kept for interface compatibility; not used here

    def __getitem__(self, index):
        # rstrip('\n') instead of slicing off the last character: robust to
        # a final list line that lacks a trailing newline (the old [:-1]
        # would silently chop a real character off the file name).
        name = self.img_names[index].rstrip('\n')
        img_path = os.path.join(self.img_dir, name)
        image = image_loader(img_path, self.image_transform)

        # Dense binary target vector from the sparse label-id list.
        label = np.zeros(self.num_labels).astype(np.float32)
        label[self.labels[name]] = 1.0
        label = torch.Tensor(label)

        # Copy the labels, then overwrite the positions chosen by the
        # project helper with -1 to mark them unknown.
        unk_mask_indices = get_unk_mask_indices(
            image, self.testing, self.num_labels, self.known_labels)
        mask = label.clone()
        mask.scatter_(0, torch.Tensor(unk_mask_indices).long(), -1)

        return {
            'image': image,
            'labels': label,
            'mask': mask,
            'imageIDs': name,
        }

    def __len__(self):
        return len(self.img_names)
|
{"hexsha": "b5d38d32aa3118d60d96b05b3789be42697bca7e", "size": 1489, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataloaders/vg500_dataset.py", "max_stars_repo_name": "sorrowyn/C-Tran", "max_stars_repo_head_hexsha": "236d785952c59210e6812b4ad5ee12bab585ce4c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 104, "max_stars_repo_stars_event_min_datetime": "2021-01-25T17:27:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T10:17:50.000Z", "max_issues_repo_path": "dataloaders/vg500_dataset.py", "max_issues_repo_name": "mensudza/C-Tran", "max_issues_repo_head_hexsha": "4895ccb0e675ae2dcd2b619a9e47f30707062668", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-02-26T08:11:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-17T16:24:52.000Z", "max_forks_repo_path": "dataloaders/vg500_dataset.py", "max_forks_repo_name": "mensudza/C-Tran", "max_forks_repo_head_hexsha": "4895ccb0e675ae2dcd2b619a9e47f30707062668", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2021-02-21T14:01:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T08:41:27.000Z", "avg_line_length": 31.0208333333, "max_line_length": 101, "alphanum_fraction": 0.6507723304, "include": true, "reason": "import numpy", "num_tokens": 346}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.